/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034
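
/*
 * Each M2P channel has two buffer register pairs (M2P_MAXCNT0/M2P_BASE0
 * and M2P_MAXCNT1/M2P_BASE1) which the driver programs in a ping-pong
 * fashion; see m2p_fill_desc() below.
 */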

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL (an illustrative channel request sketch follows the struct
 * definition below).
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
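
/*
 * Illustrative only: a minimal sketch of how a client is expected to
 * request a slave channel from this driver. The filter function name is
 * hypothetical; it is assumed to store the &struct ep93xx_dma_data
 * pointer in chan->private before accepting the channel:
 *
 *	static struct ep93xx_dma_data data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "ssp-tx",
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, &data);
 */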

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	if (list_empty(&edmac->active))
		return NULL;

	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

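/*
 * Returns the channel state field (bits [5:4] of the M2P status register);
 * the value is one of the M2P_STATE_* constants above.
 */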
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

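/*
 * Shuts the channel down: the STALL/NFB interrupts are masked first, then
 * we wait for the channel to leave the ON/NEXT states, clear the control
 * register and finally wait for a possible stall to drain.
 */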
static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

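/*
 * Programs the next active descriptor into whichever of the two buffer
 * register pairs (MAXCNT/BASE 0 or 1) is free and toggles @edmac->buffer
 * so that the other pair is used next time.
 */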
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

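/*
 * Fills the first buffer and enables the stall interrupt; if another
 * descriptor is available, the second buffer is filled as well and the
 * next-frame-buffer (NFB) interrupt is enabled too.
 */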
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get a DONE interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

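/*
 * M2M counterpart of m2p_fill_desc(): programs the next active descriptor
 * into whichever of the two buffer register sets (SAR/DAR/BCR 0 or 1) is
 * free and toggles @edmac->buffer.
 */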
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive a DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts the DONE interrupt
 * while the DMA channel is still running (channel Buffer FSM in DMA_BUF_ON
 * state, and channel Control FSM in DMA_MEM_RD state, observed at least in
 * IDE-DMA operation). In effect, disabling the channel when only the DONE
 * bit is set could stop a currently running DMA transfer. To avoid this, we
 * use the Buffer FSM and Control FSM to check the current state of the DMA
 * channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

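/**
 * ep93xx_dma_desc_get - grab a free descriptor from the channel free list
 * @edmac: channel
 *
 * Returns the first acked descriptor from @edmac->free_list reinitialized
 * for reuse, or %NULL if none is available.
 */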
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

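/**
 * ep93xx_dma_desc_put - return a descriptor to the channel free list
 * @edmac: channel
 * @desc: descriptor to be returned (may be %NULL)
 *
 * Puts @desc and all descriptors chained on its @desc->tx_list back to
 * @edmac->free_list.
 */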
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

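/*
 * Channel tasklet: completes finished descriptors, advances to the next
 * pending transaction and invokes the client callback outside of the
 * hard IRQ context.
 */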
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback = NULL;
	void *callback_param = NULL;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

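/*
 * Per-channel interrupt handler: delegates to the M2P/M2M specific
 * hw_interrupt() method and schedules the channel tasklet when a
 * transaction (or a cyclic period) has completed.
 */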
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns number of allocated descriptors for the channel. Negative errno
 * is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

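/**
 * ep93xx_dma_slave_config - configure the runtime slave parameters
 * @edmac: channel
 * @config: configuration requested by the client
 *
 * Validates the direction and bus width given in @config and stores the
 * resulting address and control bits in @edmac->runtime_addr and
 * @edmac->runtime_ctrl for use when transfers are prepared and submitted.
 * Only supported on M2M channels.
 */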
static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

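/* The driver_data value selects the engine flavor: 0 = M2P, 1 = M2M */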
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");