/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR		0x0200
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

struct mmp_pdma_device {
	int				dma_channels;
	void __iomem			*base;
	struct device			*dev;
	struct dma_device		device;
	struct mmp_pdma_phy		*phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)					\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)					\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)					\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)					\
	container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);
}

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* look up a free phy channel, in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/**
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;

	/* still running, the irq handler will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor provides its own tx_submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request the irq only when the channel is requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;
			}

			if (!first)
				first = new;
			else
				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

		if (!first)
			first = new;
		else
			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	new->desc.ddadr = first->async_tx.phys;
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}
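
/*
 * Illustrative client-side sketch (not part of this driver): a peripheral
 * driver would normally set up a cyclic transfer through the generic
 * dmaengine helpers, which end up in the prep_dma_cyclic hook above.
 * period_done_cb, drv_data and buf_phys are hypothetical placeholders.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		tx->callback = period_done_cb;
 *		tx->callback_param = drv_data;
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */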

static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
			    unsigned long arg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	unsigned long flags;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
		chan->idle = true;
		break;
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;
		}

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

		if (maxburst == 8)
			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/* FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		if (cfg->slave_id)
			chan->drcmr = cfg->slave_id;
		break;
	default:
		return -ENOSYS;
	}

	return 0;
}
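
/*
 * Illustrative client-side sketch (not part of this driver): a slave client
 * typically configures the channel through the generic dmaengine API before
 * preparing transfers; the calls below end up in mmp_pdma_control() above
 * with DMA_SLAVE_CONFIG and DMA_TERMINATE_ALL respectively. dev_fifo_phys
 * is a hypothetical device FIFO address.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 32,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	...
 *	dmaengine_terminate_all(chan);
 */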

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	u32 curr, residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = readl(chan->phy->base + DTADR(chan->phy->idx));
	else
		curr = readl(chan->phy->base + DSADR(chan->phy->idx));

	list_for_each_entry(sw, &chan->chain_running, node) {
		u32 start, end, len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = sw->desc.dtadr;
		else
			start = sw->desc.dsadr;

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we find the descriptor whose
		 * boundaries contain the curr pointer. All descriptors that
		 * occur in the list _after_ that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/**
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Run callbacks
 * Start the pending list
 */
static void dma_do_tasklet(unsigned long data)
{
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;

	if (chan->cyclic_first) {
		dma_async_tx_callback cb = NULL;
		void *cb_data = NULL;

		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		cb = desc->async_tx.callback;
		cb_data = desc->async_tx.callback_param;
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		if (cb)
			cb(cb_data);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;
			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		if (txd->callback)
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static int mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);
	return 0;
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
	if (of_id)
		of_property_read_u32(pdev->dev->of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default 32 channel */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver		= {
		.name	= "mmp-pdma",
		.owner  = THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table	= mmp_pdma_id_table,
	.probe		= mmp_pdma_probe,
	.remove		= mmp_pdma_remove,
};

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)
		return false;

	c->drcmr = *(unsigned int *)param;

	return true;
}
EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
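
/*
 * Illustrative usage sketch (not taken from a particular client driver):
 * clients that do not go through the device-tree xlate path can request a
 * channel by DRCMR request number through the exported filter, roughly as
 * below. The request line number 20 is a hypothetical example.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 20;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */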

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");