/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN	6
#define DMA_ALIGN	3
#define DMA_MAX_SIZE	0x1fff
#define DMA_CHUNK_SIZE	0x1000

#define DMA_DDAR	0x00
#define DMA_DCSR_S	0x04
#define DMA_DCSR_C	0x08
#define DMA_DCSR_R	0x0c
#define DMA_DBSA	0x10
#define DMA_DBTA	0x14
#define DMA_DBSB	0x18
#define DMA_DBTB	0x1c
#define DMA_SIZE	0x20

#define DCSR_RUN	(1 << 0)
#define DCSR_IE		(1 << 1)
#define DCSR_ERROR	(1 << 2)
#define DCSR_DONEA	(1 << 3)
#define DCSR_STRTA	(1 << 4)
#define DCSR_DONEB	(1 << 5)
#define DCSR_STRTB	(1 << 6)
#define DCSR_BIU	(1 << 7)

#define DDAR_RW		(1 << 0)	/* 0 = W, 1 = R */
#define DDAR_E		(1 << 1)	/* 0 = LE, 1 = BE */
#define DDAR_BS		(1 << 2)	/* 0 = BS4, 1 = BS8 */
#define DDAR_DW		(1 << 3)	/* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr	(0x0 << 4)
#define DDAR_Ser0UDCRc	(0x1 << 4)
#define DDAR_Ser1SDLCTr	(0x2 << 4)
#define DDAR_Ser1SDLCRc	(0x3 << 4)
#define DDAR_Ser1UARTTr	(0x4 << 4)
#define DDAR_Ser1UARTRc	(0x5 << 4)
#define DDAR_Ser2ICPTr	(0x6 << 4)
#define DDAR_Ser2ICPRc	(0x7 << 4)
#define DDAR_Ser3UARTTr	(0x8 << 4)
#define DDAR_Ser3UARTRc	(0x9 << 4)
#define DDAR_Ser4MCP0Tr	(0xa << 4)
#define DDAR_Ser4MCP0Rc	(0xb << 4)
#define DDAR_Ser4MCP1Tr	(0xc << 4)
#define DDAR_Ser4MCP1Rc	(0xd << 4)
#define DDAR_Ser4SSPTr	(0xe << 4)
#define DDAR_Ser4SSPRc	(0xf << 4)

struct sa11x0_dma_sg {
	u32			addr;
	u32			len;
};

struct sa11x0_dma_desc {
	struct virt_dma_desc	vd;

	u32			ddar;
	size_t			size;
	unsigned		period;
	bool			cyclic;

	unsigned		sglen;
	struct sa11x0_dma_sg	sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
	struct virt_dma_chan	vc;

	/* protected by c->vc.lock */
	struct sa11x0_dma_phy	*phy;
	enum dma_status		status;

	/* protected by d->lock */
	struct list_head	node;

	u32			ddar;
	const char		*name;
};

struct sa11x0_dma_phy {
	void __iomem		*base;
	struct sa11x0_dma_dev	*dev;
	unsigned		num;

	struct sa11x0_dma_chan	*vchan;

	/* Protected by c->vc.lock */
	unsigned		sg_load;
	struct sa11x0_dma_desc	*txd_load;
	unsigned		sg_done;
	struct sa11x0_dma_desc	*txd_done;
	u32			dbs[2];
	u32			dbt[2];
	u32			dcsr;
};

struct sa11x0_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	spinlock_t		lock;
	struct tasklet_struct	task;
	struct list_head	chan_pending;
	struct sa11x0_dma_phy	phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
	return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
	list_del(&txd->vd.node);
	p->txd_load = txd;
	p->sg_load = 0;

	dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
		p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

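/*
 * Load the next scatter-gather entry of the in-progress descriptor into
 * whichever of the two hardware buffers (A or B) is free.  The controller
 * double-buffers transfers, so this is called both when a descriptor is
 * first started and from the completion path to keep the hardware fed.
 */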
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_load;
	struct sa11x0_dma_sg *sg;
	void __iomem *base = p->base;
	unsigned dbsx, dbtx;
	u32 dcsr;

	if (!txd)
		return;

	dcsr = readl_relaxed(base + DMA_DCSR_R);

	/* Don't try to load the next transfer if both buffers are started */
	if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
		return;

	if (p->sg_load == txd->sglen) {
		if (!txd->cyclic) {
			struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

			/*
			 * We have reached the end of the current descriptor.
			 * Peek at the next descriptor, and if compatible with
			 * the current, start processing it.
			 */
			if (txn && txn->ddar == txd->ddar) {
				txd = txn;
				sa11x0_dma_start_desc(p, txn);
			} else {
				p->txd_load = NULL;
				return;
			}
		} else {
			/* Cyclic: reset back to beginning */
			p->sg_load = 0;
		}
	}

	sg = &txd->sg[p->sg_load++];

	/* Select buffer to load according to channel status */
	if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
	    ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
		dbsx = DMA_DBSA;
		dbtx = DMA_DBTA;
		dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
	} else {
		dbsx = DMA_DBSB;
		dbtx = DMA_DBTB;
		dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
	}

	writel_relaxed(sg->addr, base + dbsx);
	writel_relaxed(sg->len, base + dbtx);
	writel(dcsr, base + DMA_DCSR_S);

	dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
		p->num, dcsr,
		'A' + (dbsx == DMA_DBSB), sg->addr,
		'A' + (dbtx == DMA_DBTB), sg->len);
}

static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
	struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = p->txd_done;

	if (++p->sg_done == txd->sglen) {
		if (!txd->cyclic) {
			vchan_cookie_complete(&txd->vd);

			p->sg_done = 0;
			p->txd_done = p->txd_load;

			if (!p->txd_done)
				tasklet_schedule(&p->dev->task);
		} else {
			if ((p->sg_done % txd->period) == 0)
				vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset back to beginning */
			p->sg_done = 0;
		}
	}

	sa11x0_dma_start_sg(p, c);
}

static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
	struct sa11x0_dma_phy *p = dev_id;
	struct sa11x0_dma_dev *d = p->dev;
	struct sa11x0_dma_chan *c;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);
	if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
		return IRQ_NONE;

	/* Clear reported status bits */
	writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
		p->base + DMA_DCSR_C);

	dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

	if (dcsr & DCSR_ERROR) {
		dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
			p->num, dcsr,
			readl_relaxed(p->base + DMA_DDAR),
			readl_relaxed(p->base + DMA_DBSA),
			readl_relaxed(p->base + DMA_DBTA),
			readl_relaxed(p->base + DMA_DBSB),
			readl_relaxed(p->base + DMA_DBTB));
	}

	c = p->vchan;
	if (c) {
		unsigned long flags;

		spin_lock_irqsave(&c->vc.lock, flags);
		/*
		 * Now that we're holding the lock, check that the vchan
		 * really is associated with this pchan before touching the
		 * hardware.  This should always succeed, because we won't
		 * change p->vchan or c->phy while the channel is actively
		 * transferring.
		 */
		if (c->phy == p) {
			if (dcsr & DCSR_DONEA)
				sa11x0_dma_complete(p, c);
			if (dcsr & DCSR_DONEB)
				sa11x0_dma_complete(p, c);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	return IRQ_HANDLED;
}

static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
	struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

	/* If the issued list is empty, we have no further txds to process */
	if (txd) {
		struct sa11x0_dma_phy *p = c->phy;

		sa11x0_dma_start_desc(p, txd);
		p->txd_done = txd;
		p->sg_done = 0;

		/* The channel should not have any transfers started */
		WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
				      (DCSR_STRTA | DCSR_STRTB));

		/* Clear the run and start bits before changing DDAR */
		writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
			       p->base + DMA_DCSR_C);
		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		/* Try to start both buffers */
		sa11x0_dma_start_sg(p, c);
		sa11x0_dma_start_sg(p, c);
	}
}

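/*
 * Physical channel housekeeping: release physical channels whose virtual
 * channel has no more queued descriptors, then hand any free physical
 * channels to virtual channels waiting on the chan_pending list and start
 * their next descriptor.
 */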
static void sa11x0_dma_tasklet(unsigned long arg)
{
	struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
	struct sa11x0_dma_phy *p;
	struct sa11x0_dma_chan *c;
	unsigned pch, pch_alloc = 0;

	dev_dbg(d->slave.dev, "tasklet enter\n");

	list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && !p->txd_done) {
			sa11x0_dma_start_txd(c);
			if (!p->txd_done) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	spin_lock_irq(&d->lock);
	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct sa11x0_dma_chan, node);
			list_del_init(&c->node);

			pch_alloc |= 1 << pch;

			/* Mark this channel allocated */
			p->vchan = c;

			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;

			spin_lock_irq(&c->vc.lock);
			c->phy = p;

			sa11x0_dma_start_txd(c);
			spin_unlock_irq(&c->vc.lock);
		}
	}

	dev_dbg(d->slave.dev, "tasklet exit\n");
}


static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
}

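/*
 * Return the bus address the controller is currently working on, picking
 * buffer A or B based on the BIU and STRTA/STRTB status bits.  Used by
 * sa11x0_dma_tx_status() for residue calculation.
 */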
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
	unsigned reg;
	u32 dcsr;

	dcsr = readl_relaxed(p->base + DMA_DCSR_R);

	if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
	    (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
		reg = DMA_DBSA;
	else
		reg = DMA_DBSB;

	return readl_relaxed(p->base + reg);
}

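/*
 * Report the status of a cookie.  A descriptor still sitting on the
 * issued list reports its full size as residue; for the descriptor
 * currently on the hardware the residue is derived from the live buffer
 * address returned by sa11x0_dma_pos().
 */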
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!state)
		return c->status;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
	} else if (!p) {
		state->residue = 0;
	} else {
		struct sa11x0_dma_desc *txd;
		size_t bytes = 0;

		if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
			txd = p->txd_done;
		else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
			txd = p->txd_load;
		else
			txd = NULL;

		ret = c->status;
		if (txd) {
			dma_addr_t addr = sa11x0_dma_pos(p);
			unsigned i;

			dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr);

			for (i = 0; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				if (addr >= txd->sg[i].addr &&
				    addr < txd->sg[i].addr + txd->sg[i].len) {
					unsigned len;

					len = txd->sg[i].len -
						(addr - txd->sg[i].addr);
					dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
						i, len);
					bytes += len;
					i++;
					break;
				}
			}
			for (; i < txd->sglen; i++) {
				dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
					i, txd->sg[i].addr, txd->sg[i].len);
				bytes += txd->sg[i].len;
			}
		}
		state->residue = bytes;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue);

	return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc)) {
		if (!c->phy) {
			spin_lock(&d->lock);
			if (list_empty(&c->node)) {
				list_add_tail(&c->node, &d->chan_pending);
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
			spin_unlock(&d->lock);
		}
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	struct scatterlist *sgent;
	unsigned i, j = sglen;
	size_t size = 0;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned int len = sg_dma_len(sgent);

		if (len > DMA_MAX_SIZE)
			j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
		if (addr & DMA_ALIGN) {
			dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n",
				&c->vc, addr);
			return NULL;
		}
	}

	txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	j = 0;
	for_each_sg(sg, sgent, sglen, i) {
		dma_addr_t addr = sg_dma_address(sgent);
		unsigned len = sg_dma_len(sgent);

		size += len;

		do {
			unsigned tlen = len;

			/*
			 * Check whether the transfer will fit.  If not, try
			 * to split the transfer up such that we end up with
			 * equal chunks - but make sure that we preserve the
			 * alignment.  This avoids small segments.
			 */
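			/*
			 * For example, a 0x3000 byte segment gives
			 * mult = 2 and is emitted as two 0x1800 byte
			 * chunks, each a multiple of 4 bytes.
			 */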
			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen,
					DMA_MAX_SIZE & ~DMA_ALIGN);

				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[j].addr = addr;
			txd->sg[j].len = tlen;

			addr += tlen;
			len -= tlen;
			j++;
		} while (len);
	}

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = j;

	dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
		&c->vc, &txd->vd, txd->size, txd->sglen);

	return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
	enum dma_transfer_direction dir, unsigned long flags)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_desc *txd;
	unsigned i, j, k, sglen, sgperiod;

	/* SA11x0 channels can only operate in their native direction */
	if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
		dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
			&c->vc, c->ddar, dir);
		return NULL;
	}

	sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
	sglen = size * sgperiod / period;

	/* Do not allow zero-sized txds */
	if (sglen == 0)
		return NULL;

	txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC);
	if (!txd) {
		dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
		return NULL;
	}

	for (i = k = 0; i < size / period; i++) {
		size_t tlen, len = period;

		for (j = 0; j < sgperiod; j++, k++) {
			tlen = len;

			if (tlen > DMA_MAX_SIZE) {
				unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
				tlen = (tlen / mult) & ~DMA_ALIGN;
			}

			txd->sg[k].addr = addr;
			txd->sg[k].len = tlen;
			addr += tlen;
			len -= tlen;
		}

		WARN_ON(len != 0);
	}

	WARN_ON(k != sglen);

	txd->ddar = c->ddar;
	txd->size = size;
	txd->sglen = sglen;
	txd->cyclic = 1;
	txd->period = sgperiod;

	return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

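/*
 * Build the channel's DDAR value from a dma_slave_config.  Only 1- or
 * 2-byte bus widths and bursts of 4 or 8 are accepted; the device
 * address is then folded into DDAR with bits 31-28 kept in place and
 * bits 21-2 shifted up by 6.
 */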
static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg)
{
	u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
	dma_addr_t addr;
	enum dma_slave_buswidth width;
	u32 maxburst;

	if (ddar & DDAR_RW) {
		addr = cfg->src_addr;
		width = cfg->src_addr_width;
		maxburst = cfg->src_maxburst;
	} else {
		addr = cfg->dst_addr;
		width = cfg->dst_addr_width;
		maxburst = cfg->dst_maxburst;
	}

	if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
	     width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
	    (maxburst != 4 && maxburst != 8))
		return -EINVAL;

	if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		ddar |= DDAR_DW;
	if (maxburst == 8)
		ddar |= DDAR_BS;

	dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n",
		&c->vc, addr, width, maxburst);

	c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

	return 0;
}

static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
	struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
	struct sa11x0_dma_phy *p;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg);

	case DMA_TERMINATE_ALL:
		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
		/* Clear the tx descriptor lists */
		spin_lock_irqsave(&c->vc.lock, flags);
		vchan_get_all_descriptors(&c->vc, &head);

		p = c->phy;
		if (p) {
			dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
			/* vchan is assigned to a pchan - stop the channel */
			writel(DCSR_RUN | DCSR_IE |
				DCSR_STRTA | DCSR_DONEA |
				DCSR_STRTB | DCSR_DONEB,
				p->base + DMA_DCSR_C);

			if (p->txd_load) {
				if (p->txd_load != p->txd_done)
					list_add_tail(&p->txd_load->vd.node, &head);
				p->txd_load = NULL;
			}
			if (p->txd_done) {
				list_add_tail(&p->txd_done->vd.node, &head);
				p->txd_done = NULL;
			}
			c->phy = NULL;
			spin_lock(&d->lock);
			p->vchan = NULL;
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);
		ret = 0;
		break;

	case DMA_PAUSE:
		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_IN_PROGRESS) {
			c->status = DMA_PAUSED;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			} else {
				spin_lock(&d->lock);
				list_del_init(&c->node);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	case DMA_RESUME:
		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
		spin_lock_irqsave(&c->vc.lock, flags);
		if (c->status == DMA_PAUSED) {
			c->status = DMA_IN_PROGRESS;

			p = c->phy;
			if (p) {
				writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
			} else if (!list_empty(&c->vc.desc_issued)) {
				spin_lock(&d->lock);
				list_add_tail(&c->node, &d->chan_pending);
				spin_unlock(&d->lock);
			}
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
		ret = 0;
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

struct sa11x0_dma_channel_desc {
	u32 ddar;
	const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
	CD(Ser0UDCTr, 0),
	CD(Ser0UDCRc, DDAR_RW),
	CD(Ser1SDLCTr, 0),
	CD(Ser1SDLCRc, DDAR_RW),
	CD(Ser1UARTTr, 0),
	CD(Ser1UARTRc, DDAR_RW),
	CD(Ser2ICPTr, 0),
	CD(Ser2ICPRc, DDAR_RW),
	CD(Ser3UARTTr, 0),
	CD(Ser3UARTRc, DDAR_RW),
	CD(Ser4MCP0Tr, 0),
	CD(Ser4MCP0Rc, DDAR_RW),
	CD(Ser4MCP1Tr, 0),
	CD(Ser4MCP1Rc, DDAR_RW),
	CD(Ser4SSPTr, 0),
	CD(Ser4SSPRc, DDAR_RW),
};

static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
	struct device *dev)
{
	unsigned i;

	dmadev->chancnt = ARRAY_SIZE(chan_desc);
	INIT_LIST_HEAD(&dmadev->channels);
	dmadev->dev = dev;
	dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
	dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
	dmadev->device_control = sa11x0_dma_control;
	dmadev->device_tx_status = sa11x0_dma_tx_status;
	dmadev->device_issue_pending = sa11x0_dma_issue_pending;

	for (i = 0; i < dmadev->chancnt; i++) {
		struct sa11x0_dma_chan *c;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c) {
			dev_err(dev, "no memory for channel %u\n", i);
			return -ENOMEM;
		}

		c->status = DMA_IN_PROGRESS;
		c->ddar = chan_desc[i].ddar;
		c->name = chan_desc[i].name;
		INIT_LIST_HEAD(&c->node);

		c->vc.desc_free = sa11x0_dma_free_desc;
		vchan_init(&c->vc, dmadev);
	}

	return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);

	if (irq <= 0)
		return -ENXIO;

	return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
	void *data)
{
	int irq = platform_get_irq(pdev, nr);
	if (irq > 0)
		free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
	struct sa11x0_dma_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

static int sa11x0_dma_probe(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d;
	struct resource *res;
	unsigned i;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);

	d->base = ioremap(res->start, resource_size(res));
	if (!d->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

	for (i = 0; i < NR_PHY_CHAN; i++) {
		struct sa11x0_dma_phy *p = &d->phy[i];

		p->dev = d;
		p->num = i;
		p->base = d->base + i * DMA_SIZE;
		writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
			DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
			p->base + DMA_DCSR_C);
		writel_relaxed(0, p->base + DMA_DDAR);

		ret = sa11x0_dma_request_irq(pdev, i, p);
		if (ret) {
			while (i) {
				i--;
				sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
			}
			goto err_irq;
		}
	}

	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
	ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
	if (ret) {
		dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
			ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, d);
	return 0;

 err_slave_reg:
	sa11x0_dma_free_channels(&d->slave);
	for (i = 0; i < NR_PHY_CHAN; i++)
		sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
	tasklet_kill(&d->task);
	iounmap(d->base);
 err_ioremap:
	kfree(d);
 err_alloc:
	return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
	struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
	unsigned pch;

	dma_async_device_unregister(&d->slave);

	sa11x0_dma_free_channels(&d->slave);
	for (pch = 0; pch < NR_PHY_CHAN; pch++)
		sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
	tasklet_kill(&d->task);
	iounmap(d->base);
	kfree(d);

	return 0;
}

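/*
 * Stop each physical channel and save its buffer registers.  The save
 * order (and the STRTA/STRTB bits kept in the saved DCSR) depends on
 * DCSR_BIU, so that sa11x0_dma_resume() can simply re-program the
 * registers in a fixed A-then-B order.
 */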
static int sa11x0_dma_suspend(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		u32 dcsr, saved_dcsr;

		dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		if (dcsr & DCSR_RUN) {
			writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
			dcsr = readl_relaxed(p->base + DMA_DCSR_R);
		}

		saved_dcsr &= DCSR_RUN | DCSR_IE;
		if (dcsr & DCSR_BIU) {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
			saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
				      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
		} else {
			p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
			p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
			p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
			p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
			saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
		}
		p->dcsr = saved_dcsr;

		writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
	}

	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
	unsigned pch;

	for (pch = 0; pch < NR_PHY_CHAN; pch++) {
		struct sa11x0_dma_phy *p = &d->phy[pch];
		struct sa11x0_dma_desc *txd = NULL;
		u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

		WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

		if (p->txd_done)
			txd = p->txd_done;
		else if (p->txd_load)
			txd = p->txd_load;

		if (!txd)
			continue;

		writel_relaxed(txd->ddar, p->base + DMA_DDAR);

		writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
		writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
		writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
		writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
		writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
	}

	return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
	.suspend_noirq = sa11x0_dma_suspend,
	.resume_noirq = sa11x0_dma_resume,
	.freeze_noirq = sa11x0_dma_suspend,
	.thaw_noirq = sa11x0_dma_resume,
	.poweroff_noirq = sa11x0_dma_suspend,
	.restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
	.driver = {
		.name	= "sa11x0-dma",
		.owner	= THIS_MODULE,
		.pm	= &sa11x0_dma_pm_ops,
	},
	.probe		= sa11x0_dma_probe,
	.remove		= sa11x0_dma_remove,
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
		struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
		const char *p = param;

		return !strcmp(c->name, p);
	}
	return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);
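/*
 * Example (illustration only, not part of this driver): a client would
 * normally obtain one of the named channels through the dmaengine core,
 * passing a channel name from chan_desc[] as the filter parameter:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sa11x0_dma_filter_fn, "Ser4SSPTr");
 *
 * followed by dmaengine_slave_config(), dmaengine_prep_slave_sg(),
 * dmaengine_submit() and dma_async_issue_pending() on that channel.
 */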

static int __init sa11x0_dma_init(void)
{
	return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
	platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");