/*
 * Freescale MXS SPI master driver
 *
 * Copyright 2012 DENX Software Engineering, GmbH.
 * Copyright 2012 Freescale Semiconductor, Inc.
 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
 *
 * Rework and transition to new API by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on previous attempt by:
 * Fabio Estevam <fabio.estevam@freescale.com>
 *
 * Based on code from U-Boot bootloader by:
 * Marek Vasut <marex@denx.de>
 *
 * Based on spi-stmp.c, which is:
 * Author: Dmitry Pervushin <dimka@embeddedalley.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>

#define DRIVER_NAME		"mxs-spi"

/* Use a 10s timeout for very long transfers; it should suffice. */
#define SSP_TIMEOUT		10000

#define SG_MAXLEN		0xff00

/*
 * Flags for txrx functions.  More efficient than using an argument register
 * for each one.
 */
#define TXRX_WRITE		(1<<0)	/* This is a write */
#define TXRX_DEASSERT_CS	(1<<1)	/* De-assert CS at end of txrx */
struct mxs_spi {
	struct mxs_ssp		ssp;
	struct completion	c;
	unsigned int		sck;	/* Rate requested (vs actual) */
};

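/*
 * Configure the SSP block for one transfer: program the (requested) bit
 * clock, lock the chip select, and set 8-bit SPI mode with the CPOL/CPHA
 * settings taken from the SPI device.
 */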
static int mxs_spi_setup_transfer(struct spi_device *dev,
				  const struct spi_transfer *t)
{
	struct mxs_spi *spi = spi_master_get_devdata(dev->master);
	struct mxs_ssp *ssp = &spi->ssp;
	const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);

	if (hz == 0) {
		dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
		return -EINVAL;
	}

	if (hz != spi->sck) {
		mxs_ssp_set_clk_rate(ssp, hz);
		/*
		 * Save the requested rate, hz, rather than the actual rate,
		 * ssp->clk_rate.  Otherwise we would set the rate on every
		 * transfer when the actual rate is not quite the same as the
		 * requested rate.
		 */
		spi->sck = hz;
		/*
		 * Perhaps we should return an error if the actual clock is
		 * nowhere close to what was requested?
		 */
	}

	writel(BM_SSP_CTRL0_LOCK_CS,
		ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
	       BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
	       ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
	       ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
	       ssp->base + HW_SSP_CTRL1(ssp));

	writel(0x0, ssp->base + HW_SSP_CMD0);
	writel(0x0, ssp->base + HW_SSP_CMD1);

	return 0;
}

static u32 mxs_spi_cs_to_reg(unsigned cs)
{
	u32 select = 0;

	/*
	 * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
	 *
	 * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
	 * in the HW_SSP_CTRL0 register have multiple uses; please refer to
	 * the datasheet for further details. In SPI mode, they are used to
	 * toggle the chip-select lines (nCS pins).
	 */
	if (cs & 1)
		select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
	if (cs & 2)
		select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;

	return select;
}

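/*
 * Busy-poll an SSP register until the bits in @mask are all set (@set true)
 * or all cleared (@set false), giving up after SSP_TIMEOUT milliseconds.
 */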
static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
{
	const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
	struct mxs_ssp *ssp = &spi->ssp;
	u32 reg;

	do {
		reg = readl_relaxed(ssp->base + offset);

		if (!set)
			reg = ~reg;

		reg &= mask;

		if (reg == mask)
			return 0;
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
}

static void mxs_ssp_dma_irq_callback(void *param)
{
	struct mxs_spi *spi = param;

	complete(&spi->c);
}

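/*
 * The SSP error interrupt only logs the fault; completion of the data
 * transfer itself is signalled through the DMA callback above.
 */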
static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
{
	struct mxs_ssp *ssp = dev_id;

	dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
		__func__, __LINE__,
		readl(ssp->base + HW_SSP_CTRL1(ssp)),
		readl(ssp->base + HW_SSP_STATUS(ssp)));
	return IRQ_HANDLED;
}

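/*
 * DMA transfer path.  The buffer is split into segments of at most SG_MAXLEN
 * bytes (PAGE_SIZE for vmalloc'ed buffers); each segment is queued as a PIO
 * write of the SSP control registers followed by the data itself.  Only the
 * last data descriptor carries the completion callback.
 */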
static int mxs_spi_txrx_dma(struct mxs_spi *spi,
			    unsigned char *buf, int len,
			    unsigned int flags)
{
	struct mxs_ssp *ssp = &spi->ssp;
	struct dma_async_tx_descriptor *desc = NULL;
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	int sg_count;
	int min, ret;
	u32 ctrl0;
	struct page *vm_page;
	void *sg_buf;
	struct {
		u32			pio[4];
		struct scatterlist	sg;
	} *dma_xfer;

	if (!len)
		return -EINVAL;

	dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
	if (!dma_xfer)
		return -ENOMEM;

	reinit_completion(&spi->c);

	/* Chip select was already programmed into CTRL0 */
	ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
	ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
		 BM_SSP_CTRL0_READ);
	ctrl0 |= BM_SSP_CTRL0_DATA_XFER;

	if (!(flags & TXRX_WRITE))
		ctrl0 |= BM_SSP_CTRL0_READ;

	/* Queue the DMA data transfer. */
	for (sg_count = 0; sg_count < sgs; sg_count++) {
		/* Prepare the transfer descriptor. */
		min = min(len, desc_len);

		/*
		 * De-assert CS on last segment if flag is set (i.e., no more
		 * transfers will follow)
		 */
		if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
			ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;

		if (ssp->devid == IMX23_SSP) {
			ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
			ctrl0 |= min;
		}

		dma_xfer[sg_count].pio[0] = ctrl0;
		dma_xfer[sg_count].pio[3] = min;

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				ret = -ENOMEM;
				goto err_vmalloc;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_init_one(&dma_xfer[sg_count].sg, sg_buf, min);
		ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		len -= min;
		buf += min;

		/* Queue the PIO register write transfer. */
		desc = dmaengine_prep_slave_sg(ssp->dmach,
				(struct scatterlist *)dma_xfer[sg_count].pio,
				(ssp->devid == IMX23_SSP) ? 1 : 4,
				DMA_TRANS_NONE,
				sg_count ? DMA_PREP_INTERRUPT : 0);
		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get PIO reg. write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}

		desc = dmaengine_prep_slave_sg(ssp->dmach,
				&dma_xfer[sg_count].sg, 1,
				(flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

		if (!desc) {
			dev_err(ssp->dev,
				"Failed to get DMA data write descriptor.\n");
			ret = -EINVAL;
			goto err_mapped;
		}
	}

	/*
	 * The last descriptor must have this callback
	 * to finish the DMA transaction.
	 */
	desc->callback = mxs_ssp_dma_irq_callback;
	desc->callback_param = spi;

	/* Start the transfer. */
	dmaengine_submit(desc);
	dma_async_issue_pending(ssp->dmach);

	ret = wait_for_completion_timeout(&spi->c,
				msecs_to_jiffies(SSP_TIMEOUT));
	if (!ret) {
		dev_err(ssp->dev, "DMA transfer timeout\n");
		ret = -ETIMEDOUT;
		dmaengine_terminate_all(ssp->dmach);
		goto err_vmalloc;
	}

	ret = 0;

err_vmalloc:
	while (--sg_count >= 0) {
err_mapped:
		dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
			(flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}

	kfree(dma_xfer);

	return ret;
}

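/*
 * PIO transfer path, used for short transfers: run the SSP one byte at a
 * time, polling the RUN and FIFO status bits between bytes.
 */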
static int mxs_spi_txrx_pio(struct mxs_spi *spi,
			    unsigned char *buf, int len,
			    unsigned int flags)
{
	struct mxs_ssp *ssp = &spi->ssp;

	writel(BM_SSP_CTRL0_IGNORE_CRC,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);

	while (len--) {
		if (len == 0 && (flags & TXRX_DEASSERT_CS))
			writel(BM_SSP_CTRL0_IGNORE_CRC,
			       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (ssp->devid == IMX23_SSP) {
			writel(BM_SSP_CTRL0_XFER_COUNT,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
			writel(1,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
		} else {
			writel(1, ssp->base + HW_SSP_XFER_SIZE);
		}

		if (flags & TXRX_WRITE)
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
		else
			writel(BM_SSP_CTRL0_READ,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		writel(BM_SSP_CTRL0_RUN,
				ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
			return -ETIMEDOUT;

		if (flags & TXRX_WRITE)
			writel(*buf, ssp->base + HW_SSP_DATA(ssp));

		writel(BM_SSP_CTRL0_DATA_XFER,
			     ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

		if (!(flags & TXRX_WRITE)) {
			if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
						BM_SSP_STATUS_FIFO_EMPTY, 0))
				return -ETIMEDOUT;

			*buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
		}

		if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
			return -ETIMEDOUT;

		buf++;
	}

	if (len <= 0)
		return 0;

	return -ETIMEDOUT;
}

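/*
 * Handle one SPI message: program the chip-select bits once for the whole
 * message, then run each transfer through either the PIO or the DMA path
 * depending on its length.
 */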
static int mxs_spi_transfer_one(struct spi_master *master,
				struct spi_message *m)
{
	struct mxs_spi *spi = spi_master_get_devdata(master);
	struct mxs_ssp *ssp = &spi->ssp;
	struct spi_transfer *t;
	unsigned int flag;
	int status = 0;

	/* Program the CS register bits here; they are used for all transfers. */
	writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
	writel(mxs_spi_cs_to_reg(m->spi->chip_select),
	       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);

	list_for_each_entry(t, &m->transfers, transfer_list) {

		status = mxs_spi_setup_transfer(m->spi, t);
		if (status)
			break;

		/* De-assert CS on the last transfer, inverted by the cs_change flag */
		flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
		       TXRX_DEASSERT_CS : 0;

		/*
		 * Small blocks can be transferred via PIO.
		 * Measured empirically:
		 *
		 * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
		 *
		 * DMA only: 2.164808 seconds, 473.0KB/s
		 * Combined: 1.676276 seconds, 610.9KB/s
		 */
		if (t->len < 32) {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_CLR);

			if (t->tx_buf)
				status = mxs_spi_txrx_pio(spi,
						(void *)t->tx_buf,
						t->len, flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_pio(spi,
						t->rx_buf, t->len,
						flag);
		} else {
			writel(BM_SSP_CTRL1_DMA_ENABLE,
				ssp->base + HW_SSP_CTRL1(ssp) +
				STMP_OFFSET_REG_SET);

			if (t->tx_buf)
				status = mxs_spi_txrx_dma(spi,
						(void *)t->tx_buf, t->len,
						flag | TXRX_WRITE);
			if (t->rx_buf)
				status = mxs_spi_txrx_dma(spi,
						t->rx_buf, t->len,
						flag);
		}

		if (status) {
			stmp_reset_block(ssp->base);
			break;
		}

		m->actual_length += t->len;
	}

	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static const struct of_device_id mxs_spi_dt_ids[] = {
	{ .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
	{ .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);

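/*
 * Probe: map the SSP registers, grab the clock, error IRQ and DMA channel,
 * then reset the block and register the SPI master.  The SSP clock defaults
 * to 160 MHz unless overridden by the "clock-frequency" DT property.
 */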
static int mxs_spi_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(mxs_spi_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;
	struct resource *iores;
	struct clk *clk;
	void __iomem *base;
	int devid, clk_freq;
	int ret = 0, irq_err;

	/*
	 * Default clock speed for the SPI core. 160MHz seems to
	 * work reasonably well with most SPI flashes, so use this
	 * as a default. Override with "clock-frequency" DT prop.
	 */
	const int clk_freq_default = 160000000;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq_err = platform_get_irq(pdev, 0);
	if (irq_err < 0)
		return irq_err;

	base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(base))
		return PTR_ERR(base);

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	devid = (enum mxs_ssp_id) of_id->data;
	ret = of_property_read_u32(np, "clock-frequency",
				   &clk_freq);
	if (ret)
		clk_freq = clk_freq_default;

	master = spi_alloc_master(&pdev->dev, sizeof(*spi));
	if (!master)
		return -ENOMEM;

	master->transfer_one_message = mxs_spi_transfer_one;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->num_chipselect = 3;
	master->dev.of_node = np;
	master->flags = SPI_MASTER_HALF_DUPLEX;

	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;
	ssp->dev = &pdev->dev;
	ssp->clk = clk;
	ssp->base = base;
	ssp->devid = devid;

	init_completion(&spi->c);

	ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
			       DRIVER_NAME, ssp);
	if (ret)
		goto out_master_free;

	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
	if (!ssp->dmach) {
		dev_err(ssp->dev, "Failed to request DMA\n");
		ret = -ENODEV;
		goto out_master_free;
	}

	ret = clk_prepare_enable(ssp->clk);
	if (ret)
		goto out_dma_release;

	clk_set_rate(ssp->clk, clk_freq);

	ret = stmp_reset_block(ssp->base);
	if (ret)
		goto out_disable_clk;

	platform_set_drvdata(pdev, master);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
		goto out_disable_clk;
	}

	return 0;

out_disable_clk:
	clk_disable_unprepare(ssp->clk);
out_dma_release:
	dma_release_channel(ssp->dmach);
out_master_free:
	spi_master_put(master);
	return ret;
}

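/*
 * The SPI master and most resources are managed (devm), so only the clock
 * and the DMA channel need explicit teardown here.
 */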
static int mxs_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct mxs_spi *spi;
	struct mxs_ssp *ssp;

	master = platform_get_drvdata(pdev);
	spi = spi_master_get_devdata(master);
	ssp = &spi->ssp;

	clk_disable_unprepare(ssp->clk);
	dma_release_channel(ssp->dmach);

	return 0;
}

static struct platform_driver mxs_spi_driver = {
	.probe	= mxs_spi_probe,
	.remove	= mxs_spi_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = mxs_spi_dt_ids,
	},
};

module_platform_driver(mxs_spi_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("MXS SPI master driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mxs-spi");