1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
42#include <linux/slab.h>
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
46#include <crypto/des.h>
47#include <crypto/sha.h>
48#include <crypto/md5.h>
49#include <crypto/aead.h>
50#include <crypto/authenc.h>
51#include <crypto/skcipher.h>
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
54#include <crypto/scatterwalk.h>
55
56#include "talitos.h"
57
58static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
59{
60	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
61	talitos_ptr->eptr = upper_32_bits(dma_addr);
62}
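
/*
 * Example (illustrative only): the SEC uses extended 36-bit bus addresses,
 * so to_talitos_ptr() splits a dma_addr_t of 0x812345678 into
 * ptr = cpu_to_be32(0x12345678) and eptr = 0x8.
 */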
63
64/*
65 * map virtual single (contiguous) pointer to h/w descriptor pointer
66 */
67static void map_single_talitos_ptr(struct device *dev,
68				   struct talitos_ptr *talitos_ptr,
69				   unsigned short len, void *data,
70				   unsigned char extent,
71				   enum dma_data_direction dir)
72{
73	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
74
75	talitos_ptr->len = cpu_to_be16(len);
76	to_talitos_ptr(talitos_ptr, dma_addr);
77	talitos_ptr->j_extent = extent;
78}
79
80/*
81 * unmap bus single (contiguous) h/w descriptor pointer
82 */
83static void unmap_single_talitos_ptr(struct device *dev,
84				     struct talitos_ptr *talitos_ptr,
85				     enum dma_data_direction dir)
86{
87	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
88			 be16_to_cpu(talitos_ptr->len), dir);
89}
90
91static int reset_channel(struct device *dev, int ch)
92{
93	struct talitos_private *priv = dev_get_drvdata(dev);
94	unsigned int timeout = TALITOS_TIMEOUT;
95
96	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
97
98	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
99	       && --timeout)
100		cpu_relax();
101
102	if (timeout == 0) {
103		dev_err(dev, "failed to reset channel %d\n", ch);
104		return -EIO;
105	}
106
107	/* set 36-bit addressing, done writeback enable and done IRQ enable */
108	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
109		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
110
111	/* and ICCR writeback, if available */
112	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
113		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
114		          TALITOS_CCCR_LO_IWSE);
115
116	return 0;
117}
118
119static int reset_device(struct device *dev)
120{
121	struct talitos_private *priv = dev_get_drvdata(dev);
122	unsigned int timeout = TALITOS_TIMEOUT;
123	u32 mcr = TALITOS_MCR_SWR;
124
125	setbits32(priv->reg + TALITOS_MCR, mcr);
126
127	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
128	       && --timeout)
129		cpu_relax();
130
131	if (priv->irq[1]) {
132		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
133		setbits32(priv->reg + TALITOS_MCR, mcr);
134	}
135
136	if (timeout == 0) {
137		dev_err(dev, "failed to reset device\n");
138		return -EIO;
139	}
140
141	return 0;
142}
143
144/*
145 * Reset and initialize the device
146 */
147static int init_device(struct device *dev)
148{
149	struct talitos_private *priv = dev_get_drvdata(dev);
150	int ch, err;
151
152	/*
153	 * Master reset
154	 * per the errata documentation, certain SEC interrupts are not
155	 * fully cleared by writing the MCR:SWR bit; set the bit twice
156	 * to reset the device completely.
157	 */
158	err = reset_device(dev);
159	if (err)
160		return err;
161
162	err = reset_device(dev);
163	if (err)
164		return err;
165
166	/* reset channels */
167	for (ch = 0; ch < priv->num_channels; ch++) {
168		err = reset_channel(dev, ch);
169		if (err)
170			return err;
171	}
172
173	/* enable channel done and error interrupts */
174	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
175	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
176
177	/* disable integrity check error interrupts (use writeback instead) */
178	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
179		setbits32(priv->reg + TALITOS_MDEUICR_LO,
180		          TALITOS_MDEUICR_LO_ICE);
181
182	return 0;
183}
184
185/**
186 * talitos_submit - submits a descriptor to the device for processing
187 * @dev:	the SEC device to be used
188 * @ch:		the SEC device channel to be used
189 * @desc:	the descriptor to be processed by the device
190 * @callback:	whom to call when processing is complete
191 * @context:	a handle for use by caller (optional)
192 *
193 * desc must contain valid dma-mapped (bus physical) address pointers.
194 * callback must check err and feedback in descriptor header
195 * for device processing status.
196 */
197int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
198		   void (*callback)(struct device *dev,
199				    struct talitos_desc *desc,
200				    void *context, int error),
201		   void *context)
202{
203	struct talitos_private *priv = dev_get_drvdata(dev);
204	struct talitos_request *request;
205	unsigned long flags;
206	int head;
207
208	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
209
210	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
211		/* h/w fifo is full */
212		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
213		return -EAGAIN;
214	}
215
216	head = priv->chan[ch].head;
217	request = &priv->chan[ch].fifo[head];
218
219	/* map descriptor and save caller data */
220	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
221					   DMA_BIDIRECTIONAL);
222	request->callback = callback;
223	request->context = context;
224
225	/* increment fifo head */
226	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
227
228	smp_wmb();
229	request->desc = desc;
230
231	/* GO! */
232	wmb();
233	out_be32(priv->chan[ch].reg + TALITOS_FF,
234		 upper_32_bits(request->dma_desc));
235	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
236		 lower_32_bits(request->dma_desc));
237
238	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
239
240	return -EINPROGRESS;
241}
242EXPORT_SYMBOL(talitos_submit);
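
/*
 * Illustrative call sequence for talitos_submit() (a sketch only; my_done()
 * and my_request are made-up names).  The caller builds a dma-mapped
 * descriptor, submits it on a channel and treats -EINPROGRESS as success;
 * -EAGAIN means the channel fifo is full and the request must be retried
 * later.  Final status arrives through the callback:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// check error and the feedback bits in desc->hdr, then
 *		// complete the request passed in context
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_request);
 *	if (err == -EAGAIN)
 *		// fifo full: unmap the descriptor and resubmit later
 */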
243
244/*
245 * process what was done, notify callback of error if not
246 */
247static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
248{
249	struct talitos_private *priv = dev_get_drvdata(dev);
250	struct talitos_request *request, saved_req;
251	unsigned long flags;
252	int tail, status;
253
254	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
255
256	tail = priv->chan[ch].tail;
257	while (priv->chan[ch].fifo[tail].desc) {
258		request = &priv->chan[ch].fifo[tail];
259
260		/* descriptors with their done bits set don't get the error */
261		rmb();
262		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
263			status = 0;
264		else
265			if (!error)
266				break;
267			else
268				status = error;
269
270		dma_unmap_single(dev, request->dma_desc,
271				 sizeof(struct talitos_desc),
272				 DMA_BIDIRECTIONAL);
273
274		/* copy entries so we can call callback outside lock */
275		saved_req.desc = request->desc;
276		saved_req.callback = request->callback;
277		saved_req.context = request->context;
278
279		/* release request entry in fifo */
280		smp_wmb();
281		request->desc = NULL;
282
283		/* increment fifo tail */
284		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
285
286		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
287
288		atomic_dec(&priv->chan[ch].submit_count);
289
290		saved_req.callback(dev, saved_req.desc, saved_req.context,
291				   status);
292		/* channel may resume processing in single desc error case */
293		if (error && !reset_ch && status == error)
294			return;
295		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
296		tail = priv->chan[ch].tail;
297	}
298
299	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
300}
301
302/*
303 * process completed requests for channels that have done status
304 */
305#define DEF_TALITOS_DONE(name, ch_done_mask)				\
306static void talitos_done_##name(unsigned long data)			\
307{									\
308	struct device *dev = (struct device *)data;			\
309	struct talitos_private *priv = dev_get_drvdata(dev);		\
310	unsigned long flags;						\
311									\
312	if (ch_done_mask & 1)						\
313		flush_channel(dev, 0, 0, 0);				\
314	if (priv->num_channels == 1)					\
315		goto out;						\
316	if (ch_done_mask & (1 << 2))					\
317		flush_channel(dev, 1, 0, 0);				\
318	if (ch_done_mask & (1 << 4))					\
319		flush_channel(dev, 2, 0, 0);				\
320	if (ch_done_mask & (1 << 6))					\
321		flush_channel(dev, 3, 0, 0);				\
322									\
323out:									\
324	/* At this point, all completed channels have been processed */	\
325	/* Unmask done interrupts for channels completed later on. */	\
326	spin_lock_irqsave(&priv->reg_lock, flags);			\
327	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
328	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
329	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
330}
331DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
332DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
333DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
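
/*
 * Note on the masks used above (derived from the bit layout this driver
 * assumes): channel ch reports "done" in ISR bit 2*ch and "error" in bit
 * 2*ch + 1, which is why the done handlers test bits 0, 2, 4 and 6 and
 * talitos_error() below tests (1 << (ch * 2 + 1)).
 */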
334
335/*
336 * locate current (offending) descriptor
337 */
338static u32 current_desc_hdr(struct device *dev, int ch)
339{
340	struct talitos_private *priv = dev_get_drvdata(dev);
341	int tail, iter;
342	dma_addr_t cur_desc;
343
344	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
345	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
346
347	if (!cur_desc) {
348		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
349		return 0;
350	}
351
352	tail = priv->chan[ch].tail;
353
354	iter = tail;
355	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
356		iter = (iter + 1) & (priv->fifo_len - 1);
357		if (iter == tail) {
358			dev_err(dev, "couldn't locate current descriptor\n");
359			return 0;
360		}
361	}
362
363	return priv->chan[ch].fifo[iter].desc->hdr;
364}
365
366/*
367 * user diagnostics; report root cause of error based on execution unit status
368 */
369static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
370{
371	struct talitos_private *priv = dev_get_drvdata(dev);
372	int i;
373
374	if (!desc_hdr)
375		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
376
377	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
378	case DESC_HDR_SEL0_AFEU:
379		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
380			in_be32(priv->reg + TALITOS_AFEUISR),
381			in_be32(priv->reg + TALITOS_AFEUISR_LO));
382		break;
383	case DESC_HDR_SEL0_DEU:
384		dev_err(dev, "DEUISR 0x%08x_%08x\n",
385			in_be32(priv->reg + TALITOS_DEUISR),
386			in_be32(priv->reg + TALITOS_DEUISR_LO));
387		break;
388	case DESC_HDR_SEL0_MDEUA:
389	case DESC_HDR_SEL0_MDEUB:
390		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
391			in_be32(priv->reg + TALITOS_MDEUISR),
392			in_be32(priv->reg + TALITOS_MDEUISR_LO));
393		break;
394	case DESC_HDR_SEL0_RNG:
395		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
396			in_be32(priv->reg + TALITOS_RNGUISR),
397			in_be32(priv->reg + TALITOS_RNGUISR_LO));
398		break;
399	case DESC_HDR_SEL0_PKEU:
400		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
401			in_be32(priv->reg + TALITOS_PKEUISR),
402			in_be32(priv->reg + TALITOS_PKEUISR_LO));
403		break;
404	case DESC_HDR_SEL0_AESU:
405		dev_err(dev, "AESUISR 0x%08x_%08x\n",
406			in_be32(priv->reg + TALITOS_AESUISR),
407			in_be32(priv->reg + TALITOS_AESUISR_LO));
408		break;
409	case DESC_HDR_SEL0_CRCU:
410		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
411			in_be32(priv->reg + TALITOS_CRCUISR),
412			in_be32(priv->reg + TALITOS_CRCUISR_LO));
413		break;
414	case DESC_HDR_SEL0_KEU:
415		dev_err(dev, "KEUISR 0x%08x_%08x\n",
416			in_be32(priv->reg + TALITOS_KEUISR),
417			in_be32(priv->reg + TALITOS_KEUISR_LO));
418		break;
419	}
420
421	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
422	case DESC_HDR_SEL1_MDEUA:
423	case DESC_HDR_SEL1_MDEUB:
424		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
425			in_be32(priv->reg + TALITOS_MDEUISR),
426			in_be32(priv->reg + TALITOS_MDEUISR_LO));
427		break;
428	case DESC_HDR_SEL1_CRCU:
429		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
430			in_be32(priv->reg + TALITOS_CRCUISR),
431			in_be32(priv->reg + TALITOS_CRCUISR_LO));
432		break;
433	}
434
435	for (i = 0; i < 8; i++)
436		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
437			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
438			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
439}
440
441/*
442 * recover from error interrupts
443 */
444static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
445{
446	struct talitos_private *priv = dev_get_drvdata(dev);
447	unsigned int timeout = TALITOS_TIMEOUT;
448	int ch, error, reset_dev = 0, reset_ch = 0;
449	u32 v, v_lo;
450
451	for (ch = 0; ch < priv->num_channels; ch++) {
452		/* skip channels without errors */
453		if (!(isr & (1 << (ch * 2 + 1))))
454			continue;
455
456		error = -EINVAL;
457
458		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
459		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
460
461		if (v_lo & TALITOS_CCPSR_LO_DOF) {
462			dev_err(dev, "double fetch fifo overflow error\n");
463			error = -EAGAIN;
464			reset_ch = 1;
465		}
466		if (v_lo & TALITOS_CCPSR_LO_SOF) {
467			/* h/w dropped descriptor */
468			dev_err(dev, "single fetch fifo overflow error\n");
469			error = -EAGAIN;
470		}
471		if (v_lo & TALITOS_CCPSR_LO_MDTE)
472			dev_err(dev, "master data transfer error\n");
473		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
474			dev_err(dev, "s/g data length zero error\n");
475		if (v_lo & TALITOS_CCPSR_LO_FPZ)
476			dev_err(dev, "fetch pointer zero error\n");
477		if (v_lo & TALITOS_CCPSR_LO_IDH)
478			dev_err(dev, "illegal descriptor header error\n");
479		if (v_lo & TALITOS_CCPSR_LO_IEU)
480			dev_err(dev, "invalid execution unit error\n");
481		if (v_lo & TALITOS_CCPSR_LO_EU)
482			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
483		if (v_lo & TALITOS_CCPSR_LO_GB)
484			dev_err(dev, "gather boundary error\n");
485		if (v_lo & TALITOS_CCPSR_LO_GRL)
486			dev_err(dev, "gather return/length error\n");
487		if (v_lo & TALITOS_CCPSR_LO_SB)
488			dev_err(dev, "scatter boundary error\n");
489		if (v_lo & TALITOS_CCPSR_LO_SRL)
490			dev_err(dev, "scatter return/length error\n");
491
492		flush_channel(dev, ch, error, reset_ch);
493
494		if (reset_ch) {
495			reset_channel(dev, ch);
496		} else {
497			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
498				  TALITOS_CCCR_CONT);
499			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
500			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
501			       TALITOS_CCCR_CONT) && --timeout)
502				cpu_relax();
503			if (timeout == 0) {
504				dev_err(dev, "failed to restart channel %d\n",
505					ch);
506				reset_dev = 1;
507			}
508		}
509	}
510	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
511		dev_err(dev, "done overflow, internal time out, or rngu error: "
512		        "ISR 0x%08x_%08x\n", isr, isr_lo);
513
514		/* purge request queues */
515		for (ch = 0; ch < priv->num_channels; ch++)
516			flush_channel(dev, ch, -EIO, 1);
517
518		/* reset and reinitialize the device */
519		init_device(dev);
520	}
521}
522
523#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
524static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
525{									       \
526	struct device *dev = data;					       \
527	struct talitos_private *priv = dev_get_drvdata(dev);		       \
528	u32 isr, isr_lo;						       \
529	unsigned long flags;						       \
530									       \
531	spin_lock_irqsave(&priv->reg_lock, flags);			       \
532	isr = in_be32(priv->reg + TALITOS_ISR);				       \
533	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
534	/* Acknowledge interrupt */					       \
535	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
536	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
537									       \
538	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
539		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
540		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
541	}								       \
542	else {								       \
543		if (likely(isr & ch_done_mask)) {			       \
544			/* mask further done interrupts. */		       \
545			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
546			/* done_task will unmask done interrupts at exit */    \
547			tasklet_schedule(&priv->done_task[tlet]);	       \
548		}							       \
549		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
550	}								       \
551									       \
552	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
553								IRQ_NONE;      \
554}
555DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
556DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
557DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
558
559/*
560 * hwrng
561 */
562static int talitos_rng_data_present(struct hwrng *rng, int wait)
563{
564	struct device *dev = (struct device *)rng->priv;
565	struct talitos_private *priv = dev_get_drvdata(dev);
566	u32 ofl;
567	int i;
568
569	for (i = 0; i < 20; i++) {
570		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
571		      TALITOS_RNGUSR_LO_OFL;
572		if (ofl || !wait)
573			break;
574		udelay(10);
575	}
576
577	return !!ofl;
578}
579
580static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
581{
582	struct device *dev = (struct device *)rng->priv;
583	struct talitos_private *priv = dev_get_drvdata(dev);
584
585	/* rng fifo requires 64-bit accesses: read the high then the low word */
586	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
587	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
588
589	return sizeof(u32);
590}
591
592static int talitos_rng_init(struct hwrng *rng)
593{
594	struct device *dev = (struct device *)rng->priv;
595	struct talitos_private *priv = dev_get_drvdata(dev);
596	unsigned int timeout = TALITOS_TIMEOUT;
597
598	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
599	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
600	       && --timeout)
601		cpu_relax();
602	if (timeout == 0) {
603		dev_err(dev, "failed to reset rng hw\n");
604		return -ENODEV;
605	}
606
607	/* start generating */
608	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
609
610	return 0;
611}
612
613static int talitos_register_rng(struct device *dev)
614{
615	struct talitos_private *priv = dev_get_drvdata(dev);
616
617	priv->rng.name		= dev_driver_string(dev);
618	priv->rng.init		= talitos_rng_init;
619	priv->rng.data_present	= talitos_rng_data_present;
620	priv->rng.data_read	= talitos_rng_data_read;
621	priv->rng.priv		= (unsigned long)dev;
622
623	return hwrng_register(&priv->rng);
624}
625
626static void talitos_unregister_rng(struct device *dev)
627{
628	struct talitos_private *priv = dev_get_drvdata(dev);
629
630	hwrng_unregister(&priv->rng);
631}
632
633/*
634 * crypto alg
635 */
636#define TALITOS_CRA_PRIORITY		3000
637#define TALITOS_MAX_KEY_SIZE		96
638#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
639
640#define MD5_BLOCK_SIZE    64
641
642struct talitos_ctx {
643	struct device *dev;
644	int ch;
645	__be32 desc_hdr_template;
646	u8 key[TALITOS_MAX_KEY_SIZE];
647	u8 iv[TALITOS_MAX_IV_LENGTH];
648	unsigned int keylen;
649	unsigned int enckeylen;
650	unsigned int authkeylen;
651	unsigned int authsize;
652};
653
654#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
655#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
656
657struct talitos_ahash_req_ctx {
658	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
659	unsigned int hw_context_size;
660	u8 buf[HASH_MAX_BLOCK_SIZE];
661	u8 bufnext[HASH_MAX_BLOCK_SIZE];
662	unsigned int swinit;
663	unsigned int first;
664	unsigned int last;
665	unsigned int to_hash_later;
666	u64 nbuf;
667	struct scatterlist bufsl[2];
668	struct scatterlist *psrc;
669};
670
671static int aead_setauthsize(struct crypto_aead *authenc,
672			    unsigned int authsize)
673{
674	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
675
676	ctx->authsize = authsize;
677
678	return 0;
679}
680
681static int aead_setkey(struct crypto_aead *authenc,
682		       const u8 *key, unsigned int keylen)
683{
684	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
685	struct crypto_authenc_keys keys;
686
687	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
688		goto badkey;
689
690	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
691		goto badkey;
692
693	memcpy(ctx->key, keys.authkey, keys.authkeylen);
694	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
695
696	ctx->keylen = keys.authkeylen + keys.enckeylen;
697	ctx->enckeylen = keys.enckeylen;
698	ctx->authkeylen = keys.authkeylen;
699
700	return 0;
701
702badkey:
703	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
704	return -EINVAL;
705}
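
/*
 * Resulting ctx->key layout (illustrative): the authentication key is stored
 * first, immediately followed by the encryption key, so ipsec_esp() below can
 * point the hmac key at &ctx->key[0] (authkeylen bytes) and the cipher key at
 * &ctx->key[authkeylen] (enckeylen bytes):
 *
 *	| authkey (authkeylen) | enckey (enckeylen) |
 */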
706
707/*
708 * talitos_edesc - s/w-extended descriptor
709 * @assoc_nents: number of segments in associated data scatterlist
710 * @src_nents: number of segments in input scatterlist
711 * @dst_nents: number of segments in output scatterlist
712 * @assoc_chained: whether assoc is chained or not
713 * @src_chained: whether src is chained or not
714 * @dst_chained: whether dst is chained or not
715 * @iv_dma: dma address of iv for checking continuity and link table
716 * @dma_len: length of dma mapped link_tbl space
717 * @dma_link_tbl: bus physical address of link_tbl
718 * @desc: h/w descriptor
719 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
720 *
721 * if decrypting (with authcheck), or either one of src_nents or dst_nents
722 * is greater than 1, an integrity check value is concatenated to the end
723 * of link_tbl data
724 */
725struct talitos_edesc {
726	int assoc_nents;
727	int src_nents;
728	int dst_nents;
729	bool assoc_chained;
730	bool src_chained;
731	bool dst_chained;
732	dma_addr_t iv_dma;
733	int dma_len;
734	dma_addr_t dma_link_tbl;
735	struct talitos_desc desc;
736	struct talitos_ptr link_tbl[0];
737};
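
/*
 * Sketch of the link_tbl layout used on the ipsec_esp path, derived from the
 * offsets used in ipsec_esp() and talitos_edesc_alloc() (indices are
 * struct talitos_ptr entries):
 *
 *	link_tbl[0 .. src_nents - 1]                          cipher-in table
 *	link_tbl[src_nents + 1 .. src_nents + dst_nents + 1]  cipher-out table
 *	                                       (dst_nents entries + ICV entry)
 *	link_tbl[src_nents + dst_nents + 2 ..]                assoc + IV table
 *	followed by authsize bytes of ICV scratch space
 */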
738
739static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
740			  unsigned int nents, enum dma_data_direction dir,
741			  bool chained)
742{
743	if (unlikely(chained))
744		while (sg) {
745			dma_map_sg(dev, sg, 1, dir);
746			sg = scatterwalk_sg_next(sg);
747		}
748	else
749		dma_map_sg(dev, sg, nents, dir);
750	return nents;
751}
752
753static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
754				   enum dma_data_direction dir)
755{
756	while (sg) {
757		dma_unmap_sg(dev, sg, 1, dir);
758		sg = scatterwalk_sg_next(sg);
759	}
760}
761
762static void talitos_sg_unmap(struct device *dev,
763			     struct talitos_edesc *edesc,
764			     struct scatterlist *src,
765			     struct scatterlist *dst)
766{
767	unsigned int src_nents = edesc->src_nents ? : 1;
768	unsigned int dst_nents = edesc->dst_nents ? : 1;
769
770	if (src != dst) {
771		if (edesc->src_chained)
772			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
773		else
774			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
775
776		if (dst) {
777			if (edesc->dst_chained)
778				talitos_unmap_sg_chain(dev, dst,
779						       DMA_FROM_DEVICE);
780			else
781				dma_unmap_sg(dev, dst, dst_nents,
782					     DMA_FROM_DEVICE);
783		}
784	} else
785		if (edesc->src_chained)
786			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
787		else
788			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
789}
790
791static void ipsec_esp_unmap(struct device *dev,
792			    struct talitos_edesc *edesc,
793			    struct aead_request *areq)
794{
795	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
796	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
797	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
798	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
799
800	if (edesc->assoc_chained)
801		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
802	else if (areq->assoclen)
803		/* assoc_nents also counts the IV in non-contiguous cases */
804		dma_unmap_sg(dev, areq->assoc,
805			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
806			     DMA_TO_DEVICE);
807
808	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
809
810	if (edesc->dma_len)
811		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
812				 DMA_BIDIRECTIONAL);
813}
814
815/*
816 * ipsec_esp descriptor callbacks
817 */
818static void ipsec_esp_encrypt_done(struct device *dev,
819				   struct talitos_desc *desc, void *context,
820				   int err)
821{
822	struct aead_request *areq = context;
823	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
824	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
825	struct talitos_edesc *edesc;
826	struct scatterlist *sg;
827	void *icvdata;
828
829	edesc = container_of(desc, struct talitos_edesc, desc);
830
831	ipsec_esp_unmap(dev, edesc, areq);
832
833	/* copy the generated ICV to dst */
834	if (edesc->dst_nents) {
835		icvdata = &edesc->link_tbl[edesc->src_nents +
836					   edesc->dst_nents + 2 +
837					   edesc->assoc_nents];
838		sg = sg_last(areq->dst, edesc->dst_nents);
839		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
840		       icvdata, ctx->authsize);
841	}
842
843	kfree(edesc);
844
845	aead_request_complete(areq, err);
846}
847
848static void ipsec_esp_decrypt_swauth_done(struct device *dev,
849					  struct talitos_desc *desc,
850					  void *context, int err)
851{
852	struct aead_request *req = context;
853	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
854	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
855	struct talitos_edesc *edesc;
856	struct scatterlist *sg;
857	void *icvdata;
858
859	edesc = container_of(desc, struct talitos_edesc, desc);
860
861	ipsec_esp_unmap(dev, edesc, req);
862
863	if (!err) {
864		/* auth check */
865		if (edesc->dma_len)
866			icvdata = &edesc->link_tbl[edesc->src_nents +
867						   edesc->dst_nents + 2 +
868						   edesc->assoc_nents];
869		else
870			icvdata = &edesc->link_tbl[0];
871
872		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
873		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
874			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
875	}
876
877	kfree(edesc);
878
879	aead_request_complete(req, err);
880}
881
882static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
883					  struct talitos_desc *desc,
884					  void *context, int err)
885{
886	struct aead_request *req = context;
887	struct talitos_edesc *edesc;
888
889	edesc = container_of(desc, struct talitos_edesc, desc);
890
891	ipsec_esp_unmap(dev, edesc, req);
892
893	/* check ICV auth status */
894	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
895		     DESC_HDR_LO_ICCR1_PASS))
896		err = -EBADMSG;
897
898	kfree(edesc);
899
900	aead_request_complete(req, err);
901}
902
903/*
904 * convert scatterlist to SEC h/w link table format
905 * stop at cryptlen bytes
906 */
907static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
908			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
909{
910	int n_sg = sg_count;
911
912	while (n_sg--) {
913		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
914		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
915		link_tbl_ptr->j_extent = 0;
916		link_tbl_ptr++;
917		cryptlen -= sg_dma_len(sg);
918		sg = scatterwalk_sg_next(sg);
919	}
920
921	/* shrink the last entry (or two) so the total length equals cryptlen */
922	link_tbl_ptr--;
923	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
924		/* Empty this entry, and move to previous one */
925		cryptlen += be16_to_cpu(link_tbl_ptr->len);
926		link_tbl_ptr->len = 0;
927		sg_count--;
928		link_tbl_ptr--;
929	}
930	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
931
932	/* tag end of link table */
933	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
934
935	return sg_count;
936}
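
/*
 * Worked example (illustrative): for a 3-entry scatterlist of 64 bytes each
 * and cryptlen = 160, the loop above emits entries of 64, 64 and 64 bytes and
 * leaves cryptlen = -32; the trailing adjustment then shortens the last entry
 * to 32 bytes and tags it with DESC_PTR_LNKTBL_RETURN, so the table describes
 * exactly 160 bytes.
 */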
937
938/*
939 * fill in and submit ipsec_esp descriptor
940 */
941static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
942		     u64 seq, void (*callback) (struct device *dev,
943						struct talitos_desc *desc,
944						void *context, int error))
945{
946	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
947	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
948	struct device *dev = ctx->dev;
949	struct talitos_desc *desc = &edesc->desc;
950	unsigned int cryptlen = areq->cryptlen;
951	unsigned int authsize = ctx->authsize;
952	unsigned int ivsize = crypto_aead_ivsize(aead);
953	int sg_count, ret;
954	int sg_link_tbl_len;
955
956	/* hmac key */
957	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
958			       0, DMA_TO_DEVICE);
959
960	/* hmac data */
961	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
962	if (edesc->assoc_nents) {
963		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
964		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
965
966		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
967			       sizeof(struct talitos_ptr));
968		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
969
970		/* assoc_nents - 1 entries for assoc, 1 for IV */
971		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
972					  areq->assoclen, tbl_ptr);
973
974		/* add IV to link table */
975		tbl_ptr += sg_count - 1;
976		tbl_ptr->j_extent = 0;
977		tbl_ptr++;
978		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
979		tbl_ptr->len = cpu_to_be16(ivsize);
980		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
981
982		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
983					   edesc->dma_len, DMA_BIDIRECTIONAL);
984	} else {
985		if (areq->assoclen)
986			to_talitos_ptr(&desc->ptr[1],
987				       sg_dma_address(areq->assoc));
988		else
989			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
990		desc->ptr[1].j_extent = 0;
991	}
992
993	/* cipher iv */
994	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
995	desc->ptr[2].len = cpu_to_be16(ivsize);
996	desc->ptr[2].j_extent = 0;
997	/* Sync needed for the aead_givencrypt case */
998	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
999
1000	/* cipher key */
1001	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1002			       (char *)&ctx->key + ctx->authkeylen, 0,
1003			       DMA_TO_DEVICE);
1004
1005	/*
1006	 * cipher in
1007	 * map and adjust cipher len to aead request cryptlen.
1008	 * extent is the number of HMAC bytes appended to the ciphertext,
1009	 * typically 12 for ipsec
1010	 */
1011	desc->ptr[4].len = cpu_to_be16(cryptlen);
1012	desc->ptr[4].j_extent = authsize;
1013
1014	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1015				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1016							   : DMA_TO_DEVICE,
1017				  edesc->src_chained);
1018
1019	if (sg_count == 1) {
1020		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1021	} else {
1022		sg_link_tbl_len = cryptlen;
1023
1024		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1025			sg_link_tbl_len = cryptlen + authsize;
1026
1027		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1028					  &edesc->link_tbl[0]);
1029		if (sg_count > 1) {
1030			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1031			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1032			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1033						   edesc->dma_len,
1034						   DMA_BIDIRECTIONAL);
1035		} else {
1036			/* Only one segment now, so no link tbl needed */
1037			to_talitos_ptr(&desc->ptr[4],
1038				       sg_dma_address(areq->src));
1039		}
1040	}
1041
1042	/* cipher out */
1043	desc->ptr[5].len = cpu_to_be16(cryptlen);
1044	desc->ptr[5].j_extent = authsize;
1045
1046	if (areq->src != areq->dst)
1047		sg_count = talitos_map_sg(dev, areq->dst,
1048					  edesc->dst_nents ? : 1,
1049					  DMA_FROM_DEVICE, edesc->dst_chained);
1050
1051	if (sg_count == 1) {
1052		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1053	} else {
1054		int tbl_off = edesc->src_nents + 1;
1055		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1056
1057		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1058			       tbl_off * sizeof(struct talitos_ptr));
1059		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1060					  tbl_ptr);
1061
1062		/* Add an entry to the link table for ICV data */
1063		tbl_ptr += sg_count - 1;
1064		tbl_ptr->j_extent = 0;
1065		tbl_ptr++;
1066		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1067		tbl_ptr->len = cpu_to_be16(authsize);
1068
1069		/* icv data follows link tables */
1070		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1071			       (tbl_off + edesc->dst_nents + 1 +
1072				edesc->assoc_nents) *
1073			       sizeof(struct talitos_ptr));
1074		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1075		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1076					   edesc->dma_len, DMA_BIDIRECTIONAL);
1077	}
1078
1079	/* iv out */
1080	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1081			       DMA_FROM_DEVICE);
1082
1083	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1084	if (ret != -EINPROGRESS) {
1085		ipsec_esp_unmap(dev, edesc, areq);
1086		kfree(edesc);
1087	}
1088	return ret;
1089}
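
/*
 * For reference, the descriptor pointer assignment used by ipsec_esp() above
 * (as filled in by the code itself, not from separate documentation):
 *
 *	ptr[0]: hmac key
 *	ptr[1]: hmac (assoc) data + IV
 *	ptr[2]: cipher iv
 *	ptr[3]: cipher key
 *	ptr[4]: cipher in  (ICV length in j_extent)
 *	ptr[5]: cipher out (ICV length in j_extent)
 *	ptr[6]: iv out
 */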
1090
1091/*
1092 * derive number of elements in scatterlist
1093 */
1094static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
1095{
1096	struct scatterlist *sg = sg_list;
1097	int sg_nents = 0;
1098
1099	*chained = false;
1100	while (nbytes > 0) {
1101		sg_nents++;
1102		nbytes -= sg->length;
1103		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1104			*chained = true;
1105		sg = scatterwalk_sg_next(sg);
1106	}
1107
1108	return sg_nents;
1109}
1110
1111/*
1112 * allocate and map the extended descriptor
1113 */
1114static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1115						 struct scatterlist *assoc,
1116						 struct scatterlist *src,
1117						 struct scatterlist *dst,
1118						 u8 *iv,
1119						 unsigned int assoclen,
1120						 unsigned int cryptlen,
1121						 unsigned int authsize,
1122						 unsigned int ivsize,
1123						 int icv_stashing,
1124						 u32 cryptoflags,
1125						 bool encrypt)
1126{
1127	struct talitos_edesc *edesc;
1128	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1129	bool assoc_chained = false, src_chained = false, dst_chained = false;
1130	dma_addr_t iv_dma = 0;
1131	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1132		      GFP_ATOMIC;
1133
1134	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1135		dev_err(dev, "length exceeds h/w max limit\n");
1136		return ERR_PTR(-EINVAL);
1137	}
1138
1139	if (ivsize)
1140		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1141
1142	if (assoclen) {
1143		/*
1144		 * Currently it is assumed that iv is provided whenever assoc
1145		 * is.
1146		 */
1147		BUG_ON(!iv);
1148
1149		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1150		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1151			       assoc_chained);
1152		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1153
1154		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1155			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1156	}
1157
1158	if (!dst || dst == src) {
1159		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1160		src_nents = (src_nents == 1) ? 0 : src_nents;
1161		dst_nents = dst ? src_nents : 0;
1162	} else { /* dst && dst != src*/
1163		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1164				     &src_chained);
1165		src_nents = (src_nents == 1) ? 0 : src_nents;
1166		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1167				     &dst_chained);
1168		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1169	}
1170
1171	/*
1172	 * allocate space for base edesc plus the link tables,
1173	 * allowing for two separate entries for ICV and generated ICV (+ 2),
1174	 * and the ICV data itself
1175	 */
1176	alloc_len = sizeof(struct talitos_edesc);
1177	if (assoc_nents || src_nents || dst_nents) {
1178		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1179			  sizeof(struct talitos_ptr) + authsize;
1180		alloc_len += dma_len;
1181	} else {
1182		dma_len = 0;
1183		alloc_len += icv_stashing ? authsize : 0;
1184	}
1185
1186	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1187	if (!edesc) {
1188		if (assoc_chained)
1189			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1190		else if (assoclen)
1191			dma_unmap_sg(dev, assoc,
1192				     assoc_nents ? assoc_nents - 1 : 1,
1193				     DMA_TO_DEVICE);
1194
1195		if (iv_dma)
1196			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1197
1198		dev_err(dev, "could not allocate edescriptor\n");
1199		return ERR_PTR(-ENOMEM);
1200	}
1201
1202	edesc->assoc_nents = assoc_nents;
1203	edesc->src_nents = src_nents;
1204	edesc->dst_nents = dst_nents;
1205	edesc->assoc_chained = assoc_chained;
1206	edesc->src_chained = src_chained;
1207	edesc->dst_chained = dst_chained;
1208	edesc->iv_dma = iv_dma;
1209	edesc->dma_len = dma_len;
1210	if (dma_len)
1211		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1212						     edesc->dma_len,
1213						     DMA_BIDIRECTIONAL);
1214
1215	return edesc;
1216}
1217
1218static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1219					      int icv_stashing, bool encrypt)
1220{
1221	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1222	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1223	unsigned int ivsize = crypto_aead_ivsize(authenc);
1224
1225	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1226				   iv, areq->assoclen, areq->cryptlen,
1227				   ctx->authsize, ivsize, icv_stashing,
1228				   areq->base.flags, encrypt);
1229}
1230
1231static int aead_encrypt(struct aead_request *req)
1232{
1233	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1234	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1235	struct talitos_edesc *edesc;
1236
1237	/* allocate extended descriptor */
1238	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1239	if (IS_ERR(edesc))
1240		return PTR_ERR(edesc);
1241
1242	/* set encrypt */
1243	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1244
1245	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
1246}
1247
1248static int aead_decrypt(struct aead_request *req)
1249{
1250	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1251	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1252	unsigned int authsize = ctx->authsize;
1253	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1254	struct talitos_edesc *edesc;
1255	struct scatterlist *sg;
1256	void *icvdata;
1257
1258	req->cryptlen -= authsize;
1259
1260	/* allocate extended descriptor */
1261	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1262	if (IS_ERR(edesc))
1263		return PTR_ERR(edesc);
1264
1265	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1266	    ((!edesc->src_nents && !edesc->dst_nents) ||
1267	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1268
1269		/* decrypt and check the ICV */
1270		edesc->desc.hdr = ctx->desc_hdr_template |
1271				  DESC_HDR_DIR_INBOUND |
1272				  DESC_HDR_MODE1_MDEU_CICV;
1273
1274		/* reset integrity check result bits */
1275		edesc->desc.hdr_lo = 0;
1276
1277		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
1278	}
1279
1280	/* Have to check the ICV with software */
1281	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1282
1283	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1284	if (edesc->dma_len)
1285		icvdata = &edesc->link_tbl[edesc->src_nents +
1286					   edesc->dst_nents + 2 +
1287					   edesc->assoc_nents];
1288	else
1289		icvdata = &edesc->link_tbl[0];
1290
1291	sg = sg_last(req->src, edesc->src_nents ? : 1);
1292
1293	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1294	       ctx->authsize);
1295
1296	return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
1297}
1298
1299static int aead_givencrypt(struct aead_givcrypt_request *req)
1300{
1301	struct aead_request *areq = &req->areq;
1302	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1303	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1304	struct talitos_edesc *edesc;
1305
1306	/* allocate extended descriptor */
1307	edesc = aead_edesc_alloc(areq, req->giv, 0, true);
1308	if (IS_ERR(edesc))
1309		return PTR_ERR(edesc);
1310
1311	/* set encrypt */
1312	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1313
1314	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1315	/* avoid consecutive packets going out with same IV */
1316	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1317
1318	return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
1319}
1320
1321static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1322			     const u8 *key, unsigned int keylen)
1323{
1324	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1325
1326	memcpy(&ctx->key, key, keylen);
1327	ctx->keylen = keylen;
1328
1329	return 0;
1330}
1331
1332static void common_nonsnoop_unmap(struct device *dev,
1333				  struct talitos_edesc *edesc,
1334				  struct ablkcipher_request *areq)
1335{
1336	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1337	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1338	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1339
1340	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1341
1342	if (edesc->dma_len)
1343		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1344				 DMA_BIDIRECTIONAL);
1345}
1346
1347static void ablkcipher_done(struct device *dev,
1348			    struct talitos_desc *desc, void *context,
1349			    int err)
1350{
1351	struct ablkcipher_request *areq = context;
1352	struct talitos_edesc *edesc;
1353
1354	edesc = container_of(desc, struct talitos_edesc, desc);
1355
1356	common_nonsnoop_unmap(dev, edesc, areq);
1357
1358	kfree(edesc);
1359
1360	areq->base.complete(&areq->base, err);
1361}
1362
1363static int common_nonsnoop(struct talitos_edesc *edesc,
1364			   struct ablkcipher_request *areq,
1365			   void (*callback) (struct device *dev,
1366					     struct talitos_desc *desc,
1367					     void *context, int error))
1368{
1369	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1370	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1371	struct device *dev = ctx->dev;
1372	struct talitos_desc *desc = &edesc->desc;
1373	unsigned int cryptlen = areq->nbytes;
1374	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1375	int sg_count, ret;
1376
1377	/* first DWORD empty */
1378	desc->ptr[0].len = 0;
1379	to_talitos_ptr(&desc->ptr[0], 0);
1380	desc->ptr[0].j_extent = 0;
1381
1382	/* cipher iv */
1383	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
1384	desc->ptr[1].len = cpu_to_be16(ivsize);
1385	desc->ptr[1].j_extent = 0;
1386
1387	/* cipher key */
1388	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1389			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1390
1391	/*
1392	 * cipher in
1393	 */
1394	desc->ptr[3].len = cpu_to_be16(cryptlen);
1395	desc->ptr[3].j_extent = 0;
1396
1397	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1398				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1399							   : DMA_TO_DEVICE,
1400				  edesc->src_chained);
1401
1402	if (sg_count == 1) {
1403		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1404	} else {
1405		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1406					  &edesc->link_tbl[0]);
1407		if (sg_count > 1) {
1408			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1409			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1410			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1411						   edesc->dma_len,
1412						   DMA_BIDIRECTIONAL);
1413		} else {
1414			/* Only one segment now, so no link tbl needed */
1415			to_talitos_ptr(&desc->ptr[3],
1416				       sg_dma_address(areq->src));
1417		}
1418	}
1419
1420	/* cipher out */
1421	desc->ptr[4].len = cpu_to_be16(cryptlen);
1422	desc->ptr[4].j_extent = 0;
1423
1424	if (areq->src != areq->dst)
1425		sg_count = talitos_map_sg(dev, areq->dst,
1426					  edesc->dst_nents ? : 1,
1427					  DMA_FROM_DEVICE, edesc->dst_chained);
1428
1429	if (sg_count == 1) {
1430		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1431	} else {
1432		struct talitos_ptr *link_tbl_ptr =
1433			&edesc->link_tbl[edesc->src_nents + 1];
1434
1435		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1436					      (edesc->src_nents + 1) *
1437					      sizeof(struct talitos_ptr));
1438		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1439		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1440					  link_tbl_ptr);
1441		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1442					   edesc->dma_len, DMA_BIDIRECTIONAL);
1443	}
1444
1445	/* iv out */
1446	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1447			       DMA_FROM_DEVICE);
1448
1449	/* last DWORD empty */
1450	desc->ptr[6].len = 0;
1451	to_talitos_ptr(&desc->ptr[6], 0);
1452	desc->ptr[6].j_extent = 0;
1453
1454	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1455	if (ret != -EINPROGRESS) {
1456		common_nonsnoop_unmap(dev, edesc, areq);
1457		kfree(edesc);
1458	}
1459	return ret;
1460}
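
/*
 * Pointer usage in the common_nonsnoop (ablkcipher) descriptor above, as
 * filled in by the code:
 *
 *	ptr[0]: unused
 *	ptr[1]: cipher iv
 *	ptr[2]: cipher key
 *	ptr[3]: cipher in
 *	ptr[4]: cipher out
 *	ptr[5]: iv out
 *	ptr[6]: unused
 */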
1461
1462static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1463						    areq, bool encrypt)
1464{
1465	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1466	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1467	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1468
1469	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1470				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1471				   areq->base.flags, encrypt);
1472}
1473
1474static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1475{
1476	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1477	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1478	struct talitos_edesc *edesc;
1479
1480	/* allocate extended descriptor */
1481	edesc = ablkcipher_edesc_alloc(areq, true);
1482	if (IS_ERR(edesc))
1483		return PTR_ERR(edesc);
1484
1485	/* set encrypt */
1486	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1487
1488	return common_nonsnoop(edesc, areq, ablkcipher_done);
1489}
1490
1491static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1492{
1493	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1494	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1495	struct talitos_edesc *edesc;
1496
1497	/* allocate extended descriptor */
1498	edesc = ablkcipher_edesc_alloc(areq, false);
1499	if (IS_ERR(edesc))
1500		return PTR_ERR(edesc);
1501
1502	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1503
1504	return common_nonsnoop(edesc, areq, ablkcipher_done);
1505}
1506
1507static void common_nonsnoop_hash_unmap(struct device *dev,
1508				       struct talitos_edesc *edesc,
1509				       struct ahash_request *areq)
1510{
1511	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1512
1513	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1514
1515	/* When using hashctx-in, must unmap it. */
1516	if (edesc->desc.ptr[1].len)
1517		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1518					 DMA_TO_DEVICE);
1519
1520	if (edesc->desc.ptr[2].len)
1521		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1522					 DMA_TO_DEVICE);
1523
1524	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1525
1526	if (edesc->dma_len)
1527		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1528				 DMA_BIDIRECTIONAL);
1529
1530}
1531
1532static void ahash_done(struct device *dev,
1533		       struct talitos_desc *desc, void *context,
1534		       int err)
1535{
1536	struct ahash_request *areq = context;
1537	struct talitos_edesc *edesc =
1538		 container_of(desc, struct talitos_edesc, desc);
1539	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1540
1541	if (!req_ctx->last && req_ctx->to_hash_later) {
1542		/* Position any partial block for next update/final/finup */
1543		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1544		req_ctx->nbuf = req_ctx->to_hash_later;
1545	}
1546	common_nonsnoop_hash_unmap(dev, edesc, areq);
1547
1548	kfree(edesc);
1549
1550	areq->base.complete(&areq->base, err);
1551}
1552
1553static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1554				struct ahash_request *areq, unsigned int length,
1555				void (*callback) (struct device *dev,
1556						  struct talitos_desc *desc,
1557						  void *context, int error))
1558{
1559	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1560	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1561	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1562	struct device *dev = ctx->dev;
1563	struct talitos_desc *desc = &edesc->desc;
1564	int sg_count, ret;
1565
1566	/* first DWORD empty */
1567	desc->ptr[0] = zero_entry;
1568
1569	/* hash context in */
1570	if (!req_ctx->first || req_ctx->swinit) {
1571		map_single_talitos_ptr(dev, &desc->ptr[1],
1572				       req_ctx->hw_context_size,
1573				       (char *)req_ctx->hw_context, 0,
1574				       DMA_TO_DEVICE);
1575		req_ctx->swinit = 0;
1576	} else {
1577		desc->ptr[1] = zero_entry;
1578		/* Indicate next op is not the first. */
1579		req_ctx->first = 0;
1580	}
1581
1582	/* HMAC key */
1583	if (ctx->keylen)
1584		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1585				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1586	else
1587		desc->ptr[2] = zero_entry;
1588
1589	/*
1590	 * data in
1591	 */
1592	desc->ptr[3].len = cpu_to_be16(length);
1593	desc->ptr[3].j_extent = 0;
1594
1595	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1596				  edesc->src_nents ? : 1,
1597				  DMA_TO_DEVICE, edesc->src_chained);
1598
1599	if (sg_count == 1) {
1600		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1601	} else {
1602		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1603					  &edesc->link_tbl[0]);
1604		if (sg_count > 1) {
1605			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1606			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1607			dma_sync_single_for_device(ctx->dev,
1608						   edesc->dma_link_tbl,
1609						   edesc->dma_len,
1610						   DMA_BIDIRECTIONAL);
1611		} else {
1612			/* Only one segment now, so no link tbl needed */
1613			to_talitos_ptr(&desc->ptr[3],
1614				       sg_dma_address(req_ctx->psrc));
1615		}
1616	}
1617
1618	/* fifth DWORD empty */
1619	desc->ptr[4] = zero_entry;
1620
1621	/* hash/HMAC out -or- hash context out */
1622	if (req_ctx->last)
1623		map_single_talitos_ptr(dev, &desc->ptr[5],
1624				       crypto_ahash_digestsize(tfm),
1625				       areq->result, 0, DMA_FROM_DEVICE);
1626	else
1627		map_single_talitos_ptr(dev, &desc->ptr[5],
1628				       req_ctx->hw_context_size,
1629				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1630
1631	/* last DWORD empty */
1632	desc->ptr[6] = zero_entry;
1633
1634	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1635	if (ret != -EINPROGRESS) {
1636		common_nonsnoop_hash_unmap(dev, edesc, areq);
1637		kfree(edesc);
1638	}
1639	return ret;
1640}
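
/*
 * Pointer usage in the common_nonsnoop_hash descriptor above, as filled in by
 * the code:
 *
 *	ptr[0]: unused
 *	ptr[1]: hash context in (zero for the first op with h/w init)
 *	ptr[2]: HMAC key (keyed hashes only)
 *	ptr[3]: data in
 *	ptr[4]: unused
 *	ptr[5]: digest out (last op) or hash context out
 *	ptr[6]: unused
 */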
1641
1642static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1643					       unsigned int nbytes)
1644{
1645	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1646	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1647	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1648
1649	return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
1650				   nbytes, 0, 0, 0, areq->base.flags, false);
1651}
1652
1653static int ahash_init(struct ahash_request *areq)
1654{
1655	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1656	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1657
1658	/* Initialize the context */
1659	req_ctx->nbuf = 0;
1660	req_ctx->first = 1; /* first indicates h/w must init its context */
1661	req_ctx->swinit = 0; /* assume h/w init of context */
1662	req_ctx->hw_context_size =
1663		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1664			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1665			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1666
1667	return 0;
1668}
1669
1670/*
1671 * on h/w without explicit sha224 support, we initialize h/w context
1672 * manually with sha224 constants, and tell it to run sha256.
1673 */
1674static int ahash_init_sha224_swinit(struct ahash_request *areq)
1675{
1676	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1677
1678	ahash_init(areq);
1679	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
1680
1681	req_ctx->hw_context[0] = SHA224_H0;
1682	req_ctx->hw_context[1] = SHA224_H1;
1683	req_ctx->hw_context[2] = SHA224_H2;
1684	req_ctx->hw_context[3] = SHA224_H3;
1685	req_ctx->hw_context[4] = SHA224_H4;
1686	req_ctx->hw_context[5] = SHA224_H5;
1687	req_ctx->hw_context[6] = SHA224_H6;
1688	req_ctx->hw_context[7] = SHA224_H7;
1689
1690	/* init 64-bit count */
1691	req_ctx->hw_context[8] = 0;
1692	req_ctx->hw_context[9] = 0;
1693
1694	return 0;
1695}
1696
1697static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1698{
1699	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1700	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1701	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1702	struct talitos_edesc *edesc;
1703	unsigned int blocksize =
1704			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1705	unsigned int nbytes_to_hash;
1706	unsigned int to_hash_later;
1707	unsigned int nsg;
1708	bool chained;
1709
1710	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1711		/* Buffer up to one whole block */
1712		sg_copy_to_buffer(areq->src,
1713				  sg_count(areq->src, nbytes, &chained),
1714				  req_ctx->buf + req_ctx->nbuf, nbytes);
1715		req_ctx->nbuf += nbytes;
1716		return 0;
1717	}
1718
1719	/* At least (blocksize + 1) bytes are available to hash */
1720	nbytes_to_hash = nbytes + req_ctx->nbuf;
1721	to_hash_later = nbytes_to_hash & (blocksize - 1);
1722
1723	if (req_ctx->last)
1724		to_hash_later = 0;
1725	else if (to_hash_later)
1726		/* There is a partial block. Hash the full block(s) now */
1727		nbytes_to_hash -= to_hash_later;
1728	else {
1729		/* Keep one block buffered */
1730		nbytes_to_hash -= blocksize;
1731		to_hash_later = blocksize;
1732	}
1733
1734	/* Chain in any previously buffered data */
1735	if (req_ctx->nbuf) {
1736		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1737		sg_init_table(req_ctx->bufsl, nsg);
1738		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1739		if (nsg > 1)
1740			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1741		req_ctx->psrc = req_ctx->bufsl;
1742	} else
1743		req_ctx->psrc = areq->src;
1744
1745	if (to_hash_later) {
1746		int nents = sg_count(areq->src, nbytes, &chained);
1747		sg_pcopy_to_buffer(areq->src, nents,
1748				      req_ctx->bufnext,
1749				      to_hash_later,
1750				      nbytes - to_hash_later);
1751	}
1752	req_ctx->to_hash_later = to_hash_later;
1753
1754	/* Allocate extended descriptor */
1755	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1756	if (IS_ERR(edesc))
1757		return PTR_ERR(edesc);
1758
1759	edesc->desc.hdr = ctx->desc_hdr_template;
1760
1761	/* On last one, request SEC to pad; otherwise continue */
1762	if (req_ctx->last)
1763		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1764	else
1765		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1766
1767	/* request SEC to INIT hash. */
1768	if (req_ctx->first && !req_ctx->swinit)
1769		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1770
1771	/* When the tfm context has a keylen, it's an HMAC.
1772	 * A first or last (ie. not middle) descriptor must request HMAC.
1773	 */
1774	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1775		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1776
1777	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1778				    ahash_done);
1779}
1780
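/*
 * The update/final/finup/digest entry points below differ only in whether
 * more data may follow (req_ctx->last) and, for digest, in re-running init
 * first; they all funnel into ahash_process_req().
 */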
1781static int ahash_update(struct ahash_request *areq)
1782{
1783	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1784
1785	req_ctx->last = 0;
1786
1787	return ahash_process_req(areq, areq->nbytes);
1788}
1789
1790static int ahash_final(struct ahash_request *areq)
1791{
1792	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1793
1794	req_ctx->last = 1;
1795
1796	return ahash_process_req(areq, 0);
1797}
1798
1799static int ahash_finup(struct ahash_request *areq)
1800{
1801	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1802
1803	req_ctx->last = 1;
1804
1805	return ahash_process_req(areq, areq->nbytes);
1806}
1807
1808static int ahash_digest(struct ahash_request *areq)
1809{
1810	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1811	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1812
1813	ahash->init(areq);
1814	req_ctx->last = 1;
1815
1816	return ahash_process_req(areq, areq->nbytes);
1817}
1818
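/*
 * keyhash() digests an over-long HMAC key synchronously on this same tfm:
 * ctx->keylen is temporarily zeroed so the operation is a plain hash rather
 * than an HMAC, and the asynchronous completion is waited for inline.
 */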
1819struct keyhash_result {
1820	struct completion completion;
1821	int err;
1822};
1823
1824static void keyhash_complete(struct crypto_async_request *req, int err)
1825{
1826	struct keyhash_result *res = req->data;
1827
1828	if (err == -EINPROGRESS)
1829		return;
1830
1831	res->err = err;
1832	complete(&res->completion);
1833}
1834
1835static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1836		   u8 *hash)
1837{
1838	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1839
1840	struct scatterlist sg[1];
1841	struct ahash_request *req;
1842	struct keyhash_result hresult;
1843	int ret;
1844
1845	init_completion(&hresult.completion);
1846
1847	req = ahash_request_alloc(tfm, GFP_KERNEL);
1848	if (!req)
1849		return -ENOMEM;
1850
1851	/* Keep tfm keylen == 0 during hash of the long key */
1852	ctx->keylen = 0;
1853	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1854				   keyhash_complete, &hresult);
1855
1856	sg_init_one(&sg[0], key, keylen);
1857
1858	ahash_request_set_crypt(req, sg, hash, keylen);
1859	ret = crypto_ahash_digest(req);
1860	switch (ret) {
1861	case 0:
1862		break;
1863	case -EINPROGRESS:
1864	case -EBUSY:
1865		ret = wait_for_completion_interruptible(
1866			&hresult.completion);
1867		if (!ret)
1868			ret = hresult.err;
1869		break;
1870	default:
1871		break;
1872	}
1873	ahash_request_free(req);
1874
1875	return ret;
1876}
1877
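/*
 * HMAC setkey: as in RFC 2104, a key no longer than the block size is used
 * as-is, while a longer key is first reduced to its digest before being
 * stored in the context.
 */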
1878static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1879			unsigned int keylen)
1880{
1881	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1882	unsigned int blocksize =
1883			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1884	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1885	unsigned int keysize = keylen;
1886	u8 hash[SHA512_DIGEST_SIZE];
1887	int ret;
1888
1889	if (keylen <= blocksize)
1890		memcpy(ctx->key, key, keysize);
1891	else {
1892		/* Must get the hash of the long key */
1893		ret = keyhash(tfm, key, keylen, hash);
1894
1895		if (ret) {
1896			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1897			return -EINVAL;
1898		}
1899
1900		keysize = digestsize;
1901		memcpy(ctx->key, hash, digestsize);
1902	}
1903
1904	ctx->keylen = keysize;
1905
1906	return 0;
1907}
1908
1909
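/*
 * Algorithm template: pairs a crypto API definition (crypto_alg for the AEAD
 * and ablkcipher entries, ahash_alg for hashes) with the SEC descriptor
 * header template that selects descriptor type, execution unit(s) and mode
 * bits.  hw_supports() checks the template against the device tree
 * capability masks before the algorithm is registered.
 */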
1910struct talitos_alg_template {
1911	u32 type;
1912	union {
1913		struct crypto_alg crypto;
1914		struct ahash_alg hash;
1915	} alg;
1916	__be32 desc_hdr_template;
1917};
1918
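/*
 * Each desc_hdr_template below is built by ORing header fields together; for
 * example, "authenc(hmac(sha1),cbc(aes))" combines DESC_HDR_TYPE_IPSEC_ESP
 * (single-pass descriptor) with AESU/CBC on the primary execution unit and
 * MDEU-A INIT+PAD+SHA1_HMAC on the secondary one.
 */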
1919static struct talitos_alg_template driver_algs[] = {
1920	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
1921	{	.type = CRYPTO_ALG_TYPE_AEAD,
1922		.alg.crypto = {
1923			.cra_name = "authenc(hmac(sha1),cbc(aes))",
1924			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
1925			.cra_blocksize = AES_BLOCK_SIZE,
1926			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1927			.cra_aead = {
1928				.ivsize = AES_BLOCK_SIZE,
1929				.maxauthsize = SHA1_DIGEST_SIZE,
1930			}
1931		},
1932		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1933			             DESC_HDR_SEL0_AESU |
1934		                     DESC_HDR_MODE0_AESU_CBC |
1935		                     DESC_HDR_SEL1_MDEUA |
1936		                     DESC_HDR_MODE1_MDEU_INIT |
1937		                     DESC_HDR_MODE1_MDEU_PAD |
1938		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1939	},
1940	{	.type = CRYPTO_ALG_TYPE_AEAD,
1941		.alg.crypto = {
1942			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
1943			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
1944			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1945			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1946			.cra_aead = {
1947				.ivsize = DES3_EDE_BLOCK_SIZE,
1948				.maxauthsize = SHA1_DIGEST_SIZE,
1949			}
1950		},
1951		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1952			             DESC_HDR_SEL0_DEU |
1953		                     DESC_HDR_MODE0_DEU_CBC |
1954		                     DESC_HDR_MODE0_DEU_3DES |
1955		                     DESC_HDR_SEL1_MDEUA |
1956		                     DESC_HDR_MODE1_MDEU_INIT |
1957		                     DESC_HDR_MODE1_MDEU_PAD |
1958		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
1959	},
1960	{       .type = CRYPTO_ALG_TYPE_AEAD,
1961		.alg.crypto = {
1962			.cra_name = "authenc(hmac(sha224),cbc(aes))",
1963			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
1964			.cra_blocksize = AES_BLOCK_SIZE,
1965			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1966			.cra_aead = {
1967				.ivsize = AES_BLOCK_SIZE,
1968				.maxauthsize = SHA224_DIGEST_SIZE,
1969			}
1970		},
1971		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1972				     DESC_HDR_SEL0_AESU |
1973				     DESC_HDR_MODE0_AESU_CBC |
1974				     DESC_HDR_SEL1_MDEUA |
1975				     DESC_HDR_MODE1_MDEU_INIT |
1976				     DESC_HDR_MODE1_MDEU_PAD |
1977				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1978	},
1979	{	.type = CRYPTO_ALG_TYPE_AEAD,
1980		.alg.crypto = {
1981			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
1982			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
1983			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
1984			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
1985			.cra_aead = {
1986				.ivsize = DES3_EDE_BLOCK_SIZE,
1987				.maxauthsize = SHA224_DIGEST_SIZE,
1988			}
1989		},
1990		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
1991			             DESC_HDR_SEL0_DEU |
1992		                     DESC_HDR_MODE0_DEU_CBC |
1993		                     DESC_HDR_MODE0_DEU_3DES |
1994		                     DESC_HDR_SEL1_MDEUA |
1995		                     DESC_HDR_MODE1_MDEU_INIT |
1996		                     DESC_HDR_MODE1_MDEU_PAD |
1997		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
1998	},
1999	{	.type = CRYPTO_ALG_TYPE_AEAD,
2000		.alg.crypto = {
2001			.cra_name = "authenc(hmac(sha256),cbc(aes))",
2002			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2003			.cra_blocksize = AES_BLOCK_SIZE,
2004			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2005			.cra_aead = {
2006				.ivsize = AES_BLOCK_SIZE,
2007				.maxauthsize = SHA256_DIGEST_SIZE,
2008			}
2009		},
2010		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2011			             DESC_HDR_SEL0_AESU |
2012		                     DESC_HDR_MODE0_AESU_CBC |
2013		                     DESC_HDR_SEL1_MDEUA |
2014		                     DESC_HDR_MODE1_MDEU_INIT |
2015		                     DESC_HDR_MODE1_MDEU_PAD |
2016		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2017	},
2018	{	.type = CRYPTO_ALG_TYPE_AEAD,
2019		.alg.crypto = {
2020			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2021			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2022			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2023			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2024			.cra_aead = {
2025				.ivsize = DES3_EDE_BLOCK_SIZE,
2026				.maxauthsize = SHA256_DIGEST_SIZE,
2027			}
2028		},
2029		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2030			             DESC_HDR_SEL0_DEU |
2031		                     DESC_HDR_MODE0_DEU_CBC |
2032		                     DESC_HDR_MODE0_DEU_3DES |
2033		                     DESC_HDR_SEL1_MDEUA |
2034		                     DESC_HDR_MODE1_MDEU_INIT |
2035		                     DESC_HDR_MODE1_MDEU_PAD |
2036		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2037	},
2038	{	.type = CRYPTO_ALG_TYPE_AEAD,
2039		.alg.crypto = {
2040			.cra_name = "authenc(hmac(sha384),cbc(aes))",
2041			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
2042			.cra_blocksize = AES_BLOCK_SIZE,
2043			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2044			.cra_aead = {
2045				.ivsize = AES_BLOCK_SIZE,
2046				.maxauthsize = SHA384_DIGEST_SIZE,
2047			}
2048		},
2049		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2050			             DESC_HDR_SEL0_AESU |
2051		                     DESC_HDR_MODE0_AESU_CBC |
2052		                     DESC_HDR_SEL1_MDEUB |
2053		                     DESC_HDR_MODE1_MDEU_INIT |
2054		                     DESC_HDR_MODE1_MDEU_PAD |
2055		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2056	},
2057	{	.type = CRYPTO_ALG_TYPE_AEAD,
2058		.alg.crypto = {
2059			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
2060			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
2061			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2062			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2063			.cra_aead = {
2064				.ivsize = DES3_EDE_BLOCK_SIZE,
2065				.maxauthsize = SHA384_DIGEST_SIZE,
2066			}
2067		},
2068		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2069			             DESC_HDR_SEL0_DEU |
2070		                     DESC_HDR_MODE0_DEU_CBC |
2071		                     DESC_HDR_MODE0_DEU_3DES |
2072		                     DESC_HDR_SEL1_MDEUB |
2073		                     DESC_HDR_MODE1_MDEU_INIT |
2074		                     DESC_HDR_MODE1_MDEU_PAD |
2075		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2076	},
2077	{	.type = CRYPTO_ALG_TYPE_AEAD,
2078		.alg.crypto = {
2079			.cra_name = "authenc(hmac(sha512),cbc(aes))",
2080			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
2081			.cra_blocksize = AES_BLOCK_SIZE,
2082			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2083			.cra_aead = {
2084				.ivsize = AES_BLOCK_SIZE,
2085				.maxauthsize = SHA512_DIGEST_SIZE,
2086			}
2087		},
2088		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2089			             DESC_HDR_SEL0_AESU |
2090		                     DESC_HDR_MODE0_AESU_CBC |
2091		                     DESC_HDR_SEL1_MDEUB |
2092		                     DESC_HDR_MODE1_MDEU_INIT |
2093		                     DESC_HDR_MODE1_MDEU_PAD |
2094		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2095	},
2096	{	.type = CRYPTO_ALG_TYPE_AEAD,
2097		.alg.crypto = {
2098			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
2099			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
2100			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2101			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2102			.cra_aead = {
2103				.ivsize = DES3_EDE_BLOCK_SIZE,
2104				.maxauthsize = SHA512_DIGEST_SIZE,
2105			}
2106		},
2107		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2108			             DESC_HDR_SEL0_DEU |
2109		                     DESC_HDR_MODE0_DEU_CBC |
2110		                     DESC_HDR_MODE0_DEU_3DES |
2111		                     DESC_HDR_SEL1_MDEUB |
2112		                     DESC_HDR_MODE1_MDEU_INIT |
2113		                     DESC_HDR_MODE1_MDEU_PAD |
2114		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2115	},
2116	{	.type = CRYPTO_ALG_TYPE_AEAD,
2117		.alg.crypto = {
2118			.cra_name = "authenc(hmac(md5),cbc(aes))",
2119			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2120			.cra_blocksize = AES_BLOCK_SIZE,
2121			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2122			.cra_aead = {
2123				.ivsize = AES_BLOCK_SIZE,
2124				.maxauthsize = MD5_DIGEST_SIZE,
2125			}
2126		},
2127		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2128			             DESC_HDR_SEL0_AESU |
2129		                     DESC_HDR_MODE0_AESU_CBC |
2130		                     DESC_HDR_SEL1_MDEUA |
2131		                     DESC_HDR_MODE1_MDEU_INIT |
2132		                     DESC_HDR_MODE1_MDEU_PAD |
2133		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2134	},
2135	{	.type = CRYPTO_ALG_TYPE_AEAD,
2136		.alg.crypto = {
2137			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2138			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2139			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2140			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2141			.cra_aead = {
2142				.ivsize = DES3_EDE_BLOCK_SIZE,
2143				.maxauthsize = MD5_DIGEST_SIZE,
2144			}
2145		},
2146		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2147			             DESC_HDR_SEL0_DEU |
2148		                     DESC_HDR_MODE0_DEU_CBC |
2149		                     DESC_HDR_MODE0_DEU_3DES |
2150		                     DESC_HDR_SEL1_MDEUA |
2151		                     DESC_HDR_MODE1_MDEU_INIT |
2152		                     DESC_HDR_MODE1_MDEU_PAD |
2153		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2154	},
2155	/* ABLKCIPHER algorithms. */
2156	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2157		.alg.crypto = {
2158			.cra_name = "cbc(aes)",
2159			.cra_driver_name = "cbc-aes-talitos",
2160			.cra_blocksize = AES_BLOCK_SIZE,
2161			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2162                                     CRYPTO_ALG_ASYNC,
2163			.cra_ablkcipher = {
2164				.min_keysize = AES_MIN_KEY_SIZE,
2165				.max_keysize = AES_MAX_KEY_SIZE,
2166				.ivsize = AES_BLOCK_SIZE,
2167			}
2168		},
2169		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2170				     DESC_HDR_SEL0_AESU |
2171				     DESC_HDR_MODE0_AESU_CBC,
2172	},
2173	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2174		.alg.crypto = {
2175			.cra_name = "cbc(des3_ede)",
2176			.cra_driver_name = "cbc-3des-talitos",
2177			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2178			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2179                                     CRYPTO_ALG_ASYNC,
2180			.cra_ablkcipher = {
2181				.min_keysize = DES3_EDE_KEY_SIZE,
2182				.max_keysize = DES3_EDE_KEY_SIZE,
2183				.ivsize = DES3_EDE_BLOCK_SIZE,
2184			}
2185		},
2186		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2187			             DESC_HDR_SEL0_DEU |
2188		                     DESC_HDR_MODE0_DEU_CBC |
2189		                     DESC_HDR_MODE0_DEU_3DES,
2190	},
2191	/* AHASH algorithms. */
2192	{	.type = CRYPTO_ALG_TYPE_AHASH,
2193		.alg.hash = {
2194			.halg.digestsize = MD5_DIGEST_SIZE,
2195			.halg.base = {
2196				.cra_name = "md5",
2197				.cra_driver_name = "md5-talitos",
2198				.cra_blocksize = MD5_BLOCK_SIZE,
2199				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2200					     CRYPTO_ALG_ASYNC,
2201			}
2202		},
2203		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2204				     DESC_HDR_SEL0_MDEUA |
2205				     DESC_HDR_MODE0_MDEU_MD5,
2206	},
2207	{	.type = CRYPTO_ALG_TYPE_AHASH,
2208		.alg.hash = {
2209			.halg.digestsize = SHA1_DIGEST_SIZE,
2210			.halg.base = {
2211				.cra_name = "sha1",
2212				.cra_driver_name = "sha1-talitos",
2213				.cra_blocksize = SHA1_BLOCK_SIZE,
2214				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2215					     CRYPTO_ALG_ASYNC,
2216			}
2217		},
2218		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2219				     DESC_HDR_SEL0_MDEUA |
2220				     DESC_HDR_MODE0_MDEU_SHA1,
2221	},
2222	{	.type = CRYPTO_ALG_TYPE_AHASH,
2223		.alg.hash = {
2224			.halg.digestsize = SHA224_DIGEST_SIZE,
2225			.halg.base = {
2226				.cra_name = "sha224",
2227				.cra_driver_name = "sha224-talitos",
2228				.cra_blocksize = SHA224_BLOCK_SIZE,
2229				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2230					     CRYPTO_ALG_ASYNC,
2231			}
2232		},
2233		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2234				     DESC_HDR_SEL0_MDEUA |
2235				     DESC_HDR_MODE0_MDEU_SHA224,
2236	},
2237	{	.type = CRYPTO_ALG_TYPE_AHASH,
2238		.alg.hash = {
2239			.halg.digestsize = SHA256_DIGEST_SIZE,
2240			.halg.base = {
2241				.cra_name = "sha256",
2242				.cra_driver_name = "sha256-talitos",
2243				.cra_blocksize = SHA256_BLOCK_SIZE,
2244				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2245					     CRYPTO_ALG_ASYNC,
2246			}
2247		},
2248		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2249				     DESC_HDR_SEL0_MDEUA |
2250				     DESC_HDR_MODE0_MDEU_SHA256,
2251	},
2252	{	.type = CRYPTO_ALG_TYPE_AHASH,
2253		.alg.hash = {
2254			.halg.digestsize = SHA384_DIGEST_SIZE,
2255			.halg.base = {
2256				.cra_name = "sha384",
2257				.cra_driver_name = "sha384-talitos",
2258				.cra_blocksize = SHA384_BLOCK_SIZE,
2259				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2260					     CRYPTO_ALG_ASYNC,
2261			}
2262		},
2263		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2264				     DESC_HDR_SEL0_MDEUB |
2265				     DESC_HDR_MODE0_MDEUB_SHA384,
2266	},
2267	{	.type = CRYPTO_ALG_TYPE_AHASH,
2268		.alg.hash = {
2269			.halg.digestsize = SHA512_DIGEST_SIZE,
2270			.halg.base = {
2271				.cra_name = "sha512",
2272				.cra_driver_name = "sha512-talitos",
2273				.cra_blocksize = SHA512_BLOCK_SIZE,
2274				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2275					     CRYPTO_ALG_ASYNC,
2276			}
2277		},
2278		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2279				     DESC_HDR_SEL0_MDEUB |
2280				     DESC_HDR_MODE0_MDEUB_SHA512,
2281	},
2282	{	.type = CRYPTO_ALG_TYPE_AHASH,
2283		.alg.hash = {
2284			.halg.digestsize = MD5_DIGEST_SIZE,
2285			.halg.base = {
2286				.cra_name = "hmac(md5)",
2287				.cra_driver_name = "hmac-md5-talitos",
2288				.cra_blocksize = MD5_BLOCK_SIZE,
2289				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2290					     CRYPTO_ALG_ASYNC,
2291			}
2292		},
2293		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2294				     DESC_HDR_SEL0_MDEUA |
2295				     DESC_HDR_MODE0_MDEU_MD5,
2296	},
2297	{	.type = CRYPTO_ALG_TYPE_AHASH,
2298		.alg.hash = {
2299			.halg.digestsize = SHA1_DIGEST_SIZE,
2300			.halg.base = {
2301				.cra_name = "hmac(sha1)",
2302				.cra_driver_name = "hmac-sha1-talitos",
2303				.cra_blocksize = SHA1_BLOCK_SIZE,
2304				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2305					     CRYPTO_ALG_ASYNC,
2306			}
2307		},
2308		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2309				     DESC_HDR_SEL0_MDEUA |
2310				     DESC_HDR_MODE0_MDEU_SHA1,
2311	},
2312	{	.type = CRYPTO_ALG_TYPE_AHASH,
2313		.alg.hash = {
2314			.halg.digestsize = SHA224_DIGEST_SIZE,
2315			.halg.base = {
2316				.cra_name = "hmac(sha224)",
2317				.cra_driver_name = "hmac-sha224-talitos",
2318				.cra_blocksize = SHA224_BLOCK_SIZE,
2319				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2320					     CRYPTO_ALG_ASYNC,
2321			}
2322		},
2323		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2324				     DESC_HDR_SEL0_MDEUA |
2325				     DESC_HDR_MODE0_MDEU_SHA224,
2326	},
2327	{	.type = CRYPTO_ALG_TYPE_AHASH,
2328		.alg.hash = {
2329			.halg.digestsize = SHA256_DIGEST_SIZE,
2330			.halg.base = {
2331				.cra_name = "hmac(sha256)",
2332				.cra_driver_name = "hmac-sha256-talitos",
2333				.cra_blocksize = SHA256_BLOCK_SIZE,
2334				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2335					     CRYPTO_ALG_ASYNC,
2336			}
2337		},
2338		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2339				     DESC_HDR_SEL0_MDEUA |
2340				     DESC_HDR_MODE0_MDEU_SHA256,
2341	},
2342	{	.type = CRYPTO_ALG_TYPE_AHASH,
2343		.alg.hash = {
2344			.halg.digestsize = SHA384_DIGEST_SIZE,
2345			.halg.base = {
2346				.cra_name = "hmac(sha384)",
2347				.cra_driver_name = "hmac-sha384-talitos",
2348				.cra_blocksize = SHA384_BLOCK_SIZE,
2349				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2350					     CRYPTO_ALG_ASYNC,
2351			}
2352		},
2353		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2354				     DESC_HDR_SEL0_MDEUB |
2355				     DESC_HDR_MODE0_MDEUB_SHA384,
2356	},
2357	{	.type = CRYPTO_ALG_TYPE_AHASH,
2358		.alg.hash = {
2359			.halg.digestsize = SHA512_DIGEST_SIZE,
2360			.halg.base = {
2361				.cra_name = "hmac(sha512)",
2362				.cra_driver_name = "hmac-sha512-talitos",
2363				.cra_blocksize = SHA512_BLOCK_SIZE,
2364				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2365					     CRYPTO_ALG_ASYNC,
2366			}
2367		},
2368		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2369				     DESC_HDR_SEL0_MDEUB |
2370				     DESC_HDR_MODE0_MDEUB_SHA512,
2371	}
2372};
2373
2374struct talitos_crypto_alg {
2375	struct list_head entry;
2376	struct device *dev;
2377	struct talitos_alg_template algt;
2378};
2379
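/*
 * Common tfm init: record the owning device, pick a SEC channel round-robin
 * (the "& (num_channels - 1)" mask relies on num_channels being a power of
 * two, which talitos_probe() enforces), and copy the descriptor header
 * template with done notification enabled.
 */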
2380static int talitos_cra_init(struct crypto_tfm *tfm)
2381{
2382	struct crypto_alg *alg = tfm->__crt_alg;
2383	struct talitos_crypto_alg *talitos_alg;
2384	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2385	struct talitos_private *priv;
2386
2387	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2388		talitos_alg = container_of(__crypto_ahash_alg(alg),
2389					   struct talitos_crypto_alg,
2390					   algt.alg.hash);
2391	else
2392		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2393					   algt.alg.crypto);
2394
2395	/* update context with ptr to dev */
2396	ctx->dev = talitos_alg->dev;
2397
2398	/* assign SEC channel to tfm in round-robin fashion */
2399	priv = dev_get_drvdata(ctx->dev);
2400	ctx->ch = atomic_inc_return(&priv->last_chan) &
2401		  (priv->num_channels - 1);
2402
2403	/* copy descriptor header template value */
2404	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2405
2406	/* select done notification */
2407	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2408
2409	return 0;
2410}
2411
2412static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2413{
2414	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2415
2416	talitos_cra_init(tfm);
2417
2418	/* random first IV */
2419	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2420
2421	return 0;
2422}
2423
2424static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2425{
2426	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2427
2428	talitos_cra_init(tfm);
2429
2430	ctx->keylen = 0;
2431	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2432				 sizeof(struct talitos_ahash_req_ctx));
2433
2434	return 0;
2435}
2436
2437/*
2438 * given the alg's descriptor header template, determine whether descriptor
2439 * type and primary/secondary execution units required match the hw
2440 * capabilities description provided in the device tree node.
2441 */
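/*
 * For example, the sha384/sha512 templates select MDEU-B as their primary
 * execution unit, so they are skipped on parts whose fsl,exec-units-mask
 * does not advertise that EU.
 */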
2442static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2443{
2444	struct talitos_private *priv = dev_get_drvdata(dev);
2445	int ret;
2446
2447	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2448	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2449
2450	if (SECONDARY_EU(desc_hdr_template))
2451		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2452		              & priv->exec_units);
2453
2454	return ret;
2455}
2456
2457static int talitos_remove(struct platform_device *ofdev)
2458{
2459	struct device *dev = &ofdev->dev;
2460	struct talitos_private *priv = dev_get_drvdata(dev);
2461	struct talitos_crypto_alg *t_alg, *n;
2462	int i;
2463
2464	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2465		switch (t_alg->algt.type) {
2466		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2467		case CRYPTO_ALG_TYPE_AEAD:
2468			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2469			break;
2470		case CRYPTO_ALG_TYPE_AHASH:
2471			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2472			break;
2473		}
2474		list_del(&t_alg->entry);
2475		kfree(t_alg);
2476	}
2477
2478	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2479		talitos_unregister_rng(dev);
2480
2481	for (i = 0; i < priv->num_channels; i++)
2482		kfree(priv->chan[i].fifo);
2483
2484	kfree(priv->chan);
2485
2486	for (i = 0; i < 2; i++)
2487		if (priv->irq[i]) {
2488			free_irq(priv->irq[i], dev);
2489			irq_dispose_mapping(priv->irq[i]);
2490		}
2491
2492	tasklet_kill(&priv->done_task[0]);
2493	if (priv->irq[1])
2494		tasklet_kill(&priv->done_task[1]);
2495
2496	iounmap(priv->reg);
2497
2498	kfree(priv);
2499
2500	return 0;
2501}
2502
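/*
 * Instantiate one entry of driver_algs[]: copy the template, wire up the
 * type-specific operations, reject hmac(*) hashes when the hardware lacks
 * TALITOS_FTR_HMAC_OK, and fall back to software-initialized SHA-224 state
 * (run through the SHA-256 engine) when TALITOS_FTR_SHA224_HWINIT is absent.
 */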
2503static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2504						    struct talitos_alg_template
2505						           *template)
2506{
2507	struct talitos_private *priv = dev_get_drvdata(dev);
2508	struct talitos_crypto_alg *t_alg;
2509	struct crypto_alg *alg;
2510
2511	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2512	if (!t_alg)
2513		return ERR_PTR(-ENOMEM);
2514
2515	t_alg->algt = *template;
2516
2517	switch (t_alg->algt.type) {
2518	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2519		alg = &t_alg->algt.alg.crypto;
2520		alg->cra_init = talitos_cra_init;
2521		alg->cra_type = &crypto_ablkcipher_type;
2522		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2523		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2524		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2525		alg->cra_ablkcipher.geniv = "eseqiv";
2526		break;
2527	case CRYPTO_ALG_TYPE_AEAD:
2528		alg = &t_alg->algt.alg.crypto;
2529		alg->cra_init = talitos_cra_init_aead;
2530		alg->cra_type = &crypto_aead_type;
2531		alg->cra_aead.setkey = aead_setkey;
2532		alg->cra_aead.setauthsize = aead_setauthsize;
2533		alg->cra_aead.encrypt = aead_encrypt;
2534		alg->cra_aead.decrypt = aead_decrypt;
2535		alg->cra_aead.givencrypt = aead_givencrypt;
2536		alg->cra_aead.geniv = "<built-in>";
2537		break;
2538	case CRYPTO_ALG_TYPE_AHASH:
2539		alg = &t_alg->algt.alg.hash.halg.base;
2540		alg->cra_init = talitos_cra_init_ahash;
2541		alg->cra_type = &crypto_ahash_type;
2542		t_alg->algt.alg.hash.init = ahash_init;
2543		t_alg->algt.alg.hash.update = ahash_update;
2544		t_alg->algt.alg.hash.final = ahash_final;
2545		t_alg->algt.alg.hash.finup = ahash_finup;
2546		t_alg->algt.alg.hash.digest = ahash_digest;
2547		t_alg->algt.alg.hash.setkey = ahash_setkey;
2548
2549		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2550		    !strncmp(alg->cra_name, "hmac", 4)) {
2551			kfree(t_alg);
2552			return ERR_PTR(-ENOTSUPP);
2553		}
2554		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2555		    (!strcmp(alg->cra_name, "sha224") ||
2556		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2557			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2558			t_alg->algt.desc_hdr_template =
2559					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2560					DESC_HDR_SEL0_MDEUA |
2561					DESC_HDR_MODE0_MDEU_SHA256;
2562		}
2563		break;
2564	default:
2565		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
2566		return ERR_PTR(-EINVAL);
2567	}
2568
2569	alg->cra_module = THIS_MODULE;
2570	alg->cra_priority = TALITOS_CRA_PRIORITY;
2571	alg->cra_alignmask = 0;
2572	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2573	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2574
2575	t_alg->dev = dev;
2576
2577	return t_alg;
2578}
2579
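/*
 * SEC units expose either a single interrupt covering all channels or two
 * interrupts split between channels 0/2 and 1/3.  Leaving irq[1] at 0
 * selects the single-IRQ handlers and, later in probe, a single done
 * tasklet.
 */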
2580static int talitos_probe_irq(struct platform_device *ofdev)
2581{
2582	struct device *dev = &ofdev->dev;
2583	struct device_node *np = ofdev->dev.of_node;
2584	struct talitos_private *priv = dev_get_drvdata(dev);
2585	int err;
2586
2587	priv->irq[0] = irq_of_parse_and_map(np, 0);
2588	if (!priv->irq[0]) {
2589		dev_err(dev, "failed to map irq\n");
2590		return -EINVAL;
2591	}
2592
2593	priv->irq[1] = irq_of_parse_and_map(np, 1);
2594
2595	/* single IRQ: one handler services all channels */
2596	if (!priv->irq[1]) {
2597		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2598				  dev_driver_string(dev), dev);
2599		goto primary_out;
2600	}
2601
2602	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2603			  dev_driver_string(dev), dev);
2604	if (err)
2605		goto primary_out;
2606
2607	/* request the secondary IRQ (channels 1 and 3) */
2608	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2609			  dev_driver_string(dev), dev);
2610	if (err) {
2611		dev_err(dev, "failed to request secondary irq\n");
2612		irq_dispose_mapping(priv->irq[1]);
2613		priv->irq[1] = 0;
2614	}
2615
2616	return err;
2617
2618primary_out:
2619	if (err) {
2620		dev_err(dev, "failed to request primary irq\n");
2621		irq_dispose_mapping(priv->irq[0]);
2622		priv->irq[0] = 0;
2623	}
2624
2625	return err;
2626}
2627
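/*
 * Probe: map the registers, read the capability properties from the device
 * tree, allocate per-channel request fifos, reset/init the hardware, then
 * register the RNG and every algorithm the part supports.
 *
 * An illustrative (not authoritative) node showing the properties this probe
 * reads; the register window, interrupts and mask values are SoC-specific
 * and come from the fsl-sec2 binding:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.1";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <45 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */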
2628static int talitos_probe(struct platform_device *ofdev)
2629{
2630	struct device *dev = &ofdev->dev;
2631	struct device_node *np = ofdev->dev.of_node;
2632	struct talitos_private *priv;
2633	const unsigned int *prop;
2634	int i, err;
2635
2636	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2637	if (!priv)
2638		return -ENOMEM;
2639
2640	INIT_LIST_HEAD(&priv->alg_list);
2641
2642	dev_set_drvdata(dev, priv);
2643
2644	priv->ofdev = ofdev;
2645
2646	spin_lock_init(&priv->reg_lock);
2647
2648	err = talitos_probe_irq(ofdev);
2649	if (err)
2650		goto err_out;
2651
2652	if (!priv->irq[1]) {
2653		tasklet_init(&priv->done_task[0], talitos_done_4ch,
2654			     (unsigned long)dev);
2655	} else {
2656		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2657			     (unsigned long)dev);
2658		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2659			     (unsigned long)dev);
2660	}
2661
2662	priv->reg = of_iomap(np, 0);
2663	if (!priv->reg) {
2664		dev_err(dev, "failed to of_iomap\n");
2665		err = -ENOMEM;
2666		goto err_out;
2667	}
2668
2669	/* get SEC version capabilities from device tree */
2670	prop = of_get_property(np, "fsl,num-channels", NULL);
2671	if (prop)
2672		priv->num_channels = *prop;
2673
2674	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2675	if (prop)
2676		priv->chfifo_len = *prop;
2677
2678	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2679	if (prop)
2680		priv->exec_units = *prop;
2681
2682	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2683	if (prop)
2684		priv->desc_types = *prop;
2685
2686	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2687	    !priv->exec_units || !priv->desc_types) {
2688		dev_err(dev, "invalid property data in device tree node\n");
2689		err = -EINVAL;
2690		goto err_out;
2691	}
2692
2693	if (of_device_is_compatible(np, "fsl,sec3.0"))
2694		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2695
2696	if (of_device_is_compatible(np, "fsl,sec2.1"))
2697		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2698				  TALITOS_FTR_SHA224_HWINIT |
2699				  TALITOS_FTR_HMAC_OK;
2700
2701	priv->chan = kzalloc(sizeof(struct talitos_channel) *
2702			     priv->num_channels, GFP_KERNEL);
2703	if (!priv->chan) {
2704		dev_err(dev, "failed to allocate channel management space\n");
2705		err = -ENOMEM;
2706		goto err_out;
2707	}
2708
2709	for (i = 0; i < priv->num_channels; i++) {
2710		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2711		if (!priv->irq[1] || !(i & 1))
2712			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2713	}
2714
2715	for (i = 0; i < priv->num_channels; i++) {
2716		spin_lock_init(&priv->chan[i].head_lock);
2717		spin_lock_init(&priv->chan[i].tail_lock);
2718	}
2719
2720	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2721
2722	for (i = 0; i < priv->num_channels; i++) {
2723		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2724					     priv->fifo_len, GFP_KERNEL);
2725		if (!priv->chan[i].fifo) {
2726			dev_err(dev, "failed to allocate request fifo %d\n", i);
2727			err = -ENOMEM;
2728			goto err_out;
2729		}
2730	}
2731
2732	for (i = 0; i < priv->num_channels; i++)
2733		atomic_set(&priv->chan[i].submit_count,
2734			   -(priv->chfifo_len - 1));
2735
2736	dma_set_mask(dev, DMA_BIT_MASK(36));
2737
2738	/* reset and initialize the h/w */
2739	err = init_device(dev);
2740	if (err) {
2741		dev_err(dev, "failed to initialize device\n");
2742		goto err_out;
2743	}
2744
2745	/* register the RNG, if available */
2746	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2747		err = talitos_register_rng(dev);
2748		if (err) {
2749			dev_err(dev, "failed to register hwrng: %d\n", err);
2750			goto err_out;
2751		} else
2752			dev_info(dev, "hwrng\n");
2753	}
2754
2755	/* register crypto algorithms the device supports */
2756	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2757		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2758			struct talitos_crypto_alg *t_alg;
2759			char *name = NULL;
2760
2761			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2762			if (IS_ERR(t_alg)) {
2763				err = PTR_ERR(t_alg);
2764				if (err == -ENOTSUPP)
2765					continue;
2766				goto err_out;
2767			}
2768
2769			switch (t_alg->algt.type) {
2770			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2771			case CRYPTO_ALG_TYPE_AEAD:
2772				err = crypto_register_alg(
2773						&t_alg->algt.alg.crypto);
2774				name = t_alg->algt.alg.crypto.cra_driver_name;
2775				break;
2776			case CRYPTO_ALG_TYPE_AHASH:
2777				err = crypto_register_ahash(
2778						&t_alg->algt.alg.hash);
2779				name =
2780				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2781				break;
2782			}
2783			if (err) {
2784				dev_err(dev, "%s alg registration failed\n",
2785					name);
2786				kfree(t_alg);
2787			} else
2788				list_add_tail(&t_alg->entry, &priv->alg_list);
2789		}
2790	}
2791	if (!list_empty(&priv->alg_list))
2792		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2793			 (char *)of_get_property(np, "compatible", NULL));
2794
2795	return 0;
2796
2797err_out:
2798	talitos_remove(ofdev);
2799
2800	return err;
2801}
2802
2803static const struct of_device_id talitos_match[] = {
2804	{
2805		.compatible = "fsl,sec2.0",
2806	},
2807	{},
2808};
2809MODULE_DEVICE_TABLE(of, talitos_match);
2810
2811static struct platform_driver talitos_driver = {
2812	.driver = {
2813		.name = "talitos",
2814		.owner = THIS_MODULE,
2815		.of_match_table = talitos_match,
2816	},
2817	.probe = talitos_probe,
2818	.remove = talitos_remove,
2819};
2820
2821module_platform_driver(talitos_driver);
2822
2823MODULE_LICENSE("GPL");
2824MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2825MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
2826