1/*
2 * caam - Freescale FSL CAAM support for crypto API
3 *
4 * Copyright 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Based on talitos crypto API driver.
7 *
8 * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008):
9 *
10 * ---------------                     ---------------
11 * | JobDesc #1  |-------------------->|  ShareDesc  |
12 * | *(packet 1) |                     |   (PDB)     |
13 * ---------------      |------------->|  (hashKey)  |
14 *       .              |              | (cipherKey) |
15 *       .              |    |-------->| (operation) |
16 * ---------------      |    |         ---------------
17 * | JobDesc #2  |------|    |
18 * | *(packet 2) |           |
19 * ---------------           |
20 *       .                   |
21 *       .                   |
22 * ---------------           |
23 * | JobDesc #3  |------------
24 * | *(packet 3) |
25 * ---------------
26 *
27 * The SharedDesc never changes for a connection unless rekeyed, but
28 * each packet will likely be in a different place. So all we need
29 * to know to process the packet is where the input is, where the
30 * output goes, and what context we want to process with. Context is
31 * in the SharedDesc, packet references in the JobDesc.
32 *
33 * So, a job desc looks like:
34 *
35 * ---------------------
36 * | Header            |
37 * | ShareDesc Pointer |
38 * | SEQ_OUT_PTR       |
39 * | (output buffer)   |
40 * | (output length)   |
41 * | SEQ_IN_PTR        |
42 * | (input buffer)    |
43 * | (input length)    |
44 * ---------------------
45 */
46
47#include "compat.h"
48
49#include "regs.h"
50#include "intern.h"
51#include "desc_constr.h"
52#include "jr.h"
53#include "error.h"
54#include "sg_sw_sec4.h"
55#include "key_gen.h"
56
57/*
58 * crypto alg
59 */
60#define CAAM_CRA_PRIORITY		3000
61/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
62#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
63					 SHA512_DIGEST_SIZE * 2)
64/* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
65#define CAAM_MAX_IV_LENGTH		16
66
67/* length of descriptors text */
68#define DESC_AEAD_BASE			(4 * CAAM_CMD_SZ)
69#define DESC_AEAD_ENC_LEN		(DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
70#define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
71#define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
72
73#define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
74#define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
75#define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
76
77#define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
78#define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
79					 20 * CAAM_CMD_SZ)
80#define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
81					 15 * CAAM_CMD_SZ)
82
83#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
84					 CAAM_MAX_KEY_SIZE)
85#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
86
87#ifdef DEBUG
88/* for print_hex_dumps with line references */
89#define debug(format, arg...) printk(format, arg)
90#else
91#define debug(format, arg...)
92#endif
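/* list of algorithms registered with the Crypto API */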
93static struct list_head alg_list;
94
95/* Set DK bit in class 1 operation if shared */
96static inline void append_dec_op1(u32 *desc, u32 type)
97{
98	u32 *jump_cmd, *uncond_jump_cmd;
99
100	/* DK bit is valid only for AES */
101	if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
102		append_operation(desc, type | OP_ALG_AS_INITFINAL |
103				 OP_ALG_DECRYPT);
104		return;
105	}
106
107	jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
108	append_operation(desc, type | OP_ALG_AS_INITFINAL |
109			 OP_ALG_DECRYPT);
110	uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
111	set_jump_tgt_here(desc, jump_cmd);
112	append_operation(desc, type | OP_ALG_AS_INITFINAL |
113			 OP_ALG_DECRYPT | OP_ALG_AAI_DK);
114	set_jump_tgt_here(desc, uncond_jump_cmd);
115}
116
117/*
118 * For aead functions, read the payload from req->src and write the
119 * processed payload to req->dst
120 */
121static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
122{
123	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
124	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
125			     KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
126}
127
128/*
129 * For aead encrypt and decrypt, read iv for both classes
130 */
131static inline void aead_append_ld_iv(u32 *desc, int ivsize)
132{
133	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
134		   LDST_CLASS_1_CCB | ivsize);
135	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
136}
137
138/*
139 * For ablkcipher encrypt and decrypt, read from req->src and
140 * write to req->dst
141 */
142static inline void ablkcipher_append_src_dst(u32 *desc)
143{
144	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
145	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
146	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
147			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
148	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
149}
150
151/*
152 * Flags indicating whether all data, i.e. src (with assoc and iv) or
153 * dst (with iv only), is contiguous
154 */
155#define GIV_SRC_CONTIG		1
156#define GIV_DST_CONTIG		(1 << 1)
157
158/*
159 * per-session context
160 */
161struct caam_ctx {
162	struct device *jrdev;
163	u32 sh_desc_enc[DESC_MAX_USED_LEN];
164	u32 sh_desc_dec[DESC_MAX_USED_LEN];
165	u32 sh_desc_givenc[DESC_MAX_USED_LEN];
166	dma_addr_t sh_desc_enc_dma;
167	dma_addr_t sh_desc_dec_dma;
168	dma_addr_t sh_desc_givenc_dma;
169	u32 class1_alg_type;
170	u32 class2_alg_type;
171	u32 alg_op;
172	u8 key[CAAM_MAX_KEY_SIZE];
173	dma_addr_t key_dma;
174	unsigned int enckeylen;
175	unsigned int split_key_len;
176	unsigned int split_key_pad_len;
177	unsigned int authsize;
178};
179
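/*
 * Append the (split) authentication key and the encryption key to a shared
 * descriptor, either inline or by reference
 */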
180static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
181			    int keys_fit_inline)
182{
183	if (keys_fit_inline) {
184		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
185				  ctx->split_key_len, CLASS_2 |
186				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
187		append_key_as_imm(desc, (void *)ctx->key +
188				  ctx->split_key_pad_len, ctx->enckeylen,
189				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
190	} else {
191		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
192			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
193		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
194			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
195	}
196}
197
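/*
 * Start an aead shared descriptor and append the keys, skipping the key
 * commands when the descriptor is already shared
 */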
198static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
199				  int keys_fit_inline)
200{
201	u32 *key_jump_cmd;
202
203	init_sh_desc(desc, HDR_SHARE_SERIAL);
204
205	/* Skip if already shared */
206	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
207				   JUMP_COND_SHRD);
208
209	append_key_aead(desc, ctx, keys_fit_inline);
210
211	set_jump_tgt_here(desc, key_jump_cmd);
212}
213
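/*
 * Build the encrypt and decrypt shared descriptors for authentication-only
 * (cipher_null) aead algorithms
 */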
214static int aead_null_set_sh_desc(struct crypto_aead *aead)
215{
216	struct aead_tfm *tfm = &aead->base.crt_aead;
217	struct caam_ctx *ctx = crypto_aead_ctx(aead);
218	struct device *jrdev = ctx->jrdev;
219	bool keys_fit_inline = false;
220	u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
221	u32 *desc;
222
223	/*
224	 * Job Descriptor and Shared Descriptors
225	 * must all fit into the 64-word Descriptor h/w Buffer
226	 */
227	if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
228	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
229		keys_fit_inline = true;
230
231	/* aead_encrypt shared descriptor */
232	desc = ctx->sh_desc_enc;
233
234	init_sh_desc(desc, HDR_SHARE_SERIAL);
235
236	/* Skip if already shared */
237	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
238				   JUMP_COND_SHRD);
239	if (keys_fit_inline)
240		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
241				  ctx->split_key_len, CLASS_2 |
242				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
243	else
244		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
245			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
246	set_jump_tgt_here(desc, key_jump_cmd);
247
248	/* cryptlen = seqoutlen - authsize */
249	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
250
251	/*
252	 * NULL encryption; IV is zero
253	 * assoclen = (assoclen + cryptlen) - cryptlen
254	 */
255	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
256
257	/* read assoc before reading payload */
258	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
259			     KEY_VLF);
260
261	/* Prepare to read and write cryptlen bytes */
262	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
263	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
264
265	/*
266	 * MOVE_LEN opcode is not available in all SEC HW revisions,
267	 * thus need to do some magic, i.e. self-patch the descriptor
268	 * buffer.
269	 */
270	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
271				    MOVE_DEST_MATH3 |
272				    (0x6 << MOVE_LEN_SHIFT));
273	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
274				     MOVE_DEST_DESCBUF |
275				     MOVE_WAITCOMP |
276				     (0x8 << MOVE_LEN_SHIFT));
277
278	/* Class 2 operation */
279	append_operation(desc, ctx->class2_alg_type |
280			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
281
282	/* Read and write cryptlen bytes */
283	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
284
285	set_move_tgt_here(desc, read_move_cmd);
286	set_move_tgt_here(desc, write_move_cmd);
287	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
288	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
289		    MOVE_AUX_LS);
290
291	/* Write ICV */
292	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
293			 LDST_SRCDST_BYTE_CONTEXT);
294
295	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
296					      desc_bytes(desc),
297					      DMA_TO_DEVICE);
298	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
299		dev_err(jrdev, "unable to map shared descriptor\n");
300		return -ENOMEM;
301	}
302#ifdef DEBUG
303	print_hex_dump(KERN_ERR,
304		       "aead null enc shdesc@"__stringify(__LINE__)": ",
305		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
306		       desc_bytes(desc), 1);
307#endif
308
309	/*
310	 * Job Descriptor and Shared Descriptors
311	 * must all fit into the 64-word Descriptor h/w Buffer
312	 */
313	keys_fit_inline = false;
314	if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
315	    ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
316		keys_fit_inline = true;
317
318	desc = ctx->sh_desc_dec;
319
320	/* aead_decrypt shared descriptor */
321	init_sh_desc(desc, HDR_SHARE_SERIAL);
322
323	/* Skip if already shared */
324	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
325				   JUMP_COND_SHRD);
326	if (keys_fit_inline)
327		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
328				  ctx->split_key_len, CLASS_2 |
329				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
330	else
331		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
332			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
333	set_jump_tgt_here(desc, key_jump_cmd);
334
335	/* Class 2 operation */
336	append_operation(desc, ctx->class2_alg_type |
337			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
338
339	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
340	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
341				ctx->authsize + tfm->ivsize);
342	/* assoclen = (assoclen + cryptlen) - cryptlen */
343	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
344	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
345
346	/* read assoc before reading payload */
347	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
348			     KEY_VLF);
349
350	/* Prepare to read and write cryptlen bytes */
351	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
352	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
353
354	/*
355	 * MOVE_LEN opcode is not available in all SEC HW revisions,
356	 * thus need to do some magic, i.e. self-patch the descriptor
357	 * buffer.
358	 */
359	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
360				    MOVE_DEST_MATH2 |
361				    (0x6 << MOVE_LEN_SHIFT));
362	write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
363				     MOVE_DEST_DESCBUF |
364				     MOVE_WAITCOMP |
365				     (0x8 << MOVE_LEN_SHIFT));
366
367	/* Read and write cryptlen bytes */
368	aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
369
370	/*
371	 * Insert a NOP here, since we need at least 4 instructions between
372	 * code patching the descriptor buffer and the location being patched.
373	 */
374	jump_cmd = append_jump(desc, JUMP_TEST_ALL);
375	set_jump_tgt_here(desc, jump_cmd);
376
377	set_move_tgt_here(desc, read_move_cmd);
378	set_move_tgt_here(desc, write_move_cmd);
379	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
380	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
381		    MOVE_AUX_LS);
382	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
383
384	/* Load ICV */
385	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
386			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
387
388	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
389					      desc_bytes(desc),
390					      DMA_TO_DEVICE);
391	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
392		dev_err(jrdev, "unable to map shared descriptor\n");
393		return -ENOMEM;
394	}
395#ifdef DEBUG
396	print_hex_dump(KERN_ERR,
397		       "aead null dec shdesc@"__stringify(__LINE__)": ",
398		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
399		       desc_bytes(desc), 1);
400#endif
401
402	return 0;
403}
404
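/*
 * Build the encrypt, decrypt and givencrypt shared descriptors for an
 * authenc() aead algorithm
 */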
405static int aead_set_sh_desc(struct crypto_aead *aead)
406{
407	struct aead_tfm *tfm = &aead->base.crt_aead;
408	struct caam_ctx *ctx = crypto_aead_ctx(aead);
409	struct device *jrdev = ctx->jrdev;
410	bool keys_fit_inline = false;
411	u32 geniv, moveiv;
412	u32 *desc;
413
414	if (!ctx->authsize)
415		return 0;
416
417	/* NULL encryption / decryption */
418	if (!ctx->enckeylen)
419		return aead_null_set_sh_desc(aead);
420
421	/*
422	 * Job Descriptor and Shared Descriptors
423	 * must all fit into the 64-word Descriptor h/w Buffer
424	 */
425	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
426	    ctx->split_key_pad_len + ctx->enckeylen <=
427	    CAAM_DESC_BYTES_MAX)
428		keys_fit_inline = true;
429
430	/* aead_encrypt shared descriptor */
431	desc = ctx->sh_desc_enc;
432
433	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
434
435	/* Class 2 operation */
436	append_operation(desc, ctx->class2_alg_type |
437			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
438
439	/* cryptlen = seqoutlen - authsize */
440	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
441
442	/* assoclen + cryptlen = seqinlen - ivsize */
443	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
444
445	/* assoclen = (assoclen + cryptlen) - cryptlen */
446	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
447
448	/* read assoc before reading payload */
449	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
450			     KEY_VLF);
451	aead_append_ld_iv(desc, tfm->ivsize);
452
453	/* Class 1 operation */
454	append_operation(desc, ctx->class1_alg_type |
455			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
456
457	/* Read and write cryptlen bytes */
458	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
459	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
460	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
461
462	/* Write ICV */
463	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
464			 LDST_SRCDST_BYTE_CONTEXT);
465
466	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
467					      desc_bytes(desc),
468					      DMA_TO_DEVICE);
469	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
470		dev_err(jrdev, "unable to map shared descriptor\n");
471		return -ENOMEM;
472	}
473#ifdef DEBUG
474	print_hex_dump(KERN_ERR, "aead enc shdesc@"__stringify(__LINE__)": ",
475		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
476		       desc_bytes(desc), 1);
477#endif
478
479	/*
480	 * Job Descriptor and Shared Descriptors
481	 * must all fit into the 64-word Descriptor h/w Buffer
482	 */
483	keys_fit_inline = false;
484	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
485	    ctx->split_key_pad_len + ctx->enckeylen <=
486	    CAAM_DESC_BYTES_MAX)
487		keys_fit_inline = true;
488
489	/* aead_decrypt shared descriptor */
490	desc = ctx->sh_desc_dec;
491
492	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
493
494	/* Class 2 operation */
495	append_operation(desc, ctx->class2_alg_type |
496			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
497
498	/* assoclen + cryptlen = seqinlen - ivsize - authsize */
499	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
500				ctx->authsize + tfm->ivsize);
501	/* assoclen = (assoclen + cryptlen) - cryptlen */
502	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
503	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
504
505	/* read assoc before reading payload */
506	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
507			     KEY_VLF);
508
509	aead_append_ld_iv(desc, tfm->ivsize);
510
511	append_dec_op1(desc, ctx->class1_alg_type);
512
513	/* Read and write cryptlen bytes */
514	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
515	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
516	aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
517
518	/* Load ICV */
519	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
520			     FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
521
522	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
523					      desc_bytes(desc),
524					      DMA_TO_DEVICE);
525	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
526		dev_err(jrdev, "unable to map shared descriptor\n");
527		return -ENOMEM;
528	}
529#ifdef DEBUG
530	print_hex_dump(KERN_ERR, "aead dec shdesc@"__stringify(__LINE__)": ",
531		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
532		       desc_bytes(desc), 1);
533#endif
534
535	/*
536	 * Job Descriptor and Shared Descriptors
537	 * must all fit into the 64-word Descriptor h/w Buffer
538	 */
539	keys_fit_inline = false;
540	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
541	    ctx->split_key_pad_len + ctx->enckeylen <=
542	    CAAM_DESC_BYTES_MAX)
543		keys_fit_inline = true;
544
545	/* aead_givencrypt shared descriptor */
546	desc = ctx->sh_desc_givenc;
547
548	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
549
550	/* Generate IV */
551	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
552		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
553		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
554	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
555			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
556	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
557	append_move(desc, MOVE_SRC_INFIFO |
558		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
559	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
560
561	/* Copy generated IV from class 1 context to the output FIFO */
562	append_move(desc, MOVE_SRC_CLASS1CTX |
563		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
564
565	/* Return to encryption */
566	append_operation(desc, ctx->class2_alg_type |
567			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
568
569	/* ivsize + cryptlen = seqoutlen - authsize */
570	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
571
572	/* assoclen = seqinlen - (ivsize + cryptlen) */
573	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
574
575	/* read assoc before reading payload */
576	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
577			     KEY_VLF);
578
579	/* Copy iv from class 1 ctx to class 2 fifo */
580	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
581		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
582	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
583			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
584	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
585			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
586
587	/* Class 1 operation */
588	append_operation(desc, ctx->class1_alg_type |
589			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
590
591	/* Will write ivsize + cryptlen */
592	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
593
594	/* No need to reload iv */
595	append_seq_fifo_load(desc, tfm->ivsize,
596			     FIFOLD_CLASS_SKIP);
597
598	/* Will read cryptlen */
599	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
600	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
601
602	/* Write ICV */
603	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
604			 LDST_SRCDST_BYTE_CONTEXT);
605
606	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
607						 desc_bytes(desc),
608						 DMA_TO_DEVICE);
609	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
610		dev_err(jrdev, "unable to map shared descriptor\n");
611		return -ENOMEM;
612	}
613#ifdef DEBUG
614	print_hex_dump(KERN_ERR, "aead givenc shdesc@"__stringify(__LINE__)": ",
615		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
616		       desc_bytes(desc), 1);
617#endif
618
619	return 0;
620}
621
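/* Set the ICV (authentication tag) size and rebuild the shared descriptors */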
622static int aead_setauthsize(struct crypto_aead *authenc,
623				    unsigned int authsize)
624{
625	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
626
627	ctx->authsize = authsize;
628	aead_set_sh_desc(authenc);
629
630	return 0;
631}
632
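/* Generate the MDHA split key from the raw authentication key */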
633static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
634			      u32 authkeylen)
635{
636	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
637			       ctx->split_key_pad_len, key_in, authkeylen,
638			       ctx->alg_op);
639}
640
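/*
 * Split the authenc() key into authentication and encryption parts, derive
 * the split key, DMA map the result and rebuild the shared descriptors
 */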
641static int aead_setkey(struct crypto_aead *aead,
642			       const u8 *key, unsigned int keylen)
643{
644	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
645	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
646	struct caam_ctx *ctx = crypto_aead_ctx(aead);
647	struct device *jrdev = ctx->jrdev;
648	struct crypto_authenc_keys keys;
649	int ret = 0;
650
651	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
652		goto badkey;
653
654	/* Pick class 2 key length from algorithm submask */
655	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
656				      OP_ALG_ALGSEL_SHIFT] * 2;
657	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
658
659	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
660		goto badkey;
661
662#ifdef DEBUG
663	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
664	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
665	       keys.authkeylen);
666	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
667	       ctx->split_key_len, ctx->split_key_pad_len);
668	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
669		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
670#endif
671
672	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
673	if (ret) {
674		goto badkey;
675	}
676
677	/* append the encryption key after the auth split key */
678	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
679
680	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
681				      keys.enckeylen, DMA_TO_DEVICE);
682	if (dma_mapping_error(jrdev, ctx->key_dma)) {
683		dev_err(jrdev, "unable to map key i/o memory\n");
684		return -ENOMEM;
685	}
686#ifdef DEBUG
687	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
688		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
689		       ctx->split_key_pad_len + keys.enckeylen, 1);
690#endif
691
692	ctx->enckeylen = keys.enckeylen;
693
694	ret = aead_set_sh_desc(aead);
695	if (ret) {
696		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
697				 keys.enckeylen, DMA_TO_DEVICE);
698	}
699
700	return ret;
701badkey:
702	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
703	return -EINVAL;
704}
705
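/*
 * Load the raw cipher key and build the ablkcipher encrypt and decrypt
 * shared descriptors
 */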
706static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
707			     const u8 *key, unsigned int keylen)
708{
709	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
710	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
711	struct device *jrdev = ctx->jrdev;
712	int ret = 0;
713	u32 *key_jump_cmd;
714	u32 *desc;
715
716#ifdef DEBUG
717	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
718		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
719#endif
720
721	memcpy(ctx->key, key, keylen);
722	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
723				      DMA_TO_DEVICE);
724	if (dma_mapping_error(jrdev, ctx->key_dma)) {
725		dev_err(jrdev, "unable to map key i/o memory\n");
726		return -ENOMEM;
727	}
728	ctx->enckeylen = keylen;
729
730	/* ablkcipher_encrypt shared descriptor */
731	desc = ctx->sh_desc_enc;
732	init_sh_desc(desc, HDR_SHARE_SERIAL);
733	/* Skip if already shared */
734	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
735				   JUMP_COND_SHRD);
736
737	/* Load class1 key only */
738	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
739			  ctx->enckeylen, CLASS_1 |
740			  KEY_DEST_CLASS_REG);
741
742	set_jump_tgt_here(desc, key_jump_cmd);
743
744	/* Load iv */
745	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
746		   LDST_CLASS_1_CCB | tfm->ivsize);
747
748	/* Load operation */
749	append_operation(desc, ctx->class1_alg_type |
750			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
751
752	/* Perform operation */
753	ablkcipher_append_src_dst(desc);
754
755	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
756					      desc_bytes(desc),
757					      DMA_TO_DEVICE);
758	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
759		dev_err(jrdev, "unable to map shared descriptor\n");
760		return -ENOMEM;
761	}
762#ifdef DEBUG
763	print_hex_dump(KERN_ERR,
764		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
765		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
766		       desc_bytes(desc), 1);
767#endif
768	/* ablkcipher_decrypt shared descriptor */
769	desc = ctx->sh_desc_dec;
770
771	init_sh_desc(desc, HDR_SHARE_SERIAL);
772	/* Skip if already shared */
773	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
774				   JUMP_COND_SHRD);
775
776	/* Load class1 key only */
777	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
778			  ctx->enckeylen, CLASS_1 |
779			  KEY_DEST_CLASS_REG);
780
781	set_jump_tgt_here(desc, key_jump_cmd);
782
783	/* load IV */
784	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
785		   LDST_CLASS_1_CCB | tfm->ivsize);
786
787	/* Choose operation */
788	append_dec_op1(desc, ctx->class1_alg_type);
789
790	/* Perform operation */
791	ablkcipher_append_src_dst(desc);
792
793	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
794					      desc_bytes(desc),
795					      DMA_TO_DEVICE);
796	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
797		dev_err(jrdev, "unable to map shared descriptor\n");
798		return -ENOMEM;
799	}
800
801#ifdef DEBUG
802	print_hex_dump(KERN_ERR,
803		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
804		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
805		       desc_bytes(desc), 1);
806#endif
807
808	return ret;
809}
810
811/*
812 * aead_edesc - s/w-extended aead descriptor
813 * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
814 * @assoc_chained: if associated data is chained
815 * @src_nents: number of segments in input scatterlist
816 * @src_chained: if source is chained
817 * @dst_nents: number of segments in output scatterlist
818 * @dst_chained: if destination is chained
819 * @iv_dma: dma address of iv for checking continuity and link table
820 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
821 * @sec4_sg_bytes: length of dma mapped sec4_sg space
822 * @sec4_sg_dma: bus physical mapped address of h/w link table
823 * @hw_desc: the h/w job descriptor followed by any referenced link tables
824 */
825struct aead_edesc {
826	int assoc_nents;
827	bool assoc_chained;
828	int src_nents;
829	bool src_chained;
830	int dst_nents;
831	bool dst_chained;
832	dma_addr_t iv_dma;
833	int sec4_sg_bytes;
834	dma_addr_t sec4_sg_dma;
835	struct sec4_sg_entry *sec4_sg;
836	u32 hw_desc[0];
837};
838
839/*
840 * ablkcipher_edesc - s/w-extended ablkcipher descriptor
841 * @src_nents: number of segments in input scatterlist
842 * @src_chained: if source is chained
843 * @dst_nents: number of segments in output scatterlist
844 * @dst_chained: if destination is chained
845 * @iv_dma: dma address of iv for checking continuity and link table
846 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
847 * @sec4_sg_bytes: length of dma mapped sec4_sg space
848 * @sec4_sg_dma: bus physical mapped address of h/w link table
849 * @hw_desc: the h/w job descriptor followed by any referenced link tables
850 */
851struct ablkcipher_edesc {
852	int src_nents;
853	bool src_chained;
854	int dst_nents;
855	bool dst_chained;
856	dma_addr_t iv_dma;
857	int sec4_sg_bytes;
858	dma_addr_t sec4_sg_dma;
859	struct sec4_sg_entry *sec4_sg;
860	u32 hw_desc[0];
861};
862
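/*
 * Unmap the DMA mappings (src/dst scatterlists, IV and S/G table) made for
 * a request
 */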
863static void caam_unmap(struct device *dev, struct scatterlist *src,
864		       struct scatterlist *dst, int src_nents,
865		       bool src_chained, int dst_nents, bool dst_chained,
866		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
867		       int sec4_sg_bytes)
868{
869	if (dst != src) {
870		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
871				     src_chained);
872		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
873				     dst_chained);
874	} else {
875		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
876				     DMA_BIDIRECTIONAL, src_chained);
877	}
878
879	if (iv_dma)
880		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
881	if (sec4_sg_bytes)
882		dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
883				 DMA_TO_DEVICE);
884}
885
886static void aead_unmap(struct device *dev,
887		       struct aead_edesc *edesc,
888		       struct aead_request *req)
889{
890	struct crypto_aead *aead = crypto_aead_reqtfm(req);
891	int ivsize = crypto_aead_ivsize(aead);
892
893	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
894			     DMA_TO_DEVICE, edesc->assoc_chained);
895
896	caam_unmap(dev, req->src, req->dst,
897		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
898		   edesc->dst_chained, edesc->iv_dma, ivsize,
899		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
900}
901
902static void ablkcipher_unmap(struct device *dev,
903			     struct ablkcipher_edesc *edesc,
904			     struct ablkcipher_request *req)
905{
906	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
907	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
908
909	caam_unmap(dev, req->src, req->dst,
910		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
911		   edesc->dst_chained, edesc->iv_dma, ivsize,
912		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
913}
914
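/* Job ring completion callback for aead encrypt requests */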
915static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
916				   void *context)
917{
918	struct aead_request *req = context;
919	struct aead_edesc *edesc;
920#ifdef DEBUG
921	struct crypto_aead *aead = crypto_aead_reqtfm(req);
922	struct caam_ctx *ctx = crypto_aead_ctx(aead);
923	int ivsize = crypto_aead_ivsize(aead);
924
925	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
926#endif
927
928	edesc = (struct aead_edesc *)((char *)desc -
929		 offsetof(struct aead_edesc, hw_desc));
930
931	if (err)
932		caam_jr_strstatus(jrdev, err);
933
934	aead_unmap(jrdev, edesc, req);
935
936#ifdef DEBUG
937	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
938		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
939		       req->assoclen , 1);
940	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
941		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
942		       edesc->src_nents ? 100 : ivsize, 1);
943	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
944		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
945		       edesc->src_nents ? 100 : req->cryptlen +
946		       ctx->authsize + 4, 1);
947#endif
948
949	kfree(edesc);
950
951	aead_request_complete(req, err);
952}
953
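/* Job ring completion callback for aead decrypt requests */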
954static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
955				   void *context)
956{
957	struct aead_request *req = context;
958	struct aead_edesc *edesc;
959#ifdef DEBUG
960	struct crypto_aead *aead = crypto_aead_reqtfm(req);
961	struct caam_ctx *ctx = crypto_aead_ctx(aead);
962	int ivsize = crypto_aead_ivsize(aead);
963
964	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
965#endif
966
967	edesc = (struct aead_edesc *)((char *)desc -
968		 offsetof(struct aead_edesc, hw_desc));
969
970#ifdef DEBUG
971	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
972		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
973		       ivsize, 1);
974	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
975		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
976		       req->cryptlen - ctx->authsize, 1);
977#endif
978
979	if (err)
980		caam_jr_strstatus(jrdev, err);
981
982	aead_unmap(jrdev, edesc, req);
983
984	/*
985	 * verify the hw ICV (auth tag) check passed, else return -EBADMSG
986	 */
987	if ((err & JRSTA_CCBERR_ERRID_MASK) == JRSTA_CCBERR_ERRID_ICVCHK)
988		err = -EBADMSG;
989
990#ifdef DEBUG
991	print_hex_dump(KERN_ERR, "iphdrout@"__stringify(__LINE__)": ",
992		       DUMP_PREFIX_ADDRESS, 16, 4,
993		       ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
994		       sizeof(struct iphdr) + req->assoclen +
995		       ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
996		       ctx->authsize + 36, 1);
997	if (!err && edesc->sec4_sg_bytes) {
998		struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
999		print_hex_dump(KERN_ERR, "sglastout@"__stringify(__LINE__)": ",
1000			       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
1001			sg->length + ctx->authsize + 16, 1);
1002	}
1003#endif
1004
1005	kfree(edesc);
1006
1007	aead_request_complete(req, err);
1008}
1009
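/* Job ring completion callback for ablkcipher encrypt requests */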
1010static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1011				   void *context)
1012{
1013	struct ablkcipher_request *req = context;
1014	struct ablkcipher_edesc *edesc;
1015#ifdef DEBUG
1016	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1017	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1018
1019	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1020#endif
1021
1022	edesc = (struct ablkcipher_edesc *)((char *)desc -
1023		 offsetof(struct ablkcipher_edesc, hw_desc));
1024
1025	if (err)
1026		caam_jr_strstatus(jrdev, err);
1027
1028#ifdef DEBUG
1029	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1030		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1031		       edesc->src_nents > 1 ? 100 : ivsize, 1);
1032	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1033		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1034		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1035#endif
1036
1037	ablkcipher_unmap(jrdev, edesc, req);
1038	kfree(edesc);
1039
1040	ablkcipher_request_complete(req, err);
1041}
1042
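/* Job ring completion callback for ablkcipher decrypt requests */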
1043static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
1044				    void *context)
1045{
1046	struct ablkcipher_request *req = context;
1047	struct ablkcipher_edesc *edesc;
1048#ifdef DEBUG
1049	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1050	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1051
1052	dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
1053#endif
1054
1055	edesc = (struct ablkcipher_edesc *)((char *)desc -
1056		 offsetof(struct ablkcipher_edesc, hw_desc));
1057	if (err)
1058		caam_jr_strstatus(jrdev, err);
1059
1060#ifdef DEBUG
1061	print_hex_dump(KERN_ERR, "dstiv  @"__stringify(__LINE__)": ",
1062		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1063		       ivsize, 1);
1064	print_hex_dump(KERN_ERR, "dst    @"__stringify(__LINE__)": ",
1065		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1066		       edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
1067#endif
1068
1069	ablkcipher_unmap(jrdev, edesc, req);
1070	kfree(edesc);
1071
1072	ablkcipher_request_complete(req, err);
1073}
1074
1075/*
1076 * Fill in aead job descriptor
1077 */
1078static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
1079			  struct aead_edesc *edesc,
1080			  struct aead_request *req,
1081			  bool all_contig, bool encrypt)
1082{
1083	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1084	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1085	int ivsize = crypto_aead_ivsize(aead);
1086	int authsize = ctx->authsize;
1087	u32 *desc = edesc->hw_desc;
1088	u32 out_options = 0, in_options;
1089	dma_addr_t dst_dma, src_dma;
1090	int len, sec4_sg_index = 0;
1091
1092#ifdef DEBUG
1093	debug("assoclen %d cryptlen %d authsize %d\n",
1094	      req->assoclen, req->cryptlen, authsize);
1095	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
1096		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1097		       req->assoclen , 1);
1098	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1099		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1100		       edesc->src_nents ? 100 : ivsize, 1);
1101	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
1102		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1103			edesc->src_nents ? 100 : req->cryptlen, 1);
1104	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1105		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1106		       desc_bytes(sh_desc), 1);
1107#endif
1108
1109	len = desc_len(sh_desc);
1110	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1111
1112	if (all_contig) {
1113		src_dma = sg_dma_address(req->assoc);
1114		in_options = 0;
1115	} else {
1116		src_dma = edesc->sec4_sg_dma;
1117		sec4_sg_index += (edesc->assoc_nents ? : 1) + 1 +
1118				 (edesc->src_nents ? : 1);
1119		in_options = LDST_SGF;
1120	}
1121
1122	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1123			  in_options);
1124
1125	if (likely(req->src == req->dst)) {
1126		if (all_contig) {
1127			dst_dma = sg_dma_address(req->src);
1128		} else {
1129			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1130				  ((edesc->assoc_nents ? : 1) + 1);
1131			out_options = LDST_SGF;
1132		}
1133	} else {
1134		if (!edesc->dst_nents) {
1135			dst_dma = sg_dma_address(req->dst);
1136		} else {
1137			dst_dma = edesc->sec4_sg_dma +
1138				  sec4_sg_index *
1139				  sizeof(struct sec4_sg_entry);
1140			out_options = LDST_SGF;
1141		}
1142	}
1143	if (encrypt)
1144		append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize,
1145				   out_options);
1146	else
1147		append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
1148				   out_options);
1149}
1150
1151/*
1152 * Fill in aead givencrypt job descriptor
1153 */
1154static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
1155			      struct aead_edesc *edesc,
1156			      struct aead_request *req,
1157			      int contig)
1158{
1159	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1160	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1161	int ivsize = crypto_aead_ivsize(aead);
1162	int authsize = ctx->authsize;
1163	u32 *desc = edesc->hw_desc;
1164	u32 out_options = 0, in_options;
1165	dma_addr_t dst_dma, src_dma;
1166	int len, sec4_sg_index = 0;
1167
1168#ifdef DEBUG
1169	debug("assoclen %d cryptlen %d authsize %d\n",
1170	      req->assoclen, req->cryptlen, authsize);
1171	print_hex_dump(KERN_ERR, "assoc  @"__stringify(__LINE__)": ",
1172		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
1173		       req->assoclen , 1);
1174	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1175		       DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
1176	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
1177		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1178			edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
1179	print_hex_dump(KERN_ERR, "shrdesc@"__stringify(__LINE__)": ",
1180		       DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
1181		       desc_bytes(sh_desc), 1);
1182#endif
1183
1184	len = desc_len(sh_desc);
1185	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1186
1187	if (contig & GIV_SRC_CONTIG) {
1188		src_dma = sg_dma_address(req->assoc);
1189		in_options = 0;
1190	} else {
1191		src_dma = edesc->sec4_sg_dma;
1192		sec4_sg_index += edesc->assoc_nents + 1 + edesc->src_nents;
1193		in_options = LDST_SGF;
1194	}
1195	append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen,
1196			  in_options);
1197
1198	if (contig & GIV_DST_CONTIG) {
1199		dst_dma = edesc->iv_dma;
1200	} else {
1201		if (likely(req->src == req->dst)) {
1202			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
1203				  edesc->assoc_nents;
1204			out_options = LDST_SGF;
1205		} else {
1206			dst_dma = edesc->sec4_sg_dma +
1207				  sec4_sg_index *
1208				  sizeof(struct sec4_sg_entry);
1209			out_options = LDST_SGF;
1210		}
1211	}
1212
1213	append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize,
1214			   out_options);
1215}
1216
1217/*
1218 * Fill in ablkcipher job descriptor
1219 */
1220static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
1221				struct ablkcipher_edesc *edesc,
1222				struct ablkcipher_request *req,
1223				bool iv_contig)
1224{
1225	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1226	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1227	u32 *desc = edesc->hw_desc;
1228	u32 out_options = 0, in_options;
1229	dma_addr_t dst_dma, src_dma;
1230	int len, sec4_sg_index = 0;
1231
1232#ifdef DEBUG
1233	print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
1234		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
1235		       ivsize, 1);
1236	print_hex_dump(KERN_ERR, "src    @"__stringify(__LINE__)": ",
1237		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1238		       edesc->src_nents ? 100 : req->nbytes, 1);
1239#endif
1240
1241	len = desc_len(sh_desc);
1242	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
1243
1244	if (iv_contig) {
1245		src_dma = edesc->iv_dma;
1246		in_options = 0;
1247	} else {
1248		src_dma = edesc->sec4_sg_dma;
1249		sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
1250		in_options = LDST_SGF;
1251	}
1252	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
1253
1254	if (likely(req->src == req->dst)) {
1255		if (!edesc->src_nents && iv_contig) {
1256			dst_dma = sg_dma_address(req->src);
1257		} else {
1258			dst_dma = edesc->sec4_sg_dma +
1259				sizeof(struct sec4_sg_entry);
1260			out_options = LDST_SGF;
1261		}
1262	} else {
1263		if (!edesc->dst_nents) {
1264			dst_dma = sg_dma_address(req->dst);
1265		} else {
1266			dst_dma = edesc->sec4_sg_dma +
1267				sec4_sg_index * sizeof(struct sec4_sg_entry);
1268			out_options = LDST_SGF;
1269		}
1270	}
1271	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
1272}
1273
1274/*
1275 * allocate and map the aead extended descriptor
1276 */
1277static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
1278					   int desc_bytes, bool *all_contig_ptr,
1279					   bool encrypt)
1280{
1281	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1282	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1283	struct device *jrdev = ctx->jrdev;
1284	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1285		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1286	int assoc_nents, src_nents, dst_nents = 0;
1287	struct aead_edesc *edesc;
1288	dma_addr_t iv_dma = 0;
1289	int sgc;
1290	bool all_contig = true;
1291	bool assoc_chained = false, src_chained = false, dst_chained = false;
1292	int ivsize = crypto_aead_ivsize(aead);
1293	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1294	unsigned int authsize = ctx->authsize;
1295
1296	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1297
1298	if (unlikely(req->dst != req->src)) {
1299		src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1300		dst_nents = sg_count(req->dst,
1301				     req->cryptlen +
1302					(encrypt ? authsize : (-authsize)),
1303				     &dst_chained);
1304	} else {
1305		src_nents = sg_count(req->src,
1306				     req->cryptlen +
1307					(encrypt ? authsize : 0),
1308				     &src_chained);
1309	}
1310
1311	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1312				 DMA_TO_DEVICE, assoc_chained);
1313	if (likely(req->src == req->dst)) {
1314		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1315					 DMA_BIDIRECTIONAL, src_chained);
1316	} else {
1317		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1318					 DMA_TO_DEVICE, src_chained);
1319		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1320					 DMA_FROM_DEVICE, dst_chained);
1321	}
1322
1323	iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
1324	if (dma_mapping_error(jrdev, iv_dma)) {
1325		dev_err(jrdev, "unable to map IV\n");
1326		return ERR_PTR(-ENOMEM);
1327	}
1328
1329	/* Check if data are contiguous */
1330	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1331	    iv_dma || src_nents || iv_dma + ivsize !=
1332	    sg_dma_address(req->src)) {
1333		all_contig = false;
1334		assoc_nents = assoc_nents ? : 1;
1335		src_nents = src_nents ? : 1;
1336		sec4_sg_len = assoc_nents + 1 + src_nents;
1337	}
1338	sec4_sg_len += dst_nents;
1339
1340	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1341
1342	/* allocate space for base edesc and hw desc commands, link tables */
1343	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1344			sec4_sg_bytes, GFP_DMA | flags);
1345	if (!edesc) {
1346		dev_err(jrdev, "could not allocate extended descriptor\n");
1347		return ERR_PTR(-ENOMEM);
1348	}
1349
1350	edesc->assoc_nents = assoc_nents;
1351	edesc->assoc_chained = assoc_chained;
1352	edesc->src_nents = src_nents;
1353	edesc->src_chained = src_chained;
1354	edesc->dst_nents = dst_nents;
1355	edesc->dst_chained = dst_chained;
1356	edesc->iv_dma = iv_dma;
1357	edesc->sec4_sg_bytes = sec4_sg_bytes;
1358	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1359			 desc_bytes;
1360	*all_contig_ptr = all_contig;
1361
1362	sec4_sg_index = 0;
1363	if (!all_contig) {
1364		sg_to_sec4_sg(req->assoc,
1365			      (assoc_nents ? : 1),
1366			      edesc->sec4_sg +
1367			      sec4_sg_index, 0);
1368		sec4_sg_index += assoc_nents ? : 1;
1369		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1370				   iv_dma, ivsize, 0);
1371		sec4_sg_index += 1;
1372		sg_to_sec4_sg_last(req->src,
1373				   (src_nents ? : 1),
1374				   edesc->sec4_sg +
1375				   sec4_sg_index, 0);
1376		sec4_sg_index += src_nents ? : 1;
1377	}
1378	if (dst_nents) {
1379		sg_to_sec4_sg_last(req->dst, dst_nents,
1380				   edesc->sec4_sg + sec4_sg_index, 0);
1381	}
1382	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1383					    sec4_sg_bytes, DMA_TO_DEVICE);
1384	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1385		dev_err(jrdev, "unable to map S/G table\n");
1386		return ERR_PTR(-ENOMEM);
1387	}
1388
1389	return edesc;
1390}
1391
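/*
 * Allocate the extended descriptor, build the encrypt job descriptor and
 * enqueue it on the job ring
 */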
1392static int aead_encrypt(struct aead_request *req)
1393{
1394	struct aead_edesc *edesc;
1395	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1396	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1397	struct device *jrdev = ctx->jrdev;
1398	bool all_contig;
1399	u32 *desc;
1400	int ret = 0;
1401
1402	/* allocate extended descriptor */
1403	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1404				 CAAM_CMD_SZ, &all_contig, true);
1405	if (IS_ERR(edesc))
1406		return PTR_ERR(edesc);
1407
1408	/* Create and submit job descriptor */
1409	init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
1410		      all_contig, true);
1411#ifdef DEBUG
1412	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1413		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1414		       desc_bytes(edesc->hw_desc), 1);
1415#endif
1416
1417	desc = edesc->hw_desc;
1418	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1419	if (!ret) {
1420		ret = -EINPROGRESS;
1421	} else {
1422		aead_unmap(jrdev, edesc, req);
1423		kfree(edesc);
1424	}
1425
1426	return ret;
1427}
1428
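/*
 * Allocate the extended descriptor, build the decrypt job descriptor and
 * enqueue it on the job ring
 */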
1429static int aead_decrypt(struct aead_request *req)
1430{
1431	struct aead_edesc *edesc;
1432	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1433	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1434	struct device *jrdev = ctx->jrdev;
1435	bool all_contig;
1436	u32 *desc;
1437	int ret = 0;
1438
1439	/* allocate extended descriptor */
1440	edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
1441				 CAAM_CMD_SZ, &all_contig, false);
1442	if (IS_ERR(edesc))
1443		return PTR_ERR(edesc);
1444
1445#ifdef DEBUG
1446	print_hex_dump(KERN_ERR, "dec src@"__stringify(__LINE__)": ",
1447		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1448		       req->cryptlen, 1);
1449#endif
1450
1451	/* Create and submit job descriptor */
1452	init_aead_job(ctx->sh_desc_dec,
1453		      ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
1454#ifdef DEBUG
1455	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1456		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1457		       desc_bytes(edesc->hw_desc), 1);
1458#endif
1459
1460	desc = edesc->hw_desc;
1461	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
1462	if (!ret) {
1463		ret = -EINPROGRESS;
1464	} else {
1465		aead_unmap(jrdev, edesc, req);
1466		kfree(edesc);
1467	}
1468
1469	return ret;
1470}
1471
1472/*
1473 * allocate and map the aead extended descriptor for aead givencrypt
1474 */
1475static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
1476					       *greq, int desc_bytes,
1477					       u32 *contig_ptr)
1478{
1479	struct aead_request *req = &greq->areq;
1480	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1481	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1482	struct device *jrdev = ctx->jrdev;
1483	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
1484		       CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
1485	int assoc_nents, src_nents, dst_nents = 0;
1486	struct aead_edesc *edesc;
1487	dma_addr_t iv_dma = 0;
1488	int sgc;
1489	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
1490	int ivsize = crypto_aead_ivsize(aead);
1491	bool assoc_chained = false, src_chained = false, dst_chained = false;
1492	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
1493
1494	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
1495	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
1496
1497	if (unlikely(req->dst != req->src))
1498		dst_nents = sg_count(req->dst, req->cryptlen + ctx->authsize,
1499				     &dst_chained);
1500
1501	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
1502				 DMA_TO_DEVICE, assoc_chained);
1503	if (likely(req->src == req->dst)) {
1504		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1505					 DMA_BIDIRECTIONAL, src_chained);
1506	} else {
1507		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
1508					 DMA_TO_DEVICE, src_chained);
1509		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
1510					 DMA_FROM_DEVICE, dst_chained);
1511	}
1512
1513	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
1514	if (dma_mapping_error(jrdev, iv_dma)) {
1515		dev_err(jrdev, "unable to map IV\n");
1516		return ERR_PTR(-ENOMEM);
1517	}
1518
1519	/* Check if data are contiguous */
1520	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
1521	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
1522		contig &= ~GIV_SRC_CONTIG;
1523	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
1524		contig &= ~GIV_DST_CONTIG;
1525	if (unlikely(req->src != req->dst)) {
1526		dst_nents = dst_nents ? : 1;
1527		sec4_sg_len += 1;
1528	}
1529	if (!(contig & GIV_SRC_CONTIG)) {
1530		assoc_nents = assoc_nents ? : 1;
1531		src_nents = src_nents ? : 1;
1532		sec4_sg_len += assoc_nents + 1 + src_nents;
1533		if (likely(req->src == req->dst))
1534			contig &= ~GIV_DST_CONTIG;
1535	}
1536	sec4_sg_len += dst_nents;
1537
1538	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
1539
1540	/* allocate space for base edesc and hw desc commands, link tables */
1541	edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
1542			sec4_sg_bytes, GFP_DMA | flags);
1543	if (!edesc) {
1544		dev_err(jrdev, "could not allocate extended descriptor\n");
1545		return ERR_PTR(-ENOMEM);
1546	}
1547
1548	edesc->assoc_nents = assoc_nents;
1549	edesc->assoc_chained = assoc_chained;
1550	edesc->src_nents = src_nents;
1551	edesc->src_chained = src_chained;
1552	edesc->dst_nents = dst_nents;
1553	edesc->dst_chained = dst_chained;
1554	edesc->iv_dma = iv_dma;
1555	edesc->sec4_sg_bytes = sec4_sg_bytes;
1556	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
1557			 desc_bytes;
1558	*contig_ptr = contig;
1559
1560	sec4_sg_index = 0;
1561	if (!(contig & GIV_SRC_CONTIG)) {
1562		sg_to_sec4_sg(req->assoc, assoc_nents,
1563			      edesc->sec4_sg +
1564			      sec4_sg_index, 0);
1565		sec4_sg_index += assoc_nents;
1566		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1567				   iv_dma, ivsize, 0);
1568		sec4_sg_index += 1;
1569		sg_to_sec4_sg_last(req->src, src_nents,
1570				   edesc->sec4_sg +
1571				   sec4_sg_index, 0);
1572		sec4_sg_index += src_nents;
1573	}
1574	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
1575		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
1576				   iv_dma, ivsize, 0);
1577		sec4_sg_index += 1;
1578		sg_to_sec4_sg_last(req->dst, dst_nents,
1579				   edesc->sec4_sg + sec4_sg_index, 0);
1580	}
1581	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
1582					    sec4_sg_bytes, DMA_TO_DEVICE);
1583	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
1584		dev_err(jrdev, "unable to map S/G table\n");
1585		return ERR_PTR(-ENOMEM);
1586	}
1587
1588	return edesc;
1589}
1590
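/*
 * Encrypt with a hardware-generated IV: allocate the extended descriptor,
 * build the givencrypt job descriptor and enqueue it on the job ring
 */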
1591static int aead_givencrypt(struct aead_givcrypt_request *areq)
1592{
1593	struct aead_request *req = &areq->areq;
1594	struct aead_edesc *edesc;
1595	struct crypto_aead *aead = crypto_aead_reqtfm(req);
1596	struct caam_ctx *ctx = crypto_aead_ctx(aead);
1597	struct device *jrdev = ctx->jrdev;
1598	u32 contig;
1599	u32 *desc;
1600	int ret = 0;
1601
1602	/* allocate extended descriptor */
1603	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
1604				     CAAM_CMD_SZ, &contig);
1605
1606	if (IS_ERR(edesc))
1607		return PTR_ERR(edesc);
1608
1609#ifdef DEBUG
1610	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
1611		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
1612		       req->cryptlen, 1);
1613#endif
1614
1615	/* Create and submit job descriptor */
1616	init_aead_giv_job(ctx->sh_desc_givenc,
1617			  ctx->sh_desc_givenc_dma, edesc, req, contig);
1618#ifdef DEBUG
1619	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
1620		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
1621		       desc_bytes(edesc->hw_desc), 1);
1622#endif
1623
1624	desc = edesc->hw_desc;
1625	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
1626	if (!ret) {
1627		ret = -EINPROGRESS;
1628	} else {
1629		aead_unmap(jrdev, edesc, req);
1630		kfree(edesc);
1631	}
1632
1633	return ret;
1634}
1635
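/* No IV is needed for the null cipher, so givencrypt is plain encrypt */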
1636static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
1637{
1638	return aead_encrypt(&areq->areq);
1639}
1640
1641/*
1642 * allocate and map the ablkcipher extended descriptor for ablkcipher
1643 */
static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
						       *req, int desc_bytes,
						       bool *iv_contig_out)
{
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, dst_nents = 0, sec4_sg_bytes;
	struct ablkcipher_edesc *edesc;
	dma_addr_t iv_dma = 0;
	bool iv_contig = false;
	int sgc;
	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	bool src_chained = false, dst_chained = false;
	int sec4_sg_index;

	src_nents = sg_count(req->src, req->nbytes, &src_chained);

	if (req->dst != req->src)
		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

	if (likely(req->src == req->dst)) {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_BIDIRECTIONAL, src_chained);
	} else {
		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
					 DMA_TO_DEVICE, src_chained);
		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
					 DMA_FROM_DEVICE, dst_chained);
	}

	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, iv_dma)) {
		dev_err(jrdev, "unable to map IV\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Check if iv can be contiguous with source and destination.
	 * If so, include it. If not, create scatterlist.
	 */
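	/*
	 * When the IV cannot be folded in, the sec4 S/G table built
	 * below is laid out as [IV][src entries...], followed by the
	 * destination entries when req->dst differs from req->src.
	 */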
	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
		iv_contig = true;
	else
		src_nents = src_nents ? : 1;
	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
			sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
			sec4_sg_bytes, GFP_DMA | flags);
	if (!edesc) {
		dev_err(jrdev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->src_chained = src_chained;
	edesc->dst_nents = dst_nents;
	edesc->dst_chained = dst_chained;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;

	sec4_sg_index = 0;
	if (!iv_contig) {
		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
		sg_to_sec4_sg_last(req->src, src_nents,
				   edesc->sec4_sg + 1, 0);
		sec4_sg_index += 1 + src_nents;
	}

	if (dst_nents) {
		sg_to_sec4_sg_last(req->dst, dst_nents,
			edesc->sec4_sg + sec4_sg_index, 0);
	}

	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
		dev_err(jrdev, "unable to map S/G table\n");
		/* don't leak the extended descriptor on the error path */
		kfree(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
		       sec4_sg_bytes, 1);
#endif

	*iv_contig_out = iv_contig;
	return edesc;
}

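/*
 * ablkcipher_encrypt() - map the request, build a job descriptor that
 * points at the encrypt shared descriptor, and enqueue it on a job
 * ring; the ablkcipher_encrypt_done() callback is expected to unmap
 * and free the extended descriptor when the job completes.
 */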
static int ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_enc,
		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif
	desc = edesc->hw_desc;
	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);

	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

static int ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_edesc *edesc;
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
	struct device *jrdev = ctx->jrdev;
	bool iv_contig;
	u32 *desc;
	int ret = 0;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
				       CAAM_CMD_SZ, &iv_contig);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	init_ablkcipher_job(ctx->sh_desc_dec,
		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
	desc = edesc->hw_desc;
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
		       desc_bytes(edesc->hw_desc), 1);
#endif

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		ablkcipher_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

#define template_aead		template_u.aead
#define template_ablkcipher	template_u.ablkcipher
struct caam_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	u32 type;
	union {
		struct ablkcipher_alg ablkcipher;
		struct aead_alg aead;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
		struct rng_alg rng;
	} template_u;
	u32 class1_alg_type;
	u32 class2_alg_type;
	u32 alg_op;
};

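/*
 * Algorithm templates.  Each entry below is turned into a struct
 * crypto_alg by caam_alg_alloc() and registered at module init.  As an
 * illustration only (not code from this driver), a kernel user would
 * reach one of these algorithms through the regular crypto API, e.g.:
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 * with the CAAM implementation preferred over software fallbacks by
 * virtue of its higher cra_priority (CAAM_CRA_PRIORITY).
 */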
static struct caam_alg_template driver_algs[] = {
	/* single-pass ipsec_esp descriptor */
	{
		.name = "authenc(hmac(md5),ecb(cipher_null))",
		.driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),ecb(cipher_null))",
		.driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
		.blocksize = NULL_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_null_givencrypt,
			.geniv = "<built-in>",
			.ivsize = NULL_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = 0,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(aes))",
		.driver_name = "authenc-hmac-md5-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(aes))",
		.driver_name = "authenc-hmac-sha224-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(aes))",
		.driver_name = "authenc-hmac-sha384-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},

	{
		.name = "authenc(hmac(sha512),cbc(aes))",
		.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des3_ede))",
		.driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha224-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha384-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(md5),cbc(des))",
		.driver_name = "authenc-hmac-md5-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des))",
		.driver_name = "authenc-hmac-sha1-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha224),cbc(des))",
		.driver_name = "authenc-hmac-sha224-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des))",
		.driver_name = "authenc-hmac-sha256-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha384),cbc(des))",
		.driver_name = "authenc-hmac-sha384-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
	},
	{
		.name = "authenc(hmac(sha512),cbc(des))",
		.driver_name = "authenc-hmac-sha512-cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_AEAD,
		.template_aead = {
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.givencrypt = aead_givencrypt,
			.geniv = "<built-in>",
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
				   OP_ALG_AAI_HMAC_PRECOMP,
		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
	},
	/* ablkcipher descriptor */
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-caam",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-caam",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-caam",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_ablkcipher = {
			.setkey = ablkcipher_setkey,
			.encrypt = ablkcipher_encrypt,
			.decrypt = ablkcipher_decrypt,
			.geniv = "eseqiv",
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			},
		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	}
};

struct caam_crypto_alg {
	struct list_head entry;
	int class1_alg_type;
	int class2_alg_type;
	int alg_op;
	struct crypto_alg crypto_alg;
};

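/*
 * caam_cra_init() - per-transform setup: acquire a job ring for this
 * tfm and cache the OPERATION command selectors (class 1/2 algorithm
 * type and alg_op) that the shared-descriptor builders use later.
 */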
static int caam_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct caam_crypto_alg *caam_alg =
		 container_of(alg, struct caam_crypto_alg, crypto_alg);
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	/* copy descriptor header template value */
	ctx->class1_alg_type = OP_TYPE_CLASS1_ALG | caam_alg->class1_alg_type;
	ctx->class2_alg_type = OP_TYPE_CLASS2_ALG | caam_alg->class2_alg_type;
	ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_alg->alg_op;

	return 0;
}

static void caam_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->sh_desc_enc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
				 desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
	if (ctx->sh_desc_dec_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
				 desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
	if (ctx->sh_desc_givenc_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
		dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
				 desc_bytes(ctx->sh_desc_givenc),
				 DMA_TO_DEVICE);
	if (ctx->key_dma &&
	    !dma_mapping_error(ctx->jrdev, ctx->key_dma))
		dma_unmap_single(ctx->jrdev, ctx->key_dma,
				 ctx->enckeylen + ctx->split_key_pad_len,
				 DMA_TO_DEVICE);

	caam_jr_free(ctx->jrdev);
}

static void __exit caam_algapi_exit(void)
{
	struct caam_crypto_alg *t_alg, *n;

	if (!alg_list.next)
		return;

	list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
		crypto_unregister_alg(&t_alg->crypto_alg);
		list_del(&t_alg->entry);
		kfree(t_alg);
	}
}

static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
					      *template)
{
	struct caam_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct caam_crypto_alg), GFP_KERNEL);
	if (!t_alg) {
		pr_err("failed to allocate t_alg\n");
		return ERR_PTR(-ENOMEM);
	}

	alg = &t_alg->crypto_alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 template->driver_name);
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_cra_init;
	alg->cra_exit = caam_cra_exit;
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct caam_ctx);
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
			 template->type;
	switch (template->type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher = template->template_ablkcipher;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead = template->template_aead;
		break;
	}

	t_alg->class1_alg_type = template->class1_alg_type;
	t_alg->class2_alg_type = template->class2_alg_type;
	t_alg->alg_op = template->alg_op;

	return t_alg;
}

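/*
 * Module init: locate the CAAM controller node, make sure the
 * controller driver probed successfully (drvdata present), then
 * register every algorithm in driver_algs[] with the crypto API.
 */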
static int __init caam_algapi_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int i = 0, err = 0;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	INIT_LIST_HEAD(&alg_list);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/* TODO: check if h/w supports alg */
		struct caam_crypto_alg *t_alg;

		t_alg = caam_alg_alloc(&driver_algs[i]);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			pr_warn("%s alg allocation failed\n",
				driver_algs[i].driver_name);
			continue;
		}

		err = crypto_register_alg(&t_alg->crypto_alg);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->crypto_alg.cra_driver_name);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &alg_list);
	}
	if (!list_empty(&alg_list))
		pr_info("caam algorithms registered in /proc/crypto\n");

	return err;
}

module_init(caam_algapi_init);
module_exit(caam_algapi_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for crypto API");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");