/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

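/* The fields up to and including crypto_ctx form the descriptor consumed by
 * the NPE.  The NPE is big-endian, so on little-endian hosts the sub-word
 * fields are laid out byte-swapped (see the __ARMEB__ ifdefs); everything
 * after crypto_ctx is host-only bookkeeping.
 */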
struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

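/* All crypt_ctl descriptors live in a single DMA-coherent array, so a
 * descriptor's bus address can be derived from its index and vice versa.
 * The NPE hands back the physical address on RECV_QID, which
 * crypt_phys2virt() maps back to the host pointer.
 */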
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* Allocate the full pool, including the emergency descriptors
	 * above NPE_QLEN used by get_crypt_desc_emerg(); allocating only
	 * NPE_QLEN entries would let them index past the array. */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

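/* Fallback allocator for control operations (HMAC pad hashing, reverse AES
 * key generation): draws from the descriptors reserved between NPE_QLEN and
 * NPE_QLEN_TOTAL, so a setkey cannot be starved while all data-path
 * descriptors are busy.
 */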
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* Unmap the data buffer itself (phys_addr), not the link
		 * to the next descriptor (phys_next). */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

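/* Completion handler, run from the tasklet for each entry the NPE puts on
 * RECV_QID.  Bit 0 of the returned address flags an authentication failure;
 * the low two bits are masked off before converting the address back to a
 * descriptor pointer.
 */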
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	/* Drain a few entries per run, then reschedule ourselves so the
	 * tasklet does not monopolise the CPU. */
	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

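/* Check the expansion-bus feature bits for crypto capability, load the NPE-C
 * firmware if it is not already running, and parse the firmware status
 * message to find out whether this firmware build supports AES.
 */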
static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto err;	/* release the NPE on failure */
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto err;	/* don't leak the requested NPE */
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

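/* Precompute one HMAC chain variable (RFC 2104): XOR the key with the
 * ipad/opad constant, have the NPE hash that single block with
 * NPE_OP_HASH_GEN_ICV, and store the intermediate digest at 'target' inside
 * the NPE context.  Completion is signalled asynchronously through the
 * CTL_FLAG_GEN_ICV case in one_packet().
 */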
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

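/* Write the hash configuration word and the algorithm's initial chaining
 * values into this direction's NPE context, then queue two hash jobs to
 * precompute the HMAC inner (ipad) and outer (opad) chain variables.
 */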
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

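/* AES decryption needs the reverse key schedule.  The NPE derives it
 * itself: NPE_OP_ENC_GEN_KEY encrypts one dummy block and deposits the
 * reverse key right after the config word of the decrypt context
 * (icv_rev_aes points there).
 */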
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

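/* Fill one direction's NPE context with the cipher configuration word and
 * the key.  DES/3DES keys are sanity-checked here; single-DES keys are
 * zero-padded to 3DES size because the NPE expects a fixed key slot.
 */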
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		/* reject degenerate 3DES keys where K1 == K2 or K2 == K3 */
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
		{
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

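/* Walk a scatterlist, DMA-map each segment and link one buffer_desc per
 * segment onto the chain (physical next-pointers for the NPE, virtual ones
 * for the host).  Returns the last descriptor, or NULL if a pool allocation
 * failed; on failure the chain built so far stays terminated so it can be
 * freed with free_buf_chain().
 */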
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain built so far and fail;
			 * falling through with buf == NULL would oops on
			 * the tail writes below */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(req->dst->length < nbytes);
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

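/* RFC 3686 CTR: build the 16-byte counter block as
 * nonce (4) | per-request IV (8) | block counter (4), with the counter
 * starting at 1, then run it through the normal CTR path.  Encryption and
 * decryption are the same operation in CTR mode.
 */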
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

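/* Returns true if the byte range [start, start + nbytes) does not fit
 * within a single scatterlist entry, i.e. the ICV is scattered and must be
 * copied to a linear buffer before the NPE can read or write it.
 */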
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = scatterwalk_sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}

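/* Queue one AEAD operation.  The NPE authenticates assoc data | IV | crypt
 * data as a single buffer chain, so the three pieces are chained together
 * here; the ICV location is passed via icv_rev_aes, either in-place in the
 * last buffer or in a bounce buffer when the ICV straddles sg entries.
 */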
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	/* ASSOC data */
	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
		flags, DMA_TO_DEVICE);
	req_ctx->buffer = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto out;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_chain;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The HMAC bytes are scattered across sg entries,
		 * so copy them into a safe linear buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_chain;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_hmac_virt;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;
free_hmac_virt:
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
free_chain:
	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}

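/* Template table.  Only the algorithm-specific fields are filled in here;
 * the common cra_* fields, the default ablkcipher/aead operations and the
 * "-ixp4xx" driver-name suffix are set up in ixp_module_init().
 */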
static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	/* CTR mode is its own inverse, so both directions use CIPH_ENCR */
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

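/* Register a platform device to obtain a struct device for the DMA API,
 * bring up the NPE and queues, then register every template whose
 * requirements (e.g. AES support in the loaded firmware) are met.
 */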
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");