crypto/cipher.c

/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

static inline void xor_64(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
	((u32 *)a)[2] ^= ((u32 *)b)[2];
	((u32 *)a)[3] ^= ((u32 *)b)[3];
}

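/*
 * Note: xor_64() and xor_128() XOR one cipher block into another a
 * 32-bit word at a time; they are installed as cit_xor_block for CBC
 * in crypto_init_cipher_ops() below, keyed off the cipher block size
 * (8 or 16 bytes).  The u32 casts assume the block buffers are at
 * least 32-bit aligned, which the alignmask handling in crypt() and
 * the aligned cit_iv placement appear to be relied upon to provide.
 */
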
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;
	unsigned int n;

	n = scatterwalk_copychunks(src, in, bsize, 0);
	scatterwalk_advance(in, n);

	desc->prfn(desc, dst, src, bsize);

	n = scatterwalk_copychunks(dst, out, bsize, 1);
	scatterwalk_advance(out, n);

	return bsize;
}

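/*
 * crypt_slow() is the out-of-line path taken when fewer than bsize
 * contiguous bytes remain in the current scatterlist page, i.e. a
 * block straddles a page boundary: the block is gathered into an
 * aligned stack buffer, processed there, and scattered back out.
 */
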
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;

	src = in->data;
	dst = scatterwalk_samebuf(in, out) ? src : out->data;

	if (tmp) {
		memcpy(tmp, in->data, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		memcpy(out->data, tmp, nbytes);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}

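/*
 * crypt_fast() is the common path: it processes as many whole blocks
 * as fit in the currently mapped scatterlist pages, operating on the
 * mapped data directly.  When crypt() has detected misaligned input
 * or output it passes a page-sized bounce buffer in tmp, and the data
 * is copied through that buffer instead so that desc->prfn always
 * sees suitably aligned pointers.
 */
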
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for(;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		scatterwalk_map(&walk_in, 0);
		scatterwalk_map(&walk_out, 1);

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}

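/*
 * Illustrative sketch, not code from this file: with the pre-2.6.19
 * cipher interface implemented here, crypt() is reached through the
 * mode entry points installed in crypto_init_cipher_ops() below.
 * Assuming the usual 2.6.18-era helpers, an in-place CBC encryption
 * looks roughly like:
 *
 *	struct scatterlist sg[1];
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
 *	sg_set_buf(sg, buf, len);	(len a multiple of 16)
 *	crypto_cipher_setkey(tfm, key, 16);
 *	crypto_cipher_set_iv(tfm, iv, crypto_tfm_alg_ivsize(tfm));
 *	crypto_cipher_encrypt(tfm, sg, sg, len);
 *	crypto_free_tfm(tfm);
 *
 * crypto_cipher_encrypt() invokes cit_encrypt == cbc_encrypt(), which
 * fills in a struct cipher_desc and hands it to crypt().
 */
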
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}

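/*
 * crypt_iv_unaligned() backs the *_iv entry points, where the IV is
 * caller-supplied rather than the aligned cit_iv: if the pointer
 * violates the cipher's alignmask, the IV is bounced through an
 * aligned stack copy and the (possibly updated) chaining value is
 * copied back afterwards, so CBC state is preserved for the caller.
 */
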
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		xor(iv, src);
		fn(tfm, dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

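/*
 * cbc_process_encrypt() implements the CBC recurrence
 *
 *	C[i] = E_K(P[i] xor C[i-1])
 *
 * with desc->info (iv) carrying C[i-1] between blocks and across
 * calls: the plaintext is xor'ed into the IV buffer, encrypted from
 * there, and the resulting ciphertext copied back into iv for the
 * next block.
 */
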
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);

	u8 stack[src == dst ? bsize + alignmask : 0];
	u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
	u8 **dst_p = src == dst ? &buf : &dst;

	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		u8 *tmp_dst = *dst_p;

		fn(tfm, tmp_dst, src);
		xor(tmp_dst, iv);
		memcpy(iv, src, bsize);
		if (tmp_dst != dst)
			memcpy(dst, tmp_dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

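/*
 * cbc_process_decrypt() computes P[i] = D_K(C[i]) xor C[i-1].  For
 * in-place operation (src == dst) the decrypted block must not
 * overwrite the ciphertext before it has been saved as the next IV,
 * hence the aligned stack bounce buffer; the variable-length array is
 * sized to zero when src != dst, so the out-of-place case costs no
 * extra stack.
 */
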
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
				const u8 *src, unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		fn(tfm, dst, src);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

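/*
 * ecb_process() is the generic software ECB loop: each block is fed
 * to the raw block cipher independently, with no chaining and no IV.
 * Algorithms may override it (and the CBC loops above) by providing
 * cia_{en,de}crypt_ecb/cbc for multi-block hardware or assembler
 * implementations; the ?: defaults in the entry points below pick
 * the override when present.
 */
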
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;

	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	} else
		return cia->cia_setkey(tfm, key, keylen,
				       &tfm->crt_flags);
}

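/*
 * setkey() only validates the key length against the algorithm's
 * declared cia_min_keysize/cia_max_keysize window; anything further
 * (weak-key checks, parity, key schedule setup) is the job of the
 * algorithm's cia_setkey(), which reports failures back through the
 * crt_flags pointer passed to it.
 */
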
static int ecb_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
		       struct scatterlist *dst,
		       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
			  struct scatterlist *dst,
			  struct scatterlist *src,
			  unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int nocrypt(struct crypto_tfm *tfm,
		   struct scatterlist *dst,
		   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}

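/*
 * CFB and CTR are accepted as modes by crypto_init_cipher_flags() but
 * have no implementation in this file; the nocrypt*() stubs are
 * installed for them below and simply fail with -ENOSYS.
 */
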
int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;
	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
	return 0;
}

int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;

	ops->cit_setkey = setkey;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		break;

	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}

	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
	}

out:
	return ret;
}

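/*
 * The cit_iv placement above puts the IV directly behind the tfm
 * context: starting from crypto_tfm_ctx(tfm), the address is rounded
 * up to the cipher's alignment and advanced past the (equally
 * rounded) cra_ctxsize.  For example, with an alignmask of 15
 * (align = 16) and a hypothetical cra_ctxsize of 484, the IV lands
 * ALIGN(484, 16) = 496 bytes past the aligned context base.  The tfm
 * allocation path in crypto/api.c is expected to reserve this extra
 * room for CBC-mode ciphers.
 */
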
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}