https://github.com/torvalds/linux
Tip revision: 44597f65f6af3c692560a639f61d25398d13d1b6 (Linux 2.6.19-rc6), authored by Linus Torvalds on 16 November 2006, 04:03:40 UTC
cipher.c
/*
 * Cryptographic API.
 *
 * Cipher operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) 
 * any later version.
 *
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"

struct cipher_alg_compat {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
	                  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);

	unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
	unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes);
};

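/*
 * Block XOR helpers used for CBC chaining.  Both operands are assumed
 * to be sufficiently aligned for u32 accesses.
 */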
static inline void xor_64(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
}

static inline void xor_128(u8 *a, const u8 *b)
{
	((u32 *)a)[0] ^= ((u32 *)b)[0];
	((u32 *)a)[1] ^= ((u32 *)b)[1];
	((u32 *)a)[2] ^= ((u32 *)b)[2];
	((u32 *)a)[3] ^= ((u32 *)b)[3];
}

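/*
 * Slow path: the current block straddles a scatterlist segment (or
 * page) boundary.  Gather it into an aligned stack buffer, process it
 * there, and scatter the result back out.
 */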
static unsigned int crypt_slow(const struct cipher_desc *desc,
			       struct scatter_walk *in,
			       struct scatter_walk *out, unsigned int bsize)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
	u8 buffer[bsize * 2 + alignmask];
	u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	u8 *dst = src + bsize;

	scatterwalk_copychunks(src, in, bsize, 0);
	desc->prfn(desc, dst, src, bsize);
	scatterwalk_copychunks(dst, out, bsize, 1);

	return bsize;
}

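/*
 * Fast path: process as many whole blocks as fit in the current
 * contiguous mapping.  If the caller passed a bounce buffer (tmp),
 * the data is copied through it to satisfy the alignment mask.
 */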
static inline unsigned int crypt_fast(const struct cipher_desc *desc,
				      struct scatter_walk *in,
				      struct scatter_walk *out,
				      unsigned int nbytes, u8 *tmp)
{
	u8 *src, *dst;
	u8 *real_src, *real_dst;

	real_src = scatterwalk_map(in, 0);
	real_dst = scatterwalk_map(out, 1);

	src = real_src;
	dst = scatterwalk_samebuf(in, out) ? src : real_dst;

	if (tmp) {
		memcpy(tmp, src, nbytes);
		src = tmp;
		dst = tmp;
	}

	nbytes = desc->prfn(desc, dst, src, nbytes);

	if (tmp)
		memcpy(real_dst, tmp, nbytes);

	scatterwalk_unmap(real_src, 0);
	scatterwalk_unmap(real_dst, 1);

	scatterwalk_advance(in, nbytes);
	scatterwalk_advance(out, nbytes);

	return nbytes;
}

/*
 * Generic encrypt/decrypt wrapper for ciphers.  It handles operations
 * across multiple page boundaries by using temporary blocks.  In user
 * context, the kernel is given a chance to schedule us once per page.
 */
static int crypt(const struct cipher_desc *desc,
		 struct scatterlist *dst,
		 struct scatterlist *src,
		 unsigned int nbytes)
{
	struct scatter_walk walk_in, walk_out;
	struct crypto_tfm *tfm = desc->tfm;
	const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned long buffer = 0;

	if (!nbytes)
		return 0;

	if (nbytes % bsize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return -EINVAL;
	}

	scatterwalk_start(&walk_in, src);
	scatterwalk_start(&walk_out, dst);

	for (;;) {
		unsigned int n = nbytes;
		u8 *tmp = NULL;

		if (!scatterwalk_aligned(&walk_in, alignmask) ||
		    !scatterwalk_aligned(&walk_out, alignmask)) {
			if (!buffer) {
				buffer = __get_free_page(GFP_ATOMIC);
				if (!buffer)
					n = 0;
			}
			tmp = (u8 *)buffer;
		}

		n = scatterwalk_clamp(&walk_in, n);
		n = scatterwalk_clamp(&walk_out, n);

		if (likely(n >= bsize))
			n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
		else
			n = crypt_slow(desc, &walk_in, &walk_out, bsize);

		nbytes -= n;

		scatterwalk_done(&walk_in, 0, nbytes);
		scatterwalk_done(&walk_out, 1, nbytes);

		if (!nbytes)
			break;

		crypto_yield(tfm->crt_flags);
	}

	if (buffer)
		free_page(buffer);

	return 0;
}

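/*
 * If the caller-supplied IV does not satisfy the algorithm's alignment
 * mask, chain through an aligned stack copy and write the final IV
 * back once the walk completes.
 */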
static int crypt_iv_unaligned(struct cipher_desc *desc,
			      struct scatterlist *dst,
			      struct scatterlist *src,
			      unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	u8 *iv = desc->info;

	if (unlikely(((unsigned long)iv & alignmask))) {
		unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
		u8 buffer[ivsize + alignmask];
		u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
		int err;

		desc->info = memcpy(tmp, iv, ivsize);
		err = crypt(desc, dst, src, nbytes);
		memcpy(iv, tmp, ivsize);

		return err;
	}

	return crypt(desc, dst, src, nbytes);
}

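/*
 * Software CBC encryption over one contiguous run of blocks:
 *
 *	C[0] = E_K(P[0] ^ IV),	C[i] = E_K(P[i] ^ C[i-1])
 *
 * The IV buffer is updated in place so chaining continues across calls.
 */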
static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);

	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		xor(iv, src);
		fn(tfm, dst, iv);
		memcpy(iv, dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

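/*
 * Software CBC decryption: P[i] = D_K(C[i]) ^ C[i-1].  For in-place
 * operation (src == dst) each ciphertext block must survive until it
 * has been saved as the next IV, hence the aligned stack bounce buffer.
 */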
static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
					u8 *dst, const u8 *src,
					unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);

	u8 stack[src == dst ? bsize + alignmask : 0];
	u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
	u8 **dst_p = src == dst ? &buf : &dst;

	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	u8 *iv = desc->info;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		u8 *tmp_dst = *dst_p;

		fn(tfm, tmp_dst, src);
		xor(tmp_dst, iv);
		memcpy(iv, src, bsize);
		if (tmp_dst != dst)
			memcpy(dst, tmp_dst, bsize);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

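/* Software ECB: every block is processed independently, no chaining. */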
static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
				const u8 *src, unsigned int nbytes)
{
	struct crypto_tfm *tfm = desc->tfm;
	int bsize = crypto_tfm_alg_blocksize(tfm);
	void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
	unsigned int done = 0;

	nbytes -= bsize;

	do {
		fn(tfm, dst, src);

		src += bsize;
		dst += bsize;
	} while ((done += bsize) <= nbytes);

	return done;
}

static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	struct cipher_alg *cia = &tfm->__crt_alg->cra_cipher;
	
	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	if (keylen < cia->cia_min_keysize || keylen > cia->cia_max_keysize) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	return cia->cia_setkey(tfm, key, keylen);
}

static int ecb_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int ecb_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_encrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_encrypt;
	desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int cbc_decrypt(struct crypto_tfm *tfm,
                       struct scatterlist *dst,
                       struct scatterlist *src,
		       unsigned int nbytes)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = tfm->crt_cipher.cit_iv;

	return crypt(&desc, dst, src, nbytes);
}

static int cbc_decrypt_iv(struct crypto_tfm *tfm,
                          struct scatterlist *dst,
                          struct scatterlist *src,
                          unsigned int nbytes, u8 *iv)
{
	struct cipher_desc desc;
	struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;

	desc.tfm = tfm;
	desc.crfn = cipher->cia_decrypt;
	desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
	desc.info = iv;

	return crypt_iv_unaligned(&desc, dst, src, nbytes);
}

static int nocrypt(struct crypto_tfm *tfm,
                   struct scatterlist *dst,
                   struct scatterlist *src,
		   unsigned int nbytes)
{
	return -ENOSYS;
}

static int nocrypt_iv(struct crypto_tfm *tfm,
                      struct scatterlist *dst,
                      struct scatterlist *src,
                      unsigned int nbytes, u8 *iv)
{
	return -ENOSYS;
}

int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
{
	u32 mode = flags & CRYPTO_TFM_MODE_MASK;
	tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
	return 0;
}

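/*
 * Single-block crypt with a source or destination that violates the
 * algorithm's alignment mask: bounce through an aligned stack buffer.
 */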
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned int size = crypto_tfm_alg_blocksize(tfm);
	u8 buffer[size + alignmask];
	u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);

	memcpy(tmp, src, size);
	fn(tfm, tmp, tmp);
	memcpy(dst, tmp, size);
}

static void cipher_encrypt_unaligned(struct crypto_tfm *tfm,
				     u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		cipher_crypt_unaligned(cipher->cia_encrypt, tfm, dst, src);
		return;
	}

	cipher->cia_encrypt(tfm, dst, src);
}

static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
				     u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	if (unlikely(((unsigned long)dst | (unsigned long)src) & alignmask)) {
		cipher_crypt_unaligned(cipher->cia_decrypt, tfm, dst, src);
		return;
	}

	cipher->cia_decrypt(tfm, dst, src);
}

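/*
 * Install the mode-of-operation handlers for this transform.  For CBC,
 * the per-tfm IV lives in the memory immediately following the
 * (alignmask-aligned) algorithm context.
 */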
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
	int ret = 0;
	struct cipher_tfm *ops = &tfm->crt_cipher;
	struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;

	ops->cit_setkey = setkey;
	ops->cit_encrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_encrypt_unaligned : cipher->cia_encrypt;
	ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
		cipher_decrypt_unaligned : cipher->cia_decrypt;

	switch (tfm->crt_cipher.cit_mode) {
	case CRYPTO_TFM_MODE_ECB:
		ops->cit_encrypt = ecb_encrypt;
		ops->cit_decrypt = ecb_decrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;
		
	case CRYPTO_TFM_MODE_CBC:
		ops->cit_encrypt = cbc_encrypt;
		ops->cit_decrypt = cbc_decrypt;
		ops->cit_encrypt_iv = cbc_encrypt_iv;
		ops->cit_decrypt_iv = cbc_decrypt_iv;
		break;
		
	case CRYPTO_TFM_MODE_CFB:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;
	
	case CRYPTO_TFM_MODE_CTR:
		ops->cit_encrypt = nocrypt;
		ops->cit_decrypt = nocrypt;
		ops->cit_encrypt_iv = nocrypt_iv;
		ops->cit_decrypt_iv = nocrypt_iv;
		break;

	default:
		BUG();
	}
	
	if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
		unsigned long align;
		unsigned long addr;

		switch (crypto_tfm_alg_blocksize(tfm)) {
		case 8:
			ops->cit_xor_block = xor_64;
			break;

		case 16:
			ops->cit_xor_block = xor_128;
			break;

		default:
			printk(KERN_WARNING "%s: block size %u not supported\n",
			       crypto_tfm_alg_name(tfm),
			       crypto_tfm_alg_blocksize(tfm));
			ret = -EINVAL;
			goto out;
		}

		ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
		align = crypto_tfm_alg_alignmask(tfm) + 1;
		addr = (unsigned long)crypto_tfm_ctx(tfm);
		addr = ALIGN(addr, align);
		addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
		ops->cit_iv = (void *)addr;
	}

out:	
	return ret;
}

void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
{
}
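
/*
 * Example (illustrative only, not part of this file): a minimal sketch
 * of how a 2.6-era caller would drive the entry points installed by
 * crypto_init_cipher_ops(), using the legacy crypto_alloc_tfm() /
 * crypto_cipher_*() interface.  sg_init_one() is assumed to be
 * available from <linux/scatterlist.h>; error handling is abbreviated,
 * and nbytes must be a multiple of the cipher block size.
 */
#if 0
static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
			       u8 *iv, void *buf, unsigned int nbytes)
{
	struct crypto_tfm *tfm;
	struct scatterlist sg;
	int err;

	/* Instantiate "aes" wrapped in the CBC handling above. */
	tfm = crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC);
	if (!tfm)
		return -ENOENT;

	err = crypto_cipher_setkey(tfm, key, keylen);	/* -> setkey() */
	if (!err) {
		sg_init_one(&sg, buf, nbytes);
		/* -> cbc_encrypt_iv() via ops->cit_encrypt_iv */
		err = crypto_cipher_encrypt_iv(tfm, &sg, &sg, nbytes, iv);
	}

	crypto_free_tfm(tfm);
	return err;
}
#endif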