Diffstat (limited to 'kernel/include/crypto')
-rw-r--r--  kernel/include/crypto/ablk_helper.h        |  31
-rw-r--r--  kernel/include/crypto/aead.h               | 105
-rw-r--r--  kernel/include/crypto/aes.h                |  39
-rw-r--r--  kernel/include/crypto/algapi.h             | 419
-rw-r--r--  kernel/include/crypto/authenc.h            |  37
-rw-r--r--  kernel/include/crypto/b128ops.h            |  80
-rw-r--r--  kernel/include/crypto/blowfish.h           |  23
-rw-r--r--  kernel/include/crypto/cast5.h              |  23
-rw-r--r--  kernel/include/crypto/cast6.h              |  24
-rw-r--r--  kernel/include/crypto/cast_common.h        |   9
-rw-r--r--  kernel/include/crypto/compress.h           | 145
-rw-r--r--  kernel/include/crypto/cryptd.h             |  69
-rw-r--r--  kernel/include/crypto/crypto_wq.h          |   7
-rw-r--r--  kernel/include/crypto/ctr.h                |  20
-rw-r--r--  kernel/include/crypto/des.h                |  22
-rw-r--r--  kernel/include/crypto/drbg.h               | 296
-rw-r--r--  kernel/include/crypto/gf128mul.h           | 200
-rw-r--r--  kernel/include/crypto/hash.h               | 850
-rw-r--r--  kernel/include/crypto/hash_info.h          |  40
-rw-r--r--  kernel/include/crypto/if_alg.h             |  96
-rw-r--r--  kernel/include/crypto/internal/aead.h      |  82
-rw-r--r--  kernel/include/crypto/internal/compress.h  |  28
-rw-r--r--  kernel/include/crypto/internal/hash.h      | 247
-rw-r--r--  kernel/include/crypto/internal/rng.h       |  26
-rw-r--r--  kernel/include/crypto/internal/skcipher.h  | 111
-rw-r--r--  kernel/include/crypto/lrw.h                |  43
-rw-r--r--  kernel/include/crypto/mcryptd.h            | 112
-rw-r--r--  kernel/include/crypto/md5.h                |  17
-rw-r--r--  kernel/include/crypto/null.h               |  11
-rw-r--r--  kernel/include/crypto/padlock.h            |  29
-rw-r--r--  kernel/include/crypto/pcrypt.h             |  51
-rw-r--r--  kernel/include/crypto/pkcs7.h              |  36
-rw-r--r--  kernel/include/crypto/public_key.h         | 107
-rw-r--r--  kernel/include/crypto/rng.h                | 154
-rw-r--r--  kernel/include/crypto/scatterwalk.h        | 105
-rw-r--r--  kernel/include/crypto/serpent.h            |  27
-rw-r--r--  kernel/include/crypto/sha.h                | 104
-rw-r--r--  kernel/include/crypto/sha1_base.h          | 106
-rw-r--r--  kernel/include/crypto/sha256_base.h        | 128
-rw-r--r--  kernel/include/crypto/sha512_base.h        | 131
-rw-r--r--  kernel/include/crypto/skcipher.h           | 110
-rw-r--r--  kernel/include/crypto/twofish.h            |  24
-rw-r--r--  kernel/include/crypto/vmac.h               |  63
-rw-r--r--  kernel/include/crypto/xts.h                |  27
44 files changed, 4414 insertions(+), 0 deletions(-)
diff --git a/kernel/include/crypto/ablk_helper.h b/kernel/include/crypto/ablk_helper.h
new file mode 100644
index 000000000..4f93df50c
--- /dev/null
+++ b/kernel/include/crypto/ablk_helper.h
@@ -0,0 +1,31 @@
+/*
+ * Shared async block cipher helpers
+ */
+
+#ifndef _CRYPTO_ABLK_HELPER_H
+#define _CRYPTO_ABLK_HELPER_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/cryptd.h>
+
+struct async_helper_ctx {
+ struct cryptd_ablkcipher *cryptd_tfm;
+};
+
+extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
+ unsigned int key_len);
+
+extern int __ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_encrypt(struct ablkcipher_request *req);
+
+extern int ablk_decrypt(struct ablkcipher_request *req);
+
+extern void ablk_exit(struct crypto_tfm *tfm);
+
+extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
+
+extern int ablk_init(struct crypto_tfm *tfm);
+
+#endif /* _CRYPTO_ABLK_HELPER_H */
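
These helpers let SIMD cipher glue expose a cryptd-backed asynchronous
algorithm with almost no per-driver code. Below is a minimal sketch of the
wiring; the names "foo", "ecb(foo)" and "__driver-ecb-foo-bar" are
hypothetical, and the pattern follows the x86 AES-NI glue (the alg would be
registered with crypto_register_alg()):

#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <linux/crypto.h>
#include <linux/module.h>

static int foo_ablk_init(struct crypto_tfm *tfm)
{
        /* Bind the internal synchronous implementation through cryptd */
        return ablk_init_common(tfm, "__driver-ecb-foo-bar");
}

static struct crypto_alg foo_ablk_alg = {
        .cra_name               = "ecb(foo)",
        .cra_driver_name        = "ecb-foo-simd",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
        .cra_blocksize          = 16,
        .cra_ctxsize            = sizeof(struct async_helper_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = foo_ablk_init,
        .cra_exit               = ablk_exit,
        .cra_u = {
                .ablkcipher = {
                        .min_keysize    = 16,
                        .max_keysize    = 32,
                        .ivsize         = 0,    /* ECB carries no IV */
                        .setkey         = ablk_set_key,
                        .encrypt        = ablk_encrypt,
                        .decrypt        = ablk_decrypt,
                },
        },
};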
diff --git a/kernel/include/crypto/aead.h b/kernel/include/crypto/aead.h
new file mode 100644
index 000000000..94b19be67
--- /dev/null
+++ b/kernel/include/crypto/aead.h
@@ -0,0 +1,105 @@
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_AEAD_H
+#define _CRYPTO_AEAD_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ * struct aead_givcrypt_request - AEAD request with IV generation
+ * @seq: Sequence number for IV generation
+ * @giv: Space for generated IV
+ * @areq: The AEAD request itself
+ */
+struct aead_givcrypt_request {
+ u64 seq;
+ u8 *giv;
+
+ struct aead_request areq;
+};
+
+static inline struct crypto_aead *aead_givcrypt_reqtfm(
+ struct aead_givcrypt_request *req)
+{
+ return crypto_aead_reqtfm(&req->areq);
+}
+
+static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+ struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
+ return crt->givencrypt(req);
+}
+
+static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
+{
+ struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req));
+ return crt->givdecrypt(req);
+}
+
+static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
+ struct crypto_aead *tfm)
+{
+ req->areq.base.tfm = crypto_aead_tfm(tfm);
+}
+
+static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
+ struct crypto_aead *tfm, gfp_t gfp)
+{
+ struct aead_givcrypt_request *req;
+
+ req = kmalloc(sizeof(struct aead_givcrypt_request) +
+ crypto_aead_reqsize(tfm), gfp);
+
+ if (likely(req))
+ aead_givcrypt_set_tfm(req, tfm);
+
+ return req;
+}
+
+static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
+{
+ kfree(req);
+}
+
+static inline void aead_givcrypt_set_callback(
+ struct aead_givcrypt_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
+{
+ aead_request_set_callback(&req->areq, flags, compl, data);
+}
+
+static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int nbytes, void *iv)
+{
+ aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
+}
+
+static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
+ struct scatterlist *assoc,
+ unsigned int assoclen)
+{
+ aead_request_set_assoc(&req->areq, assoc, assoclen);
+}
+
+static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
+ u8 *giv, u64 seq)
+{
+ req->giv = giv;
+ req->seq = seq;
+}
+
+#endif /* _CRYPTO_AEAD_H */
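
A caller-side sketch of the IV-generation interface above (error handling
trimmed; a real caller must also handle -EINPROGRESS/-EBUSY completions from
an asynchronous transform via the callback):

#include <crypto/aead.h>

static int example_givencrypt(struct crypto_aead *tfm,
                              struct scatterlist *src, struct scatterlist *dst,
                              struct scatterlist *assoc, unsigned int assoclen,
                              unsigned int nbytes, u8 *iv, u8 *giv, u64 seq)
{
        struct aead_givcrypt_request *req;
        int err;

        req = aead_givcrypt_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        aead_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_givcrypt_set_crypt(req, src, dst, nbytes, iv);
        aead_givcrypt_set_assoc(req, assoc, assoclen);
        aead_givcrypt_set_giv(req, giv, seq);   /* IV derived from seq into giv */

        err = crypto_aead_givencrypt(req);
        aead_givcrypt_free(req);
        return err;
}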
diff --git a/kernel/include/crypto/aes.h b/kernel/include/crypto/aes.h
new file mode 100644
index 000000000..7524ba3b6
--- /dev/null
+++ b/kernel/include/crypto/aes.h
@@ -0,0 +1,39 @@
+/*
+ * Common values for AES algorithms
+ */
+
+#ifndef _CRYPTO_AES_H
+#define _CRYPTO_AES_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define AES_MIN_KEY_SIZE 16
+#define AES_MAX_KEY_SIZE 32
+#define AES_KEYSIZE_128 16
+#define AES_KEYSIZE_192 24
+#define AES_KEYSIZE_256 32
+#define AES_BLOCK_SIZE 16
+#define AES_MAX_KEYLENGTH (15 * 16)
+#define AES_MAX_KEYLENGTH_U32 (AES_MAX_KEYLENGTH / sizeof(u32))
+
+/*
+ * Please ensure that the first two fields are 16-byte aligned
+ * relative to the start of the structure, i.e., don't move them!
+ */
+struct crypto_aes_ctx {
+ u32 key_enc[AES_MAX_KEYLENGTH_U32];
+ u32 key_dec[AES_MAX_KEYLENGTH_U32];
+ u32 key_length;
+};
+
+extern const u32 crypto_ft_tab[4][256];
+extern const u32 crypto_fl_tab[4][256];
+extern const u32 crypto_it_tab[4][256];
+extern const u32 crypto_il_tab[4][256];
+
+int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len);
+int crypto_aes_expand_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
+ unsigned int key_len);
+#endif
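
A short sketch of driving the generic key expansion directly, as AES
implementations that share struct crypto_aes_ctx do:

#include <crypto/aes.h>

/* crypto_aes_expand_key() rejects lengths other than 16/24/32 bytes with
 * -EINVAL; otherwise it fills both the key_enc and key_dec round-key
 * schedules and records key_len in ctx->key_length. */
static int example_aes_expand(struct crypto_aes_ctx *ctx,
                              const u8 *key, unsigned int key_len)
{
        return crypto_aes_expand_key(ctx, key, key_len);
}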
diff --git a/kernel/include/crypto/algapi.h b/kernel/include/crypto/algapi.h
new file mode 100644
index 000000000..0ecb7688a
--- /dev/null
+++ b/kernel/include/crypto/algapi.h
@@ -0,0 +1,419 @@
+/*
+ * Cryptographic API for algorithms (i.e., low-level API).
+ *
+ * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_ALGAPI_H
+#define _CRYPTO_ALGAPI_H
+
+#include <linux/crypto.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+struct module;
+struct rtattr;
+struct seq_file;
+
+struct crypto_type {
+ unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+ unsigned int (*extsize)(struct crypto_alg *alg);
+ int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
+ int (*init_tfm)(struct crypto_tfm *tfm);
+ void (*show)(struct seq_file *m, struct crypto_alg *alg);
+ int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
+ struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+
+ unsigned int type;
+ unsigned int maskclear;
+ unsigned int maskset;
+ unsigned int tfmsize;
+};
+
+struct crypto_instance {
+ struct crypto_alg alg;
+
+ struct crypto_template *tmpl;
+ struct hlist_node list;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+struct crypto_template {
+ struct list_head list;
+ struct hlist_head instances;
+ struct module *module;
+
+ struct crypto_instance *(*alloc)(struct rtattr **tb);
+ void (*free)(struct crypto_instance *inst);
+ int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
+
+ char name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct crypto_spawn {
+ struct list_head list;
+ struct crypto_alg *alg;
+ struct crypto_instance *inst;
+ const struct crypto_type *frontend;
+ u32 mask;
+};
+
+struct crypto_queue {
+ struct list_head list;
+ struct list_head *backlog;
+
+ unsigned int qlen;
+ unsigned int max_qlen;
+};
+
+struct scatter_walk {
+ struct scatterlist *sg;
+ unsigned int offset;
+};
+
+struct blkcipher_walk {
+ union {
+ struct {
+ struct page *page;
+ unsigned long offset;
+ } phys;
+
+ struct {
+ u8 *page;
+ u8 *addr;
+ } virt;
+ } src, dst;
+
+ struct scatter_walk in;
+ unsigned int nbytes;
+
+ struct scatter_walk out;
+ unsigned int total;
+
+ void *page;
+ u8 *buffer;
+ u8 *iv;
+ unsigned int ivsize;
+
+ int flags;
+ unsigned int walk_blocksize;
+ unsigned int cipher_blocksize;
+ unsigned int alignmask;
+};
+
+struct ablkcipher_walk {
+ struct {
+ struct page *page;
+ unsigned int offset;
+ } src, dst;
+
+ struct scatter_walk in;
+ unsigned int nbytes;
+ struct scatter_walk out;
+ unsigned int total;
+ struct list_head buffers;
+ u8 *iv_buffer;
+ u8 *iv;
+ int flags;
+ unsigned int blocksize;
+};
+
+extern const struct crypto_type crypto_ablkcipher_type;
+extern const struct crypto_type crypto_aead_type;
+extern const struct crypto_type crypto_blkcipher_type;
+
+void crypto_mod_put(struct crypto_alg *alg);
+
+int crypto_register_template(struct crypto_template *tmpl);
+void crypto_unregister_template(struct crypto_template *tmpl);
+struct crypto_template *crypto_lookup_template(const char *name);
+
+int crypto_register_instance(struct crypto_template *tmpl,
+ struct crypto_instance *inst);
+int crypto_unregister_instance(struct crypto_instance *inst);
+
+int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst, u32 mask);
+int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
+ struct crypto_instance *inst,
+ const struct crypto_type *frontend);
+
+void crypto_drop_spawn(struct crypto_spawn *spawn);
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask);
+void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
+
+static inline void crypto_set_spawn(struct crypto_spawn *spawn,
+ struct crypto_instance *inst)
+{
+ spawn->inst = inst;
+}
+
+struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
+int crypto_check_attr_type(struct rtattr **tb, u32 type);
+const char *crypto_attr_alg_name(struct rtattr *rta);
+struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
+ const struct crypto_type *frontend,
+ u32 type, u32 mask);
+
+static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
+ u32 type, u32 mask)
+{
+ return crypto_attr_alg2(rta, NULL, type, mask);
+}
+
+int crypto_attr_u32(struct rtattr *rta, u32 *num);
+void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
+ unsigned int head);
+struct crypto_instance *crypto_alloc_instance(const char *name,
+ struct crypto_alg *alg);
+
+void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
+int crypto_enqueue_request(struct crypto_queue *queue,
+ struct crypto_async_request *request);
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
+int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+
+/* These functions require the input/output to be aligned as u32. */
+void crypto_inc(u8 *a, unsigned int size);
+void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+
+int blkcipher_walk_done(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk, int err);
+int blkcipher_walk_virt(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk);
+int blkcipher_walk_phys(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk);
+int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ unsigned int blocksize);
+int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_aead *tfm,
+ unsigned int blocksize);
+
+int ablkcipher_walk_done(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk, int err);
+int ablkcipher_walk_phys(struct ablkcipher_request *req,
+ struct ablkcipher_walk *walk);
+void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
+
+static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
+{
+ return PTR_ALIGN(crypto_tfm_ctx(tfm),
+ crypto_tfm_alg_alignmask(tfm) + 1);
+}
+
+static inline struct crypto_instance *crypto_tfm_alg_instance(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm->__crt_alg, struct crypto_instance, alg);
+}
+
+static inline void *crypto_instance_ctx(struct crypto_instance *inst)
+{
+ return inst->__ctx;
+}
+
+static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
+ struct crypto_ablkcipher *tfm)
+{
+ return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
+}
+
+static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
+{
+ return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
+}
+
+static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *crypto_aead_alg_instance(
+ struct crypto_aead *aead)
+{
+ return crypto_tfm_alg_instance(&aead->base);
+}
+
+static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
+ struct crypto_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
+static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
+static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
+{
+ return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
+}
+
+static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_HASH;
+ u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;
+
+ return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
+static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ walk->in.sg = src;
+ walk->out.sg = dst;
+ walk->total = nbytes;
+}
+
+static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
+ struct scatterlist *dst,
+ struct scatterlist *src,
+ unsigned int nbytes)
+{
+ walk->in.sg = src;
+ walk->out.sg = dst;
+ walk->total = nbytes;
+ INIT_LIST_HEAD(&walk->buffers);
+}
+
+static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
+{
+ if (unlikely(!list_empty(&walk->buffers)))
+ __ablkcipher_walk_complete(walk);
+}
+
+static inline struct crypto_async_request *crypto_get_backlog(
+ struct crypto_queue *queue)
+{
+ return queue->backlog == &queue->list ? NULL :
+ container_of(queue->backlog, struct crypto_async_request, list);
+}
+
+static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
+ struct ablkcipher_request *request)
+{
+ return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ablkcipher_request *ablkcipher_dequeue_request(
+ struct crypto_queue *queue)
+{
+ return ablkcipher_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
+ struct crypto_ablkcipher *tfm)
+{
+ return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
+}
+
+static inline void *aead_request_ctx(struct aead_request *req)
+{
+ return req->__ctx;
+}
+
+static inline void aead_request_complete(struct aead_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 aead_request_flags(struct aead_request *req)
+{
+ return req->base.flags;
+}
+
+static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
+ u32 type, u32 mask)
+{
+ return crypto_attr_alg(tb[1], type, mask);
+}
+
+/*
+ * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
+ * Otherwise returns zero.
+ */
+static inline int crypto_requires_sync(u32 type, u32 mask)
+{
+ return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
+}
+
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ * timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+ return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
+
+static inline void crypto_yield(u32 flags)
+{
+ if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+ cond_resched();
+}
+
+#endif /* _CRYPTO_ALGAPI_H */
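
As an illustration of the request-queue helpers above, a sketch of the drain
pattern asynchronous drivers (cryptd among them) use: peek at the backlog
before dequeueing, then signal the backlogged request with -EINPROGRESS. The
real code serializes queue access with a lock, omitted here:

#include <crypto/algapi.h>

static void example_queue_drain(struct crypto_queue *queue)
{
        struct crypto_async_request *req, *backlog;

        backlog = crypto_get_backlog(queue);
        req = crypto_dequeue_request(queue);
        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        /* ... process req, then report the result: req->complete(req, err) */
}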
diff --git a/kernel/include/crypto/authenc.h b/kernel/include/crypto/authenc.h
new file mode 100644
index 000000000..677505953
--- /dev/null
+++ b/kernel/include/crypto/authenc.h
@@ -0,0 +1,37 @@
+/*
+ * Authenc: Simple AEAD wrapper for IPsec
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+#ifndef _CRYPTO_AUTHENC_H
+#define _CRYPTO_AUTHENC_H
+
+#include <linux/types.h>
+
+enum {
+ CRYPTO_AUTHENC_KEYA_UNSPEC,
+ CRYPTO_AUTHENC_KEYA_PARAM,
+};
+
+struct crypto_authenc_key_param {
+ __be32 enckeylen;
+};
+
+struct crypto_authenc_keys {
+ const u8 *authkey;
+ const u8 *enckey;
+
+ unsigned int authkeylen;
+ unsigned int enckeylen;
+};
+
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+ unsigned int keylen);
+
+#endif /* _CRYPTO_AUTHENC_H */
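
A sketch of the intended use of crypto_authenc_extractkeys(): an
authenc-style template's setkey splits the rtattr-encoded key blob into its
authentication and encryption halves before keying the two child transforms:

#include <crypto/authenc.h>
#include <linux/errno.h>

static int example_authenc_setkey(const u8 *key, unsigned int keylen)
{
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen))
                return -EINVAL;

        /* keys.authkey/keys.authkeylen key the hash;
         * keys.enckey/keys.enckeylen key the cipher. */
        return 0;
}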
diff --git a/kernel/include/crypto/b128ops.h b/kernel/include/crypto/b128ops.h
new file mode 100644
index 000000000..0b8e6bc55
--- /dev/null
+++ b/kernel/include/crypto/b128ops.h
@@ -0,0 +1,80 @@
+/* b128ops.h - common 128-bit block operations
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 13/06/2006
+*/
+
+#ifndef _CRYPTO_B128OPS_H
+#define _CRYPTO_B128OPS_H
+
+#include <linux/types.h>
+
+typedef struct {
+ u64 a, b;
+} u128;
+
+typedef struct {
+ __be64 a, b;
+} be128;
+
+typedef struct {
+ __le64 b, a;
+} le128;
+
+static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
+{
+ r->a = p->a ^ q->a;
+ r->b = p->b ^ q->b;
+}
+
+static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
+{
+ u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
+{
+ u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
+}
+
+#endif /* _CRYPTO_B128OPS_H */
diff --git a/kernel/include/crypto/blowfish.h b/kernel/include/crypto/blowfish.h
new file mode 100644
index 000000000..1450d4a27
--- /dev/null
+++ b/kernel/include/crypto/blowfish.h
@@ -0,0 +1,23 @@
+/*
+ * Common values for blowfish algorithms
+ */
+
+#ifndef _CRYPTO_BLOWFISH_H
+#define _CRYPTO_BLOWFISH_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define BF_BLOCK_SIZE 8
+#define BF_MIN_KEY_SIZE 4
+#define BF_MAX_KEY_SIZE 56
+
+struct bf_ctx {
+ u32 p[18];
+ u32 s[1024];
+};
+
+int blowfish_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int key_len);
+
+#endif
diff --git a/kernel/include/crypto/cast5.h b/kernel/include/crypto/cast5.h
new file mode 100644
index 000000000..14fbf39d6
--- /dev/null
+++ b/kernel/include/crypto/cast5.h
@@ -0,0 +1,23 @@
+#ifndef _CRYPTO_CAST5_H
+#define _CRYPTO_CAST5_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <crypto/cast_common.h>
+
+#define CAST5_BLOCK_SIZE 8
+#define CAST5_MIN_KEY_SIZE 5
+#define CAST5_MAX_KEY_SIZE 16
+
+struct cast5_ctx {
+ u32 Km[16];
+ u8 Kr[16];
+ int rr; /* rr ? rounds = 12 : rounds = 16; (rfc 2144) */
+};
+
+int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif
diff --git a/kernel/include/crypto/cast6.h b/kernel/include/crypto/cast6.h
new file mode 100644
index 000000000..32b60eb8b
--- /dev/null
+++ b/kernel/include/crypto/cast6.h
@@ -0,0 +1,24 @@
+#ifndef _CRYPTO_CAST6_H
+#define _CRYPTO_CAST6_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <crypto/cast_common.h>
+
+#define CAST6_BLOCK_SIZE 16
+#define CAST6_MIN_KEY_SIZE 16
+#define CAST6_MAX_KEY_SIZE 32
+
+struct cast6_ctx {
+ u32 Km[12][4];
+ u8 Kr[12][4];
+};
+
+int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
+ unsigned int keylen, u32 *flags);
+int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif
diff --git a/kernel/include/crypto/cast_common.h b/kernel/include/crypto/cast_common.h
new file mode 100644
index 000000000..b7df35cd9
--- /dev/null
+++ b/kernel/include/crypto/cast_common.h
@@ -0,0 +1,9 @@
+#ifndef _CRYPTO_CAST_COMMON_H
+#define _CRYPTO_CAST_COMMON_H
+
+extern const u32 cast_s1[256];
+extern const u32 cast_s2[256];
+extern const u32 cast_s3[256];
+extern const u32 cast_s4[256];
+
+#endif
diff --git a/kernel/include/crypto/compress.h b/kernel/include/crypto/compress.h
new file mode 100644
index 000000000..86163ef24
--- /dev/null
+++ b/kernel/include/crypto/compress.h
@@ -0,0 +1,145 @@
+/*
+ * Compress: Compression algorithms under the cryptographic API.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CRYPTO_COMPRESS_H
+#define _CRYPTO_COMPRESS_H
+
+#include <linux/crypto.h>
+
+
+struct comp_request {
+ const void *next_in; /* next input byte */
+ void *next_out; /* next output byte */
+ unsigned int avail_in; /* bytes available at next_in */
+ unsigned int avail_out; /* bytes available at next_out */
+};
+
+enum zlib_comp_params {
+ ZLIB_COMP_LEVEL = 1, /* e.g. Z_DEFAULT_COMPRESSION */
+ ZLIB_COMP_METHOD, /* e.g. Z_DEFLATED */
+ ZLIB_COMP_WINDOWBITS, /* e.g. MAX_WBITS */
+ ZLIB_COMP_MEMLEVEL, /* e.g. DEF_MEM_LEVEL */
+ ZLIB_COMP_STRATEGY, /* e.g. Z_DEFAULT_STRATEGY */
+ __ZLIB_COMP_MAX,
+};
+
+#define ZLIB_COMP_MAX (__ZLIB_COMP_MAX - 1)
+
+
+enum zlib_decomp_params {
+ ZLIB_DECOMP_WINDOWBITS = 1, /* e.g. DEF_WBITS */
+ __ZLIB_DECOMP_MAX,
+};
+
+#define ZLIB_DECOMP_MAX (__ZLIB_DECOMP_MAX - 1)
+
+
+struct crypto_pcomp {
+ struct crypto_tfm base;
+};
+
+struct pcomp_alg {
+ int (*compress_setup)(struct crypto_pcomp *tfm, void *params,
+ unsigned int len);
+ int (*compress_init)(struct crypto_pcomp *tfm);
+ int (*compress_update)(struct crypto_pcomp *tfm,
+ struct comp_request *req);
+ int (*compress_final)(struct crypto_pcomp *tfm,
+ struct comp_request *req);
+ int (*decompress_setup)(struct crypto_pcomp *tfm, void *params,
+ unsigned int len);
+ int (*decompress_init)(struct crypto_pcomp *tfm);
+ int (*decompress_update)(struct crypto_pcomp *tfm,
+ struct comp_request *req);
+ int (*decompress_final)(struct crypto_pcomp *tfm,
+ struct comp_request *req);
+
+ struct crypto_alg base;
+};
+
+extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
+{
+ return &tfm->base;
+}
+
+static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
+}
+
+static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct pcomp_alg, base);
+}
+
+static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
+{
+ return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
+}
+
+static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
+ void *params, unsigned int len)
+{
+ return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
+}
+
+static inline int crypto_compress_init(struct crypto_pcomp *tfm)
+{
+ return crypto_pcomp_alg(tfm)->compress_init(tfm);
+}
+
+static inline int crypto_compress_update(struct crypto_pcomp *tfm,
+ struct comp_request *req)
+{
+ return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
+}
+
+static inline int crypto_compress_final(struct crypto_pcomp *tfm,
+ struct comp_request *req)
+{
+ return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
+}
+
+static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
+ void *params, unsigned int len)
+{
+ return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
+}
+
+static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
+{
+ return crypto_pcomp_alg(tfm)->decompress_init(tfm);
+}
+
+static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
+ struct comp_request *req)
+{
+ return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
+}
+
+static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
+ struct comp_request *req)
+{
+ return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
+}
+
+#endif /* _CRYPTO_COMPRESS_H */
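
A sketch of a single-shot compression pass over the pcomp interface,
assuming (as the zlib pcomp implementation does) that @update and @final
return a negative errno on failure and the number of output bytes produced
otherwise; a streaming caller would refill next_in/next_out as
avail_in/avail_out drain:

#include <crypto/compress.h>

static int example_pcomp(struct crypto_pcomp *tfm,
                         const void *in, unsigned int inlen,
                         void *out, unsigned int outlen)
{
        struct comp_request req = {
                .next_in        = in,
                .avail_in       = inlen,
                .next_out       = out,
                .avail_out      = outlen,
        };
        int err;

        err = crypto_compress_init(tfm);
        if (err)
                return err;

        err = crypto_compress_update(tfm, &req);
        if (err < 0)
                return err;

        return crypto_compress_final(tfm, &req);
}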
diff --git a/kernel/include/crypto/cryptd.h b/kernel/include/crypto/cryptd.h
new file mode 100644
index 000000000..ba98918bb
--- /dev/null
+++ b/kernel/include/crypto/cryptd.h
@@ -0,0 +1,69 @@
+/*
+ * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ * Adrian Hoban <adrian.hoban@intel.com>
+ * Gabriele Paoloni <gabriele.paoloni@intel.com>
+ * Aidan O'Mahony (aidan.o.mahony@intel.com)
+ * Copyright (c) 2010, Intel Corporation.
+ */
+
+#ifndef _CRYPTO_CRYPT_H
+#define _CRYPTO_CRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+struct cryptd_ablkcipher {
+ struct crypto_ablkcipher base;
+};
+
+static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
+ struct crypto_ablkcipher *tfm)
+{
+ return (struct cryptd_ablkcipher *)tfm;
+}
+
+/* alg_name should be the name of the algorithm to be wrapped by cryptd */
+struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
+void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
+
+struct cryptd_ahash {
+ struct crypto_ahash base;
+};
+
+static inline struct cryptd_ahash *__cryptd_ahash_cast(
+ struct crypto_ahash *tfm)
+{
+ return (struct cryptd_ahash *)tfm;
+}
+
+/* alg_name should be the name of the algorithm to be wrapped by cryptd */
+struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
+struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
+void cryptd_free_ahash(struct cryptd_ahash *tfm);
+
+struct cryptd_aead {
+ struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+ struct crypto_aead *tfm)
+{
+ return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+ u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
+
+#endif
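
A sketch of the cryptd allocation pattern for hashes (the driver name
"__driver-sha1-foo" is hypothetical): SIMD glue keeps the asynchronous cryptd
handle for the general case and the synchronous child for contexts where the
FPU is already usable:

#include <crypto/cryptd.h>
#include <linux/err.h>

static int example_cryptd_ahash(void)
{
        struct cryptd_ahash *cryptd_tfm;
        struct crypto_shash *child;

        cryptd_tfm = cryptd_alloc_ahash("__driver-sha1-foo", 0, 0);
        if (IS_ERR(cryptd_tfm))
                return PTR_ERR(cryptd_tfm);

        child = cryptd_ahash_child(cryptd_tfm);
        /* ... issue async requests on &cryptd_tfm->base, or hash directly
         * through child when queueing must be avoided ... */

        cryptd_free_ahash(cryptd_tfm);
        return 0;
}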
diff --git a/kernel/include/crypto/crypto_wq.h b/kernel/include/crypto/crypto_wq.h
new file mode 100644
index 000000000..a7d252daf
--- /dev/null
+++ b/kernel/include/crypto/crypto_wq.h
@@ -0,0 +1,7 @@
+#ifndef CRYPTO_WQ_H
+#define CRYPTO_WQ_H
+
+#include <linux/workqueue.h>
+
+extern struct workqueue_struct *kcrypto_wq;
+#endif
diff --git a/kernel/include/crypto/ctr.h b/kernel/include/crypto/ctr.h
new file mode 100644
index 000000000..4180fc080
--- /dev/null
+++ b/kernel/include/crypto/ctr.h
@@ -0,0 +1,20 @@
+/*
+ * CTR: Counter mode
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_CTR_H
+#define _CRYPTO_CTR_H
+
+#define CTR_RFC3686_NONCE_SIZE 4
+#define CTR_RFC3686_IV_SIZE 8
+#define CTR_RFC3686_BLOCK_SIZE 16
+
+#endif /* _CRYPTO_CTR_H */
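
The three sizes compose the 16-byte RFC 3686 counter block: a 4-byte nonce
fixed at setkey time, the 8-byte per-request IV, and a 4-byte big-endian
block counter starting at 1. A minimal sketch:

#include <crypto/ctr.h>
#include <linux/kernel.h>
#include <linux/string.h>

static void rfc3686_counter_block(u8 ctrblk[CTR_RFC3686_BLOCK_SIZE],
                                  const u8 *nonce, const u8 *iv)
{
        memcpy(ctrblk, nonce, CTR_RFC3686_NONCE_SIZE);
        memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, iv, CTR_RFC3686_IV_SIZE);
        *(__be32 *)(ctrblk + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
                cpu_to_be32(1);
}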
diff --git a/kernel/include/crypto/des.h b/kernel/include/crypto/des.h
new file mode 100644
index 000000000..fc6274c6b
--- /dev/null
+++ b/kernel/include/crypto/des.h
@@ -0,0 +1,22 @@
+/*
+ * DES & Triple DES EDE Cipher Algorithms.
+ */
+
+#ifndef __CRYPTO_DES_H
+#define __CRYPTO_DES_H
+
+#define DES_KEY_SIZE 8
+#define DES_EXPKEY_WORDS 32
+#define DES_BLOCK_SIZE 8
+
+#define DES3_EDE_KEY_SIZE (3 * DES_KEY_SIZE)
+#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
+#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
+
+
+extern unsigned long des_ekey(u32 *pe, const u8 *k);
+
+extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+ unsigned int keylen);
+
+#endif /* __CRYPTO_DES_H */
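
des_ekey() returns 0 when the key is weak, which lets a setkey implementation
honor the weak-key request flag. A sketch following the generic DES cipher:

#include <crypto/des.h>
#include <linux/crypto.h>
#include <linux/errno.h>

static int example_des_setkey(u32 expkey[DES_EXPKEY_WORDS],
                              u32 *flags, const u8 *key)
{
        /* Expand the key; reject weak keys only if the caller asked us to */
        if (unlikely(!des_ekey(expkey, key)) &&
            (*flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
                *flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }
        return 0;
}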
diff --git a/kernel/include/crypto/drbg.h b/kernel/include/crypto/drbg.h
new file mode 100644
index 000000000..5186f750c
--- /dev/null
+++ b/kernel/include/crypto/drbg.h
@@ -0,0 +1,296 @@
+/*
+ * DRBG based on NIST SP800-90A
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _DRBG_H
+#define _DRBG_H
+
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * Concatenation Helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocate additional memory, the following data structure
+ * is used to point to the original memory with its size. In addition, it
+ * is used to build a linked list. The linked list defines the concatenation
+ * of individual buffers. The order of memory block referenced in that
+ * linked list determines the order of concatenation.
+ */
+struct drbg_string {
+ const unsigned char *buf;
+ size_t len;
+ struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+ const unsigned char *buf, size_t len)
+{
+ string->buf = buf;
+ string->len = len;
+ INIT_LIST_HEAD(&string->list);
+}
+
+struct drbg_state;
+typedef uint32_t drbg_flag_t;
+
+struct drbg_core {
+ drbg_flag_t flags; /* flags for the cipher */
+ __u8 statelen; /* maximum state length */
+ __u8 blocklen_bytes; /* block size of output in bytes */
+ char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
+ /* kernel crypto API backend cipher name */
+ char backend_cra_name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct drbg_state_ops {
+ int (*update)(struct drbg_state *drbg, struct list_head *seed,
+ int reseed);
+ int (*generate)(struct drbg_state *drbg,
+ unsigned char *buf, unsigned int buflen,
+ struct list_head *addtl);
+ int (*crypto_init)(struct drbg_state *drbg);
+ int (*crypto_fini)(struct drbg_state *drbg);
+
+};
+
+struct drbg_test_data {
+ struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+};
+
+struct drbg_state {
+ spinlock_t drbg_lock; /* lock around DRBG */
+ unsigned char *V; /* internal state 10.1.1.1 1a) */
+ /* hash: static value 10.1.1.1 1b) hmac / ctr: key */
+ unsigned char *C;
+ /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
+ size_t reseed_ctr;
+ /* some memory the DRBG can use for its operation */
+ unsigned char *scratchpad;
+ void *priv_data; /* Cipher handle */
+ bool seeded; /* DRBG fully seeded? */
+ bool pr; /* Prediction resistance enabled? */
+#ifdef CONFIG_CRYPTO_FIPS
+ bool fips_primed; /* Continuous test primed? */
+ unsigned char *prev; /* FIPS 140-2 continuous test value */
+#endif
+ const struct drbg_state_ops *d_ops;
+ const struct drbg_core *core;
+ struct drbg_test_data *test_data;
+};
+
+static inline __u8 drbg_statelen(struct drbg_state *drbg)
+{
+ if (drbg && drbg->core)
+ return drbg->core->statelen;
+ return 0;
+}
+
+static inline __u8 drbg_blocklen(struct drbg_state *drbg)
+{
+ if (drbg && drbg->core)
+ return drbg->core->blocklen_bytes;
+ return 0;
+}
+
+static inline __u8 drbg_keylen(struct drbg_state *drbg)
+{
+ if (drbg && drbg->core)
+ return (drbg->core->statelen - drbg->core->blocklen_bytes);
+ return 0;
+}
+
+static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
+{
+ /* SP800-90A requires the limit 2**19 bits, but we return bytes */
+ return (1 << 16);
+}
+
+static inline size_t drbg_max_addtl(struct drbg_state *drbg)
+{
+ /* SP800-90A requires 2**35 bytes additional info str / pers str */
+#if (__BITS_PER_LONG == 32)
+ /*
+ * SP800-90A allows smaller maximum numbers to be returned -- we
+ * return SIZE_MAX - 1 to allow the verification of the enforcement
+ * of this value in drbg_healthcheck_sanity.
+ */
+ return (SIZE_MAX - 1);
+#else
+ return (1UL<<35);
+#endif
+}
+
+static inline size_t drbg_max_requests(struct drbg_state *drbg)
+{
+ /* SP800-90A requires 2**48 maximum requests before reseeding */
+#if (__BITS_PER_LONG == 32)
+ return SIZE_MAX;
+#else
+ return (1UL<<48);
+#endif
+}
+
+/*
+ * kernel crypto API input data structure for DRBG generate in case dlen
+ * is set to 0
+ */
+struct drbg_gen {
+ unsigned char *outbuf; /* output buffer for random numbers */
+ unsigned int outlen; /* size of output buffer */
+ struct drbg_string *addtl; /* additional information string */
+ struct drbg_test_data *test_data; /* test data */
+};
+
+/*
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_get_bytes() to allow the caller to provide additional data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (a struct drbg_string wrapping
+ *        the additional input buffer; may be NULL)
+ *
+ * return
+ * see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
+ unsigned char *outbuf, unsigned int outlen,
+ struct drbg_string *addtl)
+{
+ int ret;
+ struct drbg_gen genbuf;
+ genbuf.outbuf = outbuf;
+ genbuf.outlen = outlen;
+ genbuf.addtl = addtl;
+ genbuf.test_data = NULL;
+ ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+ return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_get_bytes() to allow the caller to provide additional data and
+ * allow furnishing of test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (a struct drbg_string wrapping
+ *        the additional input buffer; may be NULL)
+ * @test_data filled test data
+ *
+ * return
+ * see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
+ unsigned char *outbuf, unsigned int outlen,
+ struct drbg_string *addtl,
+ struct drbg_test_data *test_data)
+{
+ int ret;
+ struct drbg_gen genbuf;
+ genbuf.outbuf = outbuf;
+ genbuf.outlen = outlen;
+ genbuf.addtl = addtl;
+ genbuf.test_data = test_data;
+ ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+ return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper to the kernel crypto API function of
+ * crypto_rng_reset() to allow the caller to provide test_data
+ *
+ * @drng DRBG handle -- see crypto_rng_reset
+ * @pers personalization string (a struct drbg_string wrapping the
+ *       personalization buffer; may be NULL)
+ * @test_data filled test data
+ *
+ * return
+ * see crypto_rng_reset
+ */
+static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
+ struct drbg_string *pers,
+ struct drbg_test_data *test_data)
+{
+ int ret;
+ struct drbg_gen genbuf;
+ genbuf.outbuf = NULL;
+ genbuf.outlen = 0;
+ genbuf.addtl = pers;
+ genbuf.test_data = test_data;
+ ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
+ return ret;
+}
+
+/* DRBG type flags */
+#define DRBG_CTR ((drbg_flag_t)1<<0)
+#define DRBG_HMAC ((drbg_flag_t)1<<1)
+#define DRBG_HASH ((drbg_flag_t)1<<2)
+#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH)
+/* DRBG strength flags */
+#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3)
+#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4)
+#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5)
+#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
+ DRBG_STRENGTH256)
+
+enum drbg_prefixes {
+ DRBG_PREFIX0 = 0x00,
+ DRBG_PREFIX1,
+ DRBG_PREFIX2,
+ DRBG_PREFIX3
+};
+
+#endif /* _DRBG_H */
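
A caller-side sketch of the additional-input wrapper above, using the
registered HMAC-SHA256 instance without prediction resistance
("drbg_nopr_hmac_sha256"):

#include <crypto/drbg.h>
#include <linux/err.h>

static int example_drbg(u8 *out, unsigned int outlen)
{
        struct crypto_rng *drng;
        struct drbg_string addtl;
        static const unsigned char label[] = "example-additional-input";
        int ret;

        drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
        if (IS_ERR(drng))
                return PTR_ERR(drng);

        drbg_string_fill(&addtl, label, sizeof(label) - 1);
        ret = crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);

        crypto_free_rng(drng);
        return ret;
}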
diff --git a/kernel/include/crypto/gf128mul.h b/kernel/include/crypto/gf128mul.h
new file mode 100644
index 000000000..da2530e34
--- /dev/null
+++ b/kernel/include/crypto/gf128mul.h
@@ -0,0 +1,200 @@
+/* gf128mul.h - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://fp.gladman.plus.com/cryptography_technology/index.htm
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue Date: 31/01/2006
+
+ An implementation of field multiplication in Galois Field GF(128)
+*/
+
+#ifndef _CRYPTO_GF128MUL_H
+#define _CRYPTO_GF128MUL_H
+
+#include <crypto/b128ops.h>
+#include <linux/slab.h>
+
+/* Comment by Rik:
+ *
+ * For some background on GF(2^128) see for example:
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
+ *
+ * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
+ * be mapped to computer memory in a variety of ways. Let's examine
+ * three common cases.
+ *
+ * Take a look at the 16 binary octets below in memory order. The msb's
+ * are left and the lsb's are right. char b[16] is an array and b[0] is
+ * the first octet.
+ *
+ * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ * b[0] b[1] b[2] b[3] b[13] b[14] b[15]
+ *
+ * Every bit is a coefficient of some power of X. We can store the bits
+ * in every byte in little-endian order and the bytes themselves also in
+ * little endian order. I will call this lle (little-little-endian).
+ * The above buffer represents the polynomial 1, and X^7+X^2+X^1+1 looks
+ * like 11100001 00000000 .... 00000000 = { 0xE1, 0x00, }.
+ * This format was originally implemented in gf128mul and is used
+ * in GCM (Galois/Counter mode) and in ABL (Arbitrary Block Length).
+ *
+ * Another convention says: store the bits in bigendian order and the
+ * bytes also. This is bbe (big-big-endian). Now the buffer above
+ * represents X^127. X^7+X^2+X^1+1 looks like 00000000 .... 10000111,
+ * b[15] = 0x87 and the rest is 0. LRW uses this convention and bbe
+ * is partly implemented.
+ *
+ * Both of the above formats are easy to implement on big-endian
+ * machines.
+ *
+ * EME (which is patent encumbered) uses the ble format (bits are stored
+ * in big endian order and the bytes in little endian). The above buffer
+ * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ *
+ * The common machine word-size is smaller than 128 bits, so to make
+ * an efficient implementation we must split into machine word sizes.
+ * This file uses one 32bit for the moment. Machine endianness comes into
+ * play. The lle format in relation to machine endianness is discussed
+ * below by the original author of gf128mul Dr Brian Gladman.
+ *
+ * Let's look at the bbe and ble format on a little endian machine.
+ *
+ * bbe on a little endian machine u32 x[4]:
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 103..96 111.104 119.112 127.120 71...64 79...72 87...80 95...88
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 39...32 47...40 55...48 63...56 07...00 15...08 23...16 31...24
+ *
+ * ble on a little endian machine
+ *
+ * MS x[0] LS MS x[1] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 31...24 23...16 15...08 07...00 63...56 55...48 47...40 39...32
+ *
+ * MS x[2] LS MS x[3] LS
+ * ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ * 95...88 87...80 79...72 71...64 127.120 119.112 111.104 103..96
+ *
+ * Multiplications in GF(2^128) are mostly bit-shifts, so you see why
+ * ble (and lbe also) are easier to implement on a little-endian
+ * machine than on a big-endian machine. The converse holds for bbe
+ * and lle.
+ *
+ * Note: to have good alignment, it seems to me that it is sufficient
+ * to keep elements of GF(2^128) in type u64[2]. On 32-bit wordsize
+ * machines this will automatically aligned to wordsize and on a 64-bit
+ * machine also.
+ */
+/* Multiply a GF128 field element by x. Field elements are held in arrays
+ of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
+ indexed bits placed in the more numerically significant bit positions
+ within bytes.
+
+ On little endian machines the bit indexes translate into the bit
+ positions within four 32-bit words in the following way
+
+ MS x[0] LS MS x[1] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 24...31 16...23 08...15 00...07 56...63 48...55 40...47 32...39
+
+ MS x[2] LS MS x[3] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 88...95 80...87 72...79 64...71 120.127 112.119 104.111 96..103
+
+ On big endian machines the bit indexes translate into the bit
+ positions within four 32-bit words in the following way
+
+ MS x[0] LS MS x[1] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 00...07 08...15 16...23 24...31 32...39 40...47 48...55 56...63
+
+ MS x[2] LS MS x[3] LS
+ ms ls ms ls ms ls ms ls ms ls ms ls ms ls ms ls
+ 64...71 72...79 80...87 88...95 96..103 104.111 112.119 120.127
+*/
+
+/* A slow generic version of gf_mul, implemented for lle and bbe
+ * It multiplies a and b and puts the result in a */
+void gf128mul_lle(be128 *a, const be128 *b);
+
+void gf128mul_bbe(be128 *a, const be128 *b);
+
+/* multiply by x in ble format, needed by XTS */
+void gf128mul_x_ble(be128 *a, const be128 *b);
+
+/* 4k table optimization */
+
+struct gf128mul_4k {
+ be128 t[256];
+};
+
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
+void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+
+static inline void gf128mul_free_4k(struct gf128mul_4k *t)
+{
+ kfree(t);
+}
+
+
+/* 64k table optimization, implemented for lle and bbe */
+
+struct gf128mul_64k {
+ struct gf128mul_4k *t[16];
+};
+
+/* first initialize with the constant factor with which you
+ * want to multiply and then call gf128_64k_lle with the other
+ * factor in the first argument, the table in the second and a
+ * scratch register in the third. Afterwards *a = *r. */
+struct gf128mul_64k *gf128mul_init_64k_lle(const be128 *g);
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
+void gf128mul_free_64k(struct gf128mul_64k *t);
+void gf128mul_64k_lle(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+
+#endif /* _CRYPTO_GF128MUL_H */
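
A sketch of the table-driven form: initialize a 4k table for a fixed factor g
(bbe convention here) once, then multiply any number of elements by g in
place, amortizing the table cost across blocks:

#include <crypto/gf128mul.h>
#include <linux/errno.h>

static int example_gf128mul(const be128 *g, be128 *blocks, unsigned int n)
{
        struct gf128mul_4k *t;
        unsigned int i;

        t = gf128mul_init_4k_bbe(g);
        if (!t)
                return -ENOMEM;

        for (i = 0; i < n; i++)
                gf128mul_4k_bbe(&blocks[i], t); /* blocks[i] *= g */

        gf128mul_free_4k(t);
        return 0;
}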
diff --git a/kernel/include/crypto/hash.h b/kernel/include/crypto/hash.h
new file mode 100644
index 000000000..98abda9ed
--- /dev/null
+++ b/kernel/include/crypto/hash.h
@@ -0,0 +1,850 @@
+/*
+ * Hash: Hash algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_H
+#define _CRYPTO_HASH_H
+
+#include <linux/crypto.h>
+
+struct crypto_ahash;
+
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ * must be available to the @final and @finup calls, so they can
+ * store the resulting hash into it. For various predefined sizes,
+ * search include/crypto/ using
+ * git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ * buffer of this size must be passed to the @export function as it
+ * will save the partial state of the transformation into it. On the
+ * other side, the @import function will load the state from a
+ * buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ * structure of crypto_alg contains information common to all ciphers.
+ * The hash_alg_common data structure now adds the hash-specific
+ * information.
+ */
+struct hash_alg_common {
+ unsigned int digestsize;
+ unsigned int statesize;
+
+ struct crypto_alg base;
+};
+
+struct ahash_request {
+ struct crypto_async_request base;
+
+ unsigned int nbytes;
+ struct scatterlist *src;
+ u8 *result;
+
+ /* This field may only be used by the ahash API code. */
+ void *priv;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: Initialize the transformation context. Intended only to initialize the
+ * state of the HASH transformation at the beginning. This shall fill in
+ * the internal structures used during the entire duration of the whole
+ * transformation. No data processing happens at this point.
+ * @update: Push a chunk of data into the driver for transformation. This
+ * function actually pushes blocks of data from upper layers into the
+ * driver, which then passes those to the hardware as seen fit. This
+ * function must not finalize the HASH transformation by calculating the
+ * final message digest as this only adds more data into the
+ * transformation. This function shall not modify the transformation
+ * context, as this function may be called in parallel with the same
+ * transformation object. Data processing can happen synchronously
+ * [SHASH] or asynchronously [AHASH] at this point.
+ * @final: Retrieve result from the driver. This function finalizes the
+ * transformation and retrieves the resulting hash from the driver and
+ * pushes it back to upper layers. No data processing happens at this
+ * point.
+ * @finup: Combination of @update and @final. This function is effectively a
+ * combination of @update and @final calls issued in sequence. As some
+ * hardware cannot do @update and @final separately, this callback was
+ * added to allow such hardware to be used at least by IPsec. Data
+ * processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ * at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ * effectively behaves as the entire chain of operations, @init,
+ * @update and @final issued in sequence. Just like @finup, this was
+ * added for hardware which cannot do even the @finup, but can only do
+ * the whole transformation in one run. Data processing can happen
+ * synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set optional key used by the hashing algorithm. Intended to push
+ * optional key used by the hashing algorithm from upper layers into
+ * the driver. This function can store the key in the transformation
+ * context or can outright program it into the hardware. In the former
+ * case, one must be careful to program the key into the hardware at
+ * appropriate time and one must be careful that .setkey() can be
+ * called multiple times during the existence of the transformation
+ * object. Not all hashing algorithms do implement this function as it
+ * is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ * implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ * this function. This function must be called before any other of the
+ * @init, @update, @final, @finup, @digest is called. No data
+ * processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ * entire state of the ongoing transformation into a provided block of
+ * data so it can be @import 'ed back later on. This is useful in case
+ * you want to save partial result of the transformation after
+ * processing certain amount of data and reload this partial result
+ * multiple times later on for multiple re-use. No data processing
+ * happens at this point.
+ * @import: Import partial state of the transformation. This function loads the
+ * entire state of the ongoing transformation from a provided block of
+ * data so the transformation can continue from this point onward. No
+ * data processing happens at this point.
+ * @halg: see struct hash_alg_common
+ */
+struct ahash_alg {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*export)(struct ahash_request *req, void *out);
+ int (*import)(struct ahash_request *req, const void *in);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ struct hash_alg_common halg;
+};
+
+struct shash_desc {
+ struct crypto_shash *tfm;
+ u32 flags;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
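+/*
+ * SHASH_DESC_ON_STACK() declares an on-stack buffer that is large enough,
+ * and suitably aligned, to hold a struct shash_desc together with the
+ * algorithm-private __ctx area of @ctx, plus a struct shash_desc pointer
+ * aliasing that buffer. See the synchronous API usage sketch further below.
+ */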
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + \
+ crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
+
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ * size is the memory size that needs to be allocated for
+ * shash_desc.__ctx
+ * @base: internally used
+ */
+struct shash_alg {
+ int (*init)(struct shash_desc *desc);
+ int (*update)(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+ int (*final)(struct shash_desc *desc, u8 *out);
+ int (*finup)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
+ int (*digest)(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
+ int (*export)(struct shash_desc *desc, void *out);
+ int (*import)(struct shash_desc *desc, const void *in);
+ int (*setkey)(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ unsigned int descsize;
+
+ /* These fields must match hash_alg_common. */
+ unsigned int digestsize
+ __attribute__ ((aligned(__alignof__(struct hash_alg_common))));
+ unsigned int statesize;
+
+ struct crypto_alg base;
+};
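+
+/*
+ * Example: a minimal, hypothetical shash_alg implementation, illustrating
+ * how the callbacks documented above fit together. The "xorsum" algorithm
+ * and all example identifiers are made up for illustration; shash_desc_ctx()
+ * is declared further below, and the registration helpers live in
+ * crypto/internal/hash.h.
+ *
+ *	struct xorsum_desc_ctx {
+ *		u8 sum;
+ *	};
+ *
+ *	static int xorsum_init(struct shash_desc *desc)
+ *	{
+ *		struct xorsum_desc_ctx *ctx = shash_desc_ctx(desc);
+ *
+ *		ctx->sum = 0;			// empty message digests to 0
+ *		return 0;
+ *	}
+ *
+ *	static int xorsum_update(struct shash_desc *desc, const u8 *data,
+ *				 unsigned int len)
+ *	{
+ *		struct xorsum_desc_ctx *ctx = shash_desc_ctx(desc);
+ *
+ *		while (len--)
+ *			ctx->sum ^= *data++;	// fold in every input byte
+ *		return 0;
+ *	}
+ *
+ *	static int xorsum_final(struct shash_desc *desc, u8 *out)
+ *	{
+ *		struct xorsum_desc_ctx *ctx = shash_desc_ctx(desc);
+ *
+ *		*out = ctx->sum;
+ *		return 0;
+ *	}
+ *
+ *	static struct shash_alg xorsum_alg = {
+ *		.init		= xorsum_init,
+ *		.update		= xorsum_update,
+ *		.final		= xorsum_final,
+ *		.descsize	= sizeof(struct xorsum_desc_ctx),
+ *		.digestsize	= 1,
+ *		.base		= {
+ *			.cra_name	= "xorsum",
+ *			.cra_blocksize	= 1,
+ *			.cra_module	= THIS_MODULE,
+ *		},
+ *	};
+ */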
+
+struct crypto_ahash {
+ int (*init)(struct ahash_request *req);
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int (*digest)(struct ahash_request *req);
+ int (*export)(struct ahash_request *req, void *out);
+ int (*import)(struct ahash_request *req, const void *in);
+ int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+ unsigned int reqsize;
+ struct crypto_tfm base;
+};
+
+struct crypto_shash {
+ unsigned int descsize;
+ struct crypto_tfm base;
+};
+
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
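+
+/*
+ * Summarising the callback descriptions above, a hash transformation
+ * normally proceeds through one of the following call sequences, where
+ * setkey applies to keyed message digests only:
+ *
+ *	[setkey] -> init -> update (zero or more times) -> final
+ *	[setkey] -> init -> update (zero or more times) -> finup
+ *	[setkey] -> digest
+ */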
+
+static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_ahash, base);
+}
+
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_ahash(struct crypto_ahash *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
+}
+
+static inline unsigned int crypto_ahash_alignmask(
+ struct crypto_ahash *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_ahash_tfm(tfm));
+}
+
+static inline struct hash_alg_common *__crypto_hash_alg_common(
+ struct crypto_alg *alg)
+{
+ return container_of(alg, struct hash_alg_common, base);
+}
+
+static inline struct hash_alg_common *crypto_hash_alg_common(
+ struct crypto_ahash *tfm)
+{
+ return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
+}
+
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: message digest size of cipher
+ */
+static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->digestsize;
+}
+
+static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->statesize;
+}
+
+static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_get_flags(crypto_ahash_tfm(tfm));
+}
+
+static inline void crypto_ahash_set_flags(struct crypto_ahash *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_ahash_tfm(tfm), flags);
+}
+
+static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
+}
+
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ * cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
+static inline struct crypto_ahash *crypto_ahash_reqtfm(
+ struct ahash_request *req)
+{
+ return __crypto_ahash_cast(req->base.tfm);
+}
+
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return the size of the request data, i.e. the memory that must be
+ * allocated in addition to sizeof(struct ahash_request) to hold the
+ * private context of a request for this ahash cipher handle.
+ *
+ * Return: size of the request data
+ */
+static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
+{
+ return tfm->reqsize;
+}
+
+static inline void *ahash_request_ctx(struct ahash_request *req)
+{
+ return req->__ctx;
+}
+
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller-provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_ahash_digest(struct ahash_request *req);
+
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_ahash_statesize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
+static inline int crypto_ahash_export(struct ahash_request *req, void *out)
+{
+ return crypto_ahash_reqtfm(req)->export(req, out);
+}
+
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
+{
+ return crypto_ahash_reqtfm(req)->import(req, in);
+}
+
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ * data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
+static inline int crypto_ahash_init(struct ahash_request *req)
+{
+ return crypto_ahash_reqtfm(req)->init(req);
+}
+
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ * crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
+static inline int crypto_ahash_update(struct ahash_request *req)
+{
+ return crypto_ahash_reqtfm(req)->update(req);
+}
+
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as ahash handle to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
+static inline void ahash_request_set_tfm(struct ahash_request *req,
+ struct crypto_ahash *tfm)
+{
+ req->base.tfm = crypto_ahash_tfm(tfm);
+}
+
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls. During the allocation, the provided ahash handle
+ * is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct ahash_request *ahash_request_alloc(
+ struct crypto_ahash *tfm, gfp_t gfp)
+{
+ struct ahash_request *req;
+
+ req = kmalloc(sizeof(struct ahash_request) +
+ crypto_ahash_reqsize(tfm), gfp);
+
+ if (likely(req))
+ ahash_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void ahash_request_free(struct ahash_request *req)
+{
+ kzfree(req);
+}
+
+static inline struct ahash_request *ahash_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct ahash_request, base);
+}
+
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * &crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void ahash_request_set_callback(struct ahash_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ * ensure that the buffer has sufficient space by, for example, calling
+ * crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
+static inline void ahash_request_set_crypt(struct ahash_request *req,
+ struct scatterlist *src, u8 *result,
+ unsigned int nbytes)
+{
+ req->src = src;
+ req->nbytes = nbytes;
+ req->result = result;
+}
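+
+/*
+ * Example: a hypothetical one-shot digest over a scatterlist with the
+ * asynchronous API declared above. This is an illustrative sketch only;
+ * "sha256" is just one possible algorithm name and the example_*
+ * identifiers are made up.
+ *
+ *	struct example_result {
+ *		struct completion done;
+ *		int err;
+ *	};
+ *
+ *	static void example_done(struct crypto_async_request *req, int err)
+ *	{
+ *		struct example_result *res = req->data;
+ *
+ *		if (err == -EINPROGRESS)
+ *			return;		// backlogged request now running
+ *		res->err = err;
+ *		complete(&res->done);
+ *	}
+ *
+ *	static int example_ahash(struct scatterlist *sg, unsigned int len,
+ *				 u8 *digest)
+ *	{
+ *		struct example_result res;
+ *		struct crypto_ahash *tfm;
+ *		struct ahash_request *req;
+ *		int err;
+ *
+ *		tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *
+ *		req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *		if (!req) {
+ *			crypto_free_ahash(tfm);
+ *			return -ENOMEM;
+ *		}
+ *
+ *		init_completion(&res.done);
+ *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *					   example_done, &res);
+ *		ahash_request_set_crypt(req, sg, digest, len);
+ *
+ *		err = crypto_ahash_digest(req);
+ *		if (err == -EINPROGRESS || err == -EBUSY) {
+ *			// the real status is delivered to the callback
+ *			wait_for_completion(&res.done);
+ *			err = res.err;
+ *		}
+ *
+ *		ahash_request_free(req);
+ *		crypto_free_ahash(tfm);
+ *		return err;
+ *	}
+ */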
+
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
+ u32 mask);
+
+static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_shash(struct crypto_shash *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
+}
+
+static inline unsigned int crypto_shash_alignmask(
+ struct crypto_shash *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
+}
+
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
+}
+
+static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
+{
+ return container_of(alg, struct shash_alg, base);
+}
+
+static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
+{
+ return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
+}
+
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
+static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->digestsize;
+}
+
+static inline unsigned int crypto_shash_statesize(struct crypto_shash *tfm)
+{
+ return crypto_shash_alg(tfm)->statesize;
+}
+
+static inline u32 crypto_shash_get_flags(struct crypto_shash *tfm)
+{
+ return crypto_tfm_get_flags(crypto_shash_tfm(tfm));
+}
+
+static inline void crypto_shash_set_flags(struct crypto_shash *tfm, u32 flags)
+{
+ crypto_tfm_set_flags(crypto_shash_tfm(tfm), flags);
+}
+
+static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
+}
+
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required to calculate the memory requirements so that the caller can
+ * allocate sufficient memory for the operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
+static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
+{
+ return tfm->descsize;
+}
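+
+/*
+ * Example: a minimal, hypothetical heap allocation of a shash_desc for a
+ * given tfm (SHASH_DESC_ON_STACK above is the stack-based equivalent):
+ *
+ *	struct shash_desc *desc;
+ *
+ *	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
+ *		       GFP_KERNEL);
+ *	if (!desc)
+ *		return -ENOMEM;
+ *	desc->tfm = tfm;
+ *	desc->flags = 0;	// or CRYPTO_TFM_REQ_MAY_SLEEP
+ */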
+
+static inline void *shash_desc_ctx(struct shash_desc *desc)
+{
+ return desc->__ctx;
+}
+
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller-provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
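+
+/*
+ * Example: hashing a linear buffer with the synchronous API, tying together
+ * crypto_alloc_shash(), SHASH_DESC_ON_STACK() and crypto_shash_digest().
+ * A hypothetical sketch only; "sha256" is just one possible algorithm name
+ * and the example_* identifiers are made up.
+ *
+ *	static int example_hash_buf(struct crypto_shash *tfm, const u8 *data,
+ *				    unsigned int len, u8 *digest)
+ *	{
+ *		SHASH_DESC_ON_STACK(desc, tfm);
+ *
+ *		desc->tfm = tfm;
+ *		desc->flags = 0;
+ *		return crypto_shash_digest(desc, data, len, digest);
+ *	}
+ *
+ *	static int example_shash(const u8 *data, unsigned int len, u8 *digest)
+ *	{
+ *		struct crypto_shash *tfm;
+ *		int err;
+ *
+ *		tfm = crypto_alloc_shash("sha256", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *		err = example_hash_buf(tfm, data, len, digest);
+ *		crypto_free_shash(tfm);
+ *		return err;
+ *	}
+ */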
+
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into the
+ * caller-allocated output buffer out which must have sufficient size (e.g. by
+ * calling crypto_shash_statesize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
+static inline int crypto_shash_export(struct shash_desc *desc, void *out)
+{
+ return crypto_shash_alg(desc->tfm)->export(desc, out);
+}
+
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
+{
+ return crypto_shash_alg(desc->tfm)->import(desc, in);
+}
+
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ * error occurred
+ */
+static inline int crypto_shash_init(struct shash_desc *desc)
+{
+ return crypto_shash_alg(desc->tfm)->init(desc);
+}
+
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ * occurred
+ */
+int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *out);
+
+#endif /* _CRYPTO_HASH_H */
diff --git a/kernel/include/crypto/hash_info.h b/kernel/include/crypto/hash_info.h
new file mode 100644
index 000000000..e1e5a3e5d
--- /dev/null
+++ b/kernel/include/crypto/hash_info.h
@@ -0,0 +1,40 @@
+/*
+ * Hash Info: Hash algorithms information
+ *
+ * Copyright (c) 2013 Dmitry Kasatkin <d.kasatkin@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_HASH_INFO_H
+#define _CRYPTO_HASH_INFO_H
+
+#include <crypto/sha.h>
+#include <crypto/md5.h>
+
+#include <uapi/linux/hash_info.h>
+
+/* not defined in include/crypto/ */
+#define RMD128_DIGEST_SIZE 16
+#define RMD160_DIGEST_SIZE 20
+#define RMD256_DIGEST_SIZE 32
+#define RMD320_DIGEST_SIZE 40
+
+/* not defined in include/crypto/ */
+#define WP512_DIGEST_SIZE 64
+#define WP384_DIGEST_SIZE 48
+#define WP256_DIGEST_SIZE 32
+
+/* not defined in include/crypto/ */
+#define TGR128_DIGEST_SIZE 16
+#define TGR160_DIGEST_SIZE 20
+#define TGR192_DIGEST_SIZE 24
+
+extern const char *const hash_algo_name[HASH_ALGO__LAST];
+extern const int hash_digest_size[HASH_ALGO__LAST];
+
+#endif /* _CRYPTO_HASH_INFO_H */
diff --git a/kernel/include/crypto/if_alg.h b/kernel/include/crypto/if_alg.h
new file mode 100644
index 000000000..018afb264
--- /dev/null
+++ b/kernel/include/crypto/if_alg.h
@@ -0,0 +1,96 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_IF_ALG_H
+#define _CRYPTO_IF_ALG_H
+
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/if_alg.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+#include <net/sock.h>
+
+#define ALG_MAX_PAGES 16
+
+struct crypto_async_request;
+
+struct alg_sock {
+ /* struct sock must be the first member of struct alg_sock */
+ struct sock sk;
+
+ struct sock *parent;
+
+ const struct af_alg_type *type;
+ void *private;
+};
+
+struct af_alg_completion {
+ struct completion completion;
+ int err;
+};
+
+struct af_alg_control {
+ struct af_alg_iv *iv;
+ int op;
+ unsigned int aead_assoclen;
+};
+
+struct af_alg_type {
+ void *(*bind)(const char *name, u32 type, u32 mask);
+ void (*release)(void *private);
+ int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*accept)(void *private, struct sock *sk);
+ int (*setauthsize)(void *private, unsigned int authsize);
+
+ struct proto_ops *ops;
+ struct module *owner;
+ char name[14];
+};
+
+struct af_alg_sgl {
+ struct scatterlist sg[ALG_MAX_PAGES + 1];
+ struct page *pages[ALG_MAX_PAGES];
+ unsigned int npages;
+};
+
+int af_alg_register_type(const struct af_alg_type *type);
+int af_alg_unregister_type(const struct af_alg_type *type);
+
+int af_alg_release(struct socket *sock);
+int af_alg_accept(struct sock *sk, struct socket *newsock);
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
+void af_alg_free_sg(struct af_alg_sgl *sgl);
+void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
+void af_alg_complete(struct crypto_async_request *req, int err);
+
+static inline struct alg_sock *alg_sk(struct sock *sk)
+{
+ return (struct alg_sock *)sk;
+}
+
+static inline void af_alg_release_parent(struct sock *sk)
+{
+ sock_put(alg_sk(sk)->parent);
+}
+
+static inline void af_alg_init_completion(struct af_alg_completion *completion)
+{
+ init_completion(&completion->completion);
+}
+
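+/*
+ * Example: what this interface looks like from user space. A hypothetical
+ * sketch (error handling omitted); it computes a digest through an
+ * algif_hash socket, with "sha256" as one possible algorithm name.
+ *
+ *	int sk, fd;
+ *	struct sockaddr_alg sa = {
+ *		.salg_family = AF_ALG,
+ *		.salg_type   = "hash",
+ *		.salg_name   = "sha256",
+ *	};
+ *	unsigned char digest[32];
+ *
+ *	sk = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ *	bind(sk, (struct sockaddr *)&sa, sizeof(sa));
+ *	fd = accept(sk, NULL, 0);		// one tfm per accept()
+ *	write(fd, buf, buflen);			// feed the data
+ *	read(fd, digest, sizeof(digest));	// collect the digest
+ */
+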
+#endif /* _CRYPTO_IF_ALG_H */
diff --git a/kernel/include/crypto/internal/aead.h b/kernel/include/crypto/internal/aead.h
new file mode 100644
index 000000000..2eba34023
--- /dev/null
+++ b/kernel/include/crypto/internal/aead.h
@@ -0,0 +1,82 @@
+/*
+ * AEAD: Authenticated Encryption with Associated Data
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_AEAD_H
+#define _CRYPTO_INTERNAL_AEAD_H
+
+#include <crypto/aead.h>
+#include <crypto/algapi.h>
+#include <linux/types.h>
+
+struct rtattr;
+
+struct crypto_aead_spawn {
+ struct crypto_spawn base;
+};
+
+extern const struct crypto_type crypto_nivaead_type;
+
+static inline void crypto_set_aead_spawn(
+ struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
+{
+ crypto_set_spawn(&spawn->base, inst);
+}
+
+struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
+
+int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
+ u32 type, u32 mask);
+
+static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_aead_spawn_alg(
+ struct crypto_aead_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_aead *crypto_spawn_aead(
+ struct crypto_aead_spawn *spawn)
+{
+ return __crypto_aead_cast(
+ crypto_spawn_tfm(&spawn->base, CRYPTO_ALG_TYPE_AEAD,
+ CRYPTO_ALG_TYPE_MASK));
+}
+
+struct crypto_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type,
+ u32 mask);
+void aead_geniv_free(struct crypto_instance *inst);
+int aead_geniv_init(struct crypto_tfm *tfm);
+void aead_geniv_exit(struct crypto_tfm *tfm);
+
+static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
+{
+ return crypto_aead_crt(geniv)->base;
+}
+
+static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
+{
+ return aead_request_ctx(&req->areq);
+}
+
+static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
+ int err)
+{
+ aead_request_complete(&req->areq, err);
+}
+
+#endif /* _CRYPTO_INTERNAL_AEAD_H */
+
diff --git a/kernel/include/crypto/internal/compress.h b/kernel/include/crypto/internal/compress.h
new file mode 100644
index 000000000..178a888d1
--- /dev/null
+++ b/kernel/include/crypto/internal/compress.h
@@ -0,0 +1,28 @@
+/*
+ * Compress: Compression algorithms under the cryptographic API.
+ *
+ * Copyright 2008 Sony Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ * If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CRYPTO_INTERNAL_COMPRESS_H
+#define _CRYPTO_INTERNAL_COMPRESS_H
+
+#include <crypto/compress.h>
+
+extern int crypto_register_pcomp(struct pcomp_alg *alg);
+extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
+
+#endif /* _CRYPTO_INTERNAL_COMPRESS_H */
diff --git a/kernel/include/crypto/internal/hash.h b/kernel/include/crypto/internal/hash.h
new file mode 100644
index 000000000..3b4af1d7c
--- /dev/null
+++ b/kernel/include/crypto/internal/hash.h
@@ -0,0 +1,247 @@
+/*
+ * Hash algorithms.
+ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_HASH_H
+#define _CRYPTO_INTERNAL_HASH_H
+
+#include <crypto/algapi.h>
+#include <crypto/hash.h>
+
+struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+ char *data;
+
+ unsigned int offset;
+ unsigned int alignmask;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+
+ unsigned int flags;
+};
+
+struct ahash_instance {
+ struct ahash_alg alg;
+};
+
+struct shash_instance {
+ struct shash_alg alg;
+};
+
+struct crypto_ahash_spawn {
+ struct crypto_spawn base;
+};
+
+struct crypto_shash_spawn {
+ struct crypto_spawn base;
+};
+
+extern const struct crypto_type crypto_ahash_type;
+
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+int crypto_ahash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
+ struct crypto_hash_walk *walk,
+ struct scatterlist *sg, unsigned int len);
+
+static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
+ int err)
+{
+ return crypto_hash_walk_done(walk, err);
+}
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
+static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
+{
+ return crypto_hash_walk_last(walk);
+}
+
+int crypto_register_ahash(struct ahash_alg *alg);
+int crypto_unregister_ahash(struct ahash_alg *alg);
+int ahash_register_instance(struct crypto_template *tmpl,
+ struct ahash_instance *inst);
+void ahash_free_instance(struct crypto_instance *inst);
+
+int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ struct hash_alg_common *alg,
+ struct crypto_instance *inst);
+
+static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
+int crypto_register_shash(struct shash_alg *alg);
+int crypto_unregister_shash(struct shash_alg *alg);
+int crypto_register_shashes(struct shash_alg *algs, int count);
+int crypto_unregister_shashes(struct shash_alg *algs, int count);
+int shash_register_instance(struct crypto_template *tmpl,
+ struct shash_instance *inst);
+void shash_free_instance(struct crypto_instance *inst);
+
+int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
+ struct shash_alg *alg,
+ struct crypto_instance *inst);
+
+static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+
+int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
+int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc);
+
+int shash_ahash_mcryptd_update(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_final(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_finup(struct ahash_request *req,
+ struct shash_desc *desc);
+int shash_ahash_mcryptd_digest(struct ahash_request *req,
+ struct shash_desc *desc);
+
+int crypto_init_shash_ops_async(struct crypto_tfm *tfm);
+
+static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm)
+{
+ return crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+}
+
+static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
+{
+ return container_of(__crypto_hash_alg_common(alg), struct ahash_alg,
+ halg);
+}
+
+static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
+ unsigned int reqsize)
+{
+ tfm->reqsize = reqsize;
+}
+
+static inline struct crypto_instance *ahash_crypto_instance(
+ struct ahash_instance *inst)
+{
+ return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
+}
+
+static inline struct ahash_instance *ahash_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+}
+
+static inline void *ahash_instance_ctx(struct ahash_instance *inst)
+{
+ return crypto_instance_ctx(ahash_crypto_instance(inst));
+}
+
+static inline unsigned int ahash_instance_headroom(void)
+{
+ return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
+}
+
+static inline struct ahash_instance *ahash_alloc_instance(
+ const char *name, struct crypto_alg *alg)
+{
+ return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
+}
+
+static inline struct crypto_ahash *crypto_spawn_ahash(
+ struct crypto_ahash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline int ahash_enqueue_request(struct crypto_queue *queue,
+ struct ahash_request *request)
+{
+ return crypto_enqueue_request(queue, &request->base);
+}
+
+static inline struct ahash_request *ahash_dequeue_request(
+ struct crypto_queue *queue)
+{
+ return ahash_request_cast(crypto_dequeue_request(queue));
+}
+
+static inline int ahash_tfm_in_queue(struct crypto_queue *queue,
+ struct crypto_ahash *tfm)
+{
+ return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm));
+}
+
+static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline struct crypto_instance *shash_crypto_instance(
+ struct shash_instance *inst)
+{
+ return container_of(&inst->alg.base, struct crypto_instance, alg);
+}
+
+static inline struct shash_instance *shash_instance(
+ struct crypto_instance *inst)
+{
+ return container_of(__crypto_shash_alg(&inst->alg),
+ struct shash_instance, alg);
+}
+
+static inline void *shash_instance_ctx(struct shash_instance *inst)
+{
+ return crypto_instance_ctx(shash_crypto_instance(inst));
+}
+
+static inline struct shash_instance *shash_alloc_instance(
+ const char *name, struct crypto_alg *alg)
+{
+ return crypto_alloc_instance2(name, alg,
+ sizeof(struct shash_alg) - sizeof(*alg));
+}
+
+static inline struct crypto_shash *crypto_spawn_shash(
+ struct crypto_shash_spawn *spawn)
+{
+ return crypto_spawn_tfm2(&spawn->base);
+}
+
+static inline void *crypto_shash_ctx_aligned(struct crypto_shash *tfm)
+{
+ return crypto_tfm_ctx_aligned(&tfm->base);
+}
+
+static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_shash, base);
+}
+
+#endif /* _CRYPTO_INTERNAL_HASH_H */
+
diff --git a/kernel/include/crypto/internal/rng.h b/kernel/include/crypto/internal/rng.h
new file mode 100644
index 000000000..896973369
--- /dev/null
+++ b/kernel/include/crypto/internal/rng.h
@@ -0,0 +1,26 @@
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_RNG_H
+#define _CRYPTO_INTERNAL_RNG_H
+
+#include <crypto/algapi.h>
+#include <crypto/rng.h>
+
+extern const struct crypto_type crypto_rng_type;
+
+static inline void *crypto_rng_ctx(struct crypto_rng *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+#endif
diff --git a/kernel/include/crypto/internal/skcipher.h b/kernel/include/crypto/internal/skcipher.h
new file mode 100644
index 000000000..b3a46c515
--- /dev/null
+++ b/kernel/include/crypto/internal/skcipher.h
@@ -0,0 +1,111 @@
+/*
+ * Symmetric key ciphers.
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
+#define _CRYPTO_INTERNAL_SKCIPHER_H
+
+#include <crypto/algapi.h>
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+
+struct rtattr;
+
+struct crypto_skcipher_spawn {
+ struct crypto_spawn base;
+};
+
+extern const struct crypto_type crypto_givcipher_type;
+
+static inline void crypto_set_skcipher_spawn(
+ struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
+{
+ crypto_set_spawn(&spawn->base, inst);
+}
+
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
+ u32 type, u32 mask);
+
+struct crypto_alg *crypto_lookup_skcipher(const char *name, u32 type, u32 mask);
+
+static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
+{
+ crypto_drop_spawn(&spawn->base);
+}
+
+static inline struct crypto_alg *crypto_skcipher_spawn_alg(
+ struct crypto_skcipher_spawn *spawn)
+{
+ return spawn->base.alg;
+}
+
+static inline struct crypto_ablkcipher *crypto_spawn_skcipher(
+ struct crypto_skcipher_spawn *spawn)
+{
+ return __crypto_ablkcipher_cast(
+ crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
+ crypto_skcipher_mask(0)));
+}
+
+int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
+int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
+const char *crypto_default_geniv(const struct crypto_alg *alg);
+
+struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type,
+ u32 mask);
+void skcipher_geniv_free(struct crypto_instance *inst);
+int skcipher_geniv_init(struct crypto_tfm *tfm);
+void skcipher_geniv_exit(struct crypto_tfm *tfm);
+
+static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
+ struct crypto_ablkcipher *geniv)
+{
+ return crypto_ablkcipher_crt(geniv)->base;
+}
+
+static inline int skcipher_enqueue_givcrypt(
+ struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
+{
+ return ablkcipher_enqueue_request(queue, &request->creq);
+}
+
+static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
+ struct crypto_queue *queue)
+{
+ return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
+}
+
+static inline void *skcipher_givcrypt_reqctx(
+ struct skcipher_givcrypt_request *req)
+{
+ return ablkcipher_request_ctx(&req->creq);
+}
+
+static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
+ int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline void skcipher_givcrypt_complete(
+ struct skcipher_givcrypt_request *req, int err)
+{
+ ablkcipher_request_complete(&req->creq, err);
+}
+
+static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
+{
+ return req->base.flags;
+}
+
+#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
+
diff --git a/kernel/include/crypto/lrw.h b/kernel/include/crypto/lrw.h
new file mode 100644
index 000000000..25a2c8716
--- /dev/null
+++ b/kernel/include/crypto/lrw.h
@@ -0,0 +1,43 @@
+#ifndef _CRYPTO_LRW_H
+#define _CRYPTO_LRW_H
+
+#include <crypto/b128ops.h>
+
+struct scatterlist;
+struct gf128mul_64k;
+struct blkcipher_desc;
+
+#define LRW_BLOCK_SIZE 16
+
+struct lrw_table_ctx {
+	/* optimizes multiplying a random (non-incrementing, as at the
+	 * start of a new sector) value with key2; we could also have
+	 * used 4k optimization tables or no optimization at all, in
+	 * which case we would have to store key2 here */
+ struct gf128mul_64k *table;
+ /* stores:
+ * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+ * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+ * key2*{ 0,0,...1,1,1,1,1 }, etc
+ * needed for optimized multiplication of incrementing values
+ * with key2 */
+ be128 mulinc[128];
+};
+
+int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
+void lrw_free_table(struct lrw_table_ctx *ctx);
+
+struct lrw_crypt_req {
+ be128 *tbuf;
+ unsigned int tbuflen;
+
+ struct lrw_table_ctx *table_ctx;
+ void *crypt_ctx;
+ void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
+};
+
+int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ struct lrw_crypt_req *req);
+
+#endif /* _CRYPTO_LRW_H */
diff --git a/kernel/include/crypto/mcryptd.h b/kernel/include/crypto/mcryptd.h
new file mode 100644
index 000000000..c23ee1f7e
--- /dev/null
+++ b/kernel/include/crypto/mcryptd.h
@@ -0,0 +1,112 @@
+/*
+ * Software async multibuffer crypto daemon headers
+ *
+ * Author:
+ * Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * Copyright (c) 2014, Intel Corporation.
+ */
+
+#ifndef _CRYPTO_MCRYPT_H
+#define _CRYPTO_MCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+struct mcryptd_ahash {
+ struct crypto_ahash base;
+};
+
+static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
+ struct crypto_ahash *tfm)
+{
+ return (struct mcryptd_ahash *)tfm;
+}
+
+struct mcryptd_cpu_queue {
+ struct crypto_queue queue;
+ struct work_struct work;
+};
+
+struct mcryptd_queue {
+ struct mcryptd_cpu_queue __percpu *cpu_queue;
+};
+
+struct mcryptd_instance_ctx {
+ struct crypto_spawn spawn;
+ struct mcryptd_queue *queue;
+};
+
+struct mcryptd_hash_ctx {
+ struct crypto_shash *child;
+ struct mcryptd_alg_state *alg_state;
+};
+
+struct mcryptd_tag {
+ /* seq number of request */
+ unsigned seq_num;
+ /* arrival time of request */
+ unsigned long arrival;
+ unsigned long expire;
+ int cpu;
+};
+
+struct mcryptd_hash_request_ctx {
+ struct list_head waiter;
+ crypto_completion_t complete;
+ struct mcryptd_tag tag;
+ struct crypto_hash_walk walk;
+ u8 *out;
+ int flag;
+ struct shash_desc desc;
+};
+
+struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
+ u32 type, u32 mask);
+struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
+struct shash_desc *mcryptd_shash_desc(struct ahash_request *req);
+void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
+void mcryptd_flusher(struct work_struct *work);
+
+enum mcryptd_req_type {
+ MCRYPTD_NONE,
+ MCRYPTD_UPDATE,
+ MCRYPTD_FINUP,
+ MCRYPTD_DIGEST,
+ MCRYPTD_FINAL
+};
+
+struct mcryptd_alg_cstate {
+ unsigned long next_flush;
+ unsigned next_seq_num;
+ bool flusher_engaged;
+ struct delayed_work flush;
+ int cpu;
+ struct mcryptd_alg_state *alg_state;
+ void *mgr;
+ spinlock_t work_lock;
+ struct list_head work_list;
+ struct list_head flush_list;
+};
+
+struct mcryptd_alg_state {
+ struct mcryptd_alg_cstate __percpu *alg_cstate;
+ unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
+};
+
+/* return delay in jiffies from current time */
+static inline unsigned long get_delay(unsigned long t)
+{
+ long delay;
+
+ delay = (long) t - (long) jiffies;
+ if (delay <= 0)
+ return 0;
+ else
+ return (unsigned long) delay;
+}
+
+void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
+
+#endif
diff --git a/kernel/include/crypto/md5.h b/kernel/include/crypto/md5.h
new file mode 100644
index 000000000..65f299b08
--- /dev/null
+++ b/kernel/include/crypto/md5.h
@@ -0,0 +1,17 @@
+#ifndef _CRYPTO_MD5_H
+#define _CRYPTO_MD5_H
+
+#include <linux/types.h>
+
+#define MD5_DIGEST_SIZE 16
+#define MD5_HMAC_BLOCK_SIZE 64
+#define MD5_BLOCK_WORDS 16
+#define MD5_HASH_WORDS 4
+
+struct md5_state {
+ u32 hash[MD5_HASH_WORDS];
+ u32 block[MD5_BLOCK_WORDS];
+ u64 byte_count;
+};
+
+#endif
diff --git a/kernel/include/crypto/null.h b/kernel/include/crypto/null.h
new file mode 100644
index 000000000..b7c864cc7
--- /dev/null
+++ b/kernel/include/crypto/null.h
@@ -0,0 +1,11 @@
+/* Values for NULL algorithms */
+
+#ifndef _CRYPTO_NULL_H
+#define _CRYPTO_NULL_H
+
+#define NULL_KEY_SIZE 0
+#define NULL_BLOCK_SIZE 1
+#define NULL_DIGEST_SIZE 0
+#define NULL_IV_SIZE 0
+
+#endif
diff --git a/kernel/include/crypto/padlock.h b/kernel/include/crypto/padlock.h
new file mode 100644
index 000000000..d2cfa2ef4
--- /dev/null
+++ b/kernel/include/crypto/padlock.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for VIA PadLock
+ *
+ * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_PADLOCK_H
+#define _CRYPTO_PADLOCK_H
+
+#define PADLOCK_ALIGNMENT 16
+
+#define PFX KBUILD_MODNAME ": "
+
+#define PADLOCK_CRA_PRIORITY 300
+#define PADLOCK_COMPOSITE_PRIORITY 400
+
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
+#endif /* _CRYPTO_PADLOCK_H */
diff --git a/kernel/include/crypto/pcrypt.h b/kernel/include/crypto/pcrypt.h
new file mode 100644
index 000000000..d7d8bd8c6
--- /dev/null
+++ b/kernel/include/crypto/pcrypt.h
@@ -0,0 +1,51 @@
+/*
+ * pcrypt - Parallel crypto engine.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTO_PCRYPT_H
+#define _CRYPTO_PCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/padata.h>
+
+struct pcrypt_request {
+ struct padata_priv padata;
+ void *data;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
+{
+ return req->__ctx;
+}
+
+static inline
+struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
+{
+ return &req->padata;
+}
+
+static inline
+struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
+{
+ return container_of(padata, struct pcrypt_request, padata);
+}
+
+#endif
diff --git a/kernel/include/crypto/pkcs7.h b/kernel/include/crypto/pkcs7.h
new file mode 100644
index 000000000..691c79172
--- /dev/null
+++ b/kernel/include/crypto/pkcs7.h
@@ -0,0 +1,36 @@
+/* PKCS#7 crypto data parser
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+struct key;
+struct pkcs7_message;
+
+/*
+ * pkcs7_parser.c
+ */
+extern struct pkcs7_message *pkcs7_parse_message(const void *data,
+ size_t datalen);
+extern void pkcs7_free_message(struct pkcs7_message *pkcs7);
+
+extern int pkcs7_get_content_data(const struct pkcs7_message *pkcs7,
+ const void **_data, size_t *_datalen,
+ bool want_wrapper);
+
+/*
+ * pkcs7_trust.c
+ */
+extern int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
+ struct key *trust_keyring,
+ bool *_trusted);
+
+/*
+ * pkcs7_verify.c
+ */
+extern int pkcs7_verify(struct pkcs7_message *pkcs7);
diff --git a/kernel/include/crypto/public_key.h b/kernel/include/crypto/public_key.h
new file mode 100644
index 000000000..54add2069
--- /dev/null
+++ b/kernel/include/crypto/public_key.h
@@ -0,0 +1,107 @@
+/* Asymmetric public-key algorithm definitions
+ *
+ * See Documentation/crypto/asymmetric-keys.txt
+ *
+ * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_PUBLIC_KEY_H
+#define _LINUX_PUBLIC_KEY_H
+
+#include <linux/mpi.h>
+#include <keys/asymmetric-type.h>
+#include <crypto/hash_info.h>
+
+enum pkey_algo {
+ PKEY_ALGO_DSA,
+ PKEY_ALGO_RSA,
+ PKEY_ALGO__LAST
+};
+
+extern const char *const pkey_algo_name[PKEY_ALGO__LAST];
+extern const struct public_key_algorithm *pkey_algo[PKEY_ALGO__LAST];
+
+/* asymmetric key implementation supports only up to SHA224 */
+#define PKEY_HASH__LAST (HASH_ALGO_SHA224 + 1)
+
+enum pkey_id_type {
+ PKEY_ID_PGP, /* OpenPGP generated key ID */
+ PKEY_ID_X509, /* X.509 arbitrary subjectKeyIdentifier */
+ PKEY_ID_TYPE__LAST
+};
+
+extern const char *const pkey_id_type_name[PKEY_ID_TYPE__LAST];
+
+/*
+ * Cryptographic data for the public-key subtype of the asymmetric key type.
+ *
+ * Note that this may include private part of the key as well as the public
+ * part.
+ */
+struct public_key {
+ const struct public_key_algorithm *algo;
+ u8 capabilities;
+#define PKEY_CAN_ENCRYPT 0x01
+#define PKEY_CAN_DECRYPT 0x02
+#define PKEY_CAN_SIGN 0x04
+#define PKEY_CAN_VERIFY 0x08
+ enum pkey_algo pkey_algo : 8;
+ enum pkey_id_type id_type : 8;
+ union {
+ MPI mpi[5];
+ struct {
+ MPI p; /* DSA prime */
+ MPI q; /* DSA group order */
+ MPI g; /* DSA group generator */
+ MPI y; /* DSA public-key value = g^x mod p */
+ MPI x; /* DSA secret exponent (if present) */
+ } dsa;
+ struct {
+ MPI n; /* RSA public modulus */
+ MPI e; /* RSA public encryption exponent */
+ MPI d; /* RSA secret encryption exponent (if present) */
+ MPI p; /* RSA secret prime (if present) */
+ MPI q; /* RSA secret prime (if present) */
+ } rsa;
+ };
+};
+
+extern void public_key_destroy(void *payload);
+
+/*
+ * Public key cryptography signature data
+ */
+struct public_key_signature {
+ u8 *digest;
+ u8 digest_size; /* Number of bytes in digest */
+ u8 nr_mpi; /* Occupancy of mpi[] */
+ enum pkey_algo pkey_algo : 8;
+ enum hash_algo pkey_hash_algo : 8;
+ union {
+ MPI mpi[2];
+ struct {
+ MPI s; /* m^d mod n */
+ } rsa;
+ struct {
+ MPI r;
+ MPI s;
+ } dsa;
+ };
+};
+
+struct key;
+extern int verify_signature(const struct key *key,
+ const struct public_key_signature *sig);
+
+struct asymmetric_key_id;
+extern struct key *x509_request_asymmetric_key(struct key *keyring,
+ const struct asymmetric_key_id *kid,
+ bool partial);
+
+#endif /* _LINUX_PUBLIC_KEY_H */
diff --git a/kernel/include/crypto/rng.h b/kernel/include/crypto/rng.h
new file mode 100644
index 000000000..6e28ea5be
--- /dev/null
+++ b/kernel/include/crypto/rng.h
@@ -0,0 +1,154 @@
+/*
+ * RNG: Random Number Generator algorithms under the crypto API
+ *
+ * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_RNG_H
+#define _CRYPTO_RNG_H
+
+#include <linux/crypto.h>
+
+extern struct crypto_rng *crypto_default_rng;
+
+int crypto_get_default_rng(void);
+void crypto_put_default_rng(void);
+
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
+ */
+
+static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
+{
+ return (struct crypto_rng *)tfm;
+}
+
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
+ u32 type, u32 mask)
+{
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_RNG;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_rng_cast(crypto_alloc_base(alg_name, type, mask));
+}
+
+static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_rng_alg() - obtain the rng_alg descriptor of an RNG
+ * @tfm: cipher handle
+ *
+ * Return the rng_alg structure (which carries, among others, the seed size)
+ * of the initialized random number generator.
+ *
+ * Return: pointer to the rng_alg structure
+ */
+static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
+{
+ return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
+}
+
+static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
+{
+ return &crypto_rng_tfm(tfm)->crt_rng;
+}
+
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_rng(struct crypto_rng *tfm)
+{
+ crypto_free_tfm(crypto_rng_tfm(tfm));
+}
+
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: 0 if the function was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
+ u8 *rdata, unsigned int dlen)
+{
+ return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
+}
+
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller-provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires a
+ * caller-provided seed, whereas the SP800-90A DRBGs seed themselves).
+ * The provided seed should have the length reported by
+ * crypto_rng_seedsize() for the random number generator in question.
+ *
+ * Return: 0 if the re-seeding was successful; < 0 if an error occurred
+ */
+static inline int crypto_rng_reset(struct crypto_rng *tfm,
+ u8 *seed, unsigned int slen)
+{
+ return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
+}
+
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not require a seed from the caller. For example,
+ * the SP800-90A DRBGs reseed themselves automatically after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
+static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
+{
+ return crypto_rng_alg(tfm)->seedsize;
+}
+
+#endif /* _CRYPTO_RNG_H */
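
A minimal usage sketch for this API; get_random_sample() is a hypothetical helper, and it assumes the "stdrng" name resolves to a default generator on the running kernel (error handling abbreviated):

static int get_random_sample(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int err;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	err = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return err;
}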
diff --git a/kernel/include/crypto/scatterwalk.h b/kernel/include/crypto/scatterwalk.h
new file mode 100644
index 000000000..20e4226a2
--- /dev/null
+++ b/kernel/include/crypto/scatterwalk.h
@@ -0,0 +1,105 @@
+/*
+ * Cryptographic scatter and gather helpers.
+ *
+ * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
+ * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
+ * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_SCATTERWALK_H
+#define _CRYPTO_SCATTERWALK_H
+
+#include <asm/kmap_types.h>
+#include <crypto/algapi.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+
+static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
+ struct scatterlist *sg2)
+{
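+	/*
+	 * Turn the last entry of sg1 into a chain link: store sg2 as the
+	 * "page" pointer, clear the end bit (0x02) and set the chain
+	 * bit (0x01) in page_link.
+	 */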
+ sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+ sg1[num - 1].page_link &= ~0x02;
+ sg1[num - 1].page_link |= 0x01;
+}
+
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+ struct scatterlist *sg,
+ int chain, int num)
+{
+ if (chain) {
+ head->length += sg->length;
+ sg = sg_next(sg);
+ }
+
+ if (sg)
+ scatterwalk_sg_chain(head, num, sg);
+ else
+ sg_mark_end(head);
+}
+
+static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
+ struct scatter_walk *walk_out)
+{
+ return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
+ (int)(walk_in->offset - walk_out->offset));
+}
+
+static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
+{
+ unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
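+	/* offset_in_page(~offset) + 1 == PAGE_SIZE - offset_in_page(offset) */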
+ unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
+ return len_this_page > len ? len : len_this_page;
+}
+
+static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
+ unsigned int nbytes)
+{
+ unsigned int len_this_page = scatterwalk_pagelen(walk);
+ return nbytes > len_this_page ? len_this_page : nbytes;
+}
+
+static inline void scatterwalk_advance(struct scatter_walk *walk,
+ unsigned int nbytes)
+{
+ walk->offset += nbytes;
+}
+
+static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
+ unsigned int alignmask)
+{
+ return !(walk->offset & alignmask);
+}
+
+static inline struct page *scatterwalk_page(struct scatter_walk *walk)
+{
+ return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+}
+
+static inline void scatterwalk_unmap(void *vaddr)
+{
+ kunmap_atomic(vaddr);
+}
+
+void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
+void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
+ size_t nbytes, int out);
+void *scatterwalk_map(struct scatter_walk *walk);
+void scatterwalk_done(struct scatter_walk *walk, int out, int more);
+
+void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out);
+
+int scatterwalk_bytes_sglen(struct scatterlist *sg, int num_bytes);
+
+#endif /* _CRYPTO_SCATTERWALK_H */
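
The start/copychunks/done triple declared above is the core walking pattern; scatterwalk_map_and_copy() wraps essentially the same sequence. A sketch of copying from a scatterlist into a linear buffer (sg_copy_to_linear() is a hypothetical name; out == 0 selects the scatterlist-to-buffer direction):

static void sg_copy_to_linear(struct scatterlist *sg, void *buf,
			      unsigned int nbytes)
{
	struct scatter_walk walk;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, 0);
	scatterwalk_done(&walk, 0, 0);	/* out == 0, no more data to come */
}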
diff --git a/kernel/include/crypto/serpent.h b/kernel/include/crypto/serpent.h
new file mode 100644
index 000000000..b7e0941eb
--- /dev/null
+++ b/kernel/include/crypto/serpent.h
@@ -0,0 +1,27 @@
+/*
+ * Common values for serpent algorithms
+ */
+
+#ifndef _CRYPTO_SERPENT_H
+#define _CRYPTO_SERPENT_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define SERPENT_MIN_KEY_SIZE 0
+#define SERPENT_MAX_KEY_SIZE 32
+#define SERPENT_EXPKEY_WORDS 132
+#define SERPENT_BLOCK_SIZE 16
+
+struct serpent_ctx {
+ u32 expkey[SERPENT_EXPKEY_WORDS];
+};
+
+int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
+ unsigned int keylen);
+int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
+
+void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+
+#endif /* _CRYPTO_SERPENT_H */
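
A sketch of the low-level entry points above: expand a key into a stack-local context, then encrypt one block. serpent_encrypt_one_block() is a hypothetical wrapper:

static int serpent_encrypt_one_block(const u8 *key, unsigned int keylen,
				     const u8 *in, u8 *out)
{
	struct serpent_ctx ctx;
	int err;

	err = __serpent_setkey(&ctx, key, keylen);
	if (err)
		return err;

	/* dst and src each hold one SERPENT_BLOCK_SIZE block */
	__serpent_encrypt(&ctx, out, in);
	return 0;
}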
diff --git a/kernel/include/crypto/sha.h b/kernel/include/crypto/sha.h
new file mode 100644
index 000000000..dd7905a3c
--- /dev/null
+++ b/kernel/include/crypto/sha.h
@@ -0,0 +1,104 @@
+/*
+ * Common values for SHA algorithms
+ */
+
+#ifndef _CRYPTO_SHA_H
+#define _CRYPTO_SHA_H
+
+#include <linux/types.h>
+
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+struct sha1_state {
+ u32 state[SHA1_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buffer[SHA1_BLOCK_SIZE];
+};
+
+struct sha256_state {
+ u32 state[SHA256_DIGEST_SIZE / 4];
+ u64 count;
+ u8 buf[SHA256_BLOCK_SIZE];
+};
+
+struct sha512_state {
+ u64 state[SHA512_DIGEST_SIZE / 8];
+ u64 count[2];
+ u8 buf[SHA512_BLOCK_SIZE];
+};
+
+struct shash_desc;
+
+extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len);
+
+extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
+ unsigned int len, u8 *hash);
+
+#endif /* _CRYPTO_SHA_H */
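
The crypto_sha*_update/finup exports above exist mainly so that optimized drivers can fall back to the generic code; ordinary users compute digests through the shash API from <crypto/hash.h>. A sketch, with sha256_digest_buf() as a hypothetical helper:

static int sha256_digest_buf(const u8 *data, unsigned int len,
			     u8 *out /* SHA256_DIGEST_SIZE bytes */)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}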
diff --git a/kernel/include/crypto/sha1_base.h b/kernel/include/crypto/sha1_base.h
new file mode 100644
index 000000000..d0df431f9
--- /dev/null
+++ b/kernel/include/crypto/sha1_base.h
@@ -0,0 +1,106 @@
+/*
+ * sha1_base.h - core logic for SHA-1 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha1_block_fn)(struct sha1_state *sst, u8 const *src, int blocks);
+
+static inline int sha1_base_init(struct shash_desc *desc)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA1_H0;
+ sctx->state[1] = SHA1_H1;
+ sctx->state[2] = SHA1_H2;
+ sctx->state[3] = SHA1_H3;
+ sctx->state[4] = SHA1_H4;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha1_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha1_block_fn *block_fn)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA1_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA1_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buffer + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ blocks = len / SHA1_BLOCK_SIZE;
+ len %= SHA1_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA1_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buffer + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha1_base_do_finalize(struct shash_desc *desc,
+ sha1_block_fn *block_fn)
+{
+ const int bit_offset = SHA1_BLOCK_SIZE - sizeof(__be64);
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
+ unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+
+ sctx->buffer[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buffer + partial, 0x0, SHA1_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buffer, 1);
+ }
+
+ memset(sctx->buffer + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buffer, 1);
+
+ return 0;
+}
+
+static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
+{
+ struct sha1_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha1_state){};
+ return 0;
+}
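
These helpers are designed to be glued to an architecture-specific block transform. A sketch of the glue a driver would provide, where my_sha1_transform is the assumed optimized block routine (the shash_alg registration is omitted):

void my_sha1_transform(struct sha1_state *sst, u8 const *src, int blocks);

static int my_sha1_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return sha1_base_do_update(desc, data, len, my_sha1_transform);
}

static int my_sha1_final(struct shash_desc *desc, u8 *out)
{
	sha1_base_do_finalize(desc, my_sha1_transform);
	return sha1_base_finish(desc, out);
}

/*
 * The driver's struct shash_alg would then use .init = sha1_base_init,
 * .update = my_sha1_update, .final = my_sha1_final and
 * .descsize = sizeof(struct sha1_state).
 */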
diff --git a/kernel/include/crypto/sha256_base.h b/kernel/include/crypto/sha256_base.h
new file mode 100644
index 000000000..d1f2195bb
--- /dev/null
+++ b/kernel/include/crypto/sha256_base.h
@@ -0,0 +1,128 @@
+/*
+ * sha256_base.h - core logic for SHA-256 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha256_block_fn)(struct sha256_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha224_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA224_H0;
+ sctx->state[1] = SHA224_H1;
+ sctx->state[2] = SHA224_H2;
+ sctx->state[3] = SHA224_H3;
+ sctx->state[4] = SHA224_H4;
+ sctx->state[5] = SHA224_H5;
+ sctx->state[6] = SHA224_H6;
+ sctx->state[7] = SHA224_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_init(struct shash_desc *desc)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA256_H0;
+ sctx->state[1] = SHA256_H1;
+ sctx->state[2] = SHA256_H2;
+ sctx->state[3] = SHA256_H3;
+ sctx->state[4] = SHA256_H4;
+ sctx->state[5] = SHA256_H5;
+ sctx->state[6] = SHA256_H6;
+ sctx->state[7] = SHA256_H7;
+ sctx->count = 0;
+
+ return 0;
+}
+
+static inline int sha256_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha256_block_fn *block_fn)
+{
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->count += len;
+
+ if (unlikely((partial + len) >= SHA256_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA256_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA256_BLOCK_SIZE;
+ len %= SHA256_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA256_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha256_base_do_finalize(struct shash_desc *desc,
+ sha256_block_fn *block_fn)
+{
+ const int bit_offset = SHA256_BLOCK_SIZE - sizeof(__be64);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA256_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
+ *bits = cpu_to_be64(sctx->count << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ __be32 *digest = (__be32 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
+ put_unaligned_be32(sctx->state[i], digest++);
+
+ *sctx = (struct sha256_state){};
+ return 0;
+}
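
Because sha256_base_finish() writes crypto_shash_digestsize(desc->tfm) bytes rather than a fixed eight words, one update/final pair can serve both SHA-224 and SHA-256; only .init and .digestsize differ (sha512_base.h plays the same trick for SHA-384/SHA-512). A sketch, with my_sha256_transform as the assumed block routine:

void my_sha256_transform(struct sha256_state *sst, u8 const *src, int blocks);

static int my_sha2_final(struct shash_desc *desc, u8 *out)
{
	sha256_base_do_finalize(desc, my_sha256_transform);
	/* emits 28 bytes on a "sha224" tfm, 32 on a "sha256" one */
	return sha256_base_finish(desc, out);
}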
diff --git a/kernel/include/crypto/sha512_base.h b/kernel/include/crypto/sha512_base.h
new file mode 100644
index 000000000..6c5341e00
--- /dev/null
+++ b/kernel/include/crypto/sha512_base.h
@@ -0,0 +1,131 @@
+/*
+ * sha512_base.h - core logic for SHA-512 implementations
+ *
+ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+#include <linux/crypto.h>
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+typedef void (sha512_block_fn)(struct sha512_state *sst, u8 const *src,
+ int blocks);
+
+static inline int sha384_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA384_H0;
+ sctx->state[1] = SHA384_H1;
+ sctx->state[2] = SHA384_H2;
+ sctx->state[3] = SHA384_H3;
+ sctx->state[4] = SHA384_H4;
+ sctx->state[5] = SHA384_H5;
+ sctx->state[6] = SHA384_H6;
+ sctx->state[7] = SHA384_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_init(struct shash_desc *desc)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+ sctx->state[0] = SHA512_H0;
+ sctx->state[1] = SHA512_H1;
+ sctx->state[2] = SHA512_H2;
+ sctx->state[3] = SHA512_H3;
+ sctx->state[4] = SHA512_H4;
+ sctx->state[5] = SHA512_H5;
+ sctx->state[6] = SHA512_H6;
+ sctx->state[7] = SHA512_H7;
+ sctx->count[0] = sctx->count[1] = 0;
+
+ return 0;
+}
+
+static inline int sha512_base_do_update(struct shash_desc *desc,
+ const u8 *data,
+ unsigned int len,
+ sha512_block_fn *block_fn)
+{
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->count[0] += len;
+ if (sctx->count[0] < len)
+ sctx->count[1]++;
+
+ if (unlikely((partial + len) >= SHA512_BLOCK_SIZE)) {
+ int blocks;
+
+ if (partial) {
+ int p = SHA512_BLOCK_SIZE - partial;
+
+ memcpy(sctx->buf + partial, data, p);
+ data += p;
+ len -= p;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ blocks = len / SHA512_BLOCK_SIZE;
+ len %= SHA512_BLOCK_SIZE;
+
+ if (blocks) {
+ block_fn(sctx, data, blocks);
+ data += blocks * SHA512_BLOCK_SIZE;
+ }
+ partial = 0;
+ }
+ if (len)
+ memcpy(sctx->buf + partial, data, len);
+
+ return 0;
+}
+
+static inline int sha512_base_do_finalize(struct shash_desc *desc,
+ sha512_block_fn *block_fn)
+{
+ const int bit_offset = SHA512_BLOCK_SIZE - sizeof(__be64[2]);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *bits = (__be64 *)(sctx->buf + bit_offset);
+ unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+
+ sctx->buf[partial++] = 0x80;
+ if (partial > bit_offset) {
+ memset(sctx->buf + partial, 0x0, SHA512_BLOCK_SIZE - partial);
+ partial = 0;
+
+ block_fn(sctx, sctx->buf, 1);
+ }
+
+ memset(sctx->buf + partial, 0x0, bit_offset - partial);
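+	/* append the message length in bits as a 128-bit big-endian value */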
+ bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+ bits[1] = cpu_to_be64(sctx->count[0] << 3);
+ block_fn(sctx, sctx->buf, 1);
+
+ return 0;
+}
+
+static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
+{
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ __be64 *digest = (__be64 *)out;
+ int i;
+
+ for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
+ put_unaligned_be64(sctx->state[i], digest++);
+
+ *sctx = (struct sha512_state){};
+ return 0;
+}
diff --git a/kernel/include/crypto/skcipher.h b/kernel/include/crypto/skcipher.h
new file mode 100644
index 000000000..07d245f07
--- /dev/null
+++ b/kernel/include/crypto/skcipher.h
@@ -0,0 +1,110 @@
+/*
+ * Symmetric key ciphers.
+ *
+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_SKCIPHER_H
+#define _CRYPTO_SKCIPHER_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+/**
+ * struct skcipher_givcrypt_request - Crypto request with IV generation
+ * @seq: Sequence number for IV generation
+ * @giv: Space for generated IV
+ * @creq: The crypto request itself
+ */
+struct skcipher_givcrypt_request {
+ u64 seq;
+ u8 *giv;
+
+ struct ablkcipher_request creq;
+};
+
+static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
+ struct skcipher_givcrypt_request *req)
+{
+ return crypto_ablkcipher_reqtfm(&req->creq);
+}
+
+static inline int crypto_skcipher_givencrypt(
+ struct skcipher_givcrypt_request *req)
+{
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
+ return crt->givencrypt(req);
+}
+
+static inline int crypto_skcipher_givdecrypt(
+ struct skcipher_givcrypt_request *req)
+{
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req));
+ return crt->givdecrypt(req);
+}
+
+static inline void skcipher_givcrypt_set_tfm(
+ struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm)
+{
+ req->creq.base.tfm = crypto_ablkcipher_tfm(tfm);
+}
+
+static inline struct skcipher_givcrypt_request *skcipher_givcrypt_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(ablkcipher_request_cast(req),
+ struct skcipher_givcrypt_request, creq);
+}
+
+static inline struct skcipher_givcrypt_request *skcipher_givcrypt_alloc(
+ struct crypto_ablkcipher *tfm, gfp_t gfp)
+{
+ struct skcipher_givcrypt_request *req;
+
+ req = kmalloc(sizeof(struct skcipher_givcrypt_request) +
+ crypto_ablkcipher_reqsize(tfm), gfp);
+
+ if (likely(req))
+ skcipher_givcrypt_set_tfm(req, tfm);
+
+ return req;
+}
+
+static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
+{
+ kfree(req);
+}
+
+static inline void skcipher_givcrypt_set_callback(
+ struct skcipher_givcrypt_request *req, u32 flags,
+ crypto_completion_t compl, void *data)
+{
+ ablkcipher_request_set_callback(&req->creq, flags, compl, data);
+}
+
+static inline void skcipher_givcrypt_set_crypt(
+ struct skcipher_givcrypt_request *req,
+ struct scatterlist *src, struct scatterlist *dst,
+ unsigned int nbytes, void *iv)
+{
+ ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv);
+}
+
+static inline void skcipher_givcrypt_set_giv(
+ struct skcipher_givcrypt_request *req, u8 *giv, u64 seq)
+{
+ req->giv = giv;
+ req->seq = seq;
+}
+
+#endif /* _CRYPTO_SKCIPHER_H */
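
A sketch of driving a geniv cipher through this interface. example_givencrypt(), its callback plumbing and the convention that @giv points at the IV slot in the output packet are illustrative; the usual -EBUSY backlog handling is omitted:

static int example_givencrypt(struct crypto_ablkcipher *geniv,
			      struct scatterlist *sg, unsigned int nbytes,
			      u8 *giv, u64 seq,
			      crypto_completion_t done, void *priv)
{
	struct skcipher_givcrypt_request *req;
	int err;

	req = skcipher_givcrypt_alloc(geniv, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				       done, priv);
	skcipher_givcrypt_set_crypt(req, sg, sg, nbytes, giv);
	skcipher_givcrypt_set_giv(req, giv, seq);

	err = crypto_skcipher_givencrypt(req);
	if (err != -EINPROGRESS)
		skcipher_givcrypt_free(req);	/* completed synchronously */
	return err;
}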
diff --git a/kernel/include/crypto/twofish.h b/kernel/include/crypto/twofish.h
new file mode 100644
index 000000000..095c901a8
--- /dev/null
+++ b/kernel/include/crypto/twofish.h
@@ -0,0 +1,24 @@
+#ifndef _CRYPTO_TWOFISH_H
+#define _CRYPTO_TWOFISH_H
+
+#include <linux/types.h>
+
+#define TF_MIN_KEY_SIZE 16
+#define TF_MAX_KEY_SIZE 32
+#define TF_BLOCK_SIZE 16
+
+struct crypto_tfm;
+
+/*
+ * Structure for an expanded Twofish key. s contains the key-dependent
+ * S-boxes composed with the MDS matrix; w contains the eight "whitening"
+ * subkeys, K[0] through K[7]. k holds the remaining, "round" subkeys. Note
+ * that k[i] corresponds to what the Twofish paper calls K[i+8].
+ */
+struct twofish_ctx {
+ u32 s[4][256], w[8], k[32];
+};
+
+int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
+ unsigned int key_len, u32 *flags);
+int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
+
+#endif /* _CRYPTO_TWOFISH_H */
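
The setkey helpers above feed the cipher-level implementation; callers normally reach Twofish through the single-block cipher API. A sketch, with twofish_encrypt_one_block() as a hypothetical helper:

static int twofish_encrypt_one_block(const u8 *key, unsigned int keylen,
				     const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("twofish", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one TF_BLOCK_SIZE block */

	crypto_free_cipher(tfm);
	return err;
}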
diff --git a/kernel/include/crypto/vmac.h b/kernel/include/crypto/vmac.h
new file mode 100644
index 000000000..6b700c7b2
--- /dev/null
+++ b/kernel/include/crypto/vmac.h
@@ -0,0 +1,63 @@
+/*
+ * Modified to interface to the Linux kernel
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#ifndef __CRYPTO_VMAC_H
+#define __CRYPTO_VMAC_H
+
+struct crypto_cipher;	/* used by struct vmac_ctx_t below */
+
+/* --------------------------------------------------------------------------
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Please send bug reports to the authors.
+ * Last modified: 17 APR 08, 1700 PDT
+ * ----------------------------------------------------------------------- */
+
+/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN	64
+#define VMAC_KEY_SIZE	128	/* must be 128, 192 or 256 */
+#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES	128	/* must be 2^i for some 3 < i < 13; standard = 128 */
+
+/*
+ * This implementation uses u32 and u64 as names for unsigned 32-
+ * and 64-bit integer types; in the kernel they are provided by
+ * <linux/types.h> rather than C99 <stdint.h>.
+ */
+struct vmac_ctx {
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+ u64 polytmp[2*VMAC_TAG_LEN/64];
+ u64 cached_nonce[2];
+ u64 cached_aes[2];
+ int first_block_processed;
+};
+
+typedef u64 vmac_t;
+
+struct vmac_ctx_t {
+ struct crypto_cipher *child;
+ struct vmac_ctx __vmac_ctx;
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ int partial_size; /* size of the partial block */
+};
+
+#endif /* __CRYPTO_VMAC_H */
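
VMAC is instantiated as a template over a 128-bit block cipher and used through the shash interface; the resulting tag is VMAC_TAG_LEN/8 = 8 bytes. A sketch of allocating and keying it over AES, with alloc_vmac_aes() as a hypothetical helper:

static struct crypto_shash *alloc_vmac_aes(const u8 key[VMAC_KEY_LEN])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("vmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
	if (err) {
		crypto_free_shash(tfm);
		return ERR_PTR(err);
	}
	return tfm;
}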
diff --git a/kernel/include/crypto/xts.h b/kernel/include/crypto/xts.h
new file mode 100644
index 000000000..72c09eb56
--- /dev/null
+++ b/kernel/include/crypto/xts.h
@@ -0,0 +1,27 @@
+#ifndef _CRYPTO_XTS_H
+#define _CRYPTO_XTS_H
+
+#include <crypto/b128ops.h>
+
+struct scatterlist;
+struct blkcipher_desc;
+
+#define XTS_BLOCK_SIZE 16
+
+struct xts_crypt_req {
+ be128 *tbuf;
+ unsigned int tbuflen;
+
+ void *tweak_ctx;
+	void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src);
+ void *crypt_ctx;
+ void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
+};
+
+#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8 *, const u8 *))(x))
+
+int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes,
+ struct xts_crypt_req *req);
+
+#endif /* _CRYPTO_XTS_H */
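
xts_crypt() is meant to be called from a driver's blkcipher hooks, with the driver supplying a single-block tweak function and a multi-block crypt function over its own key contexts. A sketch modeled on that pattern; the my_* types and functions stand in for driver internals and are assumed, not real:

struct my_cipher_ctx;	/* driver's per-key state, assumed */
void my_encrypt_block(struct my_cipher_ctx *ctx, u8 *dst, const u8 *src);
void my_encrypt_blocks(void *ctx, u8 *blks, unsigned int nbytes);

struct my_xts_ctx {
	struct my_cipher_ctx *tweak_ctx;	/* keyed with the second half-key */
	struct my_cipher_ctx *crypt_ctx;	/* keyed with the first half-key */
};

static int my_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct my_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	be128 buf[8];	/* tweak scratch; xts_crypt() works in tbuflen chunks */
	struct xts_crypt_req req = {
		.tbuf		= buf,
		.tbuflen	= sizeof(buf),
		.tweak_ctx	= ctx->tweak_ctx,
		.tweak_fn	= XTS_TWEAK_CAST(my_encrypt_block),
		.crypt_ctx	= ctx->crypt_ctx,
		.crypt_fn	= my_encrypt_blocks,
	};

	return xts_crypt(desc, dst, src, nbytes, &req);
}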