/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be equal to the serial
	 * cpumask of the corresponding padata instance, so it is updated
	 * when padata notifies us about a serial cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This prevents us from using
	 * cpumask_var_t directly, because the actual type of cpumask_var_t
	 * depends on the kernel configuration (in particular on the
	 * CONFIG_CPUMASK_OFFSTACK macro): cpumask_var_t may be either a
	 * pointer to struct cpumask or a variable allocated on the stack.
	 * Thus we cannot safely use cpumask_var_t with RCU operations such
	 * as rcu_assign_pointer or rcu_dereference. So cpumask_var_t is
	 * wrapped in struct pcrypt_cpumask, which makes it possible to use
	 * it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

/*
 * Hand the request to padata. If the requested callback CPU is no longer
 * in the current serial cpumask, pick a replacement deterministically from
 * that mask and remember it in *cb_cpu.
 */
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

/* setkey/setauthsize are simply forwarded to the wrapped AEAD transform. */
static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

/* Serialization callback: complete the original request in order. */
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

/* Completion callback of the inner request; queues the serial callback. */
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	/* Set up the inner request on the wrapped tfm and dispatch it
	 * to the parallel encryption workers. */
	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	/* Distribute the callback CPUs of new transforms round-robin
	 * over the online CPUs. */
	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}
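/*
 * Usage sketch (not part of pcrypt.c itself): how kernel code might
 * instantiate the parallel wrapper registered by this template. The
 * "pcrypt(gcm(aes))" name follows the template syntax; the helper name
 * pcrypt_usage_sketch and the abbreviated error handling are illustrative
 * assumptions, not code from this file.
 */
#include <crypto/aead.h>
#include <linux/err.h>

static int pcrypt_usage_sketch(void)
{
	struct crypto_aead *tfm;

	/* Triggers template instantiation. setkey()/setauthsize() and the
	 * encrypt/decrypt entry points above are then reached through the
	 * normal AEAD API, with the work spread over padata workers. */
	tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_free_aead(tfm);
	return 0;
}
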
/*
 * Copyright 2002 Integrated Device Technology, Inc.
 *		All rights reserved.
 *
 * DMA register definition.
 *
 * Author : ryan.holmQVist@idt.com
 * Date	  : 20011005
 */

#ifndef __ASM_RC32434_DMA_H
#define __ASM_RC32434_DMA_H

#include <asm/mach-rc32434/rb.h>

#define DMA0_BASE_ADDR			0x18040000

/*
 * DMA descriptor (in physical memory).
 */

struct dma_desc {
	u32 control;			/* Control bits; use the DMAD_* definitions. */
	u32 ca;				/* Current address. */
	u32 devcs;			/* Device control and status. */
	u32 link;			/* Next descriptor in chain. */
};

#endif	/* __ASM_RC32434_DMA_H */
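
/*
 * Illustrative sketch only (not part of the original header): chaining two
 * DMA descriptors in physical memory. EXAMPLE_DMAD_IOD and the 18-bit count
 * field width are hypothetical stand-ins for the real DMAD_* definitions;
 * CPHYSADDR() is the usual MIPS helper for obtaining a KSEG0 physical
 * address.
 */
#include <linux/types.h>
#include <asm/addrspace.h>	/* CPHYSADDR() */

#define EXAMPLE_DMAD_IOD	(1 << 18)	/* hypothetical "interrupt on done" bit */

static void example_build_chain(struct dma_desc *d0, struct dma_desc *d1,
				u32 buf0_phys, u32 buf1_phys, u32 len)
{
	/* First descriptor: transfer 'len' bytes, then follow 'link'. */
	d0->control = len & 0x3ffff;	/* count field width is an assumption */
	d0->ca = buf0_phys;		/* buffer physical address */
	d0->devcs = 0;
	d0->link = CPHYSADDR(d1);	/* chain to the second descriptor */

	/* Second descriptor terminates the chain. */
	d1->control = (len & 0x3ffff) | EXAMPLE_DMAD_IOD;
	d1->ca = buf1_phys;
	d1->devcs = 0;
	d1->link = 0;			/* no further descriptors */
}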