Merge: crypto: tegra - Add Tegra Security Engine driver

MR: https://gitlab.com/redhat/centos-stream/src/kernel/centos-stream-9/-/merge_requests/4354

JIRA: https://issues.redhat.com/browse/RHEL-34947

Signed-off-by: Charles Mirabile <cmirabil@redhat.com>

Approved-by: Tony Camuso <tcamuso@redhat.com>
Approved-by: Jocelyn Falempe <jfalempe@redhat.com>
Approved-by: Clemens Lang <cllang@redhat.com>
Approved-by: Lyude Paul <lyude@redhat.com>
Approved-by: Vladis Dronov <vdronov@redhat.com>
Approved-by: CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com>

Merged-by: Rado Vrbovsky <rvrbovsk@redhat.com>
Rado Vrbovsky 2024-10-25 16:09:01 +00:00
commit e3dbab91dd
31 changed files with 4727 additions and 92 deletions


@ -69,6 +69,8 @@ the crypto engine via one of:
* crypto_transfer_hash_request_to_engine()
* crypto_transfer_kpp_request_to_engine()
* crypto_transfer_skcipher_request_to_engine()
At the end of the request process, a call to one of the following functions is needed:
@ -79,4 +81,6 @@ At the end of the request process, a call to one of the following functions is needed:
* crypto_finalize_hash_request()
* crypto_finalize_kpp_request()
* crypto_finalize_skcipher_request()
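
To make the documented flow concrete, here is a minimal sketch of how a driver pairs the two halves: the algorithm entry point only queues the request on the engine, and the engine's do_one_request callback finalizes it once the hardware is done. All demo_* names are hypothetical and not part of this patch.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

/* Hypothetical per-transform context; only the engine pointer matters here. */
struct demo_ctx {
	struct crypto_engine *engine;
};

/* .encrypt()/.decrypt() do no work themselves, they only queue the request. */
static int demo_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct demo_ctx *ctx = crypto_skcipher_ctx(tfm);

	return crypto_transfer_skcipher_request_to_engine(ctx->engine, req);
}

/* Called by the engine worker for each queued request. */
static int demo_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = container_of(areq,
						    struct skcipher_request, base);
	int err = 0;

	/* ... program the hardware, wait for completion, set err ... */

	/* Hand the finished request back to the crypto API. */
	crypto_finalize_skcipher_request(engine, req, err);
	return 0;
}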


@ -0,0 +1,52 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/nvidia,tegra234-se-aes.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NVIDIA Tegra Security Engine for AES algorithms
description:
The Tegra Security Engine accelerates the following AES encryption/decryption
algorithms - AES-ECB, AES-CBC, AES-OFB, AES-XTS, AES-CTR, AES-GCM, AES-CCM,
AES-CMAC
maintainers:
- Akhil R <akhilrajeev@nvidia.com>
properties:
compatible:
const: nvidia,tegra234-se-aes
reg:
maxItems: 1
clocks:
maxItems: 1
iommus:
maxItems: 1
dma-coherent: true
required:
- compatible
- reg
- clocks
- iommus
additionalProperties: false
examples:
- |
#include <dt-bindings/memory/tegra234-mc.h>
#include <dt-bindings/clock/tegra234-clock.h>
crypto@15820000 {
compatible = "nvidia,tegra234-se-aes";
reg = <0x15820000 0x10000>;
clocks = <&bpmp TEGRA234_CLK_SE>;
iommus = <&smmu TEGRA234_SID_SES_SE1>;
dma-coherent;
};
...
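
Once a node like the example above is present and the driver binds, the algorithms listed in the description are reached through the regular kernel crypto API; whether the SE-backed implementation is actually used depends on algorithm priorities. A hedged sketch of generic "cbc(aes)" usage (nothing below is Tegra-specific; demo_* is a hypothetical caller):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_cbc_aes_encrypt(const u8 *key, unsigned int keylen, u8 *iv,
				struct scatterlist *src, struct scatterlist *dst,
				unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* Waits for the asynchronous completion driven by the hardware. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}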


@ -0,0 +1,52 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/crypto/nvidia,tegra234-se-hash.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: NVIDIA Tegra Security Engine for HASH algorithms
description:
The Tegra Security HASH Engine accelerates the following HASH functions -
SHA1, SHA224, SHA256, SHA384, SHA512, SHA3-224, SHA3-256, SHA3-384, SHA3-512,
HMAC(SHA224), HMAC(SHA256), HMAC(SHA384), HMAC(SHA512)
maintainers:
- Akhil R <akhilrajeev@nvidia.com>
properties:
compatible:
const: nvidia,tegra234-se-hash
reg:
maxItems: 1
clocks:
maxItems: 1
iommus:
maxItems: 1
dma-coherent: true
required:
- compatible
- reg
- clocks
- iommus
additionalProperties: false
examples:
- |
#include <dt-bindings/memory/tegra234-mc.h>
#include <dt-bindings/clock/tegra234-clock.h>
crypto@15840000 {
compatible = "nvidia,tegra234-se-hash";
reg = <0x15840000 0x10000>;
clocks = <&bpmp TEGRA234_CLK_SE>;
iommus = <&smmu TEGRA234_SID_SES_SE2>;
dma-coherent;
};
...
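
As with the AES binding, the hash algorithms listed above are consumed through the standard asynchronous hash API. A minimal sketch of a one-shot SHA-256 digest (generic API usage, nothing Tegra-specific; demo_* is hypothetical, and data must be addressable through a scatterlist, e.g. kmalloc'd):

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int demo_sha256_digest(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* "out" receives the 32-byte digest once the request completes. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}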


@ -18975,6 +18975,11 @@ M: Prashant Gaikwad <pgaikwad@nvidia.com>
S: Supported
F: drivers/clk/tegra/
TEGRA CRYPTO DRIVERS
M: Akhil R <akhilrajeev@nvidia.com>
S: Supported
F: drivers/crypto/tegra/*
TEGRA DMA DRIVERS
M: Laxman Dewangan <ldewangan@nvidia.com>
M: Jon Hunter <jonathanh@nvidia.com>


@ -2224,6 +2224,22 @@
*/
status = "disabled";
};
crypto@15820000 {
compatible = "nvidia,tegra234-se-aes";
reg = <0x00 0x15820000 0x00 0x10000>;
clocks = <&bpmp TEGRA234_CLK_SE>;
iommus = <&smmu_niso1 TEGRA234_SID_SES_SE1>;
dma-coherent;
};
crypto@15840000 {
compatible = "nvidia,tegra234-se-hash";
reg = <0x00 0x15840000 0x00 0x10000>;
clocks = <&bpmp TEGRA234_CLK_SE>;
iommus = <&smmu_niso1 TEGRA234_SID_SES_SE2>;
dma-coherent;
};
};
pcie@140a0000 {


@ -1200,6 +1200,7 @@ CONFIG_CRYPTO_DEV_SUN8I_CE=m
CONFIG_CRYPTO_DEV_FSL_CAAM=m
CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
CONFIG_CRYPTO_DEV_QCOM_RNG=m
CONFIG_CRYPTO_DEV_TEGRA=m
CONFIG_CRYPTO_DEV_CCREE=m
CONFIG_CRYPTO_DEV_HISI_SEC2=m
CONFIG_CRYPTO_DEV_HISI_ZIP=m


@ -459,6 +459,8 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
hash->setkey = ahash_nosetkey;
crypto_ahash_set_statesize(hash, alg->halg.statesize);
if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
return crypto_init_shash_ops_async(tfm);


@ -7,15 +7,30 @@
* Author: Baolin Wang <baolin.wang@linaro.org>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"
#define CRYPTO_ENGINE_MAX_QLEN 10
/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200
struct crypto_engine_alg {
struct crypto_alg base;
struct crypto_engine_op op;
};
/**
* crypto_finalize_request - finalize one request if the request is done
* @engine: the hardware engine
@ -71,6 +86,8 @@ static void crypto_pump_requests(struct crypto_engine *engine,
bool in_kthread)
{
struct crypto_async_request *async_req, *backlog;
struct crypto_engine_alg *alg;
struct crypto_engine_op *op;
unsigned long flags;
bool was_busy = false;
int ret;
@ -147,23 +164,30 @@ start_request:
}
}
enginectx = crypto_tfm_ctx(async_req->tfm);
if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
alg = container_of(async_req->tfm->__crt_alg,
struct crypto_engine_alg, base);
op = &alg->op;
} else {
enginectx = crypto_tfm_ctx(async_req->tfm);
op = &enginectx->op;
if (enginectx->op.prepare_request) {
ret = enginectx->op.prepare_request(engine, async_req);
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err_2;
if (op->prepare_request) {
ret = op->prepare_request(engine, async_req);
if (ret) {
dev_err(engine->dev, "failed to prepare request: %d\n",
ret);
goto req_err_2;
}
}
if (!op->do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
}
if (!enginectx->op.do_one_request) {
dev_err(engine->dev, "failed to do request\n");
ret = -EINVAL;
goto req_err_1;
}
ret = enginectx->op.do_one_request(engine, async_req);
ret = op->do_one_request(engine, async_req);
/* Request unsuccessfully executed by hardware */
if (ret < 0) {
@ -327,6 +351,19 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
* crypto_transfer_kpp_request_to_engine - transfer one kpp_request to list
* into the engine queue
* @engine: the hardware engine
* @req: the request need to be listed into the engine queue
*/
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
struct kpp_request *req)
{
return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
/**
* crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
* to list into the engine queue
@ -382,6 +419,19 @@ void crypto_finalize_hash_request(struct crypto_engine *engine,
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
* crypto_finalize_kpp_request - finalize one kpp_request if the request is done
* @engine: the hardware engine
* @req: the request need to be finalized
* @err: error number
*/
void crypto_finalize_kpp_request(struct crypto_engine *engine,
struct kpp_request *req, int err)
{
return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
/**
* crypto_finalize_skcipher_request - finalize one skcipher_request if
* the request is done
@ -563,5 +613,177 @@ int crypto_engine_exit(struct crypto_engine *engine)
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);
void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);
int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_aead(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_aeads(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);
void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);
int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);
void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);
int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_ahash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_ahashes(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);
void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);
int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);
void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);
int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);
void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);
int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
if (!alg->op.do_one_request)
return -EINVAL;
alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;
return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);
int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_engine_register_skcipher(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
crypto_engine_unregister_skciphers(algs, i);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);
void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
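
For reference, this is the pattern the new helpers are meant to support (and the one the Tegra driver added later in this series follows): the engine op sits next to the base algorithm, and crypto_engine_register_skciphers() marks each entry with CRYPTO_ALG_ENGINE so crypto_pump_requests() can recover the op via container_of(). A sketch with hypothetical demo_* names; the callbacks would look like the pair sketched after the documentation hunk above.

#include <crypto/aes.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

struct demo_ctx {
	struct crypto_engine *engine;
};

/* Defined elsewhere in the hypothetical driver. */
int demo_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen);
int demo_encrypt(struct skcipher_request *req);
int demo_decrypt(struct skcipher_request *req);
int demo_do_one_request(struct crypto_engine *engine, void *areq);

static struct skcipher_engine_alg demo_algs[] = {
	{
		.base = {
			.base = {
				.cra_name		= "cbc(aes)",
				.cra_driver_name	= "cbc-aes-demo",
				.cra_priority		= 300,
				.cra_flags		= CRYPTO_ALG_ASYNC,
				.cra_blocksize		= AES_BLOCK_SIZE,
				.cra_ctxsize		= sizeof(struct demo_ctx),
				.cra_module		= THIS_MODULE,
			},
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= demo_setkey,
			.encrypt	= demo_encrypt,
			.decrypt	= demo_decrypt,
		},
		/* Registration fails with -EINVAL if do_one_request is unset. */
		.op = {
			.do_one_request	= demo_do_one_request,
		},
	},
};

/* In probe, after crypto_engine_alloc_init() and crypto_engine_start():
 *	ret = crypto_engine_register_skciphers(demo_algs, ARRAY_SIZE(demo_algs));
 * and in remove:
 *	crypto_engine_unregister_skciphers(demo_algs, ARRAY_SIZE(demo_algs));
 */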


@ -110,9 +110,9 @@
* as stdrng. Each DRBG receives an increasing cra_priority values the later
* they are defined in this array (see drbg_fill_array).
*
* HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
* the SHA256 / AES 256 over other ciphers. Thus, the favored
* DRBGs are the latest entries in this array.
* HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the
* HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the
* favored DRBGs are the latest entries in this array.
*/
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
@ -1449,11 +1449,11 @@ static int drbg_generate(struct drbg_state *drbg,
int err = 0;
pr_devel("DRBG: start to perform self test\n");
if (drbg->core->flags & DRBG_HMAC)
err = alg_test("drbg_pr_hmac_sha256",
"drbg_pr_hmac_sha256", 0, 0);
err = alg_test("drbg_pr_hmac_sha512",
"drbg_pr_hmac_sha512", 0, 0);
else if (drbg->core->flags & DRBG_CTR)
err = alg_test("drbg_pr_ctr_aes128",
"drbg_pr_ctr_aes128", 0, 0);
err = alg_test("drbg_pr_ctr_aes256",
"drbg_pr_ctr_aes256", 0, 0);
else
err = alg_test("drbg_pr_sha256",
"drbg_pr_sha256", 0, 0);
@ -2004,11 +2004,13 @@ static inline int __init drbg_healthcheck_sanity(void)
return 0;
#ifdef CONFIG_CRYPTO_DRBG_CTR
drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
#elif defined CONFIG_CRYPTO_DRBG_HASH
drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HASH
drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
#else
drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
#endif
#ifdef CONFIG_CRYPTO_DRBG_HMAC
drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr);
#endif
drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);


@ -655,6 +655,15 @@ config CRYPTO_DEV_ROCKCHIP
This driver interfaces with the hardware crypto accelerator.
Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
config CRYPTO_DEV_TEGRA
tristate "Enable Tegra Security Engine"
depends on TEGRA_HOST1X
select CRYPTO_ENGINE
help
Select this to enable Tegra Security Engine which accelerates various
AES encryption/decryption and HASH algorithms.
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST


@ -40,6 +40,7 @@ obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-$(CONFIG_ARCH_STM32) += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/


@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
tegra-se-objs := tegra-se-key.o tegra-se-main.o
tegra-se-y += tegra-se-aes.o
tegra-se-y += tegra-se-hash.o
obj-$(CONFIG_CRYPTO_DEV_TEGRA) += tegra-se.o

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,156 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver file to manage keys of NVIDIA Security Engine.
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <crypto/aes.h>
#include "tegra-se.h"
#define SE_KEY_FULL_MASK GENMASK(SE_MAX_KEYSLOT, 0)
/* Reserve keyslot 0, 14, 15 */
#define SE_KEY_RSVD_MASK (BIT(0) | BIT(14) | BIT(15))
#define SE_KEY_VALID_MASK (SE_KEY_FULL_MASK & ~SE_KEY_RSVD_MASK)
/* Mutex lock to guard keyslots */
static DEFINE_MUTEX(kslt_lock);
/* Keyslot bitmask (0 = available, 1 = in use/not available) */
static u16 tegra_se_keyslots = SE_KEY_RSVD_MASK;
static u16 tegra_keyslot_alloc(void)
{
u16 keyid;
mutex_lock(&kslt_lock);
/* Check if all key slots are full */
if (tegra_se_keyslots == GENMASK(SE_MAX_KEYSLOT, 0)) {
mutex_unlock(&kslt_lock);
return 0;
}
keyid = ffz(tegra_se_keyslots);
tegra_se_keyslots |= BIT(keyid);
mutex_unlock(&kslt_lock);
return keyid;
}
static void tegra_keyslot_free(u16 slot)
{
mutex_lock(&kslt_lock);
tegra_se_keyslots &= ~(BIT(slot));
mutex_unlock(&kslt_lock);
}
static unsigned int tegra_key_prep_ins_cmd(struct tegra_se *se, u32 *cpuvaddr,
const u32 *key, u32 keylen, u16 slot, u32 alg)
{
int i = 0, j;
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY;
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->manifest);
cpuvaddr[i++] = se->manifest(se->owner, alg, keylen);
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_dst);
cpuvaddr[i++] = SE_AES_KEY_DST_INDEX(slot);
for (j = 0; j < keylen / 4; j++) {
/* Set key address */
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_addr);
cpuvaddr[i++] = j;
/* Set key data */
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->key_data);
cpuvaddr[i++] = key[j];
}
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->config);
cpuvaddr[i++] = SE_CFG_INS;
cpuvaddr[i++] = host1x_opcode_setpayload(1);
cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_START |
SE_AES_OP_LASTBUF;
cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);
dev_dbg(se->dev, "key-slot %u key-manifest %#x\n",
slot, se->manifest(se->owner, alg, keylen));
return i;
}
static bool tegra_key_in_kslt(u32 keyid)
{
bool ret;
if (keyid > SE_MAX_KEYSLOT)
return false;
mutex_lock(&kslt_lock);
ret = ((BIT(keyid) & SE_KEY_VALID_MASK) &&
(BIT(keyid) & tegra_se_keyslots));
mutex_unlock(&kslt_lock);
return ret;
}
static int tegra_key_insert(struct tegra_se *se, const u8 *key,
u32 keylen, u16 slot, u32 alg)
{
const u32 *keyval = (u32 *)key;
u32 *addr = se->cmdbuf->addr, size;
size = tegra_key_prep_ins_cmd(se, addr, keyval, keylen, slot, alg);
return tegra_se_host1x_submit(se, size);
}
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg)
{
u8 zkey[AES_MAX_KEY_SIZE] = {0};
if (!keyid)
return;
/* Overwrite the key with 0s */
tegra_key_insert(se, zkey, AES_MAX_KEY_SIZE, keyid, alg);
tegra_keyslot_free(keyid);
}
int tegra_key_submit(struct tegra_se *se, const u8 *key, u32 keylen, u32 alg, u32 *keyid)
{
int ret;
/* Use the existing slot if it is already allocated */
if (!tegra_key_in_kslt(*keyid)) {
*keyid = tegra_keyslot_alloc();
if (!(*keyid)) {
dev_err(se->dev, "failed to allocate key slot\n");
return -ENOMEM;
}
}
ret = tegra_key_insert(se, key, keylen, *keyid, alg);
if (ret)
return ret;
return 0;
}
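
A sketch of how an algorithm implementation uses these keyslot helpers from its setkey and exit paths. The context layout and demo_* names are hypothetical; the helper calls are the ones defined above, and the AES and HASH implementations built by the Makefile earlier (their diffs are suppressed above) are the in-tree users of these helpers.

#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

#include "tegra-se.h"

struct demo_aes_ctx {
	struct tegra_se *se;
	u32 keyid;	/* hardware keyslot index, 0 = none allocated */
	u32 alg;	/* SE_ALG_* value, e.g. from se_algname_to_algid() */
};

static int demo_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct demo_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (aes_check_keylen(keylen))
		return -EINVAL;

	/* Loads the key into a free keyslot, or reuses ctx->keyid if valid. */
	return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->keyid);
}

static void demo_aes_exit(struct crypto_skcipher *tfm)
{
	struct demo_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* Overwrites the slot contents with zeros and returns it to the pool. */
	tegra_key_invalidate(ctx->se, ctx->keyid, ctx->alg);
}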


@ -0,0 +1,436 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
/*
* Crypto driver for NVIDIA Security Engine in Tegra Chips
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <crypto/engine.h>
#include "tegra-se.h"
static struct host1x_bo *tegra_se_cmdbuf_get(struct host1x_bo *host_bo)
{
struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
kref_get(&cmdbuf->ref);
return host_bo;
}
static void tegra_se_cmdbuf_release(struct kref *ref)
{
struct tegra_se_cmdbuf *cmdbuf = container_of(ref, struct tegra_se_cmdbuf, ref);
dma_free_attrs(cmdbuf->dev, cmdbuf->size, cmdbuf->addr,
cmdbuf->iova, 0);
kfree(cmdbuf);
}
static void tegra_se_cmdbuf_put(struct host1x_bo *host_bo)
{
struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
kref_put(&cmdbuf->ref, tegra_se_cmdbuf_release);
}
static struct host1x_bo_mapping *
tegra_se_cmdbuf_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction)
{
struct tegra_se_cmdbuf *cmdbuf = container_of(bo, struct tegra_se_cmdbuf, bo);
struct host1x_bo_mapping *map;
int err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return ERR_PTR(-ENOMEM);
kref_init(&map->ref);
map->bo = host1x_bo_get(bo);
map->direction = direction;
map->dev = dev;
map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
if (!map->sgt) {
err = -ENOMEM;
goto free;
}
err = dma_get_sgtable(dev, map->sgt, cmdbuf->addr,
cmdbuf->iova, cmdbuf->words * 4);
if (err)
goto free_sgt;
err = dma_map_sgtable(dev, map->sgt, direction, 0);
if (err)
goto free_sgt;
map->phys = sg_dma_address(map->sgt->sgl);
map->size = cmdbuf->words * 4;
map->chunks = err;
return map;
free_sgt:
sg_free_table(map->sgt);
kfree(map->sgt);
free:
kfree(map);
return ERR_PTR(err);
}
static void tegra_se_cmdbuf_unpin(struct host1x_bo_mapping *map)
{
if (!map)
return;
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
sg_free_table(map->sgt);
kfree(map->sgt);
host1x_bo_put(map->bo);
kfree(map);
}
static void *tegra_se_cmdbuf_mmap(struct host1x_bo *host_bo)
{
struct tegra_se_cmdbuf *cmdbuf = container_of(host_bo, struct tegra_se_cmdbuf, bo);
return cmdbuf->addr;
}
static void tegra_se_cmdbuf_munmap(struct host1x_bo *host_bo, void *addr)
{
}
static const struct host1x_bo_ops tegra_se_cmdbuf_ops = {
.get = tegra_se_cmdbuf_get,
.put = tegra_se_cmdbuf_put,
.pin = tegra_se_cmdbuf_pin,
.unpin = tegra_se_cmdbuf_unpin,
.mmap = tegra_se_cmdbuf_mmap,
.munmap = tegra_se_cmdbuf_munmap,
};
static struct tegra_se_cmdbuf *tegra_se_host1x_bo_alloc(struct tegra_se *se, ssize_t size)
{
struct tegra_se_cmdbuf *cmdbuf;
struct device *dev = se->dev->parent;
cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
if (!cmdbuf)
return NULL;
cmdbuf->addr = dma_alloc_attrs(dev, size, &cmdbuf->iova,
GFP_KERNEL, 0);
if (!cmdbuf->addr)
return NULL;
cmdbuf->size = size;
cmdbuf->dev = dev;
host1x_bo_init(&cmdbuf->bo, &tegra_se_cmdbuf_ops);
kref_init(&cmdbuf->ref);
return cmdbuf;
}
int tegra_se_host1x_submit(struct tegra_se *se, u32 size)
{
struct host1x_job *job;
int ret;
job = host1x_job_alloc(se->channel, 1, 0, true);
if (!job) {
dev_err(se->dev, "failed to allocate host1x job\n");
return -ENOMEM;
}
job->syncpt = host1x_syncpt_get(se->syncpt);
job->syncpt_incrs = 1;
job->client = &se->client;
job->class = se->client.class;
job->serialize = true;
job->engine_fallback_streamid = se->stream_id;
job->engine_streamid_offset = SE_STREAM_ID;
se->cmdbuf->words = size;
host1x_job_add_gather(job, &se->cmdbuf->bo, size, 0);
ret = host1x_job_pin(job, se->dev);
if (ret) {
dev_err(se->dev, "failed to pin host1x job\n");
goto job_put;
}
ret = host1x_job_submit(job);
if (ret) {
dev_err(se->dev, "failed to submit host1x job\n");
goto job_unpin;
}
ret = host1x_syncpt_wait(job->syncpt, job->syncpt_end,
MAX_SCHEDULE_TIMEOUT, NULL);
if (ret) {
dev_err(se->dev, "host1x job timed out\n");
return ret;
}
host1x_job_put(job);
return 0;
job_unpin:
host1x_job_unpin(job);
job_put:
host1x_job_put(job);
return ret;
}
static int tegra_se_client_init(struct host1x_client *client)
{
struct tegra_se *se = container_of(client, struct tegra_se, client);
int ret;
se->channel = host1x_channel_request(&se->client);
if (!se->channel) {
dev_err(se->dev, "host1x channel map failed\n");
return -ENODEV;
}
se->syncpt = host1x_syncpt_request(&se->client, 0);
if (!se->syncpt) {
dev_err(se->dev, "host1x syncpt allocation failed\n");
ret = -EINVAL;
goto channel_put;
}
se->syncpt_id = host1x_syncpt_id(se->syncpt);
se->cmdbuf = tegra_se_host1x_bo_alloc(se, SZ_4K);
if (!se->cmdbuf) {
ret = -ENOMEM;
goto syncpt_put;
}
ret = se->hw->init_alg(se);
if (ret) {
dev_err(se->dev, "failed to register algorithms\n");
goto cmdbuf_put;
}
return 0;
cmdbuf_put:
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
syncpt_put:
host1x_syncpt_put(se->syncpt);
channel_put:
host1x_channel_put(se->channel);
return ret;
}
static int tegra_se_client_deinit(struct host1x_client *client)
{
struct tegra_se *se = container_of(client, struct tegra_se, client);
se->hw->deinit_alg(se);
tegra_se_cmdbuf_put(&se->cmdbuf->bo);
host1x_syncpt_put(se->syncpt);
host1x_channel_put(se->channel);
return 0;
}
static const struct host1x_client_ops tegra_se_client_ops = {
.init = tegra_se_client_init,
.exit = tegra_se_client_deinit,
};
static int tegra_se_host1x_register(struct tegra_se *se)
{
INIT_LIST_HEAD(&se->client.list);
se->client.dev = se->dev;
se->client.ops = &tegra_se_client_ops;
se->client.class = se->hw->host1x_class;
se->client.num_syncpts = 1;
host1x_client_register(&se->client);
return 0;
}
static int tegra_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct tegra_se *se;
int ret;
se = devm_kzalloc(dev, sizeof(*se), GFP_KERNEL);
if (!se)
return -ENOMEM;
se->dev = dev;
se->owner = TEGRA_GPSE_ID;
se->hw = device_get_match_data(&pdev->dev);
se->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(se->base))
return PTR_ERR(se->base);
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
platform_set_drvdata(pdev, se);
se->clk = devm_clk_get_enabled(se->dev, NULL);
if (IS_ERR(se->clk))
return dev_err_probe(dev, PTR_ERR(se->clk),
"failed to enable clocks\n");
if (!tegra_dev_iommu_get_stream_id(dev, &se->stream_id))
return dev_err_probe(dev, -ENODEV,
"failed to get IOMMU stream ID\n");
writel(se->stream_id, se->base + SE_STREAM_ID);
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine)
return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
ret = crypto_engine_start(se->engine);
if (ret) {
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to start crypto engine\n");
}
ret = tegra_se_host1x_register(se);
if (ret) {
crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
return dev_err_probe(dev, ret, "failed to init host1x params\n");
}
return 0;
}
static void tegra_se_remove(struct platform_device *pdev)
{
struct tegra_se *se = platform_get_drvdata(pdev);
crypto_engine_stop(se->engine);
crypto_engine_exit(se->engine);
host1x_client_unregister(&se->client);
}
static const struct tegra_se_regs tegra234_aes1_regs = {
.config = SE_AES1_CFG,
.op = SE_AES1_OPERATION,
.last_blk = SE_AES1_LAST_BLOCK,
.linear_ctr = SE_AES1_LINEAR_CTR,
.aad_len = SE_AES1_AAD_LEN,
.cryp_msg_len = SE_AES1_CRYPTO_MSG_LEN,
.manifest = SE_AES1_KEYMANIFEST,
.key_addr = SE_AES1_KEY_ADDR,
.key_data = SE_AES1_KEY_DATA,
.key_dst = SE_AES1_KEY_DST,
.result = SE_AES1_CMAC_RESULT,
};
static const struct tegra_se_regs tegra234_hash_regs = {
.config = SE_SHA_CFG,
.op = SE_SHA_OPERATION,
.manifest = SE_SHA_KEYMANIFEST,
.key_addr = SE_SHA_KEY_ADDR,
.key_data = SE_SHA_KEY_DATA,
.key_dst = SE_SHA_KEY_DST,
.result = SE_SHA_HASH_RESULT,
};
static const struct tegra_se_hw tegra234_aes_hw = {
.regs = &tegra234_aes1_regs,
.kac_ver = 1,
.host1x_class = 0x3b,
.init_alg = tegra_init_aes,
.deinit_alg = tegra_deinit_aes,
};
static const struct tegra_se_hw tegra234_hash_hw = {
.regs = &tegra234_hash_regs,
.kac_ver = 1,
.host1x_class = 0x3d,
.init_alg = tegra_init_hash,
.deinit_alg = tegra_deinit_hash,
};
static const struct of_device_id tegra_se_of_match[] = {
{
.compatible = "nvidia,tegra234-se-aes",
.data = &tegra234_aes_hw
}, {
.compatible = "nvidia,tegra234-se-hash",
.data = &tegra234_hash_hw,
},
{ },
};
MODULE_DEVICE_TABLE(of, tegra_se_of_match);
static struct platform_driver tegra_se_driver = {
.driver = {
.name = "tegra-se",
.of_match_table = tegra_se_of_match,
},
.probe = tegra_se_probe,
.remove_new = tegra_se_remove,
};
static int tegra_se_host1x_probe(struct host1x_device *dev)
{
return host1x_device_init(dev);
}
static int tegra_se_host1x_remove(struct host1x_device *dev)
{
host1x_device_exit(dev);
return 0;
}
static struct host1x_driver tegra_se_host1x_driver = {
.driver = {
.name = "tegra-se-host1x",
},
.probe = tegra_se_host1x_probe,
.remove = tegra_se_host1x_remove,
.subdevs = tegra_se_of_match,
};
static int __init tegra_se_module_init(void)
{
int ret;
ret = host1x_driver_register(&tegra_se_host1x_driver);
if (ret)
return ret;
return platform_driver_register(&tegra_se_driver);
}
static void __exit tegra_se_module_exit(void)
{
host1x_driver_unregister(&tegra_se_host1x_driver);
platform_driver_unregister(&tegra_se_driver);
}
module_init(tegra_se_module_init);
module_exit(tegra_se_module_exit);
MODULE_DESCRIPTION("NVIDIA Tegra Security Engine Driver");
MODULE_AUTHOR("Akhil R <akhilrajeev@nvidia.com>");
MODULE_LICENSE("GPL");
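
The AES and hash code drives the engine the same way tegra_key_insert() above does: write host1x opcodes into se->cmdbuf->addr, then let tegra_se_host1x_submit() pin the gather, submit the job and wait on the syncpoint. A stripped-down sketch (the function name is hypothetical; the helpers and register fields are the ones from tegra-se.h):

#include "tegra-se.h"

static int demo_issue_dummy_op(struct tegra_se *se)
{
	u32 *cpuvaddr = se->cmdbuf->addr;
	int i = 0;

	/* One register write: OPERATION <- WRSTALL | DUMMY. */
	cpuvaddr[i++] = host1x_opcode_setpayload(1);
	cpuvaddr[i++] = se_host1x_opcode_incr_w(se->hw->regs->op);
	cpuvaddr[i++] = SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY;

	/* Ask host1x to increment the syncpoint when the engine is done. */
	cpuvaddr[i++] = se_host1x_opcode_nonincr(host1x_uclass_incr_syncpt_r(), 1);
	cpuvaddr[i++] = host1x_uclass_incr_syncpt_cond_f(1) |
			host1x_uclass_incr_syncpt_indx_f(se->syncpt_id);

	/* Pins the command buffer, submits the job and blocks on the syncpoint. */
	return tegra_se_host1x_submit(se, i);
}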


@ -0,0 +1,560 @@
/* SPDX-License-Identifier: GPL-2.0-only
* SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
*
* Header file for NVIDIA Security Engine driver.
*/
#ifndef _TEGRA_SE_H
#define _TEGRA_SE_H
#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/host1x.h>
#include <crypto/aead.h>
#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha3.h>
#include <crypto/skcipher.h>
#define SE_OWNERSHIP 0x14
#define SE_OWNERSHIP_UID(x) FIELD_GET(GENMASK(7, 0), x)
#define TEGRA_GPSE_ID 3
#define SE_STREAM_ID 0x90
#define SE_SHA_CFG 0x4004
#define SE_SHA_KEY_ADDR 0x4094
#define SE_SHA_KEY_DATA 0x4098
#define SE_SHA_KEYMANIFEST 0x409c
#define SE_SHA_CRYPTO_CFG 0x40a4
#define SE_SHA_KEY_DST 0x40a8
#define SE_SHA_SRC_KSLT 0x4180
#define SE_SHA_TGT_KSLT 0x4184
#define SE_SHA_MSG_LENGTH 0x401c
#define SE_SHA_OPERATION 0x407c
#define SE_SHA_HASH_RESULT 0x40b0
#define SE_SHA_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_SHA_ENC_MODE_SHA1 SE_SHA_ENC_MODE(0)
#define SE_SHA_ENC_MODE_SHA224 SE_SHA_ENC_MODE(4)
#define SE_SHA_ENC_MODE_SHA256 SE_SHA_ENC_MODE(5)
#define SE_SHA_ENC_MODE_SHA384 SE_SHA_ENC_MODE(6)
#define SE_SHA_ENC_MODE_SHA512 SE_SHA_ENC_MODE(7)
#define SE_SHA_ENC_MODE_SHA_CTX_INTEGRITY SE_SHA_ENC_MODE(8)
#define SE_SHA_ENC_MODE_SHA3_224 SE_SHA_ENC_MODE(9)
#define SE_SHA_ENC_MODE_SHA3_256 SE_SHA_ENC_MODE(10)
#define SE_SHA_ENC_MODE_SHA3_384 SE_SHA_ENC_MODE(11)
#define SE_SHA_ENC_MODE_SHA3_512 SE_SHA_ENC_MODE(12)
#define SE_SHA_ENC_MODE_SHAKE128 SE_SHA_ENC_MODE(13)
#define SE_SHA_ENC_MODE_SHAKE256 SE_SHA_ENC_MODE(14)
#define SE_SHA_ENC_MODE_HMAC_SHA256_1KEY SE_SHA_ENC_MODE(0)
#define SE_SHA_ENC_MODE_HMAC_SHA256_2KEY SE_SHA_ENC_MODE(1)
#define SE_SHA_ENC_MODE_SM3_256 SE_SHA_ENC_MODE(0)
#define SE_SHA_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
#define SE_SHA_ENC_ALG_NOP SE_SHA_CFG_ENC_ALG(0)
#define SE_SHA_ENC_ALG_SHA_ENC SE_SHA_CFG_ENC_ALG(1)
#define SE_SHA_ENC_ALG_RNG SE_SHA_CFG_ENC_ALG(2)
#define SE_SHA_ENC_ALG_SHA SE_SHA_CFG_ENC_ALG(3)
#define SE_SHA_ENC_ALG_SM3 SE_SHA_CFG_ENC_ALG(4)
#define SE_SHA_ENC_ALG_HMAC SE_SHA_CFG_ENC_ALG(7)
#define SE_SHA_ENC_ALG_KDF SE_SHA_CFG_ENC_ALG(8)
#define SE_SHA_ENC_ALG_KEY_INVLD SE_SHA_CFG_ENC_ALG(10)
#define SE_SHA_ENC_ALG_KEY_INQUIRE SE_SHA_CFG_ENC_ALG(12)
#define SE_SHA_ENC_ALG_INS SE_SHA_CFG_ENC_ALG(13)
#define SE_SHA_OP_LASTBUF FIELD_PREP(BIT(16), 1)
#define SE_SHA_OP_WRSTALL FIELD_PREP(BIT(15), 1)
#define SE_SHA_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
#define SE_SHA_OP_START SE_SHA_OP_OP(1)
#define SE_SHA_OP_RESTART_OUT SE_SHA_OP_OP(2)
#define SE_SHA_OP_RESTART_IN SE_SHA_OP_OP(4)
#define SE_SHA_OP_RESTART_INOUT SE_SHA_OP_OP(5)
#define SE_SHA_OP_DUMMY SE_SHA_OP_OP(6)
#define SE_SHA_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_SHA_DEC_ALG_NOP SE_SHA_CFG_DEC_ALG(0)
#define SE_SHA_DEC_ALG_AES_DEC SE_SHA_CFG_DEC_ALG(1)
#define SE_SHA_DEC_ALG_HMAC SE_SHA_CFG_DEC_ALG(7)
#define SE_SHA_DEC_ALG_HMAC_VERIFY SE_SHA_CFG_DEC_ALG(9)
#define SE_SHA_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
#define SE_SHA_DST_MEMORY SE_SHA_CFG_DST(0)
#define SE_SHA_DST_HASH_REG SE_SHA_CFG_DST(1)
#define SE_SHA_DST_KEYTABLE SE_SHA_CFG_DST(2)
#define SE_SHA_DST_SRK SE_SHA_CFG_DST(3)
#define SE_SHA_TASK_HASH_INIT BIT(0)
/* AES Configuration */
#define SE_AES0_CFG 0x1004
#define SE_AES0_CRYPTO_CONFIG 0x1008
#define SE_AES0_KEY_DST 0x1030
#define SE_AES0_OPERATION 0x1038
#define SE_AES0_LINEAR_CTR 0x101c
#define SE_AES0_LAST_BLOCK 0x102c
#define SE_AES0_KEY_ADDR 0x10bc
#define SE_AES0_KEY_DATA 0x10c0
#define SE_AES0_CMAC_RESULT 0x10c4
#define SE_AES0_SRC_KSLT 0x1100
#define SE_AES0_TGT_KSLT 0x1104
#define SE_AES0_KEYMANIFEST 0x1114
#define SE_AES0_AAD_LEN 0x112c
#define SE_AES0_CRYPTO_MSG_LEN 0x1134
#define SE_AES1_CFG 0x2004
#define SE_AES1_CRYPTO_CONFIG 0x2008
#define SE_AES1_KEY_DST 0x2030
#define SE_AES1_OPERATION 0x2038
#define SE_AES1_LINEAR_CTR 0x201c
#define SE_AES1_LAST_BLOCK 0x202c
#define SE_AES1_KEY_ADDR 0x20bc
#define SE_AES1_KEY_DATA 0x20c0
#define SE_AES1_CMAC_RESULT 0x20c4
#define SE_AES1_SRC_KSLT 0x2100
#define SE_AES1_TGT_KSLT 0x2104
#define SE_AES1_KEYMANIFEST 0x2114
#define SE_AES1_AAD_LEN 0x212c
#define SE_AES1_CRYPTO_MSG_LEN 0x2134
#define SE_AES_CFG_ENC_MODE(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_AES_ENC_MODE_GMAC SE_AES_CFG_ENC_MODE(3)
#define SE_AES_ENC_MODE_GCM SE_AES_CFG_ENC_MODE(4)
#define SE_AES_ENC_MODE_GCM_FINAL SE_AES_CFG_ENC_MODE(5)
#define SE_AES_ENC_MODE_CMAC SE_AES_CFG_ENC_MODE(7)
#define SE_AES_ENC_MODE_CBC_MAC SE_AES_CFG_ENC_MODE(12)
#define SE_AES_CFG_DEC_MODE(x) FIELD_PREP(GENMASK(23, 16), x)
#define SE_AES_DEC_MODE_GMAC SE_AES_CFG_DEC_MODE(3)
#define SE_AES_DEC_MODE_GCM SE_AES_CFG_DEC_MODE(4)
#define SE_AES_DEC_MODE_GCM_FINAL SE_AES_CFG_DEC_MODE(5)
#define SE_AES_DEC_MODE_CBC_MAC SE_AES_CFG_DEC_MODE(12)
#define SE_AES_CFG_ENC_ALG(x) FIELD_PREP(GENMASK(15, 12), x)
#define SE_AES_ENC_ALG_NOP SE_AES_CFG_ENC_ALG(0)
#define SE_AES_ENC_ALG_AES_ENC SE_AES_CFG_ENC_ALG(1)
#define SE_AES_ENC_ALG_RNG SE_AES_CFG_ENC_ALG(2)
#define SE_AES_ENC_ALG_SHA SE_AES_CFG_ENC_ALG(3)
#define SE_AES_ENC_ALG_HMAC SE_AES_CFG_ENC_ALG(7)
#define SE_AES_ENC_ALG_KDF SE_AES_CFG_ENC_ALG(8)
#define SE_AES_ENC_ALG_INS SE_AES_CFG_ENC_ALG(13)
#define SE_AES_CFG_DEC_ALG(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_AES_DEC_ALG_NOP SE_AES_CFG_DEC_ALG(0)
#define SE_AES_DEC_ALG_AES_DEC SE_AES_CFG_DEC_ALG(1)
#define SE_AES_CFG_DST(x) FIELD_PREP(GENMASK(4, 2), x)
#define SE_AES_DST_MEMORY SE_AES_CFG_DST(0)
#define SE_AES_DST_HASH_REG SE_AES_CFG_DST(1)
#define SE_AES_DST_KEYTABLE SE_AES_CFG_DST(2)
#define SE_AES_DST_SRK SE_AES_CFG_DST(3)
/* AES Crypto Configuration */
#define SE_AES_KEY2_INDEX(x) FIELD_PREP(GENMASK(31, 28), x)
#define SE_AES_KEY_INDEX(x) FIELD_PREP(GENMASK(27, 24), x)
#define SE_AES_CRYPTO_CFG_SCC_DIS FIELD_PREP(BIT(20), 1)
#define SE_AES_CRYPTO_CFG_CTR_CNTN(x) FIELD_PREP(GENMASK(18, 11), x)
#define SE_AES_CRYPTO_CFG_IV_MODE(x) FIELD_PREP(BIT(10), x)
#define SE_AES_IV_MODE_SWIV SE_AES_CRYPTO_CFG_IV_MODE(0)
#define SE_AES_IV_MODE_HWIV SE_AES_CRYPTO_CFG_IV_MODE(1)
#define SE_AES_CRYPTO_CFG_CORE_SEL(x) FIELD_PREP(BIT(9), x)
#define SE_AES_CORE_SEL_DECRYPT SE_AES_CRYPTO_CFG_CORE_SEL(0)
#define SE_AES_CORE_SEL_ENCRYPT SE_AES_CRYPTO_CFG_CORE_SEL(1)
#define SE_AES_CRYPTO_CFG_IV_SEL(x) FIELD_PREP(GENMASK(8, 7), x)
#define SE_AES_IV_SEL_UPDATED SE_AES_CRYPTO_CFG_IV_SEL(1)
#define SE_AES_IV_SEL_REG SE_AES_CRYPTO_CFG_IV_SEL(2)
#define SE_AES_IV_SEL_RANDOM SE_AES_CRYPTO_CFG_IV_SEL(3)
#define SE_AES_CRYPTO_CFG_VCTRAM_SEL(x) FIELD_PREP(GENMASK(6, 5), x)
#define SE_AES_VCTRAM_SEL_MEMORY SE_AES_CRYPTO_CFG_VCTRAM_SEL(0)
#define SE_AES_VCTRAM_SEL_TWEAK SE_AES_CRYPTO_CFG_VCTRAM_SEL(1)
#define SE_AES_VCTRAM_SEL_AESOUT SE_AES_CRYPTO_CFG_VCTRAM_SEL(2)
#define SE_AES_VCTRAM_SEL_PREV_MEM SE_AES_CRYPTO_CFG_VCTRAM_SEL(3)
#define SE_AES_CRYPTO_CFG_INPUT_SEL(x) FIELD_PREP(GENMASK(4, 3), x)
#define SE_AES_INPUT_SEL_MEMORY SE_AES_CRYPTO_CFG_INPUT_SEL(0)
#define SE_AES_INPUT_SEL_RANDOM SE_AES_CRYPTO_CFG_INPUT_SEL(1)
#define SE_AES_INPUT_SEL_AESOUT SE_AES_CRYPTO_CFG_INPUT_SEL(2)
#define SE_AES_INPUT_SEL_LINEAR_CTR SE_AES_CRYPTO_CFG_INPUT_SEL(3)
#define SE_AES_INPUT_SEL_REG SE_AES_CRYPTO_CFG_INPUT_SEL(1)
#define SE_AES_CRYPTO_CFG_XOR_POS(x) FIELD_PREP(GENMASK(2, 1), x)
#define SE_AES_XOR_POS_BYPASS SE_AES_CRYPTO_CFG_XOR_POS(0)
#define SE_AES_XOR_POS_BOTH SE_AES_CRYPTO_CFG_XOR_POS(1)
#define SE_AES_XOR_POS_TOP SE_AES_CRYPTO_CFG_XOR_POS(2)
#define SE_AES_XOR_POS_BOTTOM SE_AES_CRYPTO_CFG_XOR_POS(3)
#define SE_AES_CRYPTO_CFG_HASH_EN(x) FIELD_PREP(BIT(0), x)
#define SE_AES_HASH_DISABLE SE_AES_CRYPTO_CFG_HASH_EN(0)
#define SE_AES_HASH_ENABLE SE_AES_CRYPTO_CFG_HASH_EN(1)
#define SE_LAST_BLOCK_VAL(x) FIELD_PREP(GENMASK(19, 0), x)
#define SE_LAST_BLOCK_RES_BITS(x) FIELD_PREP(GENMASK(26, 20), x)
#define SE_AES_OP_LASTBUF FIELD_PREP(BIT(16), 1)
#define SE_AES_OP_WRSTALL FIELD_PREP(BIT(15), 1)
#define SE_AES_OP_FINAL FIELD_PREP(BIT(5), 1)
#define SE_AES_OP_INIT FIELD_PREP(BIT(4), 1)
#define SE_AES_OP_OP(x) FIELD_PREP(GENMASK(2, 0), x)
#define SE_AES_OP_START SE_AES_OP_OP(1)
#define SE_AES_OP_RESTART_OUT SE_AES_OP_OP(2)
#define SE_AES_OP_RESTART_IN SE_AES_OP_OP(4)
#define SE_AES_OP_RESTART_INOUT SE_AES_OP_OP(5)
#define SE_AES_OP_DUMMY SE_AES_OP_OP(6)
#define SE_KAC_SIZE(x) FIELD_PREP(GENMASK(15, 14), x)
#define SE_KAC_SIZE_128 SE_KAC_SIZE(0)
#define SE_KAC_SIZE_192 SE_KAC_SIZE(1)
#define SE_KAC_SIZE_256 SE_KAC_SIZE(2)
#define SE_KAC_EXPORTABLE FIELD_PREP(BIT(12), 1)
#define SE_KAC_PURPOSE(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_KAC_ENC SE_KAC_PURPOSE(0)
#define SE_KAC_CMAC SE_KAC_PURPOSE(1)
#define SE_KAC_HMAC SE_KAC_PURPOSE(2)
#define SE_KAC_GCM_KW SE_KAC_PURPOSE(3)
#define SE_KAC_HMAC_KDK SE_KAC_PURPOSE(6)
#define SE_KAC_HMAC_KDD SE_KAC_PURPOSE(7)
#define SE_KAC_HMAC_KDD_KUW SE_KAC_PURPOSE(8)
#define SE_KAC_XTS SE_KAC_PURPOSE(9)
#define SE_KAC_GCM SE_KAC_PURPOSE(10)
#define SE_KAC_USER_NS FIELD_PREP(GENMASK(6, 4), 3)
#define SE_AES_KEY_DST_INDEX(x) FIELD_PREP(GENMASK(11, 8), x)
#define SE_ADDR_HI_MSB(x) FIELD_PREP(GENMASK(31, 24), x)
#define SE_ADDR_HI_SZ(x) FIELD_PREP(GENMASK(23, 0), x)
#define SE_CFG_AES_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_DST_MEMORY)
#define SE_CFG_AES_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GMAC_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GMAC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GMAC_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GMAC | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GCM | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GCM | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_FINAL_ENCRYPT (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_DEC_ALG_NOP | \
SE_AES_ENC_MODE_GCM_FINAL | \
SE_AES_DST_MEMORY)
#define SE_CFG_GCM_FINAL_DECRYPT (SE_AES_ENC_ALG_NOP | \
SE_AES_DEC_ALG_AES_DEC | \
SE_AES_DEC_MODE_GCM_FINAL | \
SE_AES_DST_MEMORY)
#define SE_CFG_CMAC (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_ENC_MODE_CMAC | \
SE_AES_DST_HASH_REG)
#define SE_CFG_CBC_MAC (SE_AES_ENC_ALG_AES_ENC | \
SE_AES_ENC_MODE_CBC_MAC)
#define SE_CFG_INS (SE_AES_ENC_ALG_INS | \
SE_AES_DEC_ALG_NOP)
#define SE_CRYPTO_CFG_ECB_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_XOR_POS_BYPASS | \
SE_AES_CORE_SEL_ENCRYPT)
#define SE_CRYPTO_CFG_ECB_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_XOR_POS_BYPASS | \
SE_AES_CORE_SEL_DECRYPT)
#define SE_CRYPTO_CFG_CBC_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_AESOUT | \
SE_AES_XOR_POS_TOP | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CBC_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_PREV_MEM | \
SE_AES_XOR_POS_BOTTOM | \
SE_AES_CORE_SEL_DECRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CTR (SE_AES_INPUT_SEL_LINEAR_CTR | \
SE_AES_VCTRAM_SEL_MEMORY | \
SE_AES_XOR_POS_BOTTOM | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_CRYPTO_CFG_CTR_CNTN(1) | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_XTS_ENCRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_TWEAK | \
SE_AES_XOR_POS_BOTH | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_XTS_DECRYPT (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_TWEAK | \
SE_AES_XOR_POS_BOTH | \
SE_AES_CORE_SEL_DECRYPT | \
SE_AES_IV_SEL_REG)
#define SE_CRYPTO_CFG_CBC_MAC (SE_AES_INPUT_SEL_MEMORY | \
SE_AES_VCTRAM_SEL_AESOUT | \
SE_AES_XOR_POS_TOP | \
SE_AES_CORE_SEL_ENCRYPT | \
SE_AES_HASH_ENABLE | \
SE_AES_IV_SEL_REG)
#define HASH_RESULT_REG_COUNT 50
#define CMAC_RESULT_REG_COUNT 4
#define SE_CRYPTO_CTR_REG_COUNT 4
#define SE_MAX_KEYSLOT 15
#define SE_MAX_MEM_ALLOC SZ_4M
#define SE_AES_BUFLEN 0x8000
#define SE_SHA_BUFLEN 0x2000
#define SHA_FIRST BIT(0)
#define SHA_UPDATE BIT(1)
#define SHA_FINAL BIT(2)
/* Security Engine operation modes */
enum se_aes_alg {
SE_ALG_CBC, /* Cipher Block Chaining (CBC) mode */
SE_ALG_ECB, /* Electronic Codebook (ECB) mode */
SE_ALG_CTR, /* Counter (CTR) mode */
SE_ALG_XTS, /* XTS mode */
SE_ALG_GMAC, /* GMAC mode */
SE_ALG_GCM, /* GCM mode */
SE_ALG_GCM_FINAL, /* GCM FINAL mode */
SE_ALG_CMAC, /* Cipher-based MAC (CMAC) mode */
SE_ALG_CBC_MAC, /* CBC MAC mode */
};
enum se_hash_alg {
SE_ALG_RNG_DRBG, /* Deterministic Random Bit Generator */
SE_ALG_SHA1, /* Secure Hash Algorithm-1 (SHA1) mode */
SE_ALG_SHA224, /* Secure Hash Algorithm-224 (SHA224) mode */
SE_ALG_SHA256, /* Secure Hash Algorithm-256 (SHA256) mode */
SE_ALG_SHA384, /* Secure Hash Algorithm-384 (SHA384) mode */
SE_ALG_SHA512, /* Secure Hash Algorithm-512 (SHA512) mode */
SE_ALG_SHA3_224, /* Secure Hash Algorithm3-224 (SHA3-224) mode */
SE_ALG_SHA3_256, /* Secure Hash Algorithm3-256 (SHA3-256) mode */
SE_ALG_SHA3_384, /* Secure Hash Algorithm3-384 (SHA3-384) mode */
SE_ALG_SHA3_512, /* Secure Hash Algorithm3-512 (SHA3-512) mode */
SE_ALG_SHAKE128, /* Secure Hash Algorithm3 (SHAKE128) mode */
SE_ALG_SHAKE256, /* Secure Hash Algorithm3 (SHAKE256) mode */
SE_ALG_HMAC_SHA224, /* Hash based MAC (HMAC) - 224 */
SE_ALG_HMAC_SHA256, /* Hash based MAC (HMAC) - 256 */
SE_ALG_HMAC_SHA384, /* Hash based MAC (HMAC) - 384 */
SE_ALG_HMAC_SHA512, /* Hash based MAC (HMAC) - 512 */
};
struct tegra_se_alg {
struct tegra_se *se_dev;
const char *alg_base;
union {
struct skcipher_engine_alg skcipher;
struct aead_engine_alg aead;
struct ahash_engine_alg ahash;
} alg;
};
struct tegra_se_regs {
u32 op;
u32 config;
u32 last_blk;
u32 linear_ctr;
u32 out_addr;
u32 aad_len;
u32 cryp_msg_len;
u32 manifest;
u32 key_addr;
u32 key_data;
u32 key_dst;
u32 result;
};
struct tegra_se_hw {
const struct tegra_se_regs *regs;
int (*init_alg)(struct tegra_se *se);
void (*deinit_alg)(struct tegra_se *se);
bool support_sm_alg;
u32 host1x_class;
u32 kac_ver;
};
struct tegra_se {
int (*manifest)(u32 user, u32 alg, u32 keylen);
const struct tegra_se_hw *hw;
struct host1x_client client;
struct host1x_channel *channel;
struct tegra_se_cmdbuf *cmdbuf;
struct crypto_engine *engine;
struct host1x_syncpt *syncpt;
struct device *dev;
struct clk *clk;
unsigned int opcode_addr;
unsigned int stream_id;
unsigned int syncpt_id;
void __iomem *base;
u32 owner;
};
struct tegra_se_cmdbuf {
dma_addr_t iova;
u32 *addr;
struct device *dev;
struct kref ref;
struct host1x_bo bo;
ssize_t size;
u32 words;
};
struct tegra_se_datbuf {
u8 *buf;
dma_addr_t addr;
ssize_t size;
};
static inline int se_algname_to_algid(const char *name)
{
if (!strcmp(name, "cbc(aes)"))
return SE_ALG_CBC;
else if (!strcmp(name, "ecb(aes)"))
return SE_ALG_ECB;
else if (!strcmp(name, "ctr(aes)"))
return SE_ALG_CTR;
else if (!strcmp(name, "xts(aes)"))
return SE_ALG_XTS;
else if (!strcmp(name, "cmac(aes)"))
return SE_ALG_CMAC;
else if (!strcmp(name, "gcm(aes)"))
return SE_ALG_GCM;
else if (!strcmp(name, "ccm(aes)"))
return SE_ALG_CBC_MAC;
else if (!strcmp(name, "sha1"))
return SE_ALG_SHA1;
else if (!strcmp(name, "sha224"))
return SE_ALG_SHA224;
else if (!strcmp(name, "sha256"))
return SE_ALG_SHA256;
else if (!strcmp(name, "sha384"))
return SE_ALG_SHA384;
else if (!strcmp(name, "sha512"))
return SE_ALG_SHA512;
else if (!strcmp(name, "sha3-224"))
return SE_ALG_SHA3_224;
else if (!strcmp(name, "sha3-256"))
return SE_ALG_SHA3_256;
else if (!strcmp(name, "sha3-384"))
return SE_ALG_SHA3_384;
else if (!strcmp(name, "sha3-512"))
return SE_ALG_SHA3_512;
else if (!strcmp(name, "hmac(sha224)"))
return SE_ALG_HMAC_SHA224;
else if (!strcmp(name, "hmac(sha256)"))
return SE_ALG_HMAC_SHA256;
else if (!strcmp(name, "hmac(sha384)"))
return SE_ALG_HMAC_SHA384;
else if (!strcmp(name, "hmac(sha512)"))
return SE_ALG_HMAC_SHA512;
else
return -EINVAL;
}
/* Functions */
int tegra_init_aes(struct tegra_se *se);
int tegra_init_hash(struct tegra_se *se);
void tegra_deinit_aes(struct tegra_se *se);
void tegra_deinit_hash(struct tegra_se *se);
int tegra_key_submit(struct tegra_se *se, const u8 *key,
u32 keylen, u32 alg, u32 *keyid);
void tegra_key_invalidate(struct tegra_se *se, u32 keyid, u32 alg);
int tegra_se_host1x_submit(struct tegra_se *se, u32 size);
/* HOST1x OPCODES */
static inline u32 host1x_opcode_setpayload(unsigned int payload)
{
return (9 << 28) | payload;
}
static inline u32 host1x_opcode_incr_w(unsigned int offset)
{
/* 22-bit offset supported */
return (10 << 28) | offset;
}
static inline u32 host1x_opcode_nonincr_w(unsigned int offset)
{
/* 22-bit offset supported */
return (11 << 28) | offset;
}
static inline u32 host1x_opcode_incr(unsigned int offset, unsigned int count)
{
return (1 << 28) | (offset << 16) | count;
}
static inline u32 host1x_opcode_nonincr(unsigned int offset, unsigned int count)
{
return (2 << 28) | (offset << 16) | count;
}
static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
{
return (v & 0xff) << 10;
}
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
return (v & 0x3ff) << 0;
}
static inline u32 host1x_uclass_wait_syncpt_r(void)
{
return 0x8;
}
static inline u32 host1x_uclass_incr_syncpt_r(void)
{
return 0x0;
}
#define se_host1x_opcode_incr_w(x) host1x_opcode_incr_w((x) / 4)
#define se_host1x_opcode_nonincr_w(x) host1x_opcode_nonincr_w((x) / 4)
#define se_host1x_opcode_incr(x, y) host1x_opcode_incr((x) / 4, y)
#define se_host1x_opcode_nonincr(x, y) host1x_opcode_nonincr((x) / 4, y)
#endif /*_TEGRA_SE_H*/
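
As a worked example of the opcode helpers, the first register write that tegra_key_prep_ins_cmd() emits on the AES1 engine encodes as follows (the numeric values simply follow from the definitions above):

/*
 * host1x_opcode_setpayload(1)                = (9 << 28) | 1             = 0x90000001
 * se_host1x_opcode_incr_w(SE_AES1_OPERATION) = (10 << 28) | (0x2038 / 4) = 0xa000080e
 * SE_AES_OP_WRSTALL | SE_AES_OP_DUMMY        = BIT(15) | 6               = 0x00008006
 *
 * i.e. "set the payload size to one word, then write 0x8006 to the
 * OPERATION register at byte offset 0x2038".
 */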


@ -215,6 +215,30 @@ static const struct host1x_info host1x07_info = {
* and firmware stream ID in the MMIO path table.
*/
static const struct host1x_sid_entry tegra234_sid_table[] = {
{
/* SE2 MMIO */
.base = 0x1658,
.offset = 0x90,
.limit = 0x90
},
{
/* SE4 MMIO */
.base = 0x1660,
.offset = 0x90,
.limit = 0x90
},
{
/* SE2 channel */
.base = 0x1738,
.offset = 0x90,
.limit = 0x90
},
{
/* SE4 channel */
.base = 0x1740,
.offset = 0x90,
.limit = 0x90
},
{
/* VIC channel */
.base = 0x17b8,


@ -8,9 +8,10 @@
#ifndef _CRYPTO_AEAD_H
#define _CRYPTO_AEAD_H
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
/**
* DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
@ -73,6 +74,7 @@
*/
struct crypto_aead;
struct scatterlist;
/**
* struct aead_request - AEAD request


@ -7,9 +7,11 @@
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H
#include <linux/align.h>
#include <linux/crypto.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/types.h>
/*
* Maximum values for blocksize and alignmask, used to allocate
@ -24,6 +26,7 @@
struct crypto_aead;
struct crypto_instance;
struct module;
struct notifier_block;
struct rtattr;
struct seq_file;
struct sk_buff;


@ -5,7 +5,6 @@
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
enum blake2b_lengths {


@ -7,8 +7,8 @@
#define _CRYPTO_BLAKE2S_H
#include <linux/bug.h>
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
enum blake2s_lengths {


@ -13,7 +13,8 @@
#ifndef _CRYPTO_CRYPT_H
#define _CRYPTO_CRYPT_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>


@ -7,69 +7,15 @@
#ifndef _CRYPTO_ENGINE_H
#define _CRYPTO_ENGINE_H
#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/akcipher.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include <crypto/skcipher.h>
#include <linux/types.h>
#define ENGINE_NAME_LEN 30
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
* @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request
* crypto-engine, in head position to keep order
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
* @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
* @do_batch_requests: execute a batch of requests. Depends on multiple
* requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
* @cur_req: the current request which is on processing
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
bool idling;
bool busy;
bool running;
bool retry_support;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
bool rt;
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
struct kthread_worker *kworker;
struct kthread_work pump_requests;
void *priv_data;
struct crypto_async_request *cur_req;
};
struct crypto_engine;
struct device;
/*
* struct crypto_engine_op - crypto hardware engine operations
@ -90,12 +36,39 @@ struct crypto_engine_ctx {
struct crypto_engine_op op;
};
struct aead_engine_alg {
struct aead_alg base;
struct crypto_engine_op op;
};
struct ahash_engine_alg {
struct ahash_alg base;
struct crypto_engine_op op;
};
struct akcipher_engine_alg {
struct akcipher_alg base;
struct crypto_engine_op op;
};
struct kpp_engine_alg {
struct kpp_alg base;
struct crypto_engine_op op;
};
struct skcipher_engine_alg {
struct skcipher_alg base;
struct crypto_engine_op op;
};
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
struct aead_request *req);
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
struct akcipher_request *req);
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
struct kpp_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
void crypto_finalize_aead_request(struct crypto_engine *engine,
@ -104,6 +77,8 @@ void crypto_finalize_akcipher_request(struct crypto_engine *engine,
struct akcipher_request *req, int err);
void crypto_finalize_hash_request(struct crypto_engine *engine,
struct ahash_request *req, int err);
void crypto_finalize_kpp_request(struct crypto_engine *engine,
struct kpp_request *req, int err);
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
struct skcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
@ -115,4 +90,28 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool rt, int qlen);
int crypto_engine_exit(struct crypto_engine *engine);
int crypto_engine_register_aead(struct aead_engine_alg *alg);
void crypto_engine_unregister_aead(struct aead_engine_alg *alg);
int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count);
void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count);
int crypto_engine_register_ahash(struct ahash_engine_alg *alg);
void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg);
int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count);
void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
int count);
int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg);
void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg);
int crypto_engine_register_kpp(struct kpp_engine_alg *alg);
void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg);
int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg);
void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg);
int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
int count);
void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
int count);
#endif /* _CRYPTO_ENGINE_H */


@ -232,6 +232,7 @@ struct crypto_ahash {
int (*setkey)(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
unsigned int statesize;
unsigned int reqsize;
struct crypto_tfm base;
};
@ -370,7 +371,7 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
*/
static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
{
return crypto_hash_alg_common(tfm)->statesize;
return tfm->statesize;
}
static inline u32 crypto_ahash_get_flags(struct crypto_ahash *tfm)


@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Crypto engine API
*
* Copyright (c) 2016 Baolin Wang <baolin.wang@linaro.org>
* Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
*/
#ifndef _CRYPTO_INTERNAL_ENGINE_H
#define _CRYPTO_INTERNAL_ENGINE_H
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <linux/kthread.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#define ENGINE_NAME_LEN 30
struct device;
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
* @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
* of a failed backlog request
* crypto-engine, in head position to keep order
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
* @rt: whether this queue is set to run as a realtime task
* @prepare_crypt_hardware: a request will soon arrive from the queue
* so the subsystem requests the driver to prepare the hardware
* by issuing this call
* @unprepare_crypt_hardware: there are currently no more requests on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
* @do_batch_requests: execute a batch of requests. Depends on multiple
* requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
* @cur_req: the current request which is on processing
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
bool idling;
bool busy;
bool running;
bool retry_support;
struct list_head list;
spinlock_t queue_lock;
struct crypto_queue queue;
struct device *dev;
bool rt;
int (*prepare_crypt_hardware)(struct crypto_engine *engine);
int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
int (*do_batch_requests)(struct crypto_engine *engine);
struct kthread_worker *kworker;
struct kthread_work pump_requests;
void *priv_data;
struct crypto_async_request *cur_req;
};
#endif


@ -140,6 +140,12 @@ static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg)
halg);
}
static inline void crypto_ahash_set_statesize(struct crypto_ahash *tfm,
unsigned int size)
{
tfm->statesize = size;
}
static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
unsigned int reqsize)
{
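
Both setters are meant to be called from a driver's init_tfm path; crypto_ahash_set_statesize() is new in this series and lets a transform advertise a state size that differs from the one declared in its ahash_alg. A hedged sketch (sizes and demo_* names are hypothetical):

#include <crypto/internal/hash.h>

struct demo_ahash_reqctx {
	u8 hw_context[64];	/* hypothetical per-request state */
};

#define DEMO_EXTRA_STATE 32	/* hypothetical extra exported state */

/* Wired up through .init_tfm in the driver's ahash_alg. */
static int demo_ahash_init_tfm(struct crypto_ahash *tfm)
{
	crypto_ahash_set_reqsize(tfm, sizeof(struct demo_ahash_reqctx));

	/* Grow the exported state beyond what the ahash_alg declares. */
	crypto_ahash_set_statesize(tfm,
				   crypto_ahash_statesize(tfm) + DEMO_EXTRA_STATE);

	return 0;
}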


@ -9,8 +9,8 @@
#ifndef _CRYPTO_PCRYPT_H
#define _CRYPTO_PCRYPT_H
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/padata.h>
struct pcrypt_request {


@ -12,8 +12,9 @@
#define _CRYPTO_SCATTERWALK_H
#include <crypto/algapi.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,


@ -8,9 +8,13 @@
#ifndef _CRYPTO_SKCIPHER_H
#define _CRYPTO_SKCIPHER_H
#include <linux/container_of.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
struct scatterlist;
/**
* struct skcipher_request - Symmetric key cipher request


@ -0,0 +1 @@
CONFIG_CRYPTO_DEV_TEGRA=m