crypto: rockchip: modify crypto hash cache support for crypto v1&v2
Change-Id: I6e0604bf02908269ab021714378b66ed712fdc06
Signed-off-by: Lin Jinhan <troy.lin@rock-chips.com>
parent 66d0591041
commit c48f1acf4a
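Note: the hunks below add a shared crypto_hash_cache helper and move both the crypto v1 and v2 drivers onto it. As a rough usage sketch from a caller's point of view (not code from this commit; the callback body, the literal 8/64 alignment values and the 4 KiB chunking are illustrative assumptions):

#include <common.h>
#include <crypto.h>
#include <rockchip/crypto_hash_cache.h>

/* Illustrative callback: a real driver would program the hash engine here. */
static int demo_direct_calc(void *hw_data, const u8 *data, u32 data_len,
			    u8 *started_flag, u8 is_last)
{
	*started_flag = 1;	/* real drivers use this flag to choose start vs. restart */
	return 0;
}

/* Hash 'total' bytes of 'buf' in arbitrary 4 KiB chunks. */
static int demo_hash(const u8 *buf, u32 total)
{
	struct crypto_hash_cache *cache;
	u32 off, step;
	int ret = 0;

	/* 8/64 mirror DATA_ADDR_ALIGIN_SIZE/DATA_LEN_ALIGIN_SIZE below */
	cache = crypto_hash_cache_alloc(demo_direct_calc, NULL, total, 8, 64);
	if (!cache)
		return -EFAULT;

	for (off = 0; off < total && !ret; off += step) {
		step = (total - off > 4096) ? 4096 : (total - off);
		ret = crypto_hash_update_with_cache(cache, buf + off, step);
	}

	crypto_hash_cache_free(cache);
	return ret;
}

The helper feeds the hardware directly when a chunk is suitably aligned and otherwise gathers data into an 8 KiB bounce cache (HASH_CACHE_SIZE), which is what the driver changes below rely on.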
@@ -4,8 +4,8 @@
# Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
#

obj-$(CONFIG_$(SPL_TPL_)ROCKCHIP_CRYPTO_V1) += crypto_v1.o
obj-$(CONFIG_$(SPL_TPL_)ROCKCHIP_CRYPTO_V2) += crypto_v2.o
obj-$(CONFIG_$(SPL_TPL_)ROCKCHIP_CRYPTO_V1) += crypto_v1.o crypto_hash_cache.o
obj-$(CONFIG_$(SPL_TPL_)ROCKCHIP_CRYPTO_V2) += crypto_v2.o crypto_hash_cache.o

ifeq ($(CONFIG_$(SPL_TPL_)ROCKCHIP_CRYPTO_V2)$(CONFIG_$(SPL_TPL_)ROCKCHIP_RSA), yy)
obj-y += crypto_v2_pka.o crypto_v2_util.o

@@ -0,0 +1,193 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <clk.h>
#include <crypto.h>

#include <rockchip/crypto_hash_cache.h>

static int hash_cache_calc(struct crypto_hash_cache *hash_cache, const u8 *data,
			   u32 data_len, u8 is_last)
{
	crypto_hash_calc direct_calc = hash_cache->direct_calc;
	int ret = 0;

	if (!hash_cache->cache) {
		hash_cache->cache = (u8 *)memalign(CONFIG_SYS_CACHELINE_SIZE,
						   HASH_CACHE_SIZE);
		if (!hash_cache->cache)
			goto error;

		hash_cache->cache_size = 0;
	}

	while (1) {
		u32 tmp_len = 0;

		if (hash_cache->cache_size + data_len <= HASH_CACHE_SIZE) {
			/* copy to cache */
			debug("%s, %d: copy to cache %u\n",
			      __func__, __LINE__, data_len);
			memcpy(hash_cache->cache + hash_cache->cache_size, data,
			       data_len);
			hash_cache->cache_size += data_len;

			/* if last one calc cache immediately */
			if (is_last) {
				debug("%s, %d: last one calc cache %u\n",
				      __func__, __LINE__,
				      hash_cache->cache_size);

				ret = direct_calc(hash_cache->user_data,
						  hash_cache->cache,
						  hash_cache->cache_size,
						  &hash_cache->is_started,
						  is_last);
				if (ret)
					goto error;
			}
			break;
		}

		/* 1. make cache be full */
		/* 2. calc cache */
		tmp_len = HASH_CACHE_SIZE - hash_cache->cache_size;
		debug("%s, %d: make cache be full %u\n",
		      __func__, __LINE__, tmp_len);
		memcpy(hash_cache->cache + hash_cache->cache_size,
		       data, tmp_len);

		ret = direct_calc(hash_cache->user_data, hash_cache->cache,
				  HASH_CACHE_SIZE, &hash_cache->is_started, 0);
		if (ret)
			goto error;

		data += tmp_len;
		data_len -= tmp_len;
		hash_cache->cache_size = 0;
	}

	return ret;
error:
	return -EINVAL;
}

void crypto_flush_cacheline(ulong addr, ulong size)
{
	ulong alignment = CONFIG_SYS_CACHELINE_SIZE;
	ulong aligned_input, aligned_len;

	/* Must flush dcache before crypto DMA fetch data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

struct crypto_hash_cache *crypto_hash_cache_alloc(crypto_hash_calc direct_calc,
						  void *user_data, u32 total,
						  u32 data_align, u32 len_align)
{
	struct crypto_hash_cache *hash_cache = NULL;

	if (!direct_calc)
		return NULL;

	hash_cache = malloc(sizeof(struct crypto_hash_cache));
	if (!hash_cache)
		return NULL;

	memset(hash_cache, 0x00, sizeof(*hash_cache));

	hash_cache->direct_calc = direct_calc;
	hash_cache->user_data = user_data;
	hash_cache->data_align = data_align;
	hash_cache->len_align = len_align;
	hash_cache->left_len = total;

	return hash_cache;
}

void crypto_hash_cache_free(struct crypto_hash_cache *hash_cache)
{
	if (!hash_cache)
		return;

	if (hash_cache->cache)
		free(hash_cache->cache);

	free(hash_cache);
}

int crypto_hash_update_with_cache(struct crypto_hash_cache *hash_cache,
				  const u8 *data, u32 data_len)
{
	crypto_hash_calc direct_calc = hash_cache->direct_calc;
	const u8 *direct_data = NULL, *cache_data = NULL;
	u32 direct_data_len = 0, cache_data_len = 0;
	u8 is_last = 0;
	int ret = 0;

	if (hash_cache->left_len < data_len)
		goto error;

	is_last = hash_cache->left_len == data_len ? 1 : 0;

	if (!hash_cache->use_cache &&
	    IS_ALIGNED((ulong)data, hash_cache->data_align)) {
		direct_data = data;
		if (IS_ALIGNED(data_len, hash_cache->len_align) || is_last) {
			/* calc all directly */
			debug("%s, %d: calc all directly\n",
			      __func__, __LINE__);
			direct_data_len = data_len;
		} else {
			/* calc some directly calc some in cache */
			debug("%s, %d: calc some directly calc some in cache\n",
			      __func__, __LINE__);
			direct_data_len = round_down((ulong)data_len,
						     hash_cache->len_align);
			cache_data = direct_data + direct_data_len;
			cache_data_len = data_len % hash_cache->len_align;
			hash_cache->use_cache = 1;
		}
	} else {
		/* calc all in cache */
		debug("%s, %d: calc all in cache\n", __func__, __LINE__);
		cache_data = data;
		cache_data_len = data_len;
		hash_cache->use_cache = 1;
	}

	if (direct_data_len) {
		debug("%s, %d: calc direct data %u\n",
		      __func__, __LINE__, direct_data_len);
		ret = direct_calc(hash_cache->user_data,
				  direct_data, direct_data_len,
				  &hash_cache->is_started, is_last);
		if (ret)
			goto error;
		hash_cache->left_len -= direct_data_len;
	}

	if (cache_data_len) {
		debug("%s, %d: calc cache data %u\n",
		      __func__, __LINE__, cache_data_len);
		ret = hash_cache_calc(hash_cache, cache_data,
				      cache_data_len, is_last);
		if (ret)
			goto error;
		hash_cache->left_len -= cache_data_len;
	}

	return 0;
error:
	if (hash_cache->cache) {
		free(hash_cache->cache);
		hash_cache->cache = NULL;
	}

	return -EINVAL;
}

@@ -7,14 +7,23 @@
#include <clk.h>
#include <crypto.h>
#include <dm.h>
#include <rockchip/crypto_hash_cache.h>
#include <rockchip/crypto_v1.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>

#define CRYPTO_V1_DEFAULT_RATE 100000000
/* crypto timeout 500ms, must support more than 32M data per times */
#define HASH_UPDATE_LIMIT (32 * 1024 * 1024)
#define RK_CRYPTO_TIME_OUT 500000

#define LLI_ADDR_ALIGIN_SIZE 8
#define DATA_ADDR_ALIGIN_SIZE 8
#define DATA_LEN_ALIGIN_SIZE 64

struct rockchip_crypto_priv {
	struct crypto_hash_cache *hash_cache;
	struct rk_crypto_reg *reg;
	struct clk clk;
	sha_context *ctx;

@@ -34,6 +43,36 @@ static u32 rockchip_crypto_capability(struct udevice *dev)
	       CRYPTO_RSA2048;
}

static int rk_hash_direct_calc(void *hw_data, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	struct rockchip_crypto_priv *priv = hw_data;
	struct rk_crypto_reg *reg = priv->reg;

	if (!data_len)
		return -EINVAL;

	/* Must flush dcache before crypto DMA fetch data region */
	crypto_flush_cacheline((ulong)data, data_len);

	/* Hash Done Interrupt */
	writel(HASH_DONE_INT, &reg->crypto_intsts);

	/* Set data base and length */
	writel((u32)(ulong)data, &reg->crypto_hrdmas);
	writel((data_len + 3) >> 2, &reg->crypto_hrdmal);

	/* Write 1 to start. When finishes, the core will clear it */
	rk_setreg(&reg->crypto_ctrl, HASH_START);

	/* Wait last complete */
	do {} while (readl(&reg->crypto_ctrl) & HASH_START);

	priv->length += data_len;

	return 0;
}

static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);

@@ -48,6 +87,13 @@ static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
		return -EINVAL;
	}

	priv->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						   priv, ctx->length,
						   DATA_ADDR_ALIGIN_SIZE,
						   DATA_LEN_ALIGIN_SIZE);
	if (!priv->hash_cache)
		return -EFAULT;

	priv->ctx = ctx;
	priv->length = 0;
	writel(ctx->length, &reg->crypto_hash_msg_len);

@@ -97,40 +143,32 @@ static int rockchip_crypto_sha_update(struct udevice *dev,
				      u32 *input, u32 len)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_crypto_reg *reg = priv->reg;
	ulong aligned_input, aligned_len;
	int ret = -EINVAL, i;
	u8 *p;

	if (!len)
		return -EINVAL;
	if (!input || !len)
		goto exit;

	priv->length += len;
	if ((priv->length != priv->ctx->length) && !IS_ALIGNED(len, 4)) {
		printf("Crypto-v1: require update data length 4-byte "
		       "aligned(0x%08lx - 0x%08lx)\n",
		       (ulong)input, (ulong)input + len);
		return -EINVAL;
	p = (u8 *)input;

	for (i = 0; i < len / HASH_UPDATE_LIMIT; i++, p += HASH_UPDATE_LIMIT) {
		ret = crypto_hash_update_with_cache(priv->hash_cache, p,
						    HASH_UPDATE_LIMIT);
		if (ret)
			goto exit;
	}

	/* Must flush dcache before crypto DMA fetch data region */
	aligned_input = round_down((ulong)input, CONFIG_SYS_CACHELINE_SIZE);
	aligned_len = round_up(len + ((ulong)input - aligned_input),
			       CONFIG_SYS_CACHELINE_SIZE);
	flush_cache(aligned_input, aligned_len);
	if (len % HASH_UPDATE_LIMIT)
		ret = crypto_hash_update_with_cache(priv->hash_cache, p,
						    len % HASH_UPDATE_LIMIT);

	/* Wait last complete */
	do {} while (readl(&reg->crypto_ctrl) & HASH_START);
exit:
	if (ret) {
		crypto_hash_cache_free(priv->hash_cache);
		priv->hash_cache = NULL;
	}

	/* Hash Done Interrupt */
	writel(HASH_DONE_INT, &reg->crypto_intsts);

	/* Set data base and length */
	writel((u32)(ulong)input, &reg->crypto_hrdmas);
	writel((len + 3) >> 2, &reg->crypto_hrdmal);

	/* Write 1 to start. When finishes, the core will clear it */
	rk_setreg(&reg->crypto_ctrl, HASH_START);

	return 0;
	return ret;
}

static int rockchip_crypto_sha_final(struct udevice *dev,

@@ -139,13 +177,15 @@ static int rockchip_crypto_sha_final(struct udevice *dev,
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_crypto_reg *reg = priv->reg;
	u32 *buf = (u32 *)output;
	int ret = 0;
	u32 nbits;
	int i;

	if (priv->length != ctx->length) {
		printf("Crypto-v1: data total length(0x%08x) != init length(0x%08x)!\n",
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       priv->length, ctx->length);
		return -EIO;
		ret = -EIO;
		goto exit;
	}

	/* Wait last complete */

@@ -159,7 +199,10 @@ static int rockchip_crypto_sha_final(struct udevice *dev,
	for (i = 0; i < BITS2WORD(nbits); i++)
		buf[i] = readl(&reg->crypto_hash_dout[i]);

	return 0;
exit:
	crypto_hash_cache_free(priv->hash_cache);
	priv->hash_cache = NULL;
	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)

@@ -10,9 +10,44 @@
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/arch/clock.h>
#include <rockchip/crypto_hash_cache.h>
#include <rockchip/crypto_v2.h>
#include <rockchip/crypto_v2_pka.h>

#define RK_HASH_CTX_MAGIC 0x1A1A1A1A

#define CRYPTO_TRNG_MAX 32

enum endian_mode {
	BIG_ENDIAN = 0,
	LITTLE_ENDIAN
};

enum clk_type {
	CLK = 0,
	HCLK
};

struct crypto_lli_desc {
	u32 src_addr;
	u32 src_len;
	u32 dst_addr;
	u32 dst_len;
	u32 user_define;
	u32 reserve;
	u32 dma_ctrl;
	u32 next_addr;
};

struct rk_hash_ctx {
	struct crypto_lli_desc data_lli; /* lli desc */
	struct crypto_hash_cache *hash_cache;
	u32 magic; /* to check ctx */
	u32 algo; /* hash algo */
	u8 digest_size; /* hash out length */
	u8 reserved[3];
};

struct rockchip_crypto_priv {
	fdt_addr_t reg;
	struct clk clk;

@@ -73,16 +108,6 @@ static void word2byte(u32 word, u8 *ch, u32 endian)
	}
}

static void rk_flush_cache_align(ulong addr, ulong size, ulong alignment)
{
	ulong aligned_input, aligned_len;

	/* Must flush dcache before crypto DMA fetch data region */
	aligned_input = round_down(addr, alignment);
	aligned_len = round_up(size + (addr - aligned_input), alignment);
	flush_cache(aligned_input, aligned_len);
}

static inline void clear_hash_out_reg(void)
{
	int i;

@@ -117,13 +142,12 @@ static void hw_hash_clean_ctx(struct rk_hash_ctx *ctx)
	assert(ctx);
	assert(ctx->magic == RK_HASH_CTX_MAGIC);

	if (ctx->cache)
		free(ctx->cache);
	crypto_hash_cache_free(ctx->hash_cache);

	memset(ctx, 0x00, sizeof(*ctx));
}

int rk_hash_init(void *hw_ctx, u32 algo, u32 length)
static int rk_hash_init(void *hw_ctx, u32 algo)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)hw_ctx;
	u32 reg_ctrl = 0;

@@ -132,8 +156,6 @@ int rk_hash_init(void *hw_ctx, u32 algo, u32 length)
	if (!tmp_ctx)
		return -EINVAL;

	memset(tmp_ctx, 0x00, sizeof(*tmp_ctx));

	reg_ctrl = CRYPTO_SW_CC_RESET;
	crypto_write(reg_ctrl | (reg_ctrl << CRYPTO_WRITE_MASK_SHIFT),
		     CRYPTO_RST_CTL);

@@ -182,7 +204,6 @@ int rk_hash_init(void *hw_ctx, u32 algo, u32 length)
	crypto_write(CRYPTO_SRC_ITEM_INT_EN, CRYPTO_DMA_INT_EN);

	tmp_ctx->magic = RK_HASH_CTX_MAGIC;
	tmp_ctx->left_len = length;

	return 0;
exit:

@@ -192,9 +213,12 @@ exit:
	return ret;
}

static int rk_hash_direct_calc(struct crypto_lli_desc *lli, const u8 *data,
static int rk_hash_direct_calc(void *hw_data, const u8 *data,
			       u32 data_len, u8 *started_flag, u8 is_last)
{
	struct rockchip_crypto_priv *priv = hw_data;
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;
	struct crypto_lli_desc *lli = &hash_ctx->data_lli;
	int ret = -EINVAL;
	u32 tmp = 0;

@@ -230,9 +254,8 @@ static int rk_hash_direct_calc(struct crypto_lli_desc *lli, const u8 *data,
	}

	/* flush cache */
	rk_flush_cache_align((ulong)lli, sizeof(*lli),
			     CONFIG_SYS_CACHELINE_SIZE);
	rk_flush_cache_align((ulong)data, data_len, CONFIG_SYS_CACHELINE_SIZE);
	crypto_flush_cacheline((ulong)lli, sizeof(*lli));
	crypto_flush_cacheline((ulong)data, data_len);

	/* start calculate */
	crypto_write(tmp << CRYPTO_WRITE_MASK_SHIFT | tmp,

@@ -253,153 +276,32 @@ static int rk_hash_direct_calc(struct crypto_lli_desc *lli, const u8 *data,
		goto exit;
	}

	priv->length += data_len;
exit:
	return ret;
}

static int rk_hash_cache_calc(struct rk_hash_ctx *tmp_ctx, const u8 *data,
			      u32 data_len, u8 is_last)
{
	u32 left_len;
	int ret = 0;

	if (!tmp_ctx->cache) {
		tmp_ctx->cache = (u8 *)memalign(DATA_ADDR_ALIGIN_SIZE,
						HASH_CACHE_SIZE);
		if (!tmp_ctx->cache)
			goto error;

		tmp_ctx->cache_size = 0;
	}

	left_len = tmp_ctx->left_len;

	while (1) {
		u32 tmp_len = 0;

		if (tmp_ctx->cache_size + data_len <= HASH_CACHE_SIZE) {
			/* copy to cache */
			debug("%s, %d: copy to cache %u\n",
			      __func__, __LINE__, data_len);
			memcpy(tmp_ctx->cache + tmp_ctx->cache_size, data,
			       data_len);
			tmp_ctx->cache_size += data_len;

			/* if last one calc cache immediately */
			if (is_last) {
				debug("%s, %d: last one calc cache %u\n",
				      __func__, __LINE__, tmp_ctx->cache_size);
				ret = rk_hash_direct_calc(&tmp_ctx->data_lli,
							  tmp_ctx->cache,
							  tmp_ctx->cache_size,
							  &tmp_ctx->is_started,
							  is_last);
				if (ret)
					goto error;
			}
			left_len -= data_len;
			break;
		}

		/* 1. make cache be full */
		/* 2. calc cache */
		tmp_len = HASH_CACHE_SIZE - tmp_ctx->cache_size;
		debug("%s, %d: make cache be full %u\n",
		      __func__, __LINE__, tmp_len);
		memcpy(tmp_ctx->cache + tmp_ctx->cache_size, data, tmp_len);

		ret = rk_hash_direct_calc(&tmp_ctx->data_lli,
					  tmp_ctx->cache,
					  HASH_CACHE_SIZE,
					  &tmp_ctx->is_started,
					  0);
		if (ret)
			goto error;

		data += tmp_len;
		data_len -= tmp_len;
		left_len -= tmp_len;
		tmp_ctx->cache_size = 0;
	}

	return ret;
error:
	return -EINVAL;
}

int rk_hash_update(void *ctx, const u8 *data, u32 data_len)
{
	struct rk_hash_ctx *tmp_ctx = (struct rk_hash_ctx *)ctx;
	const u8 *direct_data = NULL, *cache_data = NULL;
	u32 direct_data_len = 0, cache_data_len = 0;
	int ret = 0;
	u8 is_last = 0;
	int ret = -EINVAL;

	debug("\n");
	if (!tmp_ctx || !data)
		goto error;
		goto exit;

	if (tmp_ctx->digest_size == 0 || tmp_ctx->magic != RK_HASH_CTX_MAGIC)
		goto error;
		goto exit;

	if (tmp_ctx->left_len < data_len)
		goto error;
	ret = crypto_hash_update_with_cache(tmp_ctx->hash_cache,
					    data, data_len);

	is_last = tmp_ctx->left_len == data_len ? 1 : 0;

	if (!tmp_ctx->use_cache &&
	    IS_ALIGNED((ulong)data, DATA_ADDR_ALIGIN_SIZE)) {
		direct_data = data;
		if (IS_ALIGNED(data_len, DATA_LEN_ALIGIN_SIZE) || is_last) {
			/* calc all directly */
			debug("%s, %d: calc all directly\n",
			      __func__, __LINE__);
			direct_data_len = data_len;
		} else {
			/* calc some directly calc some in cache */
			debug("%s, %d: calc some directly calc some in cache\n",
			      __func__, __LINE__);
			direct_data_len = round_down((ulong)data_len,
						     DATA_LEN_ALIGIN_SIZE);
			cache_data = direct_data + direct_data_len;
			cache_data_len = data_len % DATA_LEN_ALIGIN_SIZE;
			tmp_ctx->use_cache = 1;
		}
	} else {
		/* calc all in cache */
		debug("%s, %d: calc all in cache\n", __func__, __LINE__);
		cache_data = data;
		cache_data_len = data_len;
		tmp_ctx->use_cache = 1;
	}

	if (direct_data_len) {
		debug("%s, %d: calc direct data %u\n",
		      __func__, __LINE__, direct_data_len);
		ret = rk_hash_direct_calc(&tmp_ctx->data_lli, direct_data,
					  direct_data_len,
					  &tmp_ctx->is_started, is_last);
		if (ret)
			goto error;
		tmp_ctx->left_len -= direct_data_len;
	}

	if (cache_data_len) {
		debug("%s, %d: calc cache data %u\n",
		      __func__, __LINE__, cache_data_len);
		ret = rk_hash_cache_calc(tmp_ctx, cache_data,
					 cache_data_len, is_last);
		if (ret)
			goto error;
		tmp_ctx->left_len -= cache_data_len;
	}

	return ret;
error:
exit:
	/* free lli list */
	if (ret)
		hw_hash_clean_ctx(tmp_ctx);

	return -EINVAL;
	return ret;
}

int rk_hash_final(void *ctx, u8 *digest, size_t len)

@@ -439,8 +341,6 @@ int rk_hash_final(void *ctx, u8 *digest, size_t len)
	crypto_write(CRYPTO_WRITE_MASK_ALL | 0, CRYPTO_HASH_CTL);

exit:
	/* free lli list */
	hw_hash_clean_ctx(tmp_ctx);

	return ret;
}

@@ -500,13 +400,23 @@ static u32 rockchip_crypto_capability(struct udevice *dev)
static int rockchip_crypto_sha_init(struct udevice *dev, sha_context *ctx)
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	struct rk_hash_ctx *hash_ctx = priv->hw_ctx;

	if (!ctx)
		return -EINVAL;

	memset(priv->hw_ctx, 0x00, sizeof(struct rk_hash_ctx));
	memset(hash_ctx, 0x00, sizeof(*hash_ctx));

	return rk_hash_init(priv->hw_ctx, ctx->algo, ctx->length);
	priv->length = 0;

	hash_ctx->hash_cache = crypto_hash_cache_alloc(rk_hash_direct_calc,
						       priv, ctx->length,
						       DATA_ADDR_ALIGIN_SIZE,
						       DATA_LEN_ALIGIN_SIZE);
	if (!hash_ctx->hash_cache)
		return -EFAULT;

	return rk_hash_init(hash_ctx, ctx->algo);
}

static int rockchip_crypto_sha_update(struct udevice *dev,

@@ -539,10 +449,22 @@ static int rockchip_crypto_sha_final(struct udevice *dev,
{
	struct rockchip_crypto_priv *priv = dev_get_priv(dev);
	u32 nbits;
	int ret;

	nbits = crypto_algo_nbits(ctx->algo);

	return rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));
	if (priv->length != ctx->length) {
		printf("total length(0x%08x) != init length(0x%08x)!\n",
		       priv->length, ctx->length);
		ret = -EIO;
		goto exit;
	}

	ret = rk_hash_final(priv->hw_ctx, (u8 *)output, BITS2BYTE(nbits));

exit:
	hw_hash_clean_ctx(priv->hw_ctx);
	return ret;
}

#if CONFIG_IS_ENABLED(ROCKCHIP_RSA)

@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2020 Rockchip Electronics Co., Ltd
 */

#ifndef _CRYPTO_HASH_CACHE_H_
#define _CRYPTO_HASH_CACHE_H_

#define HASH_CACHE_SIZE 8192
#define CIPHER_CACHE_SIZE 8192

typedef int (*crypto_hash_calc)(void *hw_data, const u8 *data, u32 data_len,
				u8 *started_flag, u8 is_last);

struct crypto_hash_cache {
	crypto_hash_calc direct_calc; /* hardware hash callback */
	void *user_data;
	void *cache; /* virt addr for hash src data */
	u32 cache_size; /* data in cached size */
	u32 data_align;
	u32 len_align;
	u32 left_len; /* left data to calc */
	u8 is_started; /* start or restart */
	u8 use_cache; /* is use cache or not */
	u8 reserved[2];
};

struct crypto_hash_cache *crypto_hash_cache_alloc(crypto_hash_calc direct_calc,
						  void *user_data, u32 total,
						  u32 data_align,
						  u32 len_align);
void crypto_hash_cache_free(struct crypto_hash_cache *hash_cache);
int crypto_hash_update_with_cache(struct crypto_hash_cache *hash_cache,
				  const u8 *data, u32 data_len);
void crypto_flush_cacheline(ulong addr, ulong size);

#endif

@@ -44,9 +44,6 @@ enum rk_hash_algo {
#define RK_MODE_ENCRYPT 0
#define RK_MODE_DECRYPT 1

#define HASH_CACHE_SIZE 8192
#define CIPHER_CACHE_SIZE 8192

#define _SBF(s, v) ((v) << (s))
#define _BIT(b) _SBF(b, 1)

@@ -556,44 +553,6 @@ enum rk_hash_algo {
#define LLI_USER_PRIVACY_KEY _BIT(7)
#define LLI_USER_ROOT_KEY _BIT(8)

#define CRYPTO_TRNG_MAX 32

enum endian_mode {
	BIG_ENDIAN = 0,
	LITTLE_ENDIAN
};

enum clk_type {
	CLK = 0,
	HCLK
};

struct crypto_lli_desc {
	u32 src_addr;
	u32 src_len;
	u32 dst_addr;
	u32 dst_len;
	u32 user_define;
	u32 reserve;
	u32 dma_ctrl;
	u32 next_addr;
};

struct rk_hash_ctx {
	struct crypto_lli_desc data_lli; /* lli desc */
	void *cache; /* virt addr for hash src data */
	u32 cache_size; /* data in cached size */
	u32 left_len; /* left data to calc */
	u32 magic; /* to check ctx */
	u32 algo; /* hash algo */
	u8 digest_size; /* hash out length */
	u8 is_started; /* choose use start or restart */
	u8 use_cache; /* is use cache or not */
	u8 reserved;
};

#define RK_HASH_CTX_MAGIC 0x1A1A1A1A

extern fdt_addr_t crypto_base;

static inline u32 crypto_read(u32 offset)