crypto: engine - Remove prepare/unprepare request
JIRA: https://issues.redhat.com/browse/RHEL-83268

Conflicts: crypto/crypto_engine.c
    Code change in crypto_pump_requests: different name of variable due to
    different order of kernel backports. Upstream commit e5e7eb023f24
    ("crypto: engine - Move crypto_engine_ops from request into crypto_alg")
    was merged without backporting this patch.

commit bcd6e41d983621954dfc3f1f64249a55838b3e6a
Author: Herbert Xu <herbert@gondor.apana.org.au>
Date:   Sun, 13 Aug 2023 14:54:32 +0800

    The callbacks for prepare and unprepare request in crypto_engine
    are superfluous. They can be done directly from do_one_request.
    Move the code into do_one_request and remove the unused callbacks.

    Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Signed-off-by: Marcin Juszkiewicz <mjuszkiewicz@redhat.com>
This commit is contained in:
parent
302e852098
commit
3267525538
|
@ -41,9 +41,6 @@ static void crypto_finalize_request(struct crypto_engine *engine,
|
||||||
struct crypto_async_request *req, int err)
|
struct crypto_async_request *req, int err)
|
||||||
{
|
{
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
bool finalize_req = false;
|
|
||||||
int ret;
|
|
||||||
struct crypto_engine_ctx *enginectx;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If hardware cannot enqueue more requests
|
* If hardware cannot enqueue more requests
|
||||||
|
@ -53,21 +50,11 @@ static void crypto_finalize_request(struct crypto_engine *engine,
|
||||||
if (!engine->retry_support) {
|
if (!engine->retry_support) {
|
||||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||||
if (engine->cur_req == req) {
|
if (engine->cur_req == req) {
|
||||||
finalize_req = true;
|
|
||||||
engine->cur_req = NULL;
|
engine->cur_req = NULL;
|
||||||
}
|
}
|
||||||
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
spin_unlock_irqrestore(&engine->queue_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (finalize_req || engine->retry_support) {
|
|
||||||
enginectx = crypto_tfm_ctx(req->tfm);
|
|
||||||
if (enginectx->op.prepare_request &&
|
|
||||||
enginectx->op.unprepare_request) {
|
|
||||||
ret = enginectx->op.unprepare_request(engine, req);
|
|
||||||
if (ret)
|
|
||||||
dev_err(engine->dev, "failed to unprepare request\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
req->complete(req, err);
|
req->complete(req, err);
|
||||||
|
|
||||||
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
kthread_queue_work(engine->kworker, &engine->pump_requests);
|
||||||
|
@ -160,7 +147,7 @@ start_request:
|
||||||
ret = engine->prepare_crypt_hardware(engine);
|
ret = engine->prepare_crypt_hardware(engine);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
dev_err(engine->dev, "failed to prepare crypt hardware\n");
|
dev_err(engine->dev, "failed to prepare crypt hardware\n");
|
||||||
goto req_err_2;
|
goto req_err_1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -170,16 +157,7 @@ start_request:
|
||||||
op = &alg->op;
|
op = &alg->op;
|
||||||
} else {
|
} else {
|
||||||
enginectx = crypto_tfm_ctx(async_req->tfm);
|
enginectx = crypto_tfm_ctx(async_req->tfm);
|
||||||
op = &enginectx->op;
|
|
||||||
|
|
||||||
if (op->prepare_request) {
|
|
||||||
ret = op->prepare_request(engine, async_req);
|
|
||||||
if (ret) {
|
|
||||||
dev_err(engine->dev, "failed to prepare request: %d\n",
|
|
||||||
ret);
|
|
||||||
goto req_err_2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!op->do_one_request) {
|
if (!op->do_one_request) {
|
||||||
dev_err(engine->dev, "failed to do request\n");
|
dev_err(engine->dev, "failed to do request\n");
|
||||||
ret = -EINVAL;
|
ret = -EINVAL;
|
||||||
|
@ -203,18 +181,6 @@ start_request:
|
||||||
ret);
|
ret);
|
||||||
goto req_err_1;
|
goto req_err_1;
|
||||||
}
|
}
|
||||||
/*
|
|
||||||
* If retry mechanism is supported,
|
|
||||||
* unprepare current request and
|
|
||||||
* enqueue it back into crypto-engine queue.
|
|
||||||
*/
|
|
||||||
if (enginectx->op.unprepare_request) {
|
|
||||||
ret = enginectx->op.unprepare_request(engine,
|
|
||||||
async_req);
|
|
||||||
if (ret)
|
|
||||||
dev_err(engine->dev,
|
|
||||||
"failed to unprepare request\n");
|
|
||||||
}
|
|
||||||
spin_lock_irqsave(&engine->queue_lock, flags);
|
spin_lock_irqsave(&engine->queue_lock, flags);
|
||||||
/*
|
/*
|
||||||
* If hardware was unable to execute request, enqueue it
|
* If hardware was unable to execute request, enqueue it
|
||||||
|
@ -230,13 +196,6 @@ start_request:
|
||||||
goto retry;
|
goto retry;
|
||||||
|
|
||||||
req_err_1:
|
req_err_1:
|
||||||
if (enginectx->op.unprepare_request) {
|
|
||||||
ret = enginectx->op.unprepare_request(engine, async_req);
|
|
||||||
if (ret)
|
|
||||||
dev_err(engine->dev, "failed to unprepare request\n");
|
|
||||||
}
|
|
||||||
|
|
||||||
req_err_2:
|
|
||||||
async_req->complete(async_req, ret);
|
async_req->complete(async_req, ret);
|
||||||
|
|
||||||
retry:
|
retry:
|
||||||
|
|
|
@ -19,15 +19,9 @@ struct device;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* struct crypto_engine_op - crypto hardware engine operations
|
* struct crypto_engine_op - crypto hardware engine operations
|
||||||
* @prepare__request: do some prepare if need before handle the current request
|
|
||||||
* @unprepare_request: undo any work done by prepare_request()
|
|
||||||
* @do_one_request: do encryption for current request
|
* @do_one_request: do encryption for current request
|
||||||
*/
|
*/
|
||||||
struct crypto_engine_op {
|
struct crypto_engine_op {
|
||||||
int (*prepare_request)(struct crypto_engine *engine,
|
|
||||||
void *areq);
|
|
||||||
int (*unprepare_request)(struct crypto_engine *engine,
|
|
||||||
void *areq);
|
|
||||||
int (*do_one_request)(struct crypto_engine *engine,
|
int (*do_one_request)(struct crypto_engine *engine,
|
||||||
void *areq);
|
void *areq);
|
||||||
};
|
};
|
||||||
|
|
Loading…
Reference in New Issue