summaryrefslogtreecommitdiff
path: root/drivers/crypto
diff options
context:
space:
mode:
authorChenghai Huang <huangchenghai2@huawei.com>2025-12-18 21:44:46 +0800
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2026-02-26 15:00:46 -0800
commit5d70c905a0bbd64857eed519bf62f3633ad7d162 (patch)
tree1a44188987f355d9c6c70594fdc76b026f7a005e /drivers/crypto
parent63223f79858de58d35344fc88a27bf23f0398f30 (diff)
crypto: hisilicon/qm - centralize the sending locks of each module into qm
[ Upstream commit 8cd9b608ee8dea78cac3f373bd5e3b3de2755d46 ] When a single queue is used by multiple tfms, the protection of shared resources by individual module driver programs is no longer sufficient. The hisi_qp_send needs to be ensured by the lock in qp. Fixes: 5fdb4b345cfb ("crypto: hisilicon - add a lock for the qp send operation") Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au> Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/hisilicon/hpre/hpre_crypto.c4
-rw-r--r--drivers/crypto/hisilicon/qm.c16
-rw-r--r--drivers/crypto/hisilicon/zip/zip_crypto.c3
3 files changed, 12 insertions, 11 deletions
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 4197281c8dff..220022ae7afb 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -109,7 +109,6 @@ struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
struct hpre *hpre;
- spinlock_t req_lock;
unsigned int key_sz;
bool crt_g2_mode;
union {
@@ -410,7 +409,6 @@ static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
qp->qp_ctx = ctx;
qp->req_cb = hpre_alg_cb;
- spin_lock_init(&ctx->req_lock);
ctx->qp = qp;
ctx->dev = &qp->qm->pdev->dev;
hpre = container_of(ctx->qp->qm, struct hpre, qm);
@@ -478,9 +476,7 @@ static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
do {
atomic64_inc(&dfx[HPRE_SEND_CNT].value);
- spin_lock_bh(&ctx->req_lock);
ret = hisi_qp_send(ctx->qp, msg);
- spin_unlock_bh(&ctx->req_lock);
if (ret != -EBUSY)
break;
atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 5c80ca04a8d4..0f5e39884e4a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -2369,26 +2369,33 @@ EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
struct hisi_qp_status *qp_status = &qp->qp_status;
- u16 sq_tail = qp_status->sq_tail;
- u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
- void *sqe = qm_get_avail_sqe(qp);
+ u16 sq_tail, sq_tail_next;
+ void *sqe;
+ spin_lock_bh(&qp->qp_lock);
if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
atomic_read(&qp->qm->status.flags) == QM_STOP ||
qp->is_resetting)) {
+ spin_unlock_bh(&qp->qp_lock);
dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
return -EAGAIN;
}
- if (!sqe)
+ sqe = qm_get_avail_sqe(qp);
+ if (!sqe) {
+ spin_unlock_bh(&qp->qp_lock);
return -EBUSY;
+ }
+ sq_tail = qp_status->sq_tail;
+ sq_tail_next = (sq_tail + 1) % qp->sq_depth;
memcpy(sqe, msg, qp->qm->sqe_size);
qp->msg[sq_tail] = msg;
qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
atomic_inc(&qp->qp_status.used);
qp_status->sq_tail = sq_tail_next;
+ spin_unlock_bh(&qp->qp_lock);
return 0;
}
@@ -2968,6 +2975,7 @@ static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
qp->qm = qm;
qp->qp_id = id;
+ spin_lock_init(&qp->qp_lock);
spin_lock_init(&qp->backlog.lock);
INIT_LIST_HEAD(&qp->backlog.list);
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 8250a33ba586..2f9035c016f3 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -217,7 +217,6 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
- struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct acomp_req *a_req = req->req;
struct hisi_qp *qp = qp_ctx->qp;
struct device *dev = &qp->qm->pdev->dev;
@@ -250,9 +249,7 @@ static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
/* send command to start a task */
atomic64_inc(&dfx->send_cnt);
- spin_lock_bh(&req_q->req_lock);
ret = hisi_qp_send(qp, &zip_sqe);
- spin_unlock_bh(&req_q->req_lock);
if (unlikely(ret < 0)) {
atomic64_inc(&dfx->send_busy_cnt);
ret = -EAGAIN;