summaryrefslogtreecommitdiff
path: root/drivers/iommu/amd/iommu.c
diff options
context:
space:
mode:
authorAnkit Soni <Ankit.Soni@amd.com>2026-01-22 15:30:38 +0000
committerJoerg Roedel <joerg.roedel@amd.com>2026-02-03 14:27:05 +0100
commit9e249c48412828e807afddc21527eb734dc9bd3d (patch)
tree8db2544375a42daf8a53a5cc1f3109aca5c18016 /drivers/iommu/amd/iommu.c
parent5b0530bb16ec5af3cc96fc25891d6a860fd37a8c (diff)
iommu/amd: serialize sequence allocation under concurrent TLB invalidations
With concurrent TLB invalidations, completion wait randomly times out because cmd_sem_val was incremented outside the IOMMU spinlock, allowing CMD_COMPL_WAIT commands to be queued out of sequence and breaking the ordering assumption in wait_on_sem(). Move the cmd_sem_val increment under iommu->lock so completion sequence allocation is serialized with command queuing. Also remove the unnecessary return. Fixes: d2a0cac10597 ("iommu/amd: move wait_on_sem() out of spinlock") Tested-by: Srikanth Aithal <sraithal@amd.com> Reported-by: Srikanth Aithal <sraithal@amd.com> Signed-off-by: Ankit Soni <Ankit.Soni@amd.com> Reviewed-by: Vasant Hegde <vasant.hegde@amd.com> Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r--drivers/iommu/amd/iommu.c18
1 file changed, 12 insertions, 6 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 285ae635c324..58be841d624e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1439,6 +1439,12 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
return iommu_queue_command_sync(iommu, cmd, true);
}
+static u64 get_cmdsem_val(struct amd_iommu *iommu)
+{
+ lockdep_assert_held(&iommu->lock);
+ return ++iommu->cmd_sem_val;
+}
+
/*
* This function queues a completion wait command into the command
* buffer of an IOMMU
@@ -1453,11 +1459,11 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
if (!iommu->need_sync)
return 0;
- data = atomic64_inc_return(&iommu->cmd_sem_val);
- build_completion_wait(&cmd, iommu, data);
-
raw_spin_lock_irqsave(&iommu->lock, flags);
+ data = get_cmdsem_val(iommu);
+ build_completion_wait(&cmd, iommu, data);
+
ret = __iommu_queue_command_sync(iommu, &cmd, false);
raw_spin_unlock_irqrestore(&iommu->lock, flags);
@@ -3177,10 +3183,11 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
return;
build_inv_irt(&cmd, devid);
- data = atomic64_inc_return(&iommu->cmd_sem_val);
- build_completion_wait(&cmd2, iommu, data);
raw_spin_lock_irqsave(&iommu->lock, flags);
+ data = get_cmdsem_val(iommu);
+ build_completion_wait(&cmd2, iommu, data);
+
ret = __iommu_queue_command_sync(iommu, &cmd, true);
if (ret)
goto out_err;
@@ -3194,7 +3201,6 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
out_err:
raw_spin_unlock_irqrestore(&iommu->lock, flags);
- return;
}
static inline u8 iommu_get_int_tablen(struct iommu_dev_data *dev_data)