author	Sean Christopherson <seanjc@google.com>	2025-06-11 15:45:43 -0700
committer	Sean Christopherson <seanjc@google.com>	2025-06-23 09:50:37 -0700
commit	08d9ccdd1a5c75d7aca7ac3af56f723d780dd6ac (patch)
tree	2c3d237eaeb78b12d926c762f7d6f380c7180b99 /arch/x86/kvm/svm/avic.c
parent	3be405e89f3daea23ddfcf6b6526ae44ce38dd9a (diff)
iommu/amd: KVM: SVM: Infer IsRun from validity of pCPU destination
Infer whether or not a vCPU should be marked running from the validity of
the pCPU on which it is running.  amd_iommu_update_ga() already skips the
IRTE update if the pCPU is invalid, i.e. passing %true for is_run with an
invalid pCPU would be a blatant and egregious KVM bug.

Tested-by: Sairaj Kodilkar <sarunkod@amd.com>
Link: https://lore.kernel.org/r/20250611224604.313496-42-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
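The new contract is that a negative pCPU encodes "not running", so callers no
longer pass is_run explicitly. Below is a minimal compilable sketch of that
contract; the struct and function names are simplified stand-ins, not the real
IRTE structures and helpers in drivers/iommu/amd/iommu.c:

	#include <stdbool.h>

	/* Simplified stand-in for the IOMMU's guest-virtual-APIC IRTE. */
	struct irte_ga_sketch {
		bool guest_mode;	/* IRTE is remapped to a guest vAPIC */
		bool is_run;		/* target vCPU is currently running */
		int destination;	/* pCPU hosting the vCPU, if running */
	};

	/*
	 * Sketch of the post-patch amd_iommu_update_ga() contract: there is
	 * no is_run parameter; cpu < 0 means "vCPU not running", while
	 * cpu >= 0 names the destination pCPU and therefore implies running.
	 */
	static int update_ga_sketch(int cpu, struct irte_ga_sketch *entry)
	{
		if (!entry || !entry->guest_mode)
			return 0;	/* nothing to do for non-guest IRTEs */

		if (cpu >= 0)
			entry->destination = cpu;

		/* IsRun is inferred from pCPU validity, never passed in. */
		entry->is_run = (cpu >= 0);
		return 0;
	}

This matches the call sites in the diff below: avic_vcpu_load() passes the
valid h_physical_id, while avic_vcpu_put() passes -1.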
Diffstat (limited to 'arch/x86/kvm/svm/avic.c')
-rw-r--r--	arch/x86/kvm/svm/avic.c	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 42fd1868c32f..1960bb06c4b9 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -833,7 +833,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 		entry = svm->avic_physical_id_entry;
 		if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
 			amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
-					    true, pi_data.ir_data);
+					    pi_data.ir_data);
 
 		irqfd->irq_bypass_data = pi_data.ir_data;
 		list_add(&irqfd->vcpu_list, &svm->ir_list);
@@ -842,8 +842,7 @@ int avic_pi_update_irte(struct kvm_kernel_irqfd *irqfd, struct kvm *kvm,
 	return irq_set_vcpu_affinity(host_irq, NULL);
 }
 
-static inline int
-avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
+static inline int avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu)
 {
 	int ret = 0;
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -862,7 +861,7 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
 		return 0;
 
 	list_for_each_entry(irqfd, &svm->ir_list, vcpu_list) {
-		ret = amd_iommu_update_ga(cpu, r, irqfd->irq_bypass_data);
+		ret = amd_iommu_update_ga(cpu, irqfd->irq_bypass_data);
 		if (ret)
 			return ret;
 	}
@@ -924,7 +923,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	WRITE_ONCE(kvm_svm->avic_physical_id_table[vcpu->vcpu_id], entry);
 
-	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+	avic_update_iommu_vcpu_affinity(vcpu, h_physical_id);
 
 	spin_unlock_irqrestore(&svm->ir_list_lock, flags);
 }
@@ -964,7 +963,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	 */
 	spin_lock_irqsave(&svm->ir_list_lock, flags);
 
-	avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+	avic_update_iommu_vcpu_affinity(vcpu, -1);
 
 	entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
 	svm->avic_physical_id_entry = entry;