author     Oliver Upton <oliver.upton@linux.dev>    2024-10-25 18:25:59 +0000
committer  Oliver Upton <oliver.upton@linux.dev>    2024-10-31 19:00:41 +0000
commit     ae323e035801def145776dddf46c01ca1b90d21d
tree       694aeedb349a8a4fd1d005922760bd09b3b206bf /arch/arm64/kvm/pmu-emul.c
parent     8a34979030f6bcb713476ae485a58f5d8e96ebea
KVM: arm64: nv: Reprogram PMU events affected by nested transition
Start reprogramming PMU events at nested boundaries now that everything
is in place to handle the EL2 event filter. Only repaint events where
the filter differs between EL1 and EL2 as a slight optimization.

PMU now 'works' for nested VMs, albeit slow.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241025182559.3364829-1-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
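
[ Illustration, not part of the commit: the diff below is limited to
  pmu-emul.c, so the call sites at the actual nested boundaries (entry
  to a vEL2 exception and ERET back to vEL1) are not shown. A rough
  sketch of the caller side, with hypothetical function names standing
  in for KVM's nested exception-entry and ERET emulation: ]

	/* Sketch only: the function names below are illustrative. */
	static void emulate_exception_to_vel2(struct kvm_vcpu *vcpu)
	{
		/* ... retarget the vCPU to virtual EL2 ... */

		/*
		 * The effective EL just changed; counters whose EL1 and
		 * EL2 filters disagree need a freshly programmed perf
		 * event.
		 */
		kvm_pmu_nested_transition(vcpu);
	}

	static void emulate_eret_to_vel1(struct kvm_vcpu *vcpu)
	{
		/* ... return the vCPU to virtual EL1 ... */

		kvm_pmu_nested_transition(vcpu);
	}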
Diffstat (limited to 'arch/arm64/kvm/pmu-emul.c')
-rw-r--r--   arch/arm64/kvm/pmu-emul.c   29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e2eb2ba903b6..8ad62284fa23 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -1215,3 +1215,32 @@ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu)
 	return u64_replace_bits(pmcr, vcpu->kvm->arch.pmcr_n, ARMV8_PMU_PMCR_N);
 }
+
+void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu)
+{
+	bool reprogrammed = false;
+	unsigned long mask;
+	int i;
+
+	if (!kvm_vcpu_has_pmu(vcpu))
+		return;
+
+	mask = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+	for_each_set_bit(i, &mask, 32) {
+		struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i);
+
+		/*
+		 * We only need to reconfigure events where the filter is
+		 * different at EL1 vs. EL2, as we're multiplexing the true EL1
+		 * event filter bit for nested.
+		 */
+		if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc))
+			continue;
+
+		kvm_pmu_create_perf_event(pmc);
+		reprogrammed = true;
+	}
+
+	if (reprogrammed)
+		kvm_vcpu_pmu_restore_guest(vcpu);
+}
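
[ Illustration, not part of the commit: kvm_pmc_counts_at_el1() and
  kvm_pmc_counts_at_el2() are introduced earlier in this series; their
  exact bodies are not reproduced here. A simplified sketch of the
  predicates, ignoring the Secure/Non-secure filter split, built on the
  architectural PMEVTYPER<n>_EL0 bits: P (bit 31) excludes EL1, while
  NSH (bit 27) includes EL2. ]

	#define PMEVTYPER_P	(1ULL << 31)	/* exclude (v)EL1 */
	#define PMEVTYPER_NSH	(1ULL << 27)	/* include (v)EL2 */

	/* Sketch only: the in-tree helpers operate on a struct kvm_pmc. */
	static bool sketch_counts_at_el1(u64 evtyper)
	{
		return !(evtyper & PMEVTYPER_P);
	}

	static bool sketch_counts_at_el2(u64 evtyper)
	{
		return !!(evtyper & PMEVTYPER_NSH);
	}

[ Each counter is backed by a single host perf event whose privilege
  filter encodes "count at the guest's current EL". Crossing the
  vEL1/vEL2 boundary leaves that encoding valid whenever the two
  filters agree, which is why only the disagreeing counters are
  repainted. ]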