 arch/arm/kvm/mmu.c | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 683cac91a7f6..84f18dc83532 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -181,6 +181,14 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		unmap_puds(kvm, pgd, addr, next);
+		/*
+		 * If we are dealing with a large range in
+		 * stage2 table, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector
+		 * warnings.
+		 */
+		if (kvm && (next != end))
+			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
@@ -525,6 +533,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
  */
 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 {
+	assert_spin_locked(&kvm->mmu_lock);
 	unmap_range(kvm, kvm->arch.pgd, start, size);
 }
 
@@ -609,7 +618,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 	if (kvm->arch.pgd == NULL)
 		return;
 
+	spin_lock(&kvm->mmu_lock);
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+	spin_unlock(&kvm->mmu_lock);
+
 	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
 	kvm->arch.pgd = NULL;
 }
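The patch serializes stage-2 teardown under kvm->mmu_lock (asserting the lock is held in unmap_stage2_range(), and taking it in kvm_free_stage2_pgd()) and, because unmapping KVM_PHYS_SIZE worth of mappings can take a long time, calls cond_resched_lock() between PGD entries so the lock is not held across the entire walk. Below is a minimal userspace sketch of that "yield the lock inside a long critical section" pattern, not kernel code: all names (unmap_chunk, NR_CHUNKS, table_lock) are illustrative, and the sketch drops the lock unconditionally between chunks, whereas the real cond_resched_lock() only drops it when rescheduling is needed or the lock is contended.

/* Userspace analogue of the cond_resched_lock() pattern above. */
#include <pthread.h>
#include <stdio.h>

#define NR_CHUNKS 1024

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for unmapping one PGD-sized chunk of a stage-2 table. */
static void unmap_chunk(int i)
{
	/* real work would walk and free page-table entries here */
	(void)i;
}

static void unmap_all(void)
{
	pthread_mutex_lock(&table_lock);
	for (int i = 0; i < NR_CHUNKS; i++) {
		unmap_chunk(i);
		/*
		 * Mirrors "if (kvm && (next != end))
		 *              cond_resched_lock(&kvm->mmu_lock);":
		 * except on the final iteration, briefly release the
		 * lock so contending threads can make progress, then
		 * re-acquire it before touching the table again.
		 */
		if (i != NR_CHUNKS - 1) {
			pthread_mutex_unlock(&table_lock);
			/* a waiter may acquire table_lock here */
			pthread_mutex_lock(&table_lock);
		}
	}
	pthread_mutex_unlock(&table_lock);
	puts("unmap complete");
}

int main(void)
{
	unmap_all();
	return 0;
}

One consequence of this pattern, visible in the patch as well: any state the walk depends on must be re-validated (or be safe to use) after each re-acquisition, since other lock holders may have run in between. That is why the unmap is bracketed by spin_lock/spin_unlock in kvm_free_stage2_pgd() and guarded by assert_spin_locked() in unmap_stage2_range().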
