summaryrefslogtreecommitdiff
path: root/arch/arm64/kvm/mmu.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--  arch/arm64/kvm/mmu.c | 26
1 file changed, 14 insertions, 12 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 070a01e53fcb..17d64a1e11e5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1751,17 +1751,16 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
force_pte = (max_map_size == PAGE_SIZE);
vma_pagesize = min_t(long, vma_pagesize, max_map_size);
+ vma_shift = __ffs(vma_pagesize);
}
/*
- * Both the canonical IPA and fault IPA must be hugepage-aligned to
- * ensure we find the right PFN and lay down the mapping in the right
- * place.
+ * Both the canonical IPA and fault IPA must be aligned to the
+ * mapping size to ensure we find the right PFN and lay down the
+ * mapping in the right place.
*/
- if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
- fault_ipa &= ~(vma_pagesize - 1);
- ipa &= ~(vma_pagesize - 1);
- }
+ fault_ipa = ALIGN_DOWN(fault_ipa, vma_pagesize);
+ ipa = ALIGN_DOWN(ipa, vma_pagesize);
gfn = ipa >> PAGE_SHIFT;
mte_allowed = kvm_vma_mte_allowed(vma);
@@ -1839,10 +1838,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && s2_force_noncacheable)
ret = -ENOEXEC;
- if (ret) {
- kvm_release_page_unused(page);
- return ret;
- }
+ if (ret)
+ goto out_put_page;
/*
* Guest performs atomic/exclusive operations on memory with unsupported
@@ -1852,7 +1849,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
+ ret = 1;
+ goto out_put_page;
}
if (nested)
@@ -1938,6 +1936,10 @@ out_unlock:
mark_page_dirty_in_slot(kvm, memslot, gfn);
return ret != -EAGAIN ? ret : 0;
+
+out_put_page:
+ kvm_release_page_unused(page);
+ return ret;
}
/* Resolve the access fault by making the page young again. */