path: root/mm
author		Alexander Gordeev <agordeev@linux.ibm.com>	2025-08-18 18:39:13 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2025-08-27 22:45:42 -0700
commit		c519c3c0a1133c408e83a383aa4dd30010aa5d71 (patch)
tree		4ce13380bea40296ee195d2b18c35e4d9b6eff92 /mm
parent		08c7c253e032863199da4f089bd0ccab5d1a4876 (diff)
mm/kasan: avoid lazy MMU mode hazards
Functions __kasan_populate_vmalloc() and __kasan_depopulate_vmalloc() use apply_to_pte_range(), which enters lazy MMU mode. In that mode, PTE updates may not become visible until the mode is left. As a result, otherwise correct reads and writes of a PTE via ptep_get(), set_pte(), pte_clear() and other access primitives can return stale or wrong results while the vmalloc shadow memory is being (de-)populated.

To avoid these hazards, leave lazy MMU mode before each PTE manipulation and re-enter it afterwards.

Link: https://lkml.kernel.org/r/0d2efb7ddddbff6b288fbffeeb10166e90771718.1755528662.git.agordeev@linux.ibm.com
Fixes: 3c5c3cfb9ef4 ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Daniel Axtens <dja@axtens.net>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
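For background, the callbacks patched below are invoked from apply_to_pte_range() in mm/memory.c, which brackets the whole PTE walk with arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode(). The following is a heavily condensed sketch of that loop, with arguments, locking, and error handling simplified for illustration; it is not the literal mm/memory.c code.

/*
 * Simplified sketch of the PTE-walk loop in apply_to_pte_range().
 * The point is only that the caller-supplied callback fn() runs while
 * the walk is inside lazy MMU mode, so PTE updates made (or observed)
 * by fn() may be deferred until arch_leave_lazy_mmu_mode().
 */
static int apply_to_pte_range_sketch(pte_t *pte, unsigned long addr,
				     unsigned long end, pte_fn_t fn, void *data)
{
	int err = 0;

	arch_enter_lazy_mmu_mode();
	do {
		err = fn(pte++, addr, data);	/* KASAN callback runs here */
		if (err)
			break;
	} while (addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();

	return err;
}

This is why the KASAN callbacks cannot assume that ptep_get() and friends see fully up-to-date PTE state unless they drop out of lazy MMU mode themselves.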
Diffstat (limited to 'mm')
-rw-r--r--	mm/kasan/shadow.c | 8
1 file changed, 8 insertions, 0 deletions
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 4d846d146d02..e2ceebf737ef 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -305,6 +305,8 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	pte_t pte;
 	int index;
 
+	arch_leave_lazy_mmu_mode();
+
 	index = PFN_DOWN(addr - data->start);
 	page = data->pages[index];
 	__memset(page_to_virt(page), KASAN_VMALLOC_INVALID, PAGE_SIZE);
@@ -317,6 +319,8 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	}
 	spin_unlock(&init_mm.page_table_lock);
 
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
@@ -461,6 +465,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	pte_t pte;
 	int none;
 
+	arch_leave_lazy_mmu_mode();
+
 	spin_lock(&init_mm.page_table_lock);
 	pte = ptep_get(ptep);
 	none = pte_none(pte);
@@ -471,6 +477,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	if (likely(!none))
 		__free_page(pfn_to_page(pte_pfn(pte)));
 
+	arch_enter_lazy_mmu_mode();
+
 	return 0;
 }
 
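After this patch both callbacks follow the same shape: leave lazy MMU mode on entry so that ptep_get(), set_pte(), pte_clear() and related primitives operate on up-to-date state, do the PTE work under init_mm.page_table_lock, and re-enter lazy MMU mode just before returning to apply_to_pte_range(). Below is a condensed illustration of the depopulate side, assembled from the two depopulate hunks above; the unchanged lines between those hunks (the pte_clear() under the lock and the spin_unlock()) are paraphrased, not quoted from the kernel.

/* Illustration only; reconstructed from the hunks above. */
static int kasan_depopulate_vmalloc_pte_sketch(pte_t *ptep, unsigned long addr,
					       void *unused)
{
	pte_t pte;
	int none;

	/* Stop deferring PTE updates while this callback manipulates the PTE. */
	arch_leave_lazy_mmu_mode();

	spin_lock(&init_mm.page_table_lock);
	pte = ptep_get(ptep);
	none = pte_none(pte);
	if (!none)
		pte_clear(&init_mm, addr, ptep);	/* paraphrased, not shown in the diff */
	spin_unlock(&init_mm.page_table_lock);

	if (likely(!none))
		__free_page(pfn_to_page(pte_pfn(pte)));

	/* Resume lazy MMU mode before returning to apply_to_pte_range(). */
	arch_enter_lazy_mmu_mode();

	return 0;
}

The populate side mirrors this, as the first two hunks show: it leaves lazy MMU mode before filling the shadow page and installing its PTE, and re-enters it after dropping init_mm.page_table_lock.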