author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2026-03-19 16:08:51 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2026-03-19 16:08:51 +0100
commit     1c6d58b8a03b656bfe4c6930a7d6052782c0bc89 (patch)
tree       70991c25d188f5b1cd6f5d84f9f352ec2fa2effa /mm
parent     744943ac89cd209aec9414dd751c53528d5757e7 (diff)
parent     4aea1dc4cad17cd146072e13b1fd404f32b8b3ef (diff)

Merge v6.18.19 (linux-rolling-lts)
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/damon/core.c    10
-rw-r--r--  mm/filemap.c       13
-rw-r--r--  mm/kfence/core.c   29
-rw-r--r--  mm/memcontrol.c     2
-rw-r--r--  mm/memory.c         3
-rw-r--r--  mm/page_alloc.c     3
-rw-r--r--  mm/slub.c          54
7 files changed, 83 insertions, 31 deletions
diff --git a/mm/damon/core.c b/mm/damon/core.c
index b787cdb07cb2..cee5320cd9a1 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -1236,6 +1236,9 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src)
{
int err;
+ if (!is_power_of_2(src->min_sz_region))
+ return -EINVAL;
+
err = damon_commit_schemes(dst, src);
if (err)
return err;
@@ -1526,8 +1529,13 @@ int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control)
}
ctx->walk_control = control;
mutex_unlock(&ctx->walk_control_lock);
- if (!damon_is_running(ctx))
+ if (!damon_is_running(ctx)) {
+ mutex_lock(&ctx->walk_control_lock);
+ if (ctx->walk_control == control)
+ ctx->walk_control = NULL;
+ mutex_unlock(&ctx->walk_control_lock);
return -EINVAL;
+ }
wait_for_completion(&control->completion);
if (control->canceled)
return -ECANCELED;
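
The damos_walk() fix above follows a publish-then-roll-back pattern: the request is published under a lock, and if the consumer turns out not to be running, the publisher re-takes the lock and clears the request only if it is still its own, so no stale pointer is left behind. A minimal userspace sketch of that pattern, using pthreads and invented names rather than the kernel's damon structures:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct walk_control { int id; };

static pthread_mutex_t control_lock = PTHREAD_MUTEX_INITIALIZER;
static struct walk_control *pending;   /* stands in for ctx->walk_control */
static bool worker_running;            /* stands in for damon_is_running() */

/* Publish a request; if the worker is not running, withdraw it again
 * (but only if it is still ours) before reporting failure. */
static int submit_walk(struct walk_control *control)
{
	pthread_mutex_lock(&control_lock);
	pending = control;
	pthread_mutex_unlock(&control_lock);

	if (!worker_running) {
		pthread_mutex_lock(&control_lock);
		if (pending == control)
			pending = NULL;    /* do not leave a stale pointer behind */
		pthread_mutex_unlock(&control_lock);
		return -1;                 /* stands in for -EINVAL */
	}

	/* ... otherwise wait for the worker to complete the walk ... */
	return 0;
}

int main(void)
{
	struct walk_control c = { .id = 1 };

	printf("submit_walk: %d\n", submit_walk(&c));
	return 0;
}
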
diff --git a/mm/filemap.c b/mm/filemap.c
index 024b71da5224..8a7f4ce69aff 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1386,14 +1386,16 @@ repeat:
#ifdef CONFIG_MIGRATION
/**
- * migration_entry_wait_on_locked - Wait for a migration entry to be removed
- * @entry: migration swap entry.
+ * migration_entry_wait_on_locked - Wait for a migration entry or
+ * device_private entry to be removed.
+ * @entry: migration or device_private swap entry.
* @ptl: already locked ptl. This function will drop the lock.
*
- * Wait for a migration entry referencing the given page to be removed. This is
+ * Wait for a migration entry referencing the given page, or device_private
+ * entry referencing a device_private page to be unlocked. This is
* equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except
* this can be called without taking a reference on the page. Instead this
- * should be called while holding the ptl for the migration entry referencing
+ * should be called while holding the ptl for @entry referencing
* the page.
*
* Returns after unlocking the ptl.
@@ -1435,6 +1437,9 @@ void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
* If a migration entry exists for the page the migration path must hold
* a valid reference to the page, and it must take the ptl to remove the
* migration entry. So the page is valid until the ptl is dropped.
+ * Similarly any path attempting to drop the last reference to a
+ * device-private page needs to grab the ptl to remove the device-private
+ * entry.
*/
spin_unlock(ptl);
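
The kernel-doc above describes an unusual locking contract: migration_entry_wait_on_locked() is entered with the ptl already held and returns only after dropping it, so the waiter never needs its own reference on the page it waits for. A rough userspace analogue of that contract, using a pthread mutex and condition variable with invented names:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t entry_removed = PTHREAD_COND_INITIALIZER;
static int entry_present = 1;   /* analogue of the migration entry */

/* Called with `ptl` held; returns after unlocking it, mirroring the
 * contract of migration_entry_wait_on_locked(). */
static void entry_wait_on_locked(void)
{
	while (entry_present)
		pthread_cond_wait(&entry_removed, &ptl);
	pthread_mutex_unlock(&ptl);
}

/* The "migration path": it must take the lock to remove the entry,
 * so the waiter above cannot miss the wakeup. */
static void *remove_entry(void *arg)
{
	(void)arg;
	sleep(1);
	pthread_mutex_lock(&ptl);
	entry_present = 0;
	pthread_cond_broadcast(&entry_removed);
	pthread_mutex_unlock(&ptl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remove_entry, NULL);

	pthread_mutex_lock(&ptl);   /* caller holds the lock ...          */
	entry_wait_on_locked();     /* ... and the helper drops it for us */

	pthread_join(t, NULL);
	return 0;
}
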
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 6da35d477269..c5d525fcfcca 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -13,6 +13,7 @@
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
+#include <linux/kasan-enabled.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
@@ -883,6 +884,20 @@ void __init kfence_alloc_pool_and_metadata(void)
return;
/*
+ * If KASAN hardware tags are enabled, disable KFENCE, because it
+ * does not support MTE yet.
+ */
+ if (kasan_hw_tags_enabled()) {
+ pr_info("disabled as KASAN HW tags are enabled\n");
+ if (__kfence_pool) {
+ memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
+ __kfence_pool = NULL;
+ }
+ kfence_sample_interval = 0;
+ return;
+ }
+
+ /*
* If the pool has already been initialized by arch, there is no need to
* re-allocate the memory pool.
*/
@@ -951,14 +966,14 @@ static int kfence_init_late(void)
#ifdef CONFIG_CONTIG_ALLOC
struct page *pages;
- pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
- NULL);
+ pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL | __GFP_SKIP_KASAN,
+ first_online_node, NULL);
if (!pages)
return -ENOMEM;
__kfence_pool = page_to_virt(pages);
- pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
- NULL);
+ pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL | __GFP_SKIP_KASAN,
+ first_online_node, NULL);
if (pages)
kfence_metadata_init = page_to_virt(pages);
#else
@@ -968,11 +983,13 @@ static int kfence_init_late(void)
return -EINVAL;
}
- __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
+ __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE,
+ GFP_KERNEL | __GFP_SKIP_KASAN);
if (!__kfence_pool)
return -ENOMEM;
- kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
+ kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE,
+ GFP_KERNEL | __GFP_SKIP_KASAN);
#endif
if (!kfence_metadata_init)
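
The kfence change above adds an init-time gate: when KASAN hardware tags are active, any already-reserved pool is handed back and the sample interval is zeroed so the feature stays disabled. A small userspace sketch of that shape of early-init gate, with invented names standing in for the kfence globals:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static void *pool;                     /* analogue of __kfence_pool */
static unsigned long sample_interval = 100;

/* Stand-in for kasan_hw_tags_enabled(); driven by the environment here. */
static bool hw_tags_enabled(void)
{
	return getenv("HW_TAGS") != NULL;
}

static void alloc_pool_and_metadata(void)
{
	if (hw_tags_enabled()) {
		fprintf(stderr, "disabled: incompatible feature is enabled\n");
		free(pool);            /* release an already-reserved pool, if any */
		pool = NULL;
		sample_interval = 0;   /* keep the feature switched off */
		return;
	}
	pool = malloc(4096);           /* normal pool setup continues here */
}

int main(void)
{
	alloc_pool_and_metadata();
	printf("pool=%p interval=%lu\n", pool, sample_interval);
	return 0;
}
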
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ab25d540f0b8..61cf6af26f3c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3028,7 +3028,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
if (!local_trylock(&obj_stock.lock)) {
if (pgdat)
- mod_objcg_mlstate(objcg, pgdat, idx, nr_bytes);
+ mod_objcg_mlstate(objcg, pgdat, idx, nr_acct);
nr_pages = nr_bytes >> PAGE_SHIFT;
nr_bytes = nr_bytes & (PAGE_SIZE - 1);
atomic_add(nr_bytes, &objcg->nr_charged_bytes);
diff --git a/mm/memory.c b/mm/memory.c
index 61748b762876..e43f0a4702c4 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4642,7 +4642,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
unlock_page(vmf->page);
put_page(vmf->page);
} else {
- pte_unmap_unlock(vmf->pte, vmf->ptl);
+ pte_unmap(vmf->pte);
+ migration_entry_wait_on_locked(entry, vmf->ptl);
}
} else if (is_hwpoison_entry(entry)) {
ret = VM_FAULT_HWPOISON;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d76f0f60f080..6288c7e4b971 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6888,7 +6888,8 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
{
const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
- __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
+ __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO |
+ __GFP_SKIP_KASAN;
const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
/*
diff --git a/mm/slub.c b/mm/slub.c
index 870b8e00a938..5b038d1c8250 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2113,13 +2113,6 @@ static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
size_t sz = sizeof(struct slabobj_ext) * slab->objects;
struct kmem_cache *obj_exts_cache;
- /*
- * slabobj_ext array for KMALLOC_CGROUP allocations
- * are served from KMALLOC_NORMAL caches.
- */
- if (!mem_alloc_profiling_enabled())
- return sz;
-
if (sz > KMALLOC_MAX_CACHE_SIZE)
return sz;
@@ -2720,19 +2713,19 @@ static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
* object pointers are moved to an on-stack array under the lock. To bound the
* stack usage, limit each batch to PCS_BATCH_MAX.
*
- * returns true if at least partially flushed
+ * Must be called with s->cpu_sheaves->lock locked, returns with the lock
+ * unlocked.
+ *
+ * Returns how many objects remain to be flushed
*/
-static bool sheaf_flush_main(struct kmem_cache *s)
+static unsigned int __sheaf_flush_main_batch(struct kmem_cache *s)
{
struct slub_percpu_sheaves *pcs;
unsigned int batch, remaining;
void *objects[PCS_BATCH_MAX];
struct slab_sheaf *sheaf;
- bool ret = false;
-next_batch:
- if (!local_trylock(&s->cpu_sheaves->lock))
- return ret;
+ lockdep_assert_held(this_cpu_ptr(&s->cpu_sheaves->lock));
pcs = this_cpu_ptr(s->cpu_sheaves);
sheaf = pcs->main;
@@ -2750,10 +2743,37 @@ next_batch:
stat_add(s, SHEAF_FLUSH, batch);
- ret = true;
+ return remaining;
+}
- if (remaining)
- goto next_batch;
+static void sheaf_flush_main(struct kmem_cache *s)
+{
+ unsigned int remaining;
+
+ do {
+ local_lock(&s->cpu_sheaves->lock);
+
+ remaining = __sheaf_flush_main_batch(s);
+
+ } while (remaining);
+}
+
+/*
+ * Returns true if the main sheaf was at least partially flushed.
+ */
+static bool sheaf_try_flush_main(struct kmem_cache *s)
+{
+ unsigned int remaining;
+ bool ret = false;
+
+ do {
+ if (!local_trylock(&s->cpu_sheaves->lock))
+ return ret;
+
+ ret = true;
+ remaining = __sheaf_flush_main_batch(s);
+
+ } while (remaining);
return ret;
}
@@ -6147,7 +6167,7 @@ alloc_empty:
if (put_fail)
stat(s, BARN_PUT_FAIL);
- if (!sheaf_flush_main(s))
+ if (!sheaf_try_flush_main(s))
return NULL;
if (!local_trylock(&s->cpu_sheaves->lock))
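
The slub refactor above splits the old sheaf_flush_main() into a batch helper that must be entered with the per-CPU lock held and returns with it dropped, plus a blocking wrapper and a trylock wrapper built on top of it. A compact userspace sketch of that structure, with pthreads and invented names in place of the local_lock machinery:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BATCH_MAX 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int nr_objects = 20;   /* analogue of the main sheaf size */

/* Must be called with `lock` held; flushes one batch, unlocks, and
 * returns how many objects remain to be flushed. */
static unsigned int flush_one_batch_locked(void)
{
	unsigned int batch = nr_objects < BATCH_MAX ? nr_objects : BATCH_MAX;
	unsigned int remaining;

	nr_objects -= batch;
	remaining = nr_objects;
	pthread_mutex_unlock(&lock);

	printf("flushed %u, %u remaining\n", batch, remaining);
	return remaining;
}

/* Blocking variant: keep flushing batches until nothing remains. */
static void flush_all(void)
{
	unsigned int remaining;

	do {
		pthread_mutex_lock(&lock);
		remaining = flush_one_batch_locked();
	} while (remaining);
}

/* Opportunistic variant: give up if the lock is contended; report
 * whether at least one batch was flushed. */
static bool try_flush_all(void)
{
	unsigned int remaining;
	bool ret = false;

	do {
		if (pthread_mutex_trylock(&lock) != 0)
			return ret;
		ret = true;
		remaining = flush_one_batch_locked();
	} while (remaining);

	return ret;
}

int main(void)
{
	if (!try_flush_all())
		flush_all();
	return 0;
}
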