summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorHarry Yoo <harry.yoo@oracle.com>2026-02-23 22:33:22 +0900
committerSasha Levin <sashal@kernel.org>2026-03-12 07:09:23 -0400
commitd303c51288badfabe7b92d1a8e5f63963e67620d (patch)
tree8ae86525d19537288d956cb5f48e67b0c127d14e /mm
parent372571d2ef0f8758e4113411f1ae072c3fbda384 (diff)
mm/slab: pass __GFP_NOWARN to refill_sheaf() if fallback is available
[ Upstream commit 021ca6b670bebebc409d43845efcfe8c11c1dd54 ]

When refill_sheaf() is called, failing to refill the sheaf doesn't necessarily mean the allocation will fail, because a fallback path might be available and serve the allocation request. Suppress spurious warnings by passing __GFP_NOWARN along with __GFP_NOMEMALLOC whenever a fallback path is available.

When the caller is alloc_full_sheaf() or __pcs_replace_empty_main(), the kernel always falls back to the slowpath (__slab_alloc_node()). For __prefill_sheaf_pfmemalloc(), the fallback path is available only when gfp_pfmemalloc_allowed() returns true.

Reported-and-tested-by: Chris Bainbridge <chris.bainbridge@gmail.com>
Closes: https://lore.kernel.org/linux-mm/aZt2-oS9lkmwT7Ch@debian.local
Fixes: 1ce20c28eafd ("slab: handle pfmemalloc slabs properly with sheaves")
Link: https://lore.kernel.org/linux-mm/aZwSreGj9-HHdD-j@hyeyoo
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260223133322.16705-1-harry.yoo@oracle.com
Tested-by: Mikhail Gavrilov <mikhail.v.gavrilov@gmail.com>
Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/slub.c13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 889c2804bbfe..b68db0f5a637 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2715,7 +2715,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
if (!sheaf)
return NULL;
- if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC)) {
+ if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
free_empty_sheaf(s, sheaf);
return NULL;
}
@@ -5092,7 +5092,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
return NULL;
if (empty) {
- if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC)) {
+ if (!refill_sheaf(s, empty, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
full = empty;
} else {
/*
@@ -5395,9 +5395,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
static int __prefill_sheaf_pfmemalloc(struct kmem_cache *s,
struct slab_sheaf *sheaf, gfp_t gfp)
{
- int ret = 0;
+ gfp_t gfp_nomemalloc;
+ int ret;
+
+ gfp_nomemalloc = gfp | __GFP_NOMEMALLOC;
+ if (gfp_pfmemalloc_allowed(gfp))
+ gfp_nomemalloc |= __GFP_NOWARN;
- ret = refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC);
+ ret = refill_sheaf(s, sheaf, gfp_nomemalloc);
if (likely(!ret || !gfp_pfmemalloc_allowed(gfp)))
return ret;