summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorHarry Yoo <harry.yoo@oracle.com>2026-02-10 13:46:42 +0900
committerVlastimil Babka <vbabka@suse.cz>2026-02-10 11:39:30 +0100
commit27125df9a5d3b4cfd03bce3a8ec405a368cc9aae (patch)
tree37556c6aa3e13d879d7edc1bfb5d62e4b173fc82 /mm
parentc4d6d7829817f762dfdce829ffd0c14ea3bad7fe (diff)
mm/slab: drop the OBJEXTS_NOSPIN_ALLOC flag from enum objext_flags
OBJEXTS_NOSPIN_ALLOC was used to remember whether a slabobj_ext vector was allocated via kmalloc_nolock(), so that free_slab_obj_exts() could call kfree_nolock() instead of kfree(). Now that kfree() supports freeing kmalloc_nolock() objects, this flag is no longer needed. Instead, pass the allow_spin parameter down to free_slab_obj_exts() to determine whether kfree_nolock() or kfree() should be called in the free path, and free one bit in enum objext_flags. Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Harry Yoo <harry.yoo@oracle.com> Reviewed-by: Hao Li <hao.li@linux.dev> Link: https://patch.msgid.link/20260210044642.139482-3-harry.yoo@oracle.com Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 18
1 file changed, 8 insertions, 10 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 3f64a6b94571..18c30872d196 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2190,8 +2190,6 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
virt_to_slab(vec)->slab_cache == s);
new_exts = (unsigned long)vec;
- if (unlikely(!allow_spin))
- new_exts |= OBJEXTS_NOSPIN_ALLOC;
#ifdef CONFIG_MEMCG
new_exts |= MEMCG_DATA_OBJEXTS;
#endif
@@ -2229,7 +2227,7 @@ retry:
return 0;
}
-static inline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
{
struct slabobj_ext *obj_exts;
@@ -2257,10 +2255,10 @@ static inline void free_slab_obj_exts(struct slab *slab)
* the extension for obj_exts is expected to be NULL.
*/
mark_objexts_empty(obj_exts);
- if (unlikely(READ_ONCE(slab->obj_exts) & OBJEXTS_NOSPIN_ALLOC))
- kfree_nolock(obj_exts);
- else
+ if (allow_spin)
kfree(obj_exts);
+ else
+ kfree_nolock(obj_exts);
slab->obj_exts = 0;
}
@@ -2324,7 +2322,7 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
return 0;
}
-static inline void free_slab_obj_exts(struct slab *slab)
+static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
{
}
@@ -3404,14 +3402,14 @@ static __always_inline void account_slab(struct slab *slab, int order,
}
static __always_inline void unaccount_slab(struct slab *slab, int order,
- struct kmem_cache *s)
+ struct kmem_cache *s, bool allow_spin)
{
/*
* The slab object extensions should now be freed regardless of
* whether mem_alloc_profiling_enabled() or not because profiling
* might have been disabled after slab->obj_exts got allocated.
*/
- free_slab_obj_exts(slab);
+ free_slab_obj_exts(slab, allow_spin);
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
-(PAGE_SIZE << order));
@@ -3515,7 +3513,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin
page->mapping = NULL;
__ClearPageSlab(page);
mm_account_reclaimed_pages(pages);
- unaccount_slab(slab, order, s);
+ unaccount_slab(slab, order, s, allow_spin);
if (allow_spin)
free_frozen_pages(page, order);
else