summary refs log tree commit diff
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c  20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index cdc1e652ec52..e1583757331e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6265,11 +6265,29 @@ empty:
free_empty_sheaf(s, sheaf);
}
+/*
+ * kvfree_call_rcu() can be called while holding a raw_spinlock_t. Since
+ * __kfree_rcu_sheaf() may acquire a spinlock_t (sleeping lock on PREEMPT_RT),
+ * this would violate lock nesting rules. Therefore, kvfree_call_rcu() avoids
+ * this problem by bypassing the sheaves layer entirely on PREEMPT_RT.
+ *
+ * However, lockdep still complains that it is invalid to acquire spinlock_t
+ * while holding raw_spinlock_t, even on !PREEMPT_RT where spinlock_t is a
+ * spinning lock. Tell lockdep that acquiring spinlock_t is valid here
+ * by temporarily raising the wait-type to LD_WAIT_CONFIG.
+ */
+static DEFINE_WAIT_OVERRIDE_MAP(kfree_rcu_sheaf_map, LD_WAIT_CONFIG);
+
bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj)
{
struct slub_percpu_sheaves *pcs;
struct slab_sheaf *rcu_sheaf;
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
+ return false;
+
+ lock_map_acquire_try(&kfree_rcu_sheaf_map);
+
if (!local_trylock(&s->cpu_sheaves->lock))
goto fail;
@@ -6346,10 +6364,12 @@ do_free:
local_unlock(&s->cpu_sheaves->lock);
stat(s, FREE_RCU_SHEAF);
+ lock_map_release(&kfree_rcu_sheaf_map);
return true;
fail:
stat(s, FREE_RCU_SHEAF_FAIL);
+ lock_map_release(&kfree_rcu_sheaf_map);
return false;
}