summaryrefslogtreecommitdiff
path: root/mm/vmstat.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <frederic@kernel.org>2025-05-15 17:25:07 +0200
committerFrederic Weisbecker <frederic@kernel.org>2026-02-03 15:21:12 +0100
commit69e227e45069d613767c3dbd5d6a5570a2dddba8 (patch)
tree740b091aa6a52b27ce6ad2a7965b83738ba1e15a /mm/vmstat.c
parent2d05068610a336e0ddc7b090e34b6d9932e6fd90 (diff)
mm: vmstat: Prepare to protect against concurrent isolated cpuset change
The HK_TYPE_DOMAIN housekeeping cpumask will soon be made modifiable at runtime. In order to synchronize against vmstat workqueue to make sure that no asynchronous vmstat work is pending or executing on a newly made isolated CPU, target and queue a vmstat work under the same RCU read side critical section. Whenever housekeeping will update the HK_TYPE_DOMAIN cpumask, a vmstat workqueue flush will also be issued in a further change to make sure that no work remains pending after a CPU has been made isolated. Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Frederic Weisbecker <frederic@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Marco Crivellari <marco.crivellari@suse.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Waiman Long <longman@redhat.com> Cc: linux-mm@kvack.org
Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--mm/vmstat.c10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 65de88cdf40e..ed19c0d42de6 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -2144,11 +2144,13 @@ static void vmstat_shepherd(struct work_struct *w)
* infrastructure ever noticing. Skip regular flushing from vmstat_shepherd
* for all isolated CPUs to avoid interference with the isolated workload.
*/
- if (cpu_is_isolated(cpu))
- continue;
+ scoped_guard(rcu) {
+ if (cpu_is_isolated(cpu))
+ continue;
- if (!delayed_work_pending(dw) && need_update(cpu))
- queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
+ if (!delayed_work_pending(dw) && need_update(cpu))
+ queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
+ }
cond_resched();
}