diff options
| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2026-03-19 16:15:33 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2026-03-19 16:15:33 +0100 |
| commit | 7e2dc8ed7862ac622b5a59953b679de97001dc83 (patch) | |
| tree | d2d2cf61a22f5a6404000ee007c5e80bc2d9eca9 /mm/damon/core.c | |
| parent | a7e8c9cc3a13baf3dcf9734dd55609aa7ff9a1a0 (diff) | |
| parent | 4a2b0ed2ac7abe9743e1559d212075a0ebac96b3 (diff) | |
Merge v6.19.9 linux-rolling-stable
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'mm/damon/core.c')
| -rw-r--r-- | mm/damon/core.c | 79 |
1 file changed, 44 insertions, 35 deletions
diff --git a/mm/damon/core.c b/mm/damon/core.c index 84f80a20f233..ba3b7ff8ecf4 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -197,7 +197,7 @@ static int damon_fill_regions_holes(struct damon_region *first, * @t: the given target. * @ranges: array of new monitoring target ranges. * @nr_ranges: length of @ranges. - * @min_sz_region: minimum region size. + * @min_region_sz: minimum region size. * * This function adds new regions to, or modify existing regions of a * monitoring target to fit in specific ranges. @@ -205,7 +205,7 @@ static int damon_fill_regions_holes(struct damon_region *first, * Return: 0 if success, or negative error code otherwise. */ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, - unsigned int nr_ranges, unsigned long min_sz_region) + unsigned int nr_ranges, unsigned long min_region_sz) { struct damon_region *r, *next; unsigned int i; @@ -242,16 +242,16 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, /* no region intersects with this range */ newr = damon_new_region( ALIGN_DOWN(range->start, - min_sz_region), - ALIGN(range->end, min_sz_region)); + min_region_sz), + ALIGN(range->end, min_region_sz)); if (!newr) return -ENOMEM; damon_insert_region(newr, damon_prev_region(r), r, t); } else { /* resize intersecting regions to fit in this range */ first->ar.start = ALIGN_DOWN(range->start, - min_sz_region); - last->ar.end = ALIGN(range->end, min_sz_region); + min_region_sz); + last->ar.end = ALIGN(range->end, min_region_sz); /* fill possible holes in the range */ err = damon_fill_regions_holes(first, last, t); @@ -546,7 +546,7 @@ struct damon_ctx *damon_new_ctx(void) ctx->attrs.max_nr_regions = 1000; ctx->addr_unit = 1; - ctx->min_sz_region = DAMON_MIN_REGION; + ctx->min_region_sz = DAMON_MIN_REGION_SZ; INIT_LIST_HEAD(&ctx->adaptive_targets); INIT_LIST_HEAD(&ctx->schemes); @@ -1131,7 +1131,7 @@ static struct damon_target *damon_nth_target(int n, struct damon_ctx *ctx) * If @src 
has no region, @dst keeps current regions. */ static int damon_commit_target_regions(struct damon_target *dst, - struct damon_target *src, unsigned long src_min_sz_region) + struct damon_target *src, unsigned long src_min_region_sz) { struct damon_region *src_region; struct damon_addr_range *ranges; @@ -1148,7 +1148,7 @@ static int damon_commit_target_regions(struct damon_target *dst, i = 0; damon_for_each_region(src_region, src) ranges[i++] = src_region->ar; - err = damon_set_regions(dst, ranges, i, src_min_sz_region); + err = damon_set_regions(dst, ranges, i, src_min_region_sz); kfree(ranges); return err; } @@ -1156,11 +1156,11 @@ static int damon_commit_target_regions(struct damon_target *dst, static int damon_commit_target( struct damon_target *dst, bool dst_has_pid, struct damon_target *src, bool src_has_pid, - unsigned long src_min_sz_region) + unsigned long src_min_region_sz) { int err; - err = damon_commit_target_regions(dst, src, src_min_sz_region); + err = damon_commit_target_regions(dst, src, src_min_region_sz); if (err) return err; if (dst_has_pid) @@ -1187,7 +1187,7 @@ static int damon_commit_targets( err = damon_commit_target( dst_target, damon_target_has_pid(dst), src_target, damon_target_has_pid(src), - src->min_sz_region); + src->min_region_sz); if (err) return err; } else { @@ -1214,7 +1214,7 @@ static int damon_commit_targets( return -ENOMEM; err = damon_commit_target(new_target, false, src_target, damon_target_has_pid(src), - src->min_sz_region); + src->min_region_sz); if (err) { damon_destroy_target(new_target, NULL); return err; @@ -1241,6 +1241,9 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) { int err; + if (!is_power_of_2(src->min_region_sz)) + return -EINVAL; + err = damon_commit_schemes(dst, src); if (err) return err; @@ -1261,7 +1264,7 @@ int damon_commit_ctx(struct damon_ctx *dst, struct damon_ctx *src) } dst->ops = src->ops; dst->addr_unit = src->addr_unit; - dst->min_sz_region = src->min_sz_region; + 
dst->min_region_sz = src->min_region_sz; return 0; } @@ -1294,8 +1297,8 @@ static unsigned long damon_region_sz_limit(struct damon_ctx *ctx) if (ctx->attrs.min_nr_regions) sz /= ctx->attrs.min_nr_regions; - if (sz < ctx->min_sz_region) - sz = ctx->min_sz_region; + if (sz < ctx->min_region_sz) + sz = ctx->min_region_sz; return sz; } @@ -1531,8 +1534,13 @@ int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) } ctx->walk_control = control; mutex_unlock(&ctx->walk_control_lock); - if (!damon_is_running(ctx)) + if (!damon_is_running(ctx)) { + mutex_lock(&ctx->walk_control_lock); + if (ctx->walk_control == control) + ctx->walk_control = NULL; + mutex_unlock(&ctx->walk_control_lock); return -EINVAL; + } wait_for_completion(&control->completion); if (control->canceled) return -ECANCELED; @@ -1668,7 +1676,7 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, * @t: The target of the region. * @rp: The pointer to the region. * @s: The scheme to be applied. - * @min_sz_region: minimum region size. + * @min_region_sz: minimum region size. * * If a quota of a scheme has exceeded in a quota charge window, the scheme's * action would applied to only a part of the target access pattern fulfilling @@ -1686,7 +1694,8 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t, * Return: true if the region should be entirely skipped, false otherwise. 
*/ static bool damos_skip_charged_region(struct damon_target *t, - struct damon_region **rp, struct damos *s, unsigned long min_sz_region) + struct damon_region **rp, struct damos *s, + unsigned long min_region_sz) { struct damon_region *r = *rp; struct damos_quota *quota = &s->quota; @@ -1708,11 +1717,11 @@ static bool damos_skip_charged_region(struct damon_target *t, if (quota->charge_addr_from && r->ar.start < quota->charge_addr_from) { sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - - r->ar.start, min_sz_region); + r->ar.start, min_region_sz); if (!sz_to_skip) { - if (damon_sz_region(r) <= min_sz_region) + if (damon_sz_region(r) <= min_region_sz) return true; - sz_to_skip = min_sz_region; + sz_to_skip = min_region_sz; } damon_split_region_at(t, r, sz_to_skip); r = damon_next_region(r); @@ -1738,7 +1747,7 @@ static void damos_update_stat(struct damos *s, static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos_filter *filter, - unsigned long min_sz_region) + unsigned long min_region_sz) { bool matched = false; struct damon_target *ti; @@ -1755,8 +1764,8 @@ static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, matched = target_idx == filter->target_idx; break; case DAMOS_FILTER_TYPE_ADDR: - start = ALIGN_DOWN(filter->addr_range.start, min_sz_region); - end = ALIGN_DOWN(filter->addr_range.end, min_sz_region); + start = ALIGN_DOWN(filter->addr_range.start, min_region_sz); + end = ALIGN_DOWN(filter->addr_range.end, min_region_sz); /* inside the range */ if (start <= r->ar.start && r->ar.end <= end) { @@ -1792,7 +1801,7 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, s->core_filters_allowed = false; damos_for_each_core_filter(filter, s) { - if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) { + if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) { if (filter->allow) s->core_filters_allowed = true; return !filter->allow; @@ -1927,7 
+1936,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, if (c->ops.apply_scheme) { if (quota->esz && quota->charged_sz + sz > quota->esz) { sz = ALIGN_DOWN(quota->esz - quota->charged_sz, - c->min_sz_region); + c->min_region_sz); if (!sz) goto update_stat; damon_split_region_at(t, r, sz); @@ -1975,7 +1984,7 @@ static void damon_do_apply_schemes(struct damon_ctx *c, if (quota->esz && quota->charged_sz >= quota->esz) continue; - if (damos_skip_charged_region(t, &r, s, c->min_sz_region)) + if (damos_skip_charged_region(t, &r, s, c->min_region_sz)) continue; if (!damos_valid_target(c, t, r, s)) @@ -2424,7 +2433,7 @@ static void damon_split_region_at(struct damon_target *t, /* Split every region in the given target into 'nr_subs' regions */ static void damon_split_regions_of(struct damon_target *t, int nr_subs, - unsigned long min_sz_region) + unsigned long min_region_sz) { struct damon_region *r, *next; unsigned long sz_region, sz_sub = 0; @@ -2434,13 +2443,13 @@ static void damon_split_regions_of(struct damon_target *t, int nr_subs, sz_region = damon_sz_region(r); for (i = 0; i < nr_subs - 1 && - sz_region > 2 * min_sz_region; i++) { + sz_region > 2 * min_region_sz; i++) { /* * Randomly select size of left sub-region to be at * least 10 percent and at most 90% of original region */ sz_sub = ALIGN_DOWN(damon_rand(1, 10) * - sz_region / 10, min_sz_region); + sz_region / 10, min_region_sz); /* Do not allow blank region */ if (sz_sub == 0 || sz_sub >= sz_region) continue; @@ -2480,7 +2489,7 @@ static void kdamond_split_regions(struct damon_ctx *ctx) nr_subregions = 3; damon_for_each_target(t, ctx) - damon_split_regions_of(t, nr_subregions, ctx->min_sz_region); + damon_split_regions_of(t, nr_subregions, ctx->min_region_sz); last_nr_regions = nr_regions; } @@ -2850,7 +2859,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start, * @t: The monitoring target to set the region. * @start: The pointer to the start address of the region. 
* @end: The pointer to the end address of the region. - * @min_sz_region: Minimum region size. + * @min_region_sz: Minimum region size. * * This function sets the region of @t as requested by @start and @end. If the * values of @start and @end are zero, however, this function finds the biggest @@ -2862,7 +2871,7 @@ static bool damon_find_biggest_system_ram(unsigned long *start, */ int damon_set_region_biggest_system_ram_default(struct damon_target *t, unsigned long *start, unsigned long *end, - unsigned long min_sz_region) + unsigned long min_region_sz) { struct damon_addr_range addr_range; @@ -2875,7 +2884,7 @@ int damon_set_region_biggest_system_ram_default(struct damon_target *t, addr_range.start = *start; addr_range.end = *end; - return damon_set_regions(t, &addr_range, 1, min_sz_region); + return damon_set_regions(t, &addr_range, 1, min_region_sz); } /* |
