Lines Matching +full:psi +full:- +full:l
1 // SPDX-License-Identifier: GPL-2.0
14 #include <linux/psi.h>
47 * damon_is_registered_ops() - Check if a given damon_operations is registered.
65 * damon_register_ops() - Register a monitoring operations set to DAMON.
69 * damon_operations->id so that others can find and use them later.
77 if (ops->id >= NR_DAMON_OPS) in damon_register_ops()
78 return -EINVAL; in damon_register_ops()
81 if (__damon_is_registered_ops(ops->id)) { in damon_register_ops()
82 err = -EINVAL; in damon_register_ops()
85 damon_registered_ops[ops->id] = *ops; in damon_register_ops()
92 * damon_select_ops() - Select a monitoring operations to use with the context.
106 return -EINVAL; in damon_select_ops()
110 err = -EINVAL; in damon_select_ops()
112 ctx->ops = damon_registered_ops[id]; in damon_select_ops()
130 region->ar.start = start; in damon_new_region()
131 region->ar.end = end; in damon_new_region()
132 region->nr_accesses = 0; in damon_new_region()
133 region->nr_accesses_bp = 0; in damon_new_region()
134 INIT_LIST_HEAD(&region->list); in damon_new_region()
136 region->age = 0; in damon_new_region()
137 region->last_nr_accesses = 0; in damon_new_region()
144 list_add_tail(&r->list, &t->regions_list); in damon_add_region()
145 t->nr_regions++; in damon_add_region()
150 list_del(&r->list); in damon_del_region()
151 t->nr_regions--; in damon_del_region()
173 return !(r->ar.end <= re->start || re->end <= r->ar.start); in damon_intersect()
190 if (r->ar.end != next->ar.start) { in damon_fill_regions_holes()
191 newr = damon_new_region(r->ar.end, next->ar.start); in damon_fill_regions_holes()
193 return -ENOMEM; in damon_fill_regions_holes()
201 * damon_set_regions() - Set regions of a target for given address ranges.
242 if (r->ar.start >= range->end) in damon_set_regions()
248 ALIGN_DOWN(range->start, in damon_set_regions()
250 ALIGN(range->end, DAMON_MIN_REGION)); in damon_set_regions()
252 return -ENOMEM; in damon_set_regions()
256 first->ar.start = ALIGN_DOWN(range->start, in damon_set_regions()
258 last->ar.end = ALIGN(range->end, DAMON_MIN_REGION); in damon_set_regions()
277 filter->type = type; in damos_new_filter()
278 filter->matching = matching; in damos_new_filter()
279 filter->allow = allow; in damos_new_filter()
280 INIT_LIST_HEAD(&filter->list); in damos_new_filter()
286 list_add_tail(&f->list, &s->filters); in damos_add_filter()
291 list_del(&f->list); in damos_del_filter()
314 goal->metric = metric; in damos_new_quota_goal()
315 goal->target_value = target_value; in damos_new_quota_goal()
316 INIT_LIST_HEAD(&goal->list); in damos_new_quota_goal()
322 list_add_tail(&g->list, &q->goals); in damos_add_quota_goal()
327 list_del(&g->list); in damos_del_quota_goal()
344 quota->esz = 0; in damos_quota_init()
345 quota->total_charged_sz = 0; in damos_quota_init()
346 quota->total_charged_ns = 0; in damos_quota_init()
347 quota->charged_sz = 0; in damos_quota_init()
348 quota->charged_from = 0; in damos_quota_init()
349 quota->charge_target_from = NULL; in damos_quota_init()
350 quota->charge_addr_from = 0; in damos_quota_init()
351 quota->esz_bp = 0; in damos_quota_init()
367 scheme->pattern = *pattern; in damon_new_scheme()
368 scheme->action = action; in damon_new_scheme()
369 scheme->apply_interval_us = apply_interval_us; in damon_new_scheme()
375 scheme->next_apply_sis = 0; in damon_new_scheme()
376 scheme->walk_completed = false; in damon_new_scheme()
377 INIT_LIST_HEAD(&scheme->filters); in damon_new_scheme()
378 scheme->stat = (struct damos_stat){}; in damon_new_scheme()
379 INIT_LIST_HEAD(&scheme->list); in damon_new_scheme()
381 scheme->quota = *(damos_quota_init(quota)); in damon_new_scheme()
383 INIT_LIST_HEAD(&scheme->quota.goals); in damon_new_scheme()
385 scheme->wmarks = *wmarks; in damon_new_scheme()
386 scheme->wmarks.activated = true; in damon_new_scheme()
388 scheme->target_nid = target_nid; in damon_new_scheme()
395 unsigned long sample_interval = ctx->attrs.sample_interval ? in damos_set_next_apply_sis()
396 ctx->attrs.sample_interval : 1; in damos_set_next_apply_sis()
397 unsigned long apply_interval = s->apply_interval_us ? in damos_set_next_apply_sis()
398 s->apply_interval_us : ctx->attrs.aggr_interval; in damos_set_next_apply_sis()
400 s->next_apply_sis = ctx->passed_sample_intervals + in damos_set_next_apply_sis()
406 list_add_tail(&s->list, &ctx->schemes); in damon_add_scheme()
412 list_del(&s->list); in damon_del_scheme()
425 damos_for_each_quota_goal_safe(g, g_next, &s->quota) in damon_destroy_scheme()
447 t->pid = NULL; in damon_new_target()
448 t->nr_regions = 0; in damon_new_target()
449 INIT_LIST_HEAD(&t->regions_list); in damon_new_target()
450 INIT_LIST_HEAD(&t->list); in damon_new_target()
457 list_add_tail(&t->list, &ctx->adaptive_targets); in damon_add_target()
462 return list_empty(&ctx->adaptive_targets); in damon_targets_empty()
467 list_del(&t->list); in damon_del_target()
487 return t->nr_regions; in damon_nr_regions()
498 init_completion(&ctx->kdamond_started); in damon_new_ctx()
500 ctx->attrs.sample_interval = 5 * 1000; in damon_new_ctx()
501 ctx->attrs.aggr_interval = 100 * 1000; in damon_new_ctx()
502 ctx->attrs.ops_update_interval = 60 * 1000 * 1000; in damon_new_ctx()
504 ctx->passed_sample_intervals = 0; in damon_new_ctx()
506 ctx->next_aggregation_sis = 0; in damon_new_ctx()
507 ctx->next_ops_update_sis = 0; in damon_new_ctx()
509 mutex_init(&ctx->kdamond_lock); in damon_new_ctx()
510 mutex_init(&ctx->call_control_lock); in damon_new_ctx()
511 mutex_init(&ctx->walk_control_lock); in damon_new_ctx()
513 ctx->attrs.min_nr_regions = 10; in damon_new_ctx()
514 ctx->attrs.max_nr_regions = 1000; in damon_new_ctx()
516 INIT_LIST_HEAD(&ctx->adaptive_targets); in damon_new_ctx()
517 INIT_LIST_HEAD(&ctx->schemes); in damon_new_ctx()
526 if (ctx->ops.cleanup) { in damon_destroy_targets()
527 ctx->ops.cleanup(ctx); in damon_destroy_targets()
550 return age * old_attrs->aggr_interval / new_attrs->aggr_interval; in damon_age_for_new_attrs()
564 * damon_update_monitoring_results() does. Otherwise, divide-by-zero would
585 r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses, in damon_update_monitoring_result()
587 r->nr_accesses_bp = r->nr_accesses * 10000; in damon_update_monitoring_result()
588 r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs); in damon_update_monitoring_result()
592 * region->nr_accesses is the number of sampling intervals in the last
593 * aggregation interval in which access to the region was found, and region->age is
597 * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
602 struct damon_attrs *old_attrs = &ctx->attrs; in damon_update_monitoring_results()
607 if (!old_attrs->sample_interval || !old_attrs->aggr_interval || in damon_update_monitoring_results()
608 !new_attrs->sample_interval || in damon_update_monitoring_results()
609 !new_attrs->aggr_interval) in damon_update_monitoring_results()
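The fragments above show the rescaling rule: an age measured in aggregation intervals has to be converted when the interval length changes (see the damon_age_for_new_attrs() line), and the conversion is only safe when both old and new intervals are non-zero. A minimal sketch of that arithmetic follows; the helper name is illustrative and not part of DAMON's API.

    /* Sketch: rescale a region's age when the aggregation interval changes. */
    static unsigned int age_for_new_aggr_interval(unsigned int age,
                    unsigned long old_aggr_us, unsigned long new_aggr_us)
    {
            /* e.g. an age of 10 at 100 ms aggregation becomes 5 at 200 ms */
            return age * old_aggr_us / new_aggr_us;
    }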
619 * damon_set_attrs() - Set attributes for the monitoring.
625 * &struct damon_callback->after_aggregation or
626 * &struct damon_callback->after_wmarks_check callbacks).
628 * Every time interval is in micro-seconds.
634 unsigned long sample_interval = attrs->sample_interval ? in damon_set_attrs()
635 attrs->sample_interval : 1; in damon_set_attrs()
638 if (attrs->min_nr_regions < 3) in damon_set_attrs()
639 return -EINVAL; in damon_set_attrs()
640 if (attrs->min_nr_regions > attrs->max_nr_regions) in damon_set_attrs()
641 return -EINVAL; in damon_set_attrs()
642 if (attrs->sample_interval > attrs->aggr_interval) in damon_set_attrs()
643 return -EINVAL; in damon_set_attrs()
645 ctx->next_aggregation_sis = ctx->passed_sample_intervals + in damon_set_attrs()
646 attrs->aggr_interval / sample_interval; in damon_set_attrs()
647 ctx->next_ops_update_sis = ctx->passed_sample_intervals + in damon_set_attrs()
648 attrs->ops_update_interval / sample_interval; in damon_set_attrs()
651 ctx->attrs = *attrs; in damon_set_attrs()
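The checks listed above reject attribute sets with fewer than three minimum regions, a minimum above the maximum, or a sampling interval longer than the aggregation interval. A minimal usage sketch, using the default values seen in the damon_new_ctx() fragments (the values are illustrative, not recommendations):

    static int example_set_attrs(struct damon_ctx *ctx)
    {
            struct damon_attrs attrs = {
                    .sample_interval = 5 * 1000,              /* 5 ms */
                    .aggr_interval = 100 * 1000,              /* must be >= sample_interval */
                    .ops_update_interval = 60 * 1000 * 1000,
                    .min_nr_regions = 10,                     /* must be >= 3 */
                    .max_nr_regions = 1000,                   /* must be >= min_nr_regions */
            };

            return damon_set_attrs(ctx, &attrs);      /* -EINVAL if a rule is broken */
    }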
660 * damon_set_schemes() - Set data access monitoring based operation schemes.
696 dst->metric = src->metric; in damos_commit_quota_goal()
697 dst->target_value = src->target_value; in damos_commit_quota_goal()
698 if (dst->metric == DAMOS_QUOTA_USER_INPUT) in damos_commit_quota_goal()
699 dst->current_value = src->current_value; in damos_commit_quota_goal()
704 * damos_commit_quota_goals() - Commit DAMOS quota goals to another quota.
708 * Copies user-specified parameters for quota goals from @src to @dst. Users
709 * should use this function for quota-goal-level parameter updates of running
710 * DAMON contexts, instead of manual in-place updates.
712 * This function should be called from parameters-update safe context, like
731 src_goal->metric, src_goal->target_value); in damos_commit_quota_goals()
733 return -ENOMEM; in damos_commit_quota_goals()
743 dst->reset_interval = src->reset_interval; in damos_commit_quota()
744 dst->ms = src->ms; in damos_commit_quota()
745 dst->sz = src->sz; in damos_commit_quota()
749 dst->weight_sz = src->weight_sz; in damos_commit_quota()
750 dst->weight_nr_accesses = src->weight_nr_accesses; in damos_commit_quota()
751 dst->weight_age = src->weight_age; in damos_commit_quota()
770 switch (dst->type) { in damos_commit_filter_arg()
772 dst->memcg_id = src->memcg_id; in damos_commit_filter_arg()
775 dst->addr_range = src->addr_range; in damos_commit_filter_arg()
778 dst->target_idx = src->target_idx; in damos_commit_filter_arg()
788 dst->type = src->type; in damos_commit_filter()
789 dst->matching = src->matching; in damos_commit_filter()
811 src_filter->type, src_filter->matching, in damos_commit_filters()
812 src_filter->allow); in damos_commit_filters()
814 return -ENOMEM; in damos_commit_filters()
837 dst->pattern = src->pattern; in damos_commit()
838 dst->action = src->action; in damos_commit()
839 dst->apply_interval_us = src->apply_interval_us; in damos_commit()
841 err = damos_commit_quota(&dst->quota, &src->quota); in damos_commit()
845 dst->wmarks = src->wmarks; in damos_commit()
870 new_scheme = damon_new_scheme(&src_scheme->pattern, in damon_commit_schemes()
871 src_scheme->action, in damon_commit_schemes()
872 src_scheme->apply_interval_us, in damon_commit_schemes()
873 &src_scheme->quota, &src_scheme->wmarks, in damon_commit_schemes()
876 return -ENOMEM; in damon_commit_schemes()
920 return -ENOMEM; in damon_commit_target_regions()
923 ranges[i++] = src_region->ar; in damon_commit_target_regions()
939 put_pid(dst->pid); in damon_commit_target()
941 get_pid(src->pid); in damon_commit_target()
942 dst->pid = src->pid; in damon_commit_target()
962 put_pid(dst_target->pid); in damon_commit_targets()
972 return -ENOMEM; in damon_commit_targets()
985 * damon_commit_ctx() - Commit parameters of a DAMON context to another.
989 * This function copies user-specified parameters from @src to @dst and updates
991 * for context-level parameter updates of a running context, instead of manual
992 * in-place updates.
994 * This function should be called from parameters-update safe context, like
1014 err = damon_set_attrs(dst, &src->attrs); in damon_commit_ctx()
1017 dst->ops = src->ops; in damon_commit_ctx()
1023 * damon_nr_running_ctxs() - Return number of currently running contexts.
1048 if (ctx->attrs.min_nr_regions) in damon_region_sz_limit()
1049 sz /= ctx->attrs.min_nr_regions; in damon_region_sz_limit()
1059 * __damon_start() - Starts monitoring with given context.
1068 int err = -EBUSY; in __damon_start()
1070 mutex_lock(&ctx->kdamond_lock); in __damon_start()
1071 if (!ctx->kdamond) { in __damon_start()
1073 reinit_completion(&ctx->kdamond_started); in __damon_start()
1074 ctx->kdamond = kthread_run(kdamond_fn, ctx, "kdamond.%d", in __damon_start()
1076 if (IS_ERR(ctx->kdamond)) { in __damon_start()
1077 err = PTR_ERR(ctx->kdamond); in __damon_start()
1078 ctx->kdamond = NULL; in __damon_start()
1080 wait_for_completion(&ctx->kdamond_started); in __damon_start()
1083 mutex_unlock(&ctx->kdamond_lock); in __damon_start()
1089 * damon_start() - Starts the monitorings for a given group of contexts.
1099 * returns -EBUSY.
1112 return -EBUSY; in damon_start()
1129 * __damon_stop() - Stops monitoring of a given context.
1138 mutex_lock(&ctx->kdamond_lock); in __damon_stop()
1139 tsk = ctx->kdamond; in __damon_stop()
1142 mutex_unlock(&ctx->kdamond_lock); in __damon_stop()
1146 mutex_unlock(&ctx->kdamond_lock); in __damon_stop()
1148 return -EPERM; in __damon_stop()
1152 * damon_stop() - Stops the monitorings for a given group of contexts.
1175 mutex_lock(&ctx->kdamond_lock); in damon_is_running()
1176 running = ctx->kdamond != NULL; in damon_is_running()
1177 mutex_unlock(&ctx->kdamond_lock); in damon_is_running()
1182 * damon_call() - Invoke a given function on DAMON worker thread (kdamond).
1187 * argument data that respectively passed via &damon_call_control->fn and
1188 * &damon_call_control->data of @control, and wait until the kdamond finishes
1195 * &damon_call_control->return_code.
1201 init_completion(&control->completion); in damon_call()
1202 control->canceled = false; in damon_call()
1204 mutex_lock(&ctx->call_control_lock); in damon_call()
1205 if (ctx->call_control) { in damon_call()
1206 mutex_unlock(&ctx->call_control_lock); in damon_call()
1207 return -EBUSY; in damon_call()
1209 ctx->call_control = control; in damon_call()
1210 mutex_unlock(&ctx->call_control_lock); in damon_call()
1212 return -EINVAL; in damon_call()
1213 wait_for_completion(&control->completion); in damon_call()
1214 if (control->canceled) in damon_call()
1215 return -ECANCELED; in damon_call()
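A hedged sketch of issuing such a call: the .fn, .data and .return_code fields and the int (*fn)(void *) shape are inferred from the fragments above and from the kdamond_call() lines further below, and the callback body is purely illustrative.

    static int read_damon_state(void *data)
    {
            /* runs on the kdamond thread, so DAMON state is stable here */
            return 0;
    }

    static int example_call(struct damon_ctx *ctx)
    {
            struct damon_call_control control = {
                    .fn = read_damon_state,
                    .data = NULL,
            };
            int err;

            /* damon_call() initializes the completion and 'canceled' fields */
            err = damon_call(ctx, &control);  /* may return -EBUSY, -EINVAL or -ECANCELED */
            if (!err)
                    pr_debug("fn returned %d\n", control.return_code);
            return err;
    }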
1220 * damos_walk() - Invoke a given function while DAMOS walks regions.
1230 * made only within one &damos->apply_interval_us since damos_walk()
1235 * passed at least one &damos->apply_interval_us, kdamond marks the request as
1242 init_completion(&control->completion); in damos_walk()
1243 control->canceled = false; in damos_walk()
1244 mutex_lock(&ctx->walk_control_lock); in damos_walk()
1245 if (ctx->walk_control) { in damos_walk()
1246 mutex_unlock(&ctx->walk_control_lock); in damos_walk()
1247 return -EBUSY; in damos_walk()
1249 ctx->walk_control = control; in damos_walk()
1250 mutex_unlock(&ctx->walk_control_lock); in damos_walk()
1252 return -EINVAL; in damos_walk()
1253 wait_for_completion(&control->completion); in damos_walk()
1254 if (control->canceled) in damos_walk()
1255 return -ECANCELED; in damos_walk()
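A hedged sketch of a walk request. The walk_fn signature is inferred from the damos_walk_call_walk() fragment further below (data, ctx, target, region, scheme, sz_filter_passed); the parameter types and the accounting done in the callback are assumptions of this sketch.

    static void count_region(void *data, struct damon_ctx *ctx,
                    struct damon_target *t, struct damon_region *r,
                    struct damos *s, unsigned long sz_filter_passed)
    {
            unsigned long *total = data;

            *total += sz_filter_passed;       /* illustrative accounting only */
    }

    static int example_walk(struct damon_ctx *ctx)
    {
            unsigned long total = 0;
            struct damos_walk_control control = {
                    .walk_fn = count_region,
                    .data = &total,
            };

            /* damos_walk() initializes the completion and 'canceled' fields */
            return damos_walk(ctx, &control); /* -EBUSY, -EINVAL or -ECANCELED on failure */
    }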
1272 r->last_nr_accesses = r->nr_accesses; in kdamond_reset_aggregated()
1273 r->nr_accesses = 0; in kdamond_reset_aggregated()
1285 unsigned int nr_accesses = r->nr_accesses_bp / 10000; in __damos_valid_target()
1288 return s->pattern.min_sz_region <= sz && in __damos_valid_target()
1289 sz <= s->pattern.max_sz_region && in __damos_valid_target()
1290 s->pattern.min_nr_accesses <= nr_accesses && in __damos_valid_target()
1291 nr_accesses <= s->pattern.max_nr_accesses && in __damos_valid_target()
1292 s->pattern.min_age_region <= r->age && in __damos_valid_target()
1293 r->age <= s->pattern.max_age_region; in __damos_valid_target()
1301 if (!ret || !s->quota.esz || !c->ops.get_scheme_score) in damos_valid_target()
1304 return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score; in damos_valid_target()
1308 * damos_skip_charged_region() - Check if the given region or starting part of
1333 struct damos_quota *quota = &s->quota; in damos_skip_charged_region()
1337 if (quota->charge_target_from) { in damos_skip_charged_region()
1338 if (t != quota->charge_target_from) in damos_skip_charged_region()
1341 quota->charge_target_from = NULL; in damos_skip_charged_region()
1342 quota->charge_addr_from = 0; in damos_skip_charged_region()
1345 if (quota->charge_addr_from && in damos_skip_charged_region()
1346 r->ar.end <= quota->charge_addr_from) in damos_skip_charged_region()
1349 if (quota->charge_addr_from && r->ar.start < in damos_skip_charged_region()
1350 quota->charge_addr_from) { in damos_skip_charged_region()
1351 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - in damos_skip_charged_region()
1352 r->ar.start, DAMON_MIN_REGION); in damos_skip_charged_region()
1362 quota->charge_target_from = NULL; in damos_skip_charged_region()
1363 quota->charge_addr_from = 0; in damos_skip_charged_region()
1372 s->stat.nr_tried++; in damos_update_stat()
1373 s->stat.sz_tried += sz_tried; in damos_update_stat()
1375 s->stat.nr_applied++; in damos_update_stat()
1376 s->stat.sz_applied += sz_applied; in damos_update_stat()
1377 s->stat.sz_ops_filter_passed += sz_ops_filter_passed; in damos_update_stat()
1388 switch (filter->type) { in damos_filter_match()
1395 matched = target_idx == filter->target_idx; in damos_filter_match()
1398 start = ALIGN_DOWN(filter->addr_range.start, DAMON_MIN_REGION); in damos_filter_match()
1399 end = ALIGN_DOWN(filter->addr_range.end, DAMON_MIN_REGION); in damos_filter_match()
1402 if (start <= r->ar.start && r->ar.end <= end) { in damos_filter_match()
1407 if (r->ar.end <= start || end <= r->ar.start) { in damos_filter_match()
1412 if (r->ar.start < start) { in damos_filter_match()
1413 damon_split_region_at(t, r, start - r->ar.start); in damos_filter_match()
1418 damon_split_region_at(t, r, end - r->ar.start); in damos_filter_match()
1425 return matched == filter->matching; in damos_filter_match()
1433 s->core_filters_allowed = false; in damos_filter_out()
1436 if (filter->allow) in damos_filter_out()
1437 s->core_filters_allowed = true; in damos_filter_out()
1438 return !filter->allow; in damos_filter_out()
1445 * damos_walk_call_walk() - Call &damos_walk_control->walk_fn.
1446 * @ctx: The context of &damon_ctx->walk_control.
1461 mutex_lock(&ctx->walk_control_lock); in damos_walk_call_walk()
1462 control = ctx->walk_control; in damos_walk_call_walk()
1463 mutex_unlock(&ctx->walk_control_lock); in damos_walk_call_walk()
1466 control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); in damos_walk_call_walk()
1470 * damos_walk_complete() - Complete DAMOS walk request if all walks are done.
1471 * @ctx: The context of &damon_ctx->walk_control.
1475 * scheme to all regions that are eligible for the given &damos->apply_interval_us.
1477 * &damos->apply_interval_us, this function marks the handling of the given
1485 mutex_lock(&ctx->walk_control_lock); in damos_walk_complete()
1486 control = ctx->walk_control; in damos_walk_complete()
1487 mutex_unlock(&ctx->walk_control_lock); in damos_walk_complete()
1491 s->walk_completed = true; in damos_walk_complete()
1494 if (!siter->walk_completed) in damos_walk_complete()
1497 complete(&control->completion); in damos_walk_complete()
1498 mutex_lock(&ctx->walk_control_lock); in damos_walk_complete()
1499 ctx->walk_control = NULL; in damos_walk_complete()
1500 mutex_unlock(&ctx->walk_control_lock); in damos_walk_complete()
1504 * damos_walk_cancel() - Cancel the current DAMOS walk request.
1505 * @ctx: The context of &damon_ctx->walk_control.
1517 mutex_lock(&ctx->walk_control_lock); in damos_walk_cancel()
1518 control = ctx->walk_control; in damos_walk_cancel()
1519 mutex_unlock(&ctx->walk_control_lock); in damos_walk_cancel()
1523 control->canceled = true; in damos_walk_cancel()
1524 complete(&control->completion); in damos_walk_cancel()
1525 mutex_lock(&ctx->walk_control_lock); in damos_walk_cancel()
1526 ctx->walk_control = NULL; in damos_walk_cancel()
1527 mutex_unlock(&ctx->walk_control_lock); in damos_walk_cancel()
1533 struct damos_quota *quota = &s->quota; in damos_apply_scheme()
1567 if (c->ops.apply_scheme) { in damos_apply_scheme()
1568 if (quota->esz && quota->charged_sz + sz > quota->esz) { in damos_apply_scheme()
1569 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, in damos_apply_scheme()
1578 if (c->callback.before_damos_apply) in damos_apply_scheme()
1579 err = c->callback.before_damos_apply(c, t, r, s); in damos_apply_scheme()
1583 sz_applied = c->ops.apply_scheme(c, t, r, s, in damos_apply_scheme()
1588 quota->total_charged_ns += timespec64_to_ns(&end) - in damos_apply_scheme()
1590 quota->charged_sz += sz; in damos_apply_scheme()
1591 if (quota->esz && quota->charged_sz >= quota->esz) { in damos_apply_scheme()
1592 quota->charge_target_from = t; in damos_apply_scheme()
1593 quota->charge_addr_from = r->ar.end + 1; in damos_apply_scheme()
1596 if (s->action != DAMOS_STAT) in damos_apply_scheme()
1597 r->age = 0; in damos_apply_scheme()
1610 struct damos_quota *quota = &s->quota; in damon_do_apply_schemes()
1612 if (c->passed_sample_intervals < s->next_apply_sis) in damon_do_apply_schemes()
1615 if (!s->wmarks.activated) in damon_do_apply_schemes()
1619 if (quota->esz && quota->charged_sz >= quota->esz) in damon_do_apply_schemes()
1633 * damon_feed_loop_next_input() - get next input to achieve a target score.
1641 * next input always being zero by always setting it non-zero. In short form
1645 * next_input = max(last_input * ((goal - current) / goal + 1), 1)
1667 score_goal_diff = score - goal; in damon_feed_loop_next_input()
1669 score_goal_diff = goal - score; in damon_feed_loop_next_input()
1677 return max(last_input - compensation, min_input); in damon_feed_loop_next_input()
1678 if (last_input < ULONG_MAX - compensation) in damon_feed_loop_next_input()
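An illustrative-only restatement of the feedback formula above. The kernel version works in unsigned arithmetic with an explicit compensation term and clamps the result against zero and ULONG_MAX; this sketch glosses over those concerns.

    /* next_input = max(last_input * ((goal - score) / goal + 1), 1) */
    static long feed_loop_next_input_sketch(long last_input, long goal, long score)
    {
            long compensation = last_input * (goal - score) / goal;
            long next = last_input + compensation;

            /* e.g. goal 10000, score 12000, last_input 100 -> 80 */
            return next < 1 ? 1 : next;
    }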
1706 switch (goal->metric) { in damos_set_quota_goal_current_value()
1708 /* User should already set goal->current_value */ in damos_set_quota_goal_current_value()
1712 goal->current_value = now_psi_total - goal->last_psi_total; in damos_set_quota_goal_current_value()
1713 goal->last_psi_total = now_psi_total; in damos_set_quota_goal_current_value()
1729 goal->current_value * 10000 / in damos_quota_score()
1730 goal->target_value); in damos_quota_score()
1737 * Called only if quota->ms or quota->sz is set, or quota->goals is not empty
1744 if (!quota->ms && list_empty(&quota->goals)) { in damos_set_effective_quota()
1745 quota->esz = quota->sz; in damos_set_effective_quota()
1749 if (!list_empty(&quota->goals)) { in damos_set_effective_quota()
1752 quota->esz_bp = damon_feed_loop_next_input( in damos_set_effective_quota()
1753 max(quota->esz_bp, 10000UL), in damos_set_effective_quota()
1755 esz = quota->esz_bp / 10000; in damos_set_effective_quota()
1758 if (quota->ms) { in damos_set_effective_quota()
1759 if (quota->total_charged_ns) in damos_set_effective_quota()
1760 throughput = quota->total_charged_sz * 1000000 / in damos_set_effective_quota()
1761 quota->total_charged_ns; in damos_set_effective_quota()
1764 esz = min(throughput * quota->ms, esz); in damos_set_effective_quota()
1767 if (quota->sz && quota->sz < esz) in damos_set_effective_quota()
1768 esz = quota->sz; in damos_set_effective_quota()
1770 quota->esz = esz; in damos_set_effective_quota()
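A worked sketch of the clamping above, assuming total_charged_sz is in bytes and total_charged_ns in nanoseconds so that 'throughput' comes out in bytes per millisecond; the helper name and the non-zero charged_ns precondition are assumptions of this sketch.

    static unsigned long effective_quota_sketch(unsigned long charged_sz,
                    unsigned long charged_ns, unsigned long ms_quota,
                    unsigned long sz_quota)
    {
            unsigned long esz = ULONG_MAX;
            unsigned long throughput = charged_sz * 1000000 / charged_ns;

            if (ms_quota)
                    esz = min(throughput * ms_quota, esz);
            if (sz_quota && sz_quota < esz)
                    esz = sz_quota;

            /* e.g. 100 MiB charged over 1 s with ms_quota == 10 gives roughly 1 MiB */
            return esz;
    }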
1775 struct damos_quota *quota = &s->quota; in damos_adjust_quota()
1781 if (!quota->ms && !quota->sz && list_empty(&quota->goals)) in damos_adjust_quota()
1785 if (time_after_eq(jiffies, quota->charged_from + in damos_adjust_quota()
1786 msecs_to_jiffies(quota->reset_interval))) { in damos_adjust_quota()
1787 if (quota->esz && quota->charged_sz >= quota->esz) in damos_adjust_quota()
1788 s->stat.qt_exceeds++; in damos_adjust_quota()
1789 quota->total_charged_sz += quota->charged_sz; in damos_adjust_quota()
1790 quota->charged_from = jiffies; in damos_adjust_quota()
1791 quota->charged_sz = 0; in damos_adjust_quota()
1795 if (!c->ops.get_scheme_score) in damos_adjust_quota()
1799 memset(c->regions_score_histogram, 0, in damos_adjust_quota()
1800 sizeof(*c->regions_score_histogram) * in damos_adjust_quota()
1806 score = c->ops.get_scheme_score(c, t, r, s); in damos_adjust_quota()
1807 c->regions_score_histogram[score] += in damos_adjust_quota()
1815 for (cumulated_sz = 0, score = max_score; ; score--) { in damos_adjust_quota()
1816 cumulated_sz += c->regions_score_histogram[score]; in damos_adjust_quota()
1817 if (cumulated_sz >= quota->esz || !score) in damos_adjust_quota()
1820 quota->min_score = score; in damos_adjust_quota()
1828 unsigned long sample_interval = c->attrs.sample_interval ? in kdamond_apply_schemes()
1829 c->attrs.sample_interval : 1; in kdamond_apply_schemes()
1833 if (c->passed_sample_intervals < s->next_apply_sis) in kdamond_apply_schemes()
1836 if (!s->wmarks.activated) in kdamond_apply_schemes()
1853 if (c->passed_sample_intervals < s->next_apply_sis) in kdamond_apply_schemes()
1856 s->next_apply_sis = c->passed_sample_intervals + in kdamond_apply_schemes()
1857 (s->apply_interval_us ? s->apply_interval_us : in kdamond_apply_schemes()
1858 c->attrs.aggr_interval) / sample_interval; in kdamond_apply_schemes()
1866 struct damon_region *l, struct damon_region *r) in damon_merge_two_regions() argument
1868 unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); in damon_merge_two_regions()
1870 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / in damon_merge_two_regions()
1872 l->nr_accesses_bp = l->nr_accesses * 10000; in damon_merge_two_regions()
1873 l->age = (l->age * sz_l + r->age * sz_r) / (sz_l + sz_r); in damon_merge_two_regions()
1874 l->ar.end = r->ar.end; in damon_merge_two_regions()
1882 * thres '->nr_accesses' diff threshold for the merge
1891 if (abs(r->nr_accesses - r->last_nr_accesses) > thres) in damon_merge_regions_of()
1892 r->age = 0; in damon_merge_regions_of()
1894 r->age++; in damon_merge_regions_of()
1896 if (prev && prev->ar.end == r->ar.start && in damon_merge_regions_of()
1897 abs(prev->nr_accesses - r->nr_accesses) <= thres && in damon_merge_regions_of()
1908 * threshold '->nr_accesses' diff threshold for the merge
1916 * The total number of regions could be higher than the user-defined limit,
1929 max_thres = c->attrs.aggr_interval / in kdamond_merge_regions()
1930 (c->attrs.sample_interval ? c->attrs.sample_interval : 1); in kdamond_merge_regions()
1938 } while (nr_regions > c->attrs.max_nr_regions && in kdamond_merge_regions()
1946 * sz_r size of the first sub-region that will be made
1953 new = damon_new_region(r->ar.start + sz_r, r->ar.end); in damon_split_region_at()
1957 r->ar.end = new->ar.start; in damon_split_region_at()
1959 new->age = r->age; in damon_split_region_at()
1960 new->last_nr_accesses = r->last_nr_accesses; in damon_split_region_at()
1961 new->nr_accesses_bp = r->nr_accesses_bp; in damon_split_region_at()
1962 new->nr_accesses = r->nr_accesses; in damon_split_region_at()
1977 for (i = 0; i < nr_subs - 1 && in damon_split_regions_of()
1980 * Randomly select size of left sub-region to be at in damon_split_regions_of()
1996 * Split every target region into randomly-sized small regions
1998 * This function splits every target region into random-sized small regions if
2000 * user-specified maximum number of regions. This is for maximizing the
2015 if (nr_regions > ctx->attrs.max_nr_regions / 2) in kdamond_split_regions()
2020 nr_regions < ctx->attrs.max_nr_regions / 3) in kdamond_split_regions()
2044 if (!ctx->ops.target_valid) in kdamond_need_stop()
2048 if (ctx->ops.target_valid(t)) in kdamond_need_stop()
2066 return -EINVAL; in damos_get_wmark_metric_value()
2071 * watermark check in micro-seconds.
2077 if (damos_get_wmark_metric_value(scheme->wmarks.metric, &metric)) in damos_wmark_wait_us()
2081 if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) { in damos_wmark_wait_us()
2082 if (scheme->wmarks.activated) in damos_wmark_wait_us()
2084 scheme->action, in damos_wmark_wait_us()
2085 str_high_low(metric > scheme->wmarks.high)); in damos_wmark_wait_us()
2086 scheme->wmarks.activated = false; in damos_wmark_wait_us()
2087 return scheme->wmarks.interval; in damos_wmark_wait_us()
2091 if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) && in damos_wmark_wait_us()
2092 !scheme->wmarks.activated) in damos_wmark_wait_us()
2093 return scheme->wmarks.interval; in damos_wmark_wait_us()
2095 if (!scheme->wmarks.activated) in damos_wmark_wait_us()
2096 pr_debug("activate a scheme (%d)\n", scheme->action); in damos_wmark_wait_us()
2097 scheme->wmarks.activated = true; in damos_wmark_wait_us()
2110 * kdamond_call() - handle damon_call_control.
2125 mutex_lock(&ctx->call_control_lock); in kdamond_call()
2126 control = ctx->call_control; in kdamond_call()
2127 mutex_unlock(&ctx->call_control_lock); in kdamond_call()
2131 control->canceled = true; in kdamond_call()
2133 ret = control->fn(control->data); in kdamond_call()
2134 control->return_code = ret; in kdamond_call()
2136 complete(&control->completion); in kdamond_call()
2137 mutex_lock(&ctx->call_control_lock); in kdamond_call()
2138 ctx->call_control = NULL; in kdamond_call()
2139 mutex_unlock(&ctx->call_control_lock); in kdamond_call()
2163 if (ctx->callback.after_wmarks_check && in kdamond_wait_activation()
2164 ctx->callback.after_wmarks_check(ctx)) in kdamond_wait_activation()
2169 return -EBUSY; in kdamond_wait_activation()
2174 unsigned long sample_interval = ctx->attrs.sample_interval ? in kdamond_init_intervals_sis()
2175 ctx->attrs.sample_interval : 1; in kdamond_init_intervals_sis()
2179 ctx->passed_sample_intervals = 0; in kdamond_init_intervals_sis()
2180 ctx->next_aggregation_sis = ctx->attrs.aggr_interval / sample_interval; in kdamond_init_intervals_sis()
2181 ctx->next_ops_update_sis = ctx->attrs.ops_update_interval / in kdamond_init_intervals_sis()
2185 apply_interval = scheme->apply_interval_us ? in kdamond_init_intervals_sis()
2186 scheme->apply_interval_us : ctx->attrs.aggr_interval; in kdamond_init_intervals_sis()
2187 scheme->next_apply_sis = apply_interval / sample_interval; in kdamond_init_intervals_sis()
2202 pr_debug("kdamond (%d) starts\n", current->pid); in kdamond_fn()
2204 complete(&ctx->kdamond_started); in kdamond_fn()
2207 if (ctx->ops.init) in kdamond_fn()
2208 ctx->ops.init(ctx); in kdamond_fn()
2209 if (ctx->callback.before_start && ctx->callback.before_start(ctx)) in kdamond_fn()
2211 ctx->regions_score_histogram = kmalloc_array(DAMOS_MAX_SCORE + 1, in kdamond_fn()
2212 sizeof(*ctx->regions_score_histogram), GFP_KERNEL); in kdamond_fn()
2213 if (!ctx->regions_score_histogram) in kdamond_fn()
2220 * ctx->attrs and ctx->next_{aggregation,ops_update}_sis could in kdamond_fn()
2226 unsigned long next_aggregation_sis = ctx->next_aggregation_sis; in kdamond_fn()
2227 unsigned long next_ops_update_sis = ctx->next_ops_update_sis; in kdamond_fn()
2228 unsigned long sample_interval = ctx->attrs.sample_interval; in kdamond_fn()
2233 if (ctx->ops.prepare_access_checks) in kdamond_fn()
2234 ctx->ops.prepare_access_checks(ctx); in kdamond_fn()
2235 if (ctx->callback.after_sampling && in kdamond_fn()
2236 ctx->callback.after_sampling(ctx)) in kdamond_fn()
2241 ctx->passed_sample_intervals++; in kdamond_fn()
2243 if (ctx->ops.check_accesses) in kdamond_fn()
2244 max_nr_accesses = ctx->ops.check_accesses(ctx); in kdamond_fn()
2246 if (ctx->passed_sample_intervals >= next_aggregation_sis) { in kdamond_fn()
2250 if (ctx->callback.after_aggregation && in kdamond_fn()
2251 ctx->callback.after_aggregation(ctx)) in kdamond_fn()
2259 if (!list_empty(&ctx->schemes)) in kdamond_fn()
2264 sample_interval = ctx->attrs.sample_interval ? in kdamond_fn()
2265 ctx->attrs.sample_interval : 1; in kdamond_fn()
2266 if (ctx->passed_sample_intervals >= next_aggregation_sis) { in kdamond_fn()
2267 ctx->next_aggregation_sis = next_aggregation_sis + in kdamond_fn()
2268 ctx->attrs.aggr_interval / sample_interval; in kdamond_fn()
2272 if (ctx->ops.reset_aggregated) in kdamond_fn()
2273 ctx->ops.reset_aggregated(ctx); in kdamond_fn()
2276 if (ctx->passed_sample_intervals >= next_ops_update_sis) { in kdamond_fn()
2277 ctx->next_ops_update_sis = next_ops_update_sis + in kdamond_fn()
2278 ctx->attrs.ops_update_interval / in kdamond_fn()
2280 if (ctx->ops.update) in kdamond_fn()
2281 ctx->ops.update(ctx); in kdamond_fn()
2291 if (ctx->callback.before_terminate) in kdamond_fn()
2292 ctx->callback.before_terminate(ctx); in kdamond_fn()
2293 if (ctx->ops.cleanup) in kdamond_fn()
2294 ctx->ops.cleanup(ctx); in kdamond_fn()
2295 kfree(ctx->regions_score_histogram); in kdamond_fn()
2297 pr_debug("kdamond (%d) finishes\n", current->pid); in kdamond_fn()
2298 mutex_lock(&ctx->kdamond_lock); in kdamond_fn()
2299 ctx->kdamond = NULL; in kdamond_fn()
2300 mutex_unlock(&ctx->kdamond_lock); in kdamond_fn()
2306 nr_running_ctxs--; in kdamond_fn()
2315 * struct damon_system_ram_region - System RAM resource address region of
2329 if (a->end - a->start < resource_size(res)) { in walk_system_ram()
2330 a->start = res->start; in walk_system_ram()
2331 a->end = res->end; in walk_system_ram()
2356 * damon_set_region_biggest_system_ram_default() - Set the region of the given
2376 return -EINVAL; in damon_set_region_biggest_system_ram_default()
2380 return -EINVAL; in damon_set_region_biggest_system_ram_default()
2388 * damon_moving_sum() - Calculate an inferred moving sum value.
2390 * @nomvsum: Non-moving sum of the last discrete @len_window window values.
2399 * It simply assumes there was no noise in the past, and gets the no-noise
2401 * non-moving sum of the last window. For example, if @len_window is 10 and we
2412 * the new value to get the updated pseudo-moving average.
2419 * Return: Pseudo-moving average after getting the @new_value.
2424 return mvsum - nomvsum / len_window + new_value; in damon_moving_sum()
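A worked example of the return statement above, with the ten-sample window mentioned in the comment and the parameter order shown in that statement; the numbers are illustrative.

    static unsigned int moving_sum_example(void)
    {
            /*
             * Retire one tenth of the previous window and add the new sample:
             * 1000 - 900 / 10 + 50 == 960.
             */
            return damon_moving_sum(1000, 900, 10, 50);
    }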
2428 * damon_update_region_access_rate() - Update the access rate of a region.
2436 * Usually this will be called by &damon_operations->check_accesses callback.
2447 if (attrs->sample_interval) in damon_update_region_access_rate()
2449 r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp, in damon_update_region_access_rate()
2450 r->last_nr_accesses * 10000, len_window, in damon_update_region_access_rate()
2454 r->nr_accesses++; in damon_update_region_access_rate()
2462 return -ENOMEM; in damon_init()
2470 #include "tests/core-kunit.h"