/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DAMON api
 *
 * Author: SeongJae Park <sj@kernel.org>
 */

#ifndef _DAMON_H_
#define _DAMON_H_

#include <linux/memcontrol.h>
#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/random.h>

/* Minimal region size.  Every damon_region is aligned by this. */
#define DAMON_MIN_REGION	PAGE_SIZE
/* Max priority score for DAMON-based operation schemes */
#define DAMOS_MAX_SCORE		(99)

/* Get a random number in [l, r) */
static inline unsigned long damon_rand(unsigned long l, unsigned long r)
{
	return l + get_random_u32_below(r - l);
}

/**
 * struct damon_addr_range - Represents an address region of [@start, @end).
 * @start: Start address of the region (inclusive).
 * @end: End address of the region (exclusive).
 */
struct damon_addr_range {
	unsigned long start;
	unsigned long end;
};

/**
 * struct damon_size_range - Represents a size range, [@min, @max], for filters to operate on.
 * @min: Min size (inclusive).
 * @max: Max size (inclusive).
 */
struct damon_size_range {
	unsigned long min;
	unsigned long max;
};

/**
 * struct damon_region - Represents a monitoring target region.
 * @ar: The address range of the region.
 * @sampling_addr: Address of the sample for the next access check.
 * @nr_accesses: Access frequency of this region.
 * @nr_accesses_bp: @nr_accesses in basis points (0.01%), updated for each
 *	sampling interval.
 * @list: List head for siblings.
 * @age: Age of this region.
 *
 * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and is
 * increased for every &damon_attrs->sample_interval if an access to the region
 * during the last sampling interval is found.  This field should not be
 * updated by direct access but via the helper function,
 * damon_update_region_access_rate().
 *
 * @nr_accesses_bp is another representation of @nr_accesses in basis points
 * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a
 * manner similar to a moving sum.  By the algorithm, this value becomes
 * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval.  This can
 * be used when the aggregation interval is too long to wait for before getting
 * the access monitoring results.
 *
 * @age is initially zero, increased for each aggregation interval, and reset
 * to zero again if the access frequency is significantly changed.  If two
 * regions are merged into a new region, both @nr_accesses and @age of the new
 * region are set as the region size-weighted average of those of the two
 * regions.
 */
struct damon_region {
	struct damon_addr_range ar;
	unsigned long sampling_addr;
	unsigned int nr_accesses;
	unsigned int nr_accesses_bp;
	struct list_head list;

	unsigned int age;
	/* private: Internal value for age calculation. */
	unsigned int last_nr_accesses;
};
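
/*
 * Example (illustrative sketch, not part of the API): a hypothetical access
 * check routine of a monitoring operations set would record a per-sample
 * access result via damon_update_region_access_rate() (declared below)
 * instead of touching r->nr_accesses directly.  Names other than the DAMON
 * symbols are made up for illustration.
 *
 *	static void example_record_access(struct damon_region *r, bool accessed,
 *			struct damon_attrs *attrs)
 *	{
 *		damon_update_region_access_rate(r, accessed, attrs);
 *	}
 */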

/**
 * struct damon_target - Represents a monitoring target.
 * @pid: The PID of the virtual address space to monitor.
 * @nr_regions: Number of monitoring target regions of this target.
 * @regions_list: Head of the monitoring target regions of this target.
 * @list: List head for siblings.
 * @obsolete: Whether the commit destination target is obsolete.
 *
 * Each monitoring context could have multiple targets.  For example, a context
 * for virtual memory address spaces could have multiple target processes.  The
 * @pid should be set for appropriate &struct damon_operations including the
 * virtual address spaces monitoring operations.
 *
 * @obsolete is used only for damon_commit_targets() source targets, to specify
 * that the matching destination targets are obsolete.  Read
 * damon_commit_targets() to see how it is handled.
 */
struct damon_target {
	struct pid *pid;
	unsigned int nr_regions;
	struct list_head regions_list;
	struct list_head list;
	bool obsolete;
};

/**
 * enum damos_action - Represents an action of a Data Access Monitoring-based
 * Operation Scheme.
 *
 * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED.
 * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD.
 * @DAMOS_PAGEOUT: Reclaim the region.
 * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE.
 * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
 * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists.
 * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists.
 * @DAMOS_MIGRATE_HOT: Migrate the regions, prioritizing warmer regions.
 * @DAMOS_MIGRATE_COLD: Migrate the regions, prioritizing colder regions.
 * @DAMOS_STAT: Do nothing but count the stat.
 * @NR_DAMOS_ACTIONS: Total number of DAMOS actions.
 *
 * The support of each action is up to the running &struct damon_operations.
 * Refer to the 'Operation Action' section of
 * Documentation/mm/damon/design.rst for the status of the supports.
 *
 * Note that DAMOS_PAGEOUT doesn't trigger demotions.
 */
enum damos_action {
	DAMOS_WILLNEED,
	DAMOS_COLD,
	DAMOS_PAGEOUT,
	DAMOS_HUGEPAGE,
	DAMOS_NOHUGEPAGE,
	DAMOS_LRU_PRIO,
	DAMOS_LRU_DEPRIO,
	DAMOS_MIGRATE_HOT,
	DAMOS_MIGRATE_COLD,
	DAMOS_STAT,		/* Do nothing but only record the stat */
	NR_DAMOS_ACTIONS,
};

/**
 * enum damos_quota_goal_metric - Represents the metric to be used as the goal.
 *
 * @DAMOS_QUOTA_USER_INPUT: User-input value.
 * @DAMOS_QUOTA_SOME_MEM_PSI_US: System level some memory PSI in us.
 * @DAMOS_QUOTA_NODE_MEM_USED_BP: MemUsed ratio of a node.
 * @DAMOS_QUOTA_NODE_MEM_FREE_BP: MemFree ratio of a node.
 * @DAMOS_QUOTA_NODE_MEMCG_USED_BP: MemUsed ratio of a node for a cgroup.
 * @DAMOS_QUOTA_NODE_MEMCG_FREE_BP: MemFree ratio of a node for a cgroup.
 * @NR_DAMOS_QUOTA_GOAL_METRICS: Number of DAMOS quota goal metrics.
 *
 * Metrics equal to or larger than @NR_DAMOS_QUOTA_GOAL_METRICS are
 * unsupported.
 */
enum damos_quota_goal_metric {
	DAMOS_QUOTA_USER_INPUT,
	DAMOS_QUOTA_SOME_MEM_PSI_US,
	DAMOS_QUOTA_NODE_MEM_USED_BP,
	DAMOS_QUOTA_NODE_MEM_FREE_BP,
	DAMOS_QUOTA_NODE_MEMCG_USED_BP,
	DAMOS_QUOTA_NODE_MEMCG_FREE_BP,
	NR_DAMOS_QUOTA_GOAL_METRICS,
};

/**
 * struct damos_quota_goal - DAMOS scheme quota auto-tuning goal.
 * @metric: Metric to be used for representing the goal.
 * @target_value: Target value of @metric to achieve with the tuning.
 * @current_value: Current value of @metric.
 * @last_psi_total: Last measured total PSI.
 * @nid: Node id.
 * @memcg_id: Memcg id.
 * @list: List head for siblings.
 *
 * Data structure for getting the current score of the quota tuning goal.  The
 * score is calculated by how close @current_value and @target_value are.  Then
 * the score is entered to DAMON's internal feedback loop mechanism to get the
 * auto-tuned quota.
 *
 * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
 * entered by the user, probably inside the kdamond callbacks.  Otherwise,
 * DAMON sets @current_value with a self-measured value of @metric.
 *
 * If @metric is DAMOS_QUOTA_NODE_MEM_{USED,FREE}_BP, @nid represents the node
 * id of the target node to account the used/free memory for.
 *
 * If @metric is DAMOS_QUOTA_NODE_MEMCG_{USED,FREE}_BP, @nid and @memcg_id
 * represent the node id and the cgroup to account the used memory for.
 */
struct damos_quota_goal {
	enum damos_quota_goal_metric metric;
	unsigned long target_value;
	unsigned long current_value;
	/* metric-dependent fields */
	union {
		u64 last_psi_total;
		struct {
			int nid;
			unsigned short memcg_id;
		};
	};
	struct list_head list;
};
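
/*
 * Example (illustrative sketch): construct a quota auto-tuning goal that aims
 * to keep the system-level "some" memory pressure (PSI) around 1000
 * microseconds per quota reset interval, and link it to a quota.  Only
 * damos_new_quota_goal() and damos_add_quota_goal() (declared later in this
 * header) are real DAMON symbols; the wrapper function is hypothetical and
 * error handling is minimal.
 *
 *	static int example_add_psi_goal(struct damos_quota *quota)
 *	{
 *		struct damos_quota_goal *goal;
 *
 *		goal = damos_new_quota_goal(DAMOS_QUOTA_SOME_MEM_PSI_US, 1000);
 *		if (!goal)
 *			return -ENOMEM;
 *		damos_add_quota_goal(quota, goal);
 *		return 0;
 *	}
 */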

/**
 * struct damos_quota - Controls the aggressiveness of the given scheme.
 * @reset_interval: Charge reset interval in milliseconds.
 * @ms: Maximum milliseconds that the scheme can use.
 * @sz: Maximum bytes of memory that the action can be applied to.
 * @goals: Head of quota tuning goals (&damos_quota_goal) list.
 * @esz: Effective size quota in bytes.
 *
 * @weight_sz: Weight of the region's size for prioritization.
 * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization.
 * @weight_age: Weight of the region's age for prioritization.
 *
 * To avoid consuming too much CPU time or IO resources for applying the
 * &struct damos->action to large memory, DAMON allows users to set time and/or
 * size quotas.  The quotas can be set by writing non-zero values to &ms and
 * &sz, respectively.  If the time quota is set, DAMON tries to use only up to
 * &ms milliseconds within &reset_interval for applying the action.  If the
 * size quota is set, DAMON tries to apply the action only up to &sz bytes
 * within &reset_interval.
 *
 * To reconcile the different types of quotas and goals, DAMON internally
 * converts those into one single size quota called the "effective quota".
 * DAMON internally uses it as the only real quota.  The conversion is made as
 * follows.
 *
 * The time quota is transformed to a size quota using the estimated throughput
 * of the scheme's action.  DAMON then compares it against &sz and uses the
 * smaller one as the effective quota.
 *
 * If @goals is not empty, DAMON calculates yet another size quota based on the
 * goals using its internal feedback loop algorithm, for every @reset_interval.
 * Then, if the new size quota is smaller than the effective quota, it uses the
 * new size quota as the effective quota.
 *
 * The resulting effective size quota in bytes is set to @esz.
 *
 * For selecting regions within the quota, DAMON prioritizes the current
 * scheme's target memory regions using &struct
 * damon_operations->get_scheme_score.  You could customize the prioritization
 * logic by setting &weight_sz, &weight_nr_accesses, and &weight_age, because
 * monitoring operations are encouraged to respect those.
 */
struct damos_quota {
	unsigned long reset_interval;
	unsigned long ms;
	unsigned long sz;
	struct list_head goals;
	unsigned long esz;

	unsigned int weight_sz;
	unsigned int weight_nr_accesses;
	unsigned int weight_age;

	/* private: */
	/* For throughput estimation */
	unsigned long total_charged_sz;
	unsigned long total_charged_ns;

	/* For charging the quota */
	unsigned long charged_sz;
	unsigned long charged_from;
	struct damon_target *charge_target_from;
	unsigned long charge_addr_from;

	/* For prioritization */
	unsigned int min_score;

	/* For feedback loop */
	unsigned long esz_bp;
};
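
/*
 * Example (illustrative sketch): a quota template that lets a scheme consume
 * at most 10 ms of CPU time and apply its action to at most 128 MiB per
 * second (@reset_interval is in milliseconds), preferring larger and more
 * frequently accessed regions.  This struct is normally embedded in &struct
 * damos; damon_new_scheme() (declared later in this header) copies the public
 * fields from a caller-provided template like this one.  The numbers are
 * arbitrary.
 *
 *	struct damos_quota example_quota = {
 *		.reset_interval = 1000,
 *		.ms = 10,
 *		.sz = 128 * 1024 * 1024,
 *		.weight_sz = 1000,
 *		.weight_nr_accesses = 1000,
 *		.weight_age = 0,
 *	};
 */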

/**
 * enum damos_wmark_metric - Represents the watermark metric.
 *
 * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme.
 * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000].
 * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics.
 */
enum damos_wmark_metric {
	DAMOS_WMARK_NONE,
	DAMOS_WMARK_FREE_MEM_RATE,
	NR_DAMOS_WMARK_METRICS,
};

/**
 * struct damos_watermarks - Controls when a given scheme should be activated.
 * @metric: Metric for the watermarks.
 * @interval: Watermarks check time interval in microseconds.
 * @high: High watermark.
 * @mid: Middle watermark.
 * @low: Low watermark.
 *
 * If &metric is &DAMOS_WMARK_NONE, the scheme is always active.  Being active
 * means DAMON does monitoring and applying the action of the scheme to
 * appropriate memory regions.  Else, DAMON checks &metric of the system at
 * least once per &interval microseconds and works as below.
 *
 * If &metric is higher than &high, the scheme is inactivated.  If &metric is
 * between &mid and &low, the scheme is activated.  If &metric is lower than
 * &low, the scheme is inactivated.
 */
struct damos_watermarks {
	enum damos_wmark_metric metric;
	unsigned long interval;
	unsigned long high;
	unsigned long mid;
	unsigned long low;

	/* private: */
	bool activated;
};

/**
 * struct damos_stat - Statistics on a given scheme.
 * @nr_tried: Total number of regions that the scheme was tried to be applied
 *	to.
 * @sz_tried: Total size of regions that the scheme was tried to be applied
 *	to.
 * @nr_applied: Total number of regions that the scheme was applied to.
 * @sz_applied: Total size of regions that the scheme was applied to.
 * @sz_ops_filter_passed:
 *	Total bytes that passed ops layer-handled DAMOS filters.
 * @qt_exceeds: Total number of times the quota of the scheme has been
 *	exceeded.
 *
 * "Tried an action to a region" in this context means the DAMOS core logic
 * determined the region as eligible for applying the action.  The access
 * pattern (&struct damos_access_pattern), quotas (&struct damos_quota),
 * watermarks (&struct damos_watermarks) and filters (&struct damos_filter)
 * that are handled in the core logic can affect this.  The core logic asks
 * the operation set (&struct damon_operations) to apply the action to the
 * region.
 *
 * "Applied an action to a region" in this context means the operation set
 * (&struct damon_operations) successfully applied the action to the region,
 * at least to a part of the region.  The filters (&struct damos_filter) that
 * are handled on the operation set layer, the type of the action, and the
 * pages of the region can affect this.  For example, if a filter is set to
 * exclude anonymous pages and the region has only anonymous pages, applying
 * the action to the region will fail.  If the action is &DAMOS_PAGEOUT and
 * all pages of the region are already paged out, applying the action to the
 * region will also fail.
 */
struct damos_stat {
	unsigned long nr_tried;
	unsigned long sz_tried;
	unsigned long nr_applied;
	unsigned long sz_applied;
	unsigned long sz_ops_filter_passed;
	unsigned long qt_exceeds;
};

/**
 * enum damos_filter_type - Type of memory for &struct damos_filter
 * @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
 * @DAMOS_FILTER_TYPE_ACTIVE: Active pages.
 * @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
 * @DAMOS_FILTER_TYPE_YOUNG: Recently accessed pages.
 * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE: Page is part of a hugepage.
 * @DAMOS_FILTER_TYPE_UNMAPPED: Unmapped pages.
 * @DAMOS_FILTER_TYPE_ADDR: Address range.
 * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
 * @NR_DAMOS_FILTER_TYPES: Number of filter types.
 *
 * The anon pages type and memcg type filters are handled by the underlying
 * &struct damon_operations as a part of trying the scheme action, and are
 * therefore accounted as 'tried'.  In contrast, other types are handled by
 * the core layer before the action is tried, and are therefore not accounted
 * as 'tried'.
 *
 * The support of the filters that are handled by &struct damon_operations
 * depends on the running &struct damon_operations.
 * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
 * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
 * the two types.
 */
enum damos_filter_type {
	DAMOS_FILTER_TYPE_ANON,
	DAMOS_FILTER_TYPE_ACTIVE,
	DAMOS_FILTER_TYPE_MEMCG,
	DAMOS_FILTER_TYPE_YOUNG,
	DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
	DAMOS_FILTER_TYPE_UNMAPPED,
	DAMOS_FILTER_TYPE_ADDR,
	DAMOS_FILTER_TYPE_TARGET,
	NR_DAMOS_FILTER_TYPES,
};

/**
 * struct damos_filter - DAMOS action target memory filter.
 * @type: Type of the target memory.
 * @matching: Whether this is for @type-matching memory.
 * @allow: Whether to include or exclude the @matching memory.
 * @memcg_id: Memcg id of the memory in question if @type is
 *	DAMOS_FILTER_TYPE_MEMCG.
 * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
 * @target_idx: Index of the &struct damon_target of
 *	&damon_ctx->adaptive_targets if @type is
 *	DAMOS_FILTER_TYPE_TARGET.
 * @sz_range: Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
 * @list: List head for siblings.
 *
 * Before applying the &damos->action to a memory region, DAMOS checks if each
 * byte of the region matches the given condition and avoids applying the
 * action if so.  Support of each filter type depends on the running &struct
 * damon_operations and the type.  Refer to &enum damos_filter_type for more
 * details.
 */
struct damos_filter {
	enum damos_filter_type type;
	bool matching;
	bool allow;
	union {
		unsigned short memcg_id;
		struct damon_addr_range addr_range;
		int target_idx;
		struct damon_size_range sz_range;
	};
	struct list_head list;
};
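
/*
 * Example (illustrative sketch): build a filter that excludes anonymous pages
 * from a scheme's action targets.  damos_new_filter() and damos_add_filter()
 * are declared later in this header; the wrapper function is hypothetical and
 * error handling is minimal.
 *
 *	static int example_reject_anon(struct damos *scheme)
 *	{
 *		struct damos_filter *filter;
 *
 *		filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false);
 *		if (!filter)
 *			return -ENOMEM;
 *		damos_add_filter(scheme, filter);
 *		return 0;
 *	}
 */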

struct damon_ctx;
struct damos;

/**
 * struct damos_walk_control - Control damos_walk().
 *
 * @walk_fn: Function to be called back for each region.
 * @data: Data that will be passed to walk functions.
 *
 * Control damos_walk(), which requests a specific kdamond to invoke the given
 * function for each region that is eligible for applying the actions of the
 * kdamond's schemes.  Refer to damos_walk() for more details.
 */
struct damos_walk_control {
	void (*walk_fn)(void *data, struct damon_ctx *ctx,
			struct damon_target *t, struct damon_region *r,
			struct damos *s, unsigned long sz_filter_passed);
	void *data;
	/* private: internal use only */
	/* informs if the kdamond finished handling of the walk request */
	struct completion completion;
	/* informs if the walk is canceled. */
	bool canceled;
};
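
/*
 * Example (illustrative sketch): ask a running kdamond to report the total
 * size of the regions its schemes would apply actions to.  Only damos_walk()
 * (declared later in this header), damon_sz_region() and the structure above
 * are real DAMON symbols; the callback and the wrapper are hypothetical.
 *
 *	static void example_walk_fn(void *data, struct damon_ctx *ctx,
 *			struct damon_target *t, struct damon_region *r,
 *			struct damos *s, unsigned long sz_filter_passed)
 *	{
 *		unsigned long *total = data;
 *
 *		*total += damon_sz_region(r);
 *	}
 *
 *	static int example_total_scheme_sz(struct damon_ctx *ctx,
 *			unsigned long *total)
 *	{
 *		struct damos_walk_control control = {
 *			.walk_fn = example_walk_fn,
 *			.data = total,
 *		};
 *
 *		*total = 0;
 *		return damos_walk(ctx, &control);
 *	}
 */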

/**
 * struct damos_access_pattern - Target access pattern of the given scheme.
 * @min_sz_region: Minimum size of target regions.
 * @max_sz_region: Maximum size of target regions.
 * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions.
 * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions.
 * @min_age_region: Minimum age of target regions.
 * @max_age_region: Maximum age of target regions.
 */
struct damos_access_pattern {
	unsigned long min_sz_region;
	unsigned long max_sz_region;
	unsigned int min_nr_accesses;
	unsigned int max_nr_accesses;
	unsigned int min_age_region;
	unsigned int max_age_region;
};

/**
 * struct damos_migrate_dests - Migration destination nodes and their weights.
 * @node_id_arr: Array of migration destination node ids.
 * @weight_arr: Array of migration weights for @node_id_arr.
 * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
 *
 * @node_id_arr is an array of the ids of migration destination nodes.
 * @weight_arr is an array of the weights for those.  The weights in
 * @weight_arr are for the nodes in @node_id_arr of the same array index.
 */
struct damos_migrate_dests {
	unsigned int *node_id_arr;
	unsigned int *weight_arr;
	size_t nr_dests;
};

/**
 * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
 * @pattern: Access pattern of target regions.
 * @action: &damos_action to be applied to the target regions.
 * @apply_interval_us: The time between applying the @action.
 * @quota: Control the aggressiveness of this scheme.
 * @wmarks: Watermarks for automated (in)activation of this scheme.
 * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
 * @target_nid: Destination node if @action is "migrate_{hot,cold}".
 * @core_filters: Additional set of &struct damos_filter for &action.
 * @ops_filters: List of &struct damos_filter objects handled by the ops layer.
 * @last_applied: Last ops layer-managed entity that @action was applied to.
 * @stat: Statistics of this scheme.
 * @list: List head for siblings.
 *
 * For each @apply_interval_us, DAMON finds regions which fit in the
 * &pattern and applies &action to those.  To avoid consuming too much
 * CPU time or IO resources for the &action, &quota is used.
 *
 * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
 *
 * To do the work only when needed, schemes can be activated for specific
 * system situations using &wmarks.  If all schemes that are registered to the
 * monitoring context are inactive, DAMON stops monitoring as well, and just
 * repeatedly checks the watermarks.
 *
 * @migrate_dests specifies multiple migration target nodes with different
 * weights for migrate_hot or migrate_cold actions.  @target_nid is ignored if
 * this is set.
 *
 * @target_nid is used to set the migration target node for migrate_hot or
 * migrate_cold actions when @migrate_dests is unset.
 *
 * Before applying the &action to a memory region, the &struct damon_operations
 * implementation could check pages of the region and skip the &action to
 * respect &core_filters.
 *
 * The minimum entity that the @action can be applied to depends on the
 * underlying &struct damon_operations.  Since it may not be aligned with the
 * core layer abstraction, namely &struct damon_region, the &struct
 * damon_operations could apply the @action to the same entity multiple times.
 * Large folios that underlie multiple &struct damon_region objects could be
 * such examples.  The &struct damon_operations can use @last_applied to avoid
 * that.  The DAMOS core logic unsets @last_applied when each walk over the
 * regions for applying the scheme is finished.
 *
 * After applying the &action to each region, &stat is updated to reflect the
 * number of regions and the total size of regions that the &action has been
 * applied to.
 */
struct damos {
	struct damos_access_pattern pattern;
	enum damos_action action;
	unsigned long apply_interval_us;
	/* private: internal use only */
	/*
	 * number of sample intervals that should be passed before applying
	 * @action
	 */
	unsigned long next_apply_sis;
	/* informs if ongoing DAMOS walk for this scheme is finished */
	bool walk_completed;
	/*
	 * If the current region in the filtering stage is allowed by core
	 * layer-handled filters.  If true, the operations layer allows it, too.
	 */
	bool core_filters_allowed;
	/* whether to reject core/ops filters unmatched regions */
	bool core_filters_default_reject;
	bool ops_filters_default_reject;
	/* public: */
	struct damos_quota quota;
	struct damos_watermarks wmarks;
	union {
		struct {
			int target_nid;
			struct damos_migrate_dests migrate_dests;
		};
	};
	struct list_head core_filters;
	struct list_head ops_filters;
	void *last_applied;
	struct damos_stat stat;
	struct list_head list;
};
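
/*
 * Example (illustrative sketch): create a scheme that pages out regions that
 * have not been accessed for at least 100 aggregation intervals, and register
 * it to a context.  damon_new_scheme() and damon_add_scheme() are declared
 * later in this header; the thresholds and the wrapper function are made up,
 * and error handling is minimal.
 *
 *	static int example_add_pageout_scheme(struct damon_ctx *ctx)
 *	{
 *		struct damos_access_pattern pattern = {
 *			.min_sz_region = DAMON_MIN_REGION,
 *			.max_sz_region = ULONG_MAX,
 *			.min_nr_accesses = 0,
 *			.max_nr_accesses = 0,
 *			.min_age_region = 100,
 *			.max_age_region = UINT_MAX,
 *		};
 *		struct damos_quota quota = {};
 *		struct damos_watermarks wmarks = {
 *			.metric = DAMOS_WMARK_NONE,
 *		};
 *		struct damos *scheme;
 *
 *		scheme = damon_new_scheme(&pattern, DAMOS_PAGEOUT, 0, &quota,
 *				&wmarks, NUMA_NO_NODE);
 *		if (!scheme)
 *			return -ENOMEM;
 *		damon_add_scheme(ctx, scheme);
 *		return 0;
 *	}
 */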

/**
 * enum damon_ops_id - Identifier for each monitoring operations implementation
 *
 * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces
 * @DAMON_OPS_FVADDR: Monitoring operations for only fixed ranges of virtual
 *	address spaces
 * @DAMON_OPS_PADDR: Monitoring operations for the physical address space
 * @NR_DAMON_OPS: Number of monitoring operations implementations
 */
enum damon_ops_id {
	DAMON_OPS_VADDR,
	DAMON_OPS_FVADDR,
	DAMON_OPS_PADDR,
	NR_DAMON_OPS,
};

/**
 * struct damon_operations - Monitoring operations for given use cases.
 *
 * @id: Identifier of this operations set.
 * @init: Initialize operations-related data structures.
 * @update: Update operations-related data structures.
 * @prepare_access_checks: Prepare next access check of target regions.
 * @check_accesses: Check the accesses to target regions.
 * @get_scheme_score: Get the score of a region for a scheme.
 * @apply_scheme: Apply a DAMON-based operation scheme.
 * @target_valid: Determine if the target is valid.
 * @cleanup_target: Clean up each target before deallocation.
 * @cleanup: Clean up the context.
 *
 * DAMON can be extended for various address spaces and usages.  For this,
 * users should register the low level operations for their target address
 * space and use case via &damon_ctx.ops.  Then, the monitoring thread
 * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
 * the monitoring, @update after each &damon_attrs.ops_update_interval, and
 * @check_accesses, @target_valid and @prepare_access_checks after each
 * &damon_attrs.sample_interval.
 *
 * Each &struct damon_operations instance having a valid @id can be registered
 * via damon_register_ops() and selected by damon_select_ops() later.
 * @init should initialize operations-related data structures.  For example,
 * this could be used to construct proper monitoring target regions and link
 * those to &damon_ctx.adaptive_targets.
 * @update should update the operations-related data structures.  For example,
 * this could be used to update monitoring target regions for the current
 * status.
 * @prepare_access_checks should manipulate the monitoring regions to be
 * prepared for the next access check.
 * @check_accesses should check the accesses to each region that were made
 * after the last preparation and update the number of observed accesses of
 * each region.  It should also return the max number of observed accesses
 * that were made as a result of its update.  The value will be used for the
 * regions adjustment threshold.
 * @get_scheme_score should return the priority score of a region for a scheme
 * as an integer in [0, &DAMOS_MAX_SCORE].
 * @apply_scheme is called from @kdamond when a region for the user-provided
 * DAMON-based operation scheme is found.  It should apply the scheme's action
 * to the region and return the bytes of the region that the action is
 * successfully applied to.  It should also report how many bytes of the
 * region have passed the filters (&struct damos_filter) that are handled by
 * itself.
 * @target_valid should check whether the target is still valid for the
 * monitoring.
 * @cleanup_target is called before the target is deallocated.
 * @cleanup is called from @kdamond just before its termination.
 */
struct damon_operations {
	enum damon_ops_id id;
	void (*init)(struct damon_ctx *context);
	void (*update)(struct damon_ctx *context);
	void (*prepare_access_checks)(struct damon_ctx *context);
	unsigned int (*check_accesses)(struct damon_ctx *context);
	int (*get_scheme_score)(struct damon_ctx *context,
			struct damon_target *t, struct damon_region *r,
			struct damos *scheme);
	unsigned long (*apply_scheme)(struct damon_ctx *context,
			struct damon_target *t, struct damon_region *r,
			struct damos *scheme, unsigned long *sz_filter_passed);
	bool (*target_valid)(struct damon_target *t);
	void (*cleanup_target)(struct damon_target *t);
	void (*cleanup)(struct damon_ctx *context);
};

/*
 * struct damon_call_control - Control damon_call().
 *
 * @fn: Function to be called back.
 * @data: Data that will be passed to @fn.
 * @repeat: Repeat invocations.
 * @return_code: Return code from @fn invocation.
 * @dealloc_on_cancel: De-allocate when canceled.
 *
 * Control damon_call(), which requests a specific kdamond to invoke a given
 * function.  Refer to damon_call() for more details.
 */
struct damon_call_control {
	int (*fn)(void *data);
	void *data;
	bool repeat;
	int return_code;
	bool dealloc_on_cancel;
	/* private: internal use only */
	/* informs if the kdamond finished handling of the request */
	struct completion completion;
	/* informs if the kdamond canceled @fn invocation */
	bool canceled;
	/* List head for siblings. */
	struct list_head list;
};
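
/*
 * Example (illustrative sketch): synchronously run a function in the context
 * of a running kdamond, e.g., to read monitoring results without racing with
 * the monitoring thread.  Only damon_call() (declared later in this header),
 * damon_for_each_target(), damon_nr_regions() and the structure above are
 * real DAMON symbols; the callback and the wrapper are hypothetical.
 *
 *	static int example_read_results(void *data)
 *	{
 *		struct damon_ctx *ctx = data;
 *		struct damon_target *t;
 *
 *		damon_for_each_target(t, ctx)
 *			pr_info("target has %u regions\n", damon_nr_regions(t));
 *		return 0;
 *	}
 *
 *	static int example_call(struct damon_ctx *ctx)
 *	{
 *		struct damon_call_control control = {
 *			.fn = example_read_results,
 *			.data = ctx,
 *		};
 *
 *		return damon_call(ctx, &control);
 *	}
 */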

/**
 * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
 *
 * @access_bp: Access events observation ratio to achieve in bp.
 * @aggrs: Number of aggregations to achieve @access_bp within.
 * @min_sample_us: Minimum resulting sampling interval in microseconds.
 * @max_sample_us: Maximum resulting sampling interval in microseconds.
 *
 * DAMON automatically tunes &damon_attrs->sample_interval and
 * &damon_attrs->aggr_interval, aiming for the ratio in bp (1/10,000) of
 * DAMON-observed access events to the theoretical maximum amount within
 * @aggrs aggregations to be the same as @access_bp.  The logic increases
 * &damon_attrs->aggr_interval and &damon_attrs->sample_interval by the same
 * ratio if the current access events observation ratio is lower than the
 * target for each @aggrs aggregations, and vice versa.
 *
 * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
 */
struct damon_intervals_goal {
	unsigned long access_bp;
	unsigned long aggrs;
	unsigned long min_sample_us;
	unsigned long max_sample_us;
};

/**
 * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
 *
 * @sample_interval: The time between access samplings.
 * @aggr_interval: The time between monitor results aggregations.
 * @ops_update_interval: The time between monitoring operations updates.
 * @intervals_goal: Intervals auto-tuning goal.
 * @min_nr_regions: The minimum number of adaptive monitoring regions.
 * @max_nr_regions: The maximum number of adaptive monitoring regions.
 *
 * For each @sample_interval, DAMON checks whether each region is accessed or
 * not during the last @sample_interval.  If such access is found, DAMON
 * aggregates the information by increasing &damon_region->nr_accesses for
 * @aggr_interval time.  For each @aggr_interval, the count is reset.  DAMON
 * also checks whether the target memory regions need update (e.g., by
 * ``mmap()`` calls from the application, in case of virtual memory
 * monitoring) and applies the changes for each @ops_update_interval.  All
 * time intervals are in micro-seconds.  Please refer to &struct
 * damon_operations and &struct damon_call_control for more detail.
 */
struct damon_attrs {
	unsigned long sample_interval;
	unsigned long aggr_interval;
	unsigned long ops_update_interval;
	struct damon_intervals_goal intervals_goal;
	unsigned long min_nr_regions;
	unsigned long max_nr_regions;
	/* private: internal use only */
	/*
	 * @aggr_interval to @sample_interval ratio.
	 * Core-external components call damon_set_attrs() with &damon_attrs
	 * in which this field is unset.  In that case, damon_set_attrs() sets
	 * this field of the resulting &damon_attrs.  Core-internal components
	 * such as kdamond_tune_intervals() call damon_set_attrs() with
	 * &damon_attrs in which this field is set.  In that case,
	 * damon_set_attrs() just keeps it.
	 */
	unsigned long aggr_samples;
};
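
/*
 * Example (illustrative sketch): set typical monitoring attributes, i.e.,
 * 5 ms sampling, 100 ms aggregation, 1 s operations update, and 10 to 1000
 * regions.  damon_set_attrs() is declared later in this header; the numbers
 * are arbitrary and intervals auto-tuning is left disabled (@aggrs is zero).
 *
 *	static int example_set_attrs(struct damon_ctx *ctx)
 *	{
 *		struct damon_attrs attrs = {
 *			.sample_interval = 5000,
 *			.aggr_interval = 100000,
 *			.ops_update_interval = 1000000,
 *			.min_nr_regions = 10,
 *			.max_nr_regions = 1000,
 *		};
 *
 *		return damon_set_attrs(ctx, &attrs);
 *	}
 */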

/**
 * struct damon_ctx - Represents a context for each monitoring.  This is the
 * main interface that allows users to set the attributes and get the results
 * of the monitoring.
 *
 * @attrs: Monitoring attributes for accuracy/overhead control.
 * @kdamond: Kernel thread that does the monitoring.
 * @kdamond_lock: Mutex for the synchronizations with @kdamond.
 *
 * For each monitoring context, one kernel thread for the monitoring is
 * created.  The pointer to the thread is stored in @kdamond.
 *
 * Once started, the monitoring thread runs until explicitly required to be
 * terminated or every monitoring target is invalid.  The validity of the
 * targets is checked via the &damon_operations.target_valid of @ops.  The
 * termination can also be explicitly requested by calling damon_stop().
 * The thread sets @kdamond to NULL when it terminates.  Therefore, users can
 * know whether the monitoring is ongoing or terminated by reading @kdamond.
 * Reads and writes to @kdamond from outside of the monitoring thread must
 * be protected by @kdamond_lock.
 *
 * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
 * Accesses to other fields must be protected by themselves.
 *
 * @ops: Set of monitoring operations for given use cases.
 * @addr_unit: Scale factor for core to ops address conversion.
 * @min_sz_region: Minimum region size.
 * @adaptive_targets: Head of monitoring targets (&damon_target) list.
 * @schemes: Head of schemes (&damos) list.
 */
struct damon_ctx {
	struct damon_attrs attrs;

	/* private: internal use only */
	/* number of sample intervals that passed since this context started */
	unsigned long passed_sample_intervals;
	/*
	 * number of sample intervals that should be passed before next
	 * aggregation
	 */
	unsigned long next_aggregation_sis;
	/*
	 * number of sample intervals that should be passed before next ops
	 * update
	 */
	unsigned long next_ops_update_sis;
	/*
	 * number of sample intervals that should be passed before next
	 * intervals tuning
	 */
	unsigned long next_intervals_tune_sis;
	/* for waiting until the execution of the kdamond_fn is started */
	struct completion kdamond_started;
	/* for scheme quotas prioritization */
	unsigned long *regions_score_histogram;

	/* lists of &struct damon_call_control */
	struct list_head call_controls;
	struct mutex call_controls_lock;

	struct damos_walk_control *walk_control;
	struct mutex walk_control_lock;

	/* public: */
	struct task_struct *kdamond;
	struct mutex kdamond_lock;

	struct damon_operations ops;
	unsigned long addr_unit;
	unsigned long min_sz_region;

	struct list_head adaptive_targets;
	struct list_head schemes;
};
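
/*
 * Example (illustrative sketch): create a context that monitors the physical
 * address space, then start and stop it.  All called functions are declared
 * later in this header (under CONFIG_DAMON); the wrapper functions are
 * hypothetical, the target/region setup details are simplified, and error
 * handling is minimal.
 *
 *	static struct damon_ctx *example_start_paddr_monitoring(void)
 *	{
 *		struct damon_ctx *ctx;
 *		struct damon_target *target;
 *
 *		ctx = damon_new_ctx();
 *		if (!ctx)
 *			return NULL;
 *		if (damon_select_ops(ctx, DAMON_OPS_PADDR))
 *			goto free_out;
 *		target = damon_new_target();
 *		if (!target)
 *			goto free_out;
 *		damon_add_target(ctx, target);
 *		if (damon_start(&ctx, 1, true))
 *			goto free_out;
 *		return ctx;
 *	free_out:
 *		damon_destroy_ctx(ctx);
 *		return NULL;
 *	}
 *
 *	static void example_stop(struct damon_ctx *ctx)
 *	{
 *		if (damon_is_running(ctx))
 *			damon_stop(&ctx, 1);
 *		damon_destroy_ctx(ctx);
 *	}
 */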

static inline struct damon_region *damon_next_region(struct damon_region *r)
{
	return container_of(r->list.next, struct damon_region, list);
}

static inline struct damon_region *damon_prev_region(struct damon_region *r)
{
	return container_of(r->list.prev, struct damon_region, list);
}

static inline struct damon_region *damon_last_region(struct damon_target *t)
{
	return list_last_entry(&t->regions_list, struct damon_region, list);
}

static inline struct damon_region *damon_first_region(struct damon_target *t)
{
	return list_first_entry(&t->regions_list, struct damon_region, list);
}

static inline unsigned long damon_sz_region(struct damon_region *r)
{
	return r->ar.end - r->ar.start;
}

#define damon_for_each_region(r, t) \
	list_for_each_entry(r, &t->regions_list, list)

#define damon_for_each_region_from(r, t) \
	list_for_each_entry_from(r, &t->regions_list, list)

#define damon_for_each_region_safe(r, next, t) \
	list_for_each_entry_safe(r, next, &t->regions_list, list)

#define damon_for_each_target(t, ctx) \
	list_for_each_entry(t, &(ctx)->adaptive_targets, list)

#define damon_for_each_target_safe(t, next, ctx) \
	list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)

#define damon_for_each_scheme(s, ctx) \
	list_for_each_entry(s, &(ctx)->schemes, list)

#define damon_for_each_scheme_safe(s, next, ctx) \
	list_for_each_entry_safe(s, next, &(ctx)->schemes, list)

#define damos_for_each_quota_goal(goal, quota) \
	list_for_each_entry(goal, &(quota)->goals, list)

#define damos_for_each_quota_goal_safe(goal, next, quota) \
	list_for_each_entry_safe(goal, next, &(quota)->goals, list)

#define damos_for_each_core_filter(f, scheme) \
	list_for_each_entry(f, &(scheme)->core_filters, list)

#define damos_for_each_core_filter_safe(f, next, scheme) \
	list_for_each_entry_safe(f, next, &(scheme)->core_filters, list)

#define damos_for_each_ops_filter(f, scheme) \
	list_for_each_entry(f, &(scheme)->ops_filters, list)

#define damos_for_each_ops_filter_safe(f, next, scheme) \
	list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
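
/*
 * Example (illustrative sketch): compute the total size of the monitoring
 * target regions of a target using the iteration helpers above.  The function
 * name is hypothetical; callers are responsible for synchronization against
 * the kdamond, e.g., via damon_call().
 *
 *	static unsigned long example_total_regions_sz(struct damon_target *t)
 *	{
 *		struct damon_region *r;
 *		unsigned long sz = 0;
 *
 *		damon_for_each_region(r, t)
 *			sz += damon_sz_region(r);
 *		return sz;
 *	}
 */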

#ifdef CONFIG_DAMON

struct damon_region *damon_new_region(unsigned long start, unsigned long end);

/*
 * Add a region between two other regions
 */
static inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next,
		struct damon_target *t)
{
	__list_add(&r->list, &prev->list, &next->list);
	t->nr_regions++;
}

void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
		unsigned int nr_ranges, unsigned long min_sz_region);
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
		struct damon_attrs *attrs);

struct damos_filter *damos_new_filter(enum damos_filter_type type,
		bool matching, bool allow);
void damos_add_filter(struct damos *s, struct damos_filter *f);
bool damos_filter_for_ops(enum damos_filter_type type);
void damos_destroy_filter(struct damos_filter *f);

struct damos_quota_goal *damos_new_quota_goal(
		enum damos_quota_goal_metric metric,
		unsigned long target_value);
void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
void damos_destroy_quota_goal(struct damos_quota_goal *goal);

struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
		enum damos_action action,
		unsigned long apply_interval_us,
		struct damos_quota *quota,
		struct damos_watermarks *wmarks,
		int target_nid);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src);

struct damon_target *damon_new_target(void);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
unsigned int damon_nr_regions(struct damon_target *t);

struct damon_ctx *damon_new_ctx(void);
void damon_destroy_ctx(struct damon_ctx *ctx);
int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
void damon_set_schemes(struct damon_ctx *ctx,
		struct damos **schemes, ssize_t nr_schemes);
int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx);
int damon_nr_running_ctxs(void);
bool damon_is_registered_ops(enum damon_ops_id id);
int damon_register_ops(struct damon_operations *ops);
int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);

static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
{
	return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
}

static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
{
	/* {aggr,sample}_interval are unsigned long, hence could overflow */
	return min(attrs->aggr_interval / attrs->sample_interval,
			(unsigned long)UINT_MAX);
}

bool damon_initialized(void);
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
bool damon_is_running(struct damon_ctx *ctx);

int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);

int damon_set_region_biggest_system_ram_default(struct damon_target *t,
		unsigned long *start, unsigned long *end,
		unsigned long min_sz_region);

#endif	/* CONFIG_DAMON */

#endif	/* _DAMON_H_ */