xref: /linux/include/linux/damon.h (revision beace86e61e465dba204a268ab3f3377153a4973)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * DAMON api
4  *
5  * Author: SeongJae Park <sj@kernel.org>
6  */
7 
8 #ifndef _DAMON_H_
9 #define _DAMON_H_
10 
11 #include <linux/memcontrol.h>
12 #include <linux/mutex.h>
13 #include <linux/time64.h>
14 #include <linux/types.h>
15 #include <linux/random.h>
16 
17 /* Minimal region size.  Every damon_region is aligned by this. */
18 #define DAMON_MIN_REGION	PAGE_SIZE
19 /* Max priority score for DAMON-based operation schemes */
20 #define DAMOS_MAX_SCORE		(99)
21 
22 /* Get a random number in [l, r) */
23 static inline unsigned long damon_rand(unsigned long l, unsigned long r)
24 {
25 	return l + get_random_u32_below(r - l);
26 }
27 
28 /**
29  * struct damon_addr_range - Represents an address region of [@start, @end).
30  * @start:	Start address of the region (inclusive).
31  * @end:	End address of the region (exclusive).
32  */
33 struct damon_addr_range {
34 	unsigned long start;
35 	unsigned long end;
36 };
37 
38 /**
39  * struct damon_size_range - Represents size for filter to operate on [@min, @max].
40  * @min:	Min size (inclusive).
41  * @max:	Max size (inclusive).
42  */
43 struct damon_size_range {
44 	unsigned long min;
45 	unsigned long max;
46 };
47 
48 /**
49  * struct damon_region - Represents a monitoring target region.
50  * @ar:			The address range of the region.
51  * @sampling_addr:	Address of the sample for the next access check.
52  * @nr_accesses:	Access frequency of this region.
53  * @nr_accesses_bp:	@nr_accesses in basis point (0.01%) that updated for
54  *			each sampling interval.
55  * @list:		List head for siblings.
56  * @age:		Age of this region.
57  *
58  * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and be
59  * increased for every &damon_attrs->sample_interval if an access to the region
60  * during the last sampling interval is found.  The update of this field should
61  * not be done with direct access but with the helper function,
62  * damon_update_region_access_rate().
63  *
64  * @nr_accesses_bp is another representation of @nr_accesses in basis point
65  * (1 in 10,000) that updated for every &damon_attrs->sample_interval in a
66  * manner similar to moving sum.  By the algorithm, this value becomes
67  * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval.  This can
68  * be used when the aggregation interval is too huge and therefore cannot wait
69  * for it before getting the access monitoring results.
70  *
71  * @age is initially zero, increased for each aggregation interval, and reset
72  * to zero again if the access frequency is significantly changed.  If two
73  * regions are merged into a new region, both @nr_accesses and @age of the new
74  * region are set as region size-weighted average of those of the two regions.
75  */
76 struct damon_region {
77 	struct damon_addr_range ar;
78 	unsigned long sampling_addr;
79 	unsigned int nr_accesses;
80 	unsigned int nr_accesses_bp;
81 	struct list_head list;
82 
83 	unsigned int age;
84 /* private: Internal value for age calculation. */
85 	unsigned int last_nr_accesses;
86 };
87 
88 /**
89  * struct damon_target - Represents a monitoring target.
90  * @pid:		The PID of the virtual address space to monitor.
91  * @nr_regions:		Number of monitoring target regions of this target.
92  * @regions_list:	Head of the monitoring target regions of this target.
93  * @list:		List head for siblings.
94  *
95  * Each monitoring context could have multiple targets.  For example, a context
96  * for virtual memory address spaces could have multiple target processes.  The
97  * @pid should be set for appropriate &struct damon_operations including the
98  * virtual address spaces monitoring operations.
99  */
100 struct damon_target {
101 	struct pid *pid;
102 	unsigned int nr_regions;
103 	struct list_head regions_list;
104 	struct list_head list;
105 };
106 
107 /**
108  * enum damos_action - Represents an action of a Data Access Monitoring-based
109  * Operation Scheme.
110  *
111  * @DAMOS_WILLNEED:	Call ``madvise()`` for the region with MADV_WILLNEED.
112  * @DAMOS_COLD:		Call ``madvise()`` for the region with MADV_COLD.
113  * @DAMOS_PAGEOUT:	Call ``madvise()`` for the region with MADV_PAGEOUT.
114  * @DAMOS_HUGEPAGE:	Call ``madvise()`` for the region with MADV_HUGEPAGE.
115  * @DAMOS_NOHUGEPAGE:	Call ``madvise()`` for the region with MADV_NOHUGEPAGE.
116  * @DAMOS_LRU_PRIO:	Prioritize the region on its LRU lists.
117  * @DAMOS_LRU_DEPRIO:	Deprioritize the region on its LRU lists.
118  * @DAMOS_MIGRATE_HOT:  Migrate the regions prioritizing warmer regions.
119  * @DAMOS_MIGRATE_COLD:	Migrate the regions prioritizing colder regions.
120  * @DAMOS_STAT:		Do nothing but count the stat.
121  * @NR_DAMOS_ACTIONS:	Total number of DAMOS actions
122  *
123  * The support of each action is up to running &struct damon_operations.
124  * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR supports all actions except
125  * &enum DAMOS_LRU_PRIO and &enum DAMOS_LRU_DEPRIO.  &enum DAMON_OPS_PADDR
126  * supports only &enum DAMOS_PAGEOUT, &enum DAMOS_LRU_PRIO, &enum
127  * DAMOS_LRU_DEPRIO, and &DAMOS_STAT.
128  */
129 enum damos_action {
130 	DAMOS_WILLNEED,
131 	DAMOS_COLD,
132 	DAMOS_PAGEOUT,
133 	DAMOS_HUGEPAGE,
134 	DAMOS_NOHUGEPAGE,
135 	DAMOS_LRU_PRIO,
136 	DAMOS_LRU_DEPRIO,
137 	DAMOS_MIGRATE_HOT,
138 	DAMOS_MIGRATE_COLD,
139 	DAMOS_STAT,		/* Do nothing but only record the stat */
140 	NR_DAMOS_ACTIONS,
141 };
142 
143 /**
144  * enum damos_quota_goal_metric - Represents the metric to be used as the goal
145  *
146  * @DAMOS_QUOTA_USER_INPUT:	User-input value.
147  * @DAMOS_QUOTA_SOME_MEM_PSI_US:	System level some memory PSI in us.
148  * @DAMOS_QUOTA_NODE_MEM_USED_BP:	MemUsed ratio of a node.
149  * @DAMOS_QUOTA_NODE_MEM_FREE_BP:	MemFree ratio of a node.
150  * @NR_DAMOS_QUOTA_GOAL_METRICS:	Number of DAMOS quota goal metrics.
151  *
152  * Metrics equal to larger than @NR_DAMOS_QUOTA_GOAL_METRICS are unsupported.
153  */
154 enum damos_quota_goal_metric {
155 	DAMOS_QUOTA_USER_INPUT,
156 	DAMOS_QUOTA_SOME_MEM_PSI_US,
157 	DAMOS_QUOTA_NODE_MEM_USED_BP,
158 	DAMOS_QUOTA_NODE_MEM_FREE_BP,
159 	NR_DAMOS_QUOTA_GOAL_METRICS,
160 };
161 
162 /**
163  * struct damos_quota_goal - DAMOS scheme quota auto-tuning goal.
164  * @metric:		Metric to be used for representing the goal.
165  * @target_value:	Target value of @metric to achieve with the tuning.
166  * @current_value:	Current value of @metric.
167  * @last_psi_total:	Last measured total PSI
168  * @nid:		Node id.
169  * @list:		List head for siblings.
170  *
171  * Data structure for getting the current score of the quota tuning goal.  The
172  * score is calculated by how close @current_value and @target_value are.  Then
173  * the score is entered to DAMON's internal feedback loop mechanism to get the
174  * auto-tuned quota.
175  *
176  * If @metric is DAMOS_QUOTA_USER_INPUT, @current_value should be manually
177  * entered by the user, probably inside the kdamond callbacks.  Otherwise,
178  * DAMON sets @current_value with self-measured value of @metric.
179  */
180 struct damos_quota_goal {
181 	enum damos_quota_goal_metric metric;
182 	unsigned long target_value;
183 	unsigned long current_value;
184 	/* metric-dependent fields */
185 	union {
186 		u64 last_psi_total;
187 		int nid;
188 	};
189 	struct list_head list;
190 };
191 
192 /**
193  * struct damos_quota - Controls the aggressiveness of the given scheme.
194  * @reset_interval:	Charge reset interval in milliseconds.
195  * @ms:			Maximum milliseconds that the scheme can use.
196  * @sz:			Maximum bytes of memory that the action can be applied.
197  * @goals:		Head of quota tuning goals (&damos_quota_goal) list.
198  * @esz:		Effective size quota in bytes.
199  *
200  * @weight_sz:		Weight of the region's size for prioritization.
201  * @weight_nr_accesses:	Weight of the region's nr_accesses for prioritization.
202  * @weight_age:		Weight of the region's age for prioritization.
203  *
204  * To avoid consuming too much CPU time or IO resources for applying the
205  * &struct damos->action to large memory, DAMON allows users to set time and/or
206  * size quotas.  The quotas can be set by writing non-zero values to &ms and
207  * &sz, respectively.  If the time quota is set, DAMON tries to use only up to
208  * &ms milliseconds within &reset_interval for applying the action.  If the
209  * size quota is set, DAMON tries to apply the action only up to &sz bytes
210  * within &reset_interval.
211  *
212  * To convince the different types of quotas and goals, DAMON internally
213  * converts those into one single size quota called "effective quota".  DAMON
214  * internally uses it as the only one real quota.  The conversion is made as
215  * follows.
216  *
217  * The time quota is transformed to a size quota using estimated throughput of
218  * the scheme's action.  DAMON then compares it against &sz and uses smaller
219  * one as the effective quota.
220  *
221  * If @goals is not empty, DAMON calculates yet another size quota based on the
222  * goals using its internal feedback loop algorithm, for every @reset_interval.
223  * Then, if the new size quota is smaller than the effective quota, it uses the
224  * new size quota as the effective quota.
225  *
226  * The resulting effective size quota in bytes is set to @esz.
227  *
228  * For selecting regions within the quota, DAMON prioritizes current scheme's
229  * target memory regions using the &struct damon_operations->get_scheme_score.
230  * You could customize the prioritization logic by setting &weight_sz,
231  * &weight_nr_accesses, and &weight_age, because monitoring operations are
232  * encouraged to respect those.
233  */
234 struct damos_quota {
235 	unsigned long reset_interval;
236 	unsigned long ms;
237 	unsigned long sz;
238 	struct list_head goals;
239 	unsigned long esz;
240 
241 	unsigned int weight_sz;
242 	unsigned int weight_nr_accesses;
243 	unsigned int weight_age;
244 
245 /* private: */
246 	/* For throughput estimation */
247 	unsigned long total_charged_sz;
248 	unsigned long total_charged_ns;
249 
250 	/* For charging the quota */
251 	unsigned long charged_sz;
252 	unsigned long charged_from;
253 	struct damon_target *charge_target_from;
254 	unsigned long charge_addr_from;
255 
256 	/* For prioritization */
257 	unsigned int min_score;
258 
259 	/* For feedback loop */
260 	unsigned long esz_bp;
261 };
262 
263 /**
264  * enum damos_wmark_metric - Represents the watermark metric.
265  *
266  * @DAMOS_WMARK_NONE:		Ignore the watermarks of the given scheme.
267  * @DAMOS_WMARK_FREE_MEM_RATE:	Free memory rate of the system in [0,1000].
268  * @NR_DAMOS_WMARK_METRICS:	Total number of DAMOS watermark metrics
269  */
270 enum damos_wmark_metric {
271 	DAMOS_WMARK_NONE,
272 	DAMOS_WMARK_FREE_MEM_RATE,
273 	NR_DAMOS_WMARK_METRICS,
274 };
275 
276 /**
277  * struct damos_watermarks - Controls when a given scheme should be activated.
278  * @metric:	Metric for the watermarks.
279  * @interval:	Watermarks check time interval in microseconds.
280  * @high:	High watermark.
281  * @mid:	Middle watermark.
282  * @low:	Low watermark.
283  *
284  * If &metric is &DAMOS_WMARK_NONE, the scheme is always active.  Being active
285  * means DAMON does monitoring and applying the action of the scheme to
286  * appropriate memory regions.  Else, DAMON checks &metric of the system for at
287  * least every &interval microseconds and works as below.
288  *
289  * If &metric is higher than &high, the scheme is inactivated.  If &metric is
290  * between &mid and &low, the scheme is activated.  If &metric is lower than
291  * &low, the scheme is inactivated.
292  */
293 struct damos_watermarks {
294 	enum damos_wmark_metric metric;
295 	unsigned long interval;
296 	unsigned long high;
297 	unsigned long mid;
298 	unsigned long low;
299 
300 /* private: */
301 	bool activated;
302 };
303 
304 /**
305  * struct damos_stat - Statistics on a given scheme.
306  * @nr_tried:	Total number of regions that the scheme is tried to be applied.
307  * @sz_tried:	Total size of regions that the scheme is tried to be applied.
308  * @nr_applied:	Total number of regions that the scheme is applied.
309  * @sz_applied:	Total size of regions that the scheme is applied.
310  * @sz_ops_filter_passed:
311  *		Total bytes that passed ops layer-handled DAMOS filters.
312  * @qt_exceeds: Total number of times the quota of the scheme has exceeded.
313  *
314  * "Tried an action to a region" in this context means the DAMOS core logic
315  * determined the region as eligible to apply the action.  The access pattern
316  * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks
317  * (&struct damos_watermarks) and filters (&struct damos_filter) that handled
318  * on core logic can affect this.  The core logic asks the operation set
319  * (&struct damon_operations) to apply the action to the region.
320  *
321  * "Applied an action to a region" in this context means the operation set
322  * (&struct damon_operations) successfully applied the action to the region, at
323  * least to a part of the region.  The filters (&struct damos_filter) that
324  * handled on operation set layer and type of the action and pages of the
325  * region can affect this.  For example, if a filter is set to exclude
326  * anonymous pages and the region has only anonymous pages, the region will be
327  * failed at applying the action.  If the action is &DAMOS_PAGEOUT and all
328  * pages of the region are already paged out, the region will be failed at
329  * applying the action.
330  */
331 struct damos_stat {
332 	unsigned long nr_tried;
333 	unsigned long sz_tried;
334 	unsigned long nr_applied;
335 	unsigned long sz_applied;
336 	unsigned long sz_ops_filter_passed;
337 	unsigned long qt_exceeds;
338 };
339 
340 /**
341  * enum damos_filter_type - Type of memory for &struct damos_filter
342  * @DAMOS_FILTER_TYPE_ANON:	Anonymous pages.
343  * @DAMOS_FILTER_TYPE_ACTIVE:	Active pages.
344  * @DAMOS_FILTER_TYPE_MEMCG:	Specific memcg's pages.
345  * @DAMOS_FILTER_TYPE_YOUNG:	Recently accessed pages.
346  * @DAMOS_FILTER_TYPE_HUGEPAGE_SIZE:	Page is part of a hugepage.
347  * @DAMOS_FILTER_TYPE_UNMAPPED:	Unmapped pages.
348  * @DAMOS_FILTER_TYPE_ADDR:	Address range.
349  * @DAMOS_FILTER_TYPE_TARGET:	Data Access Monitoring target.
350  * @NR_DAMOS_FILTER_TYPES:	Number of filter types.
351  *
352  * The anon pages type and memcg type filters are handled by underlying
353  * &struct damon_operations as a part of scheme action trying, and therefore
354  * accounted as 'tried'.  In contrast, other types are handled by core layer
355  * before trying of the action and therefore not accounted as 'tried'.
356  *
357  * The support of the filters that handled by &struct damon_operations depend
358  * on the running &struct damon_operations.
359  * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
360  * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
361  * the two types.
362  */
363 enum damos_filter_type {
364 	DAMOS_FILTER_TYPE_ANON,
365 	DAMOS_FILTER_TYPE_ACTIVE,
366 	DAMOS_FILTER_TYPE_MEMCG,
367 	DAMOS_FILTER_TYPE_YOUNG,
368 	DAMOS_FILTER_TYPE_HUGEPAGE_SIZE,
369 	DAMOS_FILTER_TYPE_UNMAPPED,
370 	DAMOS_FILTER_TYPE_ADDR,
371 	DAMOS_FILTER_TYPE_TARGET,
372 	NR_DAMOS_FILTER_TYPES,
373 };
374 
375 /**
376  * struct damos_filter - DAMOS action target memory filter.
377  * @type:	Type of the target memory.
378  * @matching:	Whether this is for @type-matching memory.
379  * @allow:	Whether to include or exclude the @matching memory.
380  * @memcg_id:	Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
381  * @addr_range:	Address range if @type is DAMOS_FILTER_TYPE_ADDR.
382  * @target_idx:	Index of the &struct damon_target of
383  *		&damon_ctx->adaptive_targets if @type is
384  *		DAMOS_FILTER_TYPE_TARGET.
385  * @sz_range:	Size range if @type is DAMOS_FILTER_TYPE_HUGEPAGE_SIZE.
386  * @list:	List head for siblings.
387  *
388  * Before applying the &damos->action to a memory region, DAMOS checks if each
389  * byte of the region matches to this given condition and avoid applying the
390  * action if so.  Support of each filter type depends on the running &struct
391  * damon_operations and the type.  Refer to &enum damos_filter_type for more
392  * details.
393  */
394 struct damos_filter {
395 	enum damos_filter_type type;
396 	bool matching;
397 	bool allow;
398 	union {
399 		unsigned short memcg_id;
400 		struct damon_addr_range addr_range;
401 		int target_idx;
402 		struct damon_size_range sz_range;
403 	};
404 	struct list_head list;
405 };
406 
407 struct damon_ctx;
408 struct damos;
409 
410 /**
411  * struct damos_walk_control - Control damos_walk().
412  *
413  * @walk_fn:	Function to be called back for each region.
414  * @data:	Data that will be passed to walk functions.
415  *
416  * Control damos_walk(), which requests specific kdamond to invoke the given
417  * function to each region that eligible to apply actions of the kdamond's
418  * schemes.  Refer to damos_walk() for more details.
419  */
420 struct damos_walk_control {
421 	void (*walk_fn)(void *data, struct damon_ctx *ctx,
422 			struct damon_target *t, struct damon_region *r,
423 			struct damos *s, unsigned long sz_filter_passed);
424 	void *data;
425 /* private: internal use only */
426 	/* informs if the kdamond finished handling of the walk request */
427 	struct completion completion;
428 	/* informs if the walk is canceled. */
429 	bool canceled;
430 };
431 
432 /**
433  * struct damos_access_pattern - Target access pattern of the given scheme.
434  * @min_sz_region:	Minimum size of target regions.
435  * @max_sz_region:	Maximum size of target regions.
436  * @min_nr_accesses:	Minimum ``->nr_accesses`` of target regions.
437  * @max_nr_accesses:	Maximum ``->nr_accesses`` of target regions.
438  * @min_age_region:	Minimum age of target regions.
439  * @max_age_region:	Maximum age of target regions.
440  */
441 struct damos_access_pattern {
442 	unsigned long min_sz_region;
443 	unsigned long max_sz_region;
444 	unsigned int min_nr_accesses;
445 	unsigned int max_nr_accesses;
446 	unsigned int min_age_region;
447 	unsigned int max_age_region;
448 };
449 
450 /**
451  * struct damos_migrate_dests - Migration destination nodes and their weights.
452  * @node_id_arr:	Array of migration destination node ids.
453  * @weight_arr:		Array of migration weights for @node_id_arr.
454  * @nr_dests:		Length of the @node_id_arr and @weight_arr arrays.
455  *
456  * @node_id_arr is an array of the ids of migration destination nodes.
457  * @weight_arr is an array of the weights for those.  The weights in
458  * @weight_arr are for nodes in @node_id_arr of same array index.
459  */
460 struct damos_migrate_dests {
461 	unsigned int *node_id_arr;
462 	unsigned int *weight_arr;
463 	size_t nr_dests;
464 };
465 
466 /**
467  * struct damos - Represents a Data Access Monitoring-based Operation Scheme.
468  * @pattern:		Access pattern of target regions.
469  * @action:		&damos_action to be applied to the target regions.
470  * @apply_interval_us:	The time between applying the @action.
471  * @quota:		Control the aggressiveness of this scheme.
472  * @wmarks:		Watermarks for automated (in)activation of this scheme.
473  * @migrate_dests:	Destination nodes if @action is "migrate_{hot,cold}".
474  * @target_nid:		Destination node if @action is "migrate_{hot,cold}".
475  * @filters:		Additional set of &struct damos_filter for &action.
476  * @ops_filters:	ops layer handling &struct damos_filter objects list.
477  * @last_applied:	Last @action applied ops-managing entity.
478  * @stat:		Statistics of this scheme.
479  * @list:		List head for siblings.
480  *
481  * For each @apply_interval_us, DAMON finds regions which fit in the
482  * &pattern and applies &action to those. To avoid consuming too much
483  * CPU time or IO resources for the &action, &quota is used.
484  *
485  * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
486  *
487  * To do the work only when needed, schemes can be activated for specific
488  * system situations using &wmarks.  If all schemes that registered to the
489  * monitoring context are inactive, DAMON stops monitoring either, and just
490  * repeatedly checks the watermarks.
491  *
492  * @migrate_dests specifies multiple migration target nodes with different
493  * weights for migrate_hot or migrate_cold actions.  @target_nid is ignored if
494  * this is set.
495  *
496  * @target_nid is used to set the migration target node for migrate_hot or
497  * migrate_cold actions, and @migrate_dests is unset.
498  *
499  * Before applying the &action to a memory region, &struct damon_operations
500  * implementation could check pages of the region and skip &action to respect
501  * &filters
502  *
503  * The minimum entity that @action can be applied depends on the underlying
504  * &struct damon_operations.  Since it may not be aligned with the core layer
505  * abstract, namely &struct damon_region, &struct damon_operations could apply
506  * @action to same entity multiple times.  Large folios that underlying on
507  * multiple &struct damon region objects could be such examples.  The &struct
508  * damon_operations can use @last_applied to avoid that.  DAMOS core logic
509  * unsets @last_applied when each regions walking for applying the scheme is
510  * finished.
511  *
512  * After applying the &action to each region, &stat_count and &stat_sz is
513  * updated to reflect the number of regions and total size of regions that the
514  * &action is applied.
515  */
516 struct damos {
517 	struct damos_access_pattern pattern;
518 	enum damos_action action;
519 	unsigned long apply_interval_us;
520 /* private: internal use only */
521 	/*
522 	 * number of sample intervals that should be passed before applying
523 	 * @action
524 	 */
525 	unsigned long next_apply_sis;
526 	/* informs if ongoing DAMOS walk for this scheme is finished */
527 	bool walk_completed;
528 	/*
529 	 * If the current region in the filtering stage is allowed by core
530 	 * layer-handled filters.  If true, operations layer allows it, too.
531 	 */
532 	bool core_filters_allowed;
533 	/* whether to reject core/ops filters umatched regions */
534 	bool core_filters_default_reject;
535 	bool ops_filters_default_reject;
536 /* public: */
537 	struct damos_quota quota;
538 	struct damos_watermarks wmarks;
539 	union {
540 		struct {
541 			int target_nid;
542 			struct damos_migrate_dests migrate_dests;
543 		};
544 	};
545 	struct list_head filters;
546 	struct list_head ops_filters;
547 	void *last_applied;
548 	struct damos_stat stat;
549 	struct list_head list;
550 };
551 
552 /**
553  * enum damon_ops_id - Identifier for each monitoring operations implementation
554  *
555  * @DAMON_OPS_VADDR:	Monitoring operations for virtual address spaces
556  * @DAMON_OPS_FVADDR:	Monitoring operations for only fixed ranges of virtual
557  *			address spaces
558  * @DAMON_OPS_PADDR:	Monitoring operations for the physical address space
559  * @NR_DAMON_OPS:	Number of monitoring operations implementations
560  */
561 enum damon_ops_id {
562 	DAMON_OPS_VADDR,
563 	DAMON_OPS_FVADDR,
564 	DAMON_OPS_PADDR,
565 	NR_DAMON_OPS,
566 };
567 
568 /**
569  * struct damon_operations - Monitoring operations for given use cases.
570  *
571  * @id:				Identifier of this operations set.
572  * @init:			Initialize operations-related data structures.
573  * @update:			Update operations-related data structures.
574  * @prepare_access_checks:	Prepare next access check of target regions.
575  * @check_accesses:		Check the accesses to target regions.
576  * @get_scheme_score:		Get the score of a region for a scheme.
577  * @apply_scheme:		Apply a DAMON-based operation scheme.
578  * @target_valid:		Determine if the target is valid.
579  * @cleanup_target:		Clean up each target before deallocation.
580  * @cleanup:			Clean up the context.
581  *
582  * DAMON can be extended for various address spaces and usages.  For this,
583  * users should register the low level operations for their target address
584  * space and usecase via the &damon_ctx.ops.  Then, the monitoring thread
585  * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting
586  * the monitoring, @update after each &damon_attrs.ops_update_interval, and
587  * @check_accesses, @target_valid and @prepare_access_checks after each
588  * &damon_attrs.sample_interval.
589  *
590  * Each &struct damon_operations instance having valid @id can be registered
591  * via damon_register_ops() and selected by damon_select_ops() later.
592  * @init should initialize operations-related data structures.  For example,
593  * this could be used to construct proper monitoring target regions and link
594  * those to @damon_ctx.adaptive_targets.
595  * @update should update the operations-related data structures.  For example,
596  * this could be used to update monitoring target regions for current status.
597  * @prepare_access_checks should manipulate the monitoring regions to be
598  * prepared for the next access check.
599  * @check_accesses should check the accesses to each region that made after the
600  * last preparation and update the number of observed accesses of each region.
601  * It should also return max number of observed accesses that made as a result
602  * of its update.  The value will be used for regions adjustment threshold.
603  * @get_scheme_score should return the priority score of a region for a scheme
604  * as an integer in [0, &DAMOS_MAX_SCORE].
605  * @apply_scheme is called from @kdamond when a region for user provided
606  * DAMON-based operation scheme is found.  It should apply the scheme's action
607  * to the region and return bytes of the region that the action is successfully
608  * applied.  It should also report how many bytes of the region has passed
609  * filters (&struct damos_filter) that handled by itself.
610  * @target_valid should check whether the target is still valid for the
611  * monitoring.
612  * @cleanup_target is called before the target will be deallocated.
613  * @cleanup is called from @kdamond just before its termination.
614  */
615 struct damon_operations {
616 	enum damon_ops_id id;
617 	void (*init)(struct damon_ctx *context);
618 	void (*update)(struct damon_ctx *context);
619 	void (*prepare_access_checks)(struct damon_ctx *context);
620 	unsigned int (*check_accesses)(struct damon_ctx *context);
621 	int (*get_scheme_score)(struct damon_ctx *context,
622 			struct damon_target *t, struct damon_region *r,
623 			struct damos *scheme);
624 	unsigned long (*apply_scheme)(struct damon_ctx *context,
625 			struct damon_target *t, struct damon_region *r,
626 			struct damos *scheme, unsigned long *sz_filter_passed);
627 	bool (*target_valid)(struct damon_target *t);
628 	void (*cleanup_target)(struct damon_target *t);
629 	void (*cleanup)(struct damon_ctx *context);
630 };
631 
632 /*
633  * struct damon_call_control - Control damon_call().
634  *
635  * @fn:			Function to be called back.
636  * @data:		Data that will be passed to @fn.
637  * @repeat:		Repeat invocations.
638  * @return_code:	Return code from @fn invocation.
639  *
640  * Control damon_call(), which requests specific kdamond to invoke a given
641  * function.  Refer to damon_call() for more details.
642  */
643 struct damon_call_control {
644 	int (*fn)(void *data);
645 	void *data;
646 	bool repeat;
647 	int return_code;
648 /* private: internal use only */
649 	/* informs if the kdamond finished handling of the request */
650 	struct completion completion;
651 	/* informs if the kdamond canceled @fn infocation */
652 	bool canceled;
653 	/* List head for siblings. */
654 	struct list_head list;
655 };
656 
657 /**
658  * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
659  *
660  * @access_bp:		Access events observation ratio to achieve in bp.
661  * @aggrs:		Number of aggregations to achieve @access_bp within.
662  * @min_sample_us:	Minimum resulting sampling interval in microseconds.
663  * @max_sample_us:	Maximum resulting sampling interval in microseconds.
664  *
665  * DAMON automatically tunes &damon_attrs->sample_interval and
666  * &damon_attrs->aggr_interval aiming the ratio in bp (1/10,000) of
667  * DAMON-observed access events to theoretical maximum amount within @aggrs
668  * aggregations be same to @access_bp.  The logic increases
669  * &damon_attrs->aggr_interval and &damon_attrs->sampling_interval in same
670  * ratio if the current access events observation ratio is lower than the
671  * target for each @aggrs aggregations, and vice versa.
672  *
673  * If @aggrs is zero, the tuning is disabled and hence this struct is ignored.
674  */
675 struct damon_intervals_goal {
676 	unsigned long access_bp;
677 	unsigned long aggrs;
678 	unsigned long min_sample_us;
679 	unsigned long max_sample_us;
680 };
681 
682 /**
683  * struct damon_attrs - Monitoring attributes for accuracy/overhead control.
684  *
685  * @sample_interval:		The time between access samplings.
686  * @aggr_interval:		The time between monitor results aggregations.
687  * @ops_update_interval:	The time between monitoring operations updates.
688  * @intervals_goal:		Intervals auto-tuning goal.
689  * @min_nr_regions:		The minimum number of adaptive monitoring
690  *				regions.
691  * @max_nr_regions:		The maximum number of adaptive monitoring
692  *				regions.
693  *
694  * For each @sample_interval, DAMON checks whether each region is accessed or
695  * not during the last @sample_interval.  If such access is found, DAMON
696  * aggregates the information by increasing &damon_region->nr_accesses for
697  * @aggr_interval time.  For each @aggr_interval, the count is reset.  DAMON
698  * also checks whether the target memory regions need update (e.g., by
699  * ``mmap()`` calls from the application, in case of virtual memory monitoring)
700  * and applies the changes for each @ops_update_interval.  All time intervals
701  * are in micro-seconds.  Please refer to &struct damon_operations and &struct
702  * damon_call_control for more detail.
703  */
704 struct damon_attrs {
705 	unsigned long sample_interval;
706 	unsigned long aggr_interval;
707 	unsigned long ops_update_interval;
708 	struct damon_intervals_goal intervals_goal;
709 	unsigned long min_nr_regions;
710 	unsigned long max_nr_regions;
711 /* private: internal use only */
712 	/*
713 	 * @aggr_interval to @sample_interval ratio.
714 	 * Core-external components call damon_set_attrs() with &damon_attrs
715 	 * that this field is unset.  In the case, damon_set_attrs() sets this
716 	 * field of resulting &damon_attrs.  Core-internal components such as
717 	 * kdamond_tune_intervals() calls damon_set_attrs() with &damon_attrs
718 	 * that this field is set.  In the case, damon_set_attrs() just keep
719 	 * it.
720 	 */
721 	unsigned long aggr_samples;
722 };
723 
724 /**
725  * struct damon_ctx - Represents a context for each monitoring.  This is the
726  * main interface that allows users to set the attributes and get the results
727  * of the monitoring.
728  *
729  * @attrs:		Monitoring attributes for accuracy/overhead control.
730  * @kdamond:		Kernel thread who does the monitoring.
731  * @kdamond_lock:	Mutex for the synchronizations with @kdamond.
732  *
733  * For each monitoring context, one kernel thread for the monitoring is
734  * created.  The pointer to the thread is stored in @kdamond.
735  *
736  * Once started, the monitoring thread runs until explicitly required to be
737  * terminated or every monitoring target is invalid.  The validity of the
738  * targets is checked via the &damon_operations.target_valid of @ops.  The
739  * termination can also be explicitly requested by calling damon_stop().
740  * The thread sets @kdamond to NULL when it terminates. Therefore, users can
741  * know whether the monitoring is ongoing or terminated by reading @kdamond.
742  * Reads and writes to @kdamond from outside of the monitoring thread must
743  * be protected by @kdamond_lock.
744  *
745  * Note that the monitoring thread protects only @kdamond via @kdamond_lock.
746  * Accesses to other fields must be protected by themselves.
747  *
748  * @ops:	Set of monitoring operations for given use cases.
749  *
750  * @adaptive_targets:	Head of monitoring targets (&damon_target) list.
751  * @schemes:		Head of schemes (&damos) list.
752  */
753 struct damon_ctx {
754 	struct damon_attrs attrs;
755 
756 /* private: internal use only */
757 	/* number of sample intervals that passed since this context started */
758 	unsigned long passed_sample_intervals;
759 	/*
760 	 * number of sample intervals that should be passed before next
761 	 * aggregation
762 	 */
763 	unsigned long next_aggregation_sis;
764 	/*
765 	 * number of sample intervals that should be passed before next ops
766 	 * update
767 	 */
768 	unsigned long next_ops_update_sis;
769 	/*
770 	 * number of sample intervals that should be passed before next
771 	 * intervals tuning
772 	 */
773 	unsigned long next_intervals_tune_sis;
774 	/* for waiting until the execution of the kdamond_fn is started */
775 	struct completion kdamond_started;
776 	/* for scheme quotas prioritization */
777 	unsigned long *regions_score_histogram;
778 
779 	/* lists of &struct damon_call_control */
780 	struct list_head call_controls;
781 	struct mutex call_controls_lock;
782 
783 	struct damos_walk_control *walk_control;
784 	struct mutex walk_control_lock;
785 
786 /* public: */
787 	struct task_struct *kdamond;
788 	struct mutex kdamond_lock;
789 
790 	struct damon_operations ops;
791 
792 	struct list_head adaptive_targets;
793 	struct list_head schemes;
794 };
795 
796 static inline struct damon_region *damon_next_region(struct damon_region *r)
797 {
798 	return container_of(r->list.next, struct damon_region, list);
799 }
800 
801 static inline struct damon_region *damon_prev_region(struct damon_region *r)
802 {
803 	return container_of(r->list.prev, struct damon_region, list);
804 }
805 
806 static inline struct damon_region *damon_last_region(struct damon_target *t)
807 {
808 	return list_last_entry(&t->regions_list, struct damon_region, list);
809 }
810 
811 static inline struct damon_region *damon_first_region(struct damon_target *t)
812 {
813 	return list_first_entry(&t->regions_list, struct damon_region, list);
814 }
815 
816 static inline unsigned long damon_sz_region(struct damon_region *r)
817 {
818 	return r->ar.end - r->ar.start;
819 }
820 
821 
822 #define damon_for_each_region(r, t) \
823 	list_for_each_entry(r, &t->regions_list, list)
824 
825 #define damon_for_each_region_from(r, t) \
826 	list_for_each_entry_from(r, &t->regions_list, list)
827 
828 #define damon_for_each_region_safe(r, next, t) \
829 	list_for_each_entry_safe(r, next, &t->regions_list, list)
830 
831 #define damon_for_each_target(t, ctx) \
832 	list_for_each_entry(t, &(ctx)->adaptive_targets, list)
833 
834 #define damon_for_each_target_safe(t, next, ctx)	\
835 	list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list)
836 
837 #define damon_for_each_scheme(s, ctx) \
838 	list_for_each_entry(s, &(ctx)->schemes, list)
839 
840 #define damon_for_each_scheme_safe(s, next, ctx) \
841 	list_for_each_entry_safe(s, next, &(ctx)->schemes, list)
842 
843 #define damos_for_each_quota_goal(goal, quota) \
844 	list_for_each_entry(goal, &quota->goals, list)
845 
846 #define damos_for_each_quota_goal_safe(goal, next, quota) \
847 	list_for_each_entry_safe(goal, next, &(quota)->goals, list)
848 
849 #define damos_for_each_filter(f, scheme) \
850 	list_for_each_entry(f, &(scheme)->filters, list)
851 
852 #define damos_for_each_filter_safe(f, next, scheme) \
853 	list_for_each_entry_safe(f, next, &(scheme)->filters, list)
854 
855 #define damos_for_each_ops_filter(f, scheme) \
856 	list_for_each_entry(f, &(scheme)->ops_filters, list)
857 
858 #define damos_for_each_ops_filter_safe(f, next, scheme) \
859 	list_for_each_entry_safe(f, next, &(scheme)->ops_filters, list)
860 
861 #ifdef CONFIG_DAMON
862 
863 struct damon_region *damon_new_region(unsigned long start, unsigned long end);
864 
865 /*
866  * Add a region between two other regions
867  */
868 static inline void damon_insert_region(struct damon_region *r,
869 		struct damon_region *prev, struct damon_region *next,
870 		struct damon_target *t)
871 {
872 	__list_add(&r->list, &prev->list, &next->list);
873 	t->nr_regions++;
874 }
875 
876 void damon_add_region(struct damon_region *r, struct damon_target *t);
877 void damon_destroy_region(struct damon_region *r, struct damon_target *t);
878 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
879 		unsigned int nr_ranges);
880 void damon_update_region_access_rate(struct damon_region *r, bool accessed,
881 		struct damon_attrs *attrs);
882 
883 struct damos_filter *damos_new_filter(enum damos_filter_type type,
884 		bool matching, bool allow);
885 void damos_add_filter(struct damos *s, struct damos_filter *f);
886 bool damos_filter_for_ops(enum damos_filter_type type);
887 void damos_destroy_filter(struct damos_filter *f);
888 
889 struct damos_quota_goal *damos_new_quota_goal(
890 		enum damos_quota_goal_metric metric,
891 		unsigned long target_value);
892 void damos_add_quota_goal(struct damos_quota *q, struct damos_quota_goal *g);
893 void damos_destroy_quota_goal(struct damos_quota_goal *goal);
894 
895 struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
896 			enum damos_action action,
897 			unsigned long apply_interval_us,
898 			struct damos_quota *quota,
899 			struct damos_watermarks *wmarks,
900 			int target_nid);
901 void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
902 void damon_destroy_scheme(struct damos *s);
903 int damos_commit_quota_goals(struct damos_quota *dst, struct damos_quota *src);
904 
905 struct damon_target *damon_new_target(void);
906 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
907 bool damon_targets_empty(struct damon_ctx *ctx);
908 void damon_free_target(struct damon_target *t);
909 void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
910 unsigned int damon_nr_regions(struct damon_target *t);
911 
912 struct damon_ctx *damon_new_ctx(void);
913 void damon_destroy_ctx(struct damon_ctx *ctx);
914 int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs);
915 void damon_set_schemes(struct damon_ctx *ctx,
916 			struct damos **schemes, ssize_t nr_schemes);
917 int damon_commit_ctx(struct damon_ctx *old_ctx, struct damon_ctx *new_ctx);
918 int damon_nr_running_ctxs(void);
919 bool damon_is_registered_ops(enum damon_ops_id id);
920 int damon_register_ops(struct damon_operations *ops);
921 int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id);
922 
923 static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
924 {
925 	return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
926 }
927 
928 static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
929 {
930 	/* {aggr,sample}_interval are unsigned long, hence could overflow */
931 	return min(attrs->aggr_interval / attrs->sample_interval,
932 			(unsigned long)UINT_MAX);
933 }
934 
935 
936 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
937 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
938 bool damon_is_running(struct damon_ctx *ctx);
939 
940 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
941 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
942 
943 int damon_set_region_biggest_system_ram_default(struct damon_target *t,
944 				unsigned long *start, unsigned long *end);
945 
946 #endif	/* CONFIG_DAMON */
947 
948 #endif	/* _DAMON_H */
949