// SPDX-License-Identifier: GPL-2.0
/*
 * memory tiering: migrate cold pages in node 0 to node 1, and hot pages in
 * node 1 to node 0.  Auto-tune the hotness/coldness thresholds aiming for a
 * resulting node 0 utilization ratio of about 99.6 %.
 */

#define pr_fmt(fmt) "damon_sample_mtier: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

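/*
 * Prefix the parameters below with "damon_sample_mtier." instead of the
 * default KBUILD_MODNAME-based prefix.
 */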
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "damon_sample_mtier."

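/*
 * Physical address ranges of the fast (node 0) and slow (node 1) memory to
 * monitor.  These default to zero; set them to match the physical address
 * layout of the target system's NUMA nodes before enabling the module.
 */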
static unsigned long node0_start_addr __read_mostly;
module_param(node0_start_addr, ulong, 0600);

static unsigned long node0_end_addr __read_mostly;
module_param(node0_end_addr, ulong, 0600);

static unsigned long node1_start_addr __read_mostly;
module_param(node1_start_addr, ulong, 0600);

static unsigned long node1_end_addr __read_mostly;
module_param(node1_end_addr, ulong, 0600);

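/*
 * Aimed node 0 memory pressure, in basis points (1/10,000).  Promotion is
 * boosted while less than 99.7 % of node 0 memory is in use, and demotion is
 * boosted while less than 0.5 % of node 0 memory is free.
 */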
static unsigned long node0_mem_used_bp __read_mostly = 9970;
module_param(node0_mem_used_bp, ulong, 0600);

static unsigned long node0_mem_free_bp __read_mostly = 50;
module_param(node0_mem_free_bp, ulong, 0600);

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enabled_param_ops = {
	.set = damon_sample_mtier_enable_store,
	.get = param_get_bool,
};

static bool enabled __read_mostly;
module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
MODULE_PARM_DESC(enabled, "Enable or disable DAMON_SAMPLE_MTIER");

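/*
 * Two DAMON contexts: one promoting hot node 1 pages to node 0, and one
 * demoting cold node 0 pages to node 1.
 */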
static struct damon_ctx *ctxs[2];

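/*
 * Build a DAMON context that monitors one node's physical address range and
 * migrates its pages to the other node.  @promote selects the direction:
 * true for moving hot node 1 pages to node 0, false for moving cold node 0
 * pages to node 1.
 */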
static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	struct damon_region *region;
	struct damos *scheme;
	struct damos_quota_goal *quota_goal;
	struct damos_filter *filter;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,	/* 5 ms */
		.aggr_interval = 100 * USEC_PER_MSEC,	/* 100 ms */
		/* 60 seconds */
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/*
	 * Auto-tune the sampling and aggregation intervals aiming for a 4 %
	 * DAMON-observed access ratio, keeping the sampling interval within
	 * the [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

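	/* Monitor a single region: the whole address range of the source node */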
	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	region = damon_new_region(
			promote ? node1_start_addr : node0_start_addr,
			promote ? node1_end_addr : node0_end_addr);
	if (!region)
		goto free_out;
	damon_add_region(region, target);

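	/*
	 * The scheme: migrate hot (any access) regions for promotion, or cold
	 * (no access) regions for demotion, prioritized by access frequency
	 * and age, under a bandwidth quota.
	 */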
	scheme = damon_new_scheme(
			/* access pattern */
			&(struct damos_access_pattern) {
				.min_sz_region = PAGE_SIZE,
				.max_sz_region = ULONG_MAX,
				.min_nr_accesses = promote ? 1 : 0,
				.max_nr_accesses = promote ? UINT_MAX : 0,
				.min_age_region = 0,
				.max_age_region = UINT_MAX},
			/* action */
			promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
			1000000,	/* apply interval (1s) */
			&(struct damos_quota){
				/* at most 200 MiB per second */
				.reset_interval = 1000,	/* milliseconds */
				.sz = 200 * 1024 * 1024,
				/* ignore size of region when prioritizing */
				.weight_sz = 0,
				.weight_nr_accesses = 100,
				.weight_age = 100,
			},
			&(struct damos_watermarks){},
			promote ? 0 : 1);	/* migrate target node id */
	if (!scheme)
		goto free_out;
	damon_set_schemes(ctx, &scheme, 1);
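	/*
	 * Auto-tune the quota above, aiming at the node 0 memory pressure
	 * targets: the used memory ratio for promotion, or the free memory
	 * ratio for demotion.
	 */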
	quota_goal = damos_new_quota_goal(
			promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
			DAMOS_QUOTA_NODE_MEM_FREE_BP,
			promote ? node0_mem_used_bp : node0_mem_free_bp);
	if (!quota_goal)
		goto free_out;
	quota_goal->nid = 0;
	damos_add_quota_goal(&scheme->quota, quota_goal);
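	/*
	 * Use page table Accessed bits as a final check: promote only young
	 * pages, and demote only non-young pages.
	 */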
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
	if (!filter)
		goto free_out;
	damos_add_filter(scheme, filter);
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

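/* Build the promotion and demotion contexts and start DAMON with both */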
static int damon_sample_mtier_start(void)
{
	struct damon_ctx *ctx;

	ctx = damon_sample_mtier_build_ctx(true);
	if (!ctx)
		return -ENOMEM;
	ctxs[0] = ctx;
	ctx = damon_sample_mtier_build_ctx(false);
	if (!ctx) {
		damon_destroy_ctx(ctxs[0]);
		return -ENOMEM;
	}
	ctxs[1] = ctx;
	return damon_start(ctxs, 2, true);
}

static void damon_sample_mtier_stop(void)
{
	damon_stop(ctxs, 2);
	damon_destroy_ctx(ctxs[0]);
	damon_destroy_ctx(ctxs[1]);
}

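/*
 * Whether the module's init function has been called.  "enabled" can be set
 * before that, e.g., via the kernel command line; in that case, starting
 * DAMON is deferred to the init function.
 */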
static bool init_called;

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp)
{
	bool is_enabled = enabled;
	int err;

	err = kstrtobool(val, &enabled);
	if (err)
		return err;

	if (enabled == is_enabled)
		return 0;

	/* Not yet initialized; the init function will do the start */
	if (!init_called)
		return 0;

	if (enabled) {
		err = damon_sample_mtier_start();
		if (err)
			enabled = false;
		return err;
	}
	damon_sample_mtier_stop();
	return 0;
}

static int __init damon_sample_mtier_init(void)
{
	int err = 0;

	init_called = true;
	if (enabled) {
		err = damon_sample_mtier_start();
		if (err)
			enabled = false;
	}
	return err;
}

module_init(damon_sample_mtier_init);
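
/*
 * Example usage, a sketch: the address values below are hypothetical
 * placeholders; use the physical address ranges of node 0 and node 1 memory
 * on the target system.  With the sample built in (CONFIG_SAMPLE_DAMON_MTIER)
 * and the MODULE_PARAM_PREFIX above, the parameters appear under
 * /sys/module/damon_sample_mtier/parameters/:
 *
 *	echo 0x100000000 > /sys/module/damon_sample_mtier/parameters/node0_start_addr
 *	echo 0x200000000 > /sys/module/damon_sample_mtier/parameters/node0_end_addr
 *	echo 0x200000000 > /sys/module/damon_sample_mtier/parameters/node1_start_addr
 *	echo 0x400000000 > /sys/module/damon_sample_mtier/parameters/node1_end_addr
 *	echo Y > /sys/module/damon_sample_mtier/parameters/enabled
 */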