xref: /linux/samples/damon/mtier.c (revision 964314344eab7bc43e38a32be281c5ea0609773b)
// SPDX-License-Identifier: GPL-2.0
/*
 * memory tiering: migrate cold pages in node 0 and hot pages in node 1 to
 * node 1 and node 0, respectively.  Adjust the hotness/coldness thresholds
 * aiming for a resulting node 0 utilization ratio of about 99.6 % (with the
 * default parameters, the promotion goal targets 99.7 % of node 0 memory
 * used, and the demotion goal targets 0.5 % free).
 */
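
/*
 * Hypothetical usage sketch: the address ranges below are placeholders,
 * and the real per-node physical address ranges must be taken from the
 * target system (e.g., from the memory block symlinks under
 * /sys/devices/system/node/nodeN/).  The /sys/module/ directory name
 * follows the name of the built object; 'mtier' is assumed here.
 *
 *	# cd /sys/module/mtier/parameters
 *	# echo 0x1000000000 > node0_start_addr
 *	# echo 0x2080000000 > node0_end_addr
 *	# echo 0x2080000000 > node1_start_addr
 *	# echo 0x4100000000 > node1_end_addr
 *	# echo Y > enable
 */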

#define pr_fmt(fmt) "damon_sample_mtier: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

static unsigned long node0_start_addr __read_mostly;
module_param(node0_start_addr, ulong, 0600);

static unsigned long node0_end_addr __read_mostly;
module_param(node0_end_addr, ulong, 0600);

static unsigned long node1_start_addr __read_mostly;
module_param(node1_start_addr, ulong, 0600);

static unsigned long node1_end_addr __read_mostly;
module_param(node1_end_addr, ulong, 0600);

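/*
 * Aim-oriented quota goal targets, in basis points (1/10,000).  The
 * promotion scheme is auto-tuned toward node0_mem_used_bp of node 0
 * memory being used, and the demotion scheme toward node0_mem_free_bp
 * of node 0 memory being kept free, so node 0 utilization settles
 * around 99.6 %.
 */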
static unsigned long node0_mem_used_bp __read_mostly = 9970;
module_param(node0_mem_used_bp, ulong, 0600);

static unsigned long node0_mem_free_bp __read_mostly = 50;
module_param(node0_mem_free_bp, ulong, 0600);

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enable_param_ops = {
	.set = damon_sample_mtier_enable_store,
	.get = param_get_bool,
};

static bool enable __read_mostly;
module_param_cb(enable, &enable_param_ops, &enable, 0600);
MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_MTIER");

static struct damon_ctx *ctxs[2];	/* [0]: promotion, [1]: demotion */

static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	struct damon_region *region;
	struct damos *scheme;
	struct damos_quota_goal *quota_goal;
	struct damos_filter *filter;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/*
	 * Auto-tune the sampling and aggregation intervals, aiming for a 4 %
	 * DAMON-observed access ratio, while keeping the sampling interval
	 * within the [5 ms, 10 s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
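	/*
	 * The promotion context monitors node 1's physical address range,
	 * looking for hot pages to migrate to node 0; the demotion context
	 * monitors node 0's range, looking for cold pages to push to node 1.
	 */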
	region = damon_new_region(
			promote ? node1_start_addr : node0_start_addr,
			promote ? node1_end_addr : node0_end_addr);
	if (!region)
		goto free_out;
	damon_add_region(region, target);

	scheme = damon_new_scheme(
			/* access pattern */
			&(struct damos_access_pattern) {
				.min_sz_region = PAGE_SIZE,
				.max_sz_region = ULONG_MAX,
				.min_nr_accesses = promote ? 1 : 0,
				.max_nr_accesses = promote ? UINT_MAX : 0,
				.min_age_region = 0,
				.max_age_region = UINT_MAX},
			/* action */
			promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
			1000000,	/* apply the scheme every 1 s */
			&(struct damos_quota){
				/* at most 200 MiB per second */
				.reset_interval = 1000,	/* milliseconds */
				.sz = 200 * 1024 * 1024,
				/* ignore size of region when prioritizing */
				.weight_sz = 0,
				.weight_nr_accesses = 100,
				.weight_age = 100,
			},
			&(struct damos_watermarks){},	/* no watermarks */
			promote ? 0 : 1);	/* migration target node id */
	if (!scheme)
		goto free_out;
	damon_set_schemes(ctx, &scheme, 1);
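	/*
	 * Let DAMOS auto-tune the effective migration quota: the quota is
	 * raised while the goal metric (node 0 used or free memory ratio)
	 * is below its target value, and lowered otherwise.
	 */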
	quota_goal = damos_new_quota_goal(
			promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
			DAMOS_QUOTA_NODE_MEM_FREE_BP,
			promote ? node0_mem_used_bp : node0_mem_free_bp);
	if (!quota_goal)
		goto free_out;
	quota_goal->nid = 0;
	damos_add_quota_goal(&scheme->quota, quota_goal);
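	/*
	 * Re-check access at page granularity: the promotion scheme migrates
	 * only pages found young (recently accessed), while the demotion
	 * scheme skips young pages and migrates only the rest.
	 */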
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
	if (!filter)
		goto free_out;
	damos_add_filter(scheme, filter);
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

static int damon_sample_mtier_start(void)
{
	struct damon_ctx *ctx;

	ctx = damon_sample_mtier_build_ctx(true);
	if (!ctx)
		return -ENOMEM;
	ctxs[0] = ctx;
	ctx = damon_sample_mtier_build_ctx(false);
	if (!ctx) {
		damon_destroy_ctx(ctxs[0]);
		return -ENOMEM;
	}
	ctxs[1] = ctx;
	return damon_start(ctxs, 2, true);	/* run both contexts, exclusively */
}

static void damon_sample_mtier_stop(void)
{
	damon_stop(ctxs, 2);
	damon_destroy_ctx(ctxs[0]);
	damon_destroy_ctx(ctxs[1]);
}

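/*
 * If 'enable' is set from the kernel command line, the parameter setter
 * runs before this file's initialization.  Remember whether init has run,
 * so that actually starting DAMON is deferred until then.
 */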
static bool init_called;

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp)
{
	bool enabled = enable;
	int err;

	err = kstrtobool(val, &enable);
	if (err)
		return err;

	if (enable == enabled)
		return 0;

	/* DAMON cannot be started before the module initialization. */
	if (!init_called)
		return 0;

	if (enable) {
		err = damon_sample_mtier_start();
		if (err)
			enable = false;
		return err;
	}
	damon_sample_mtier_stop();
	return 0;
}

static int __init damon_sample_mtier_init(void)
{
	int err = 0;

	init_called = true;
	if (enable) {
		err = damon_sample_mtier_start();
		if (err)
			enable = false;
	}
	return err;
}

module_init(damon_sample_mtier_init);
199