xref: /linux/samples/damon/mtier.c (revision 00c010e130e58301db2ea0cec1eadc931e1cb8cf)
1*82a08bdeSSeongJae Park // SPDX-License-Identifier: GPL-2.0
2*82a08bdeSSeongJae Park /*
3*82a08bdeSSeongJae Park  * memory tiering: migrate cold pages in node 0 and hot pages in node 1 to node
4*82a08bdeSSeongJae Park  * 1 and node 0, respectively.  Adjust the hotness/coldness threshold aiming
5*82a08bdeSSeongJae Park  * resulting 99.6 % node 0 utilization ratio.
6*82a08bdeSSeongJae Park  */
7*82a08bdeSSeongJae Park 
8*82a08bdeSSeongJae Park #define pr_fmt(fmt) "damon_sample_mtier: " fmt
9*82a08bdeSSeongJae Park 
10*82a08bdeSSeongJae Park #include <linux/damon.h>
11*82a08bdeSSeongJae Park #include <linux/init.h>
12*82a08bdeSSeongJae Park #include <linux/kernel.h>
13*82a08bdeSSeongJae Park #include <linux/module.h>
14*82a08bdeSSeongJae Park 
/*
 * Physical address ranges of the two NUMA nodes to monitor.
 * [node0_start_addr, node0_end_addr) is treated as the fast tier (node 0) and
 * [node1_start_addr, node1_end_addr) as the slow tier (node 1).
 * NOTE(review): no validation is done on these values here — presumably the
 * user must set all four before writing 'enable'; confirm against DAMON's
 * region handling.
 */
static unsigned long node0_start_addr __read_mostly;
module_param(node0_start_addr, ulong, 0600);

static unsigned long node0_end_addr __read_mostly;
module_param(node0_end_addr, ulong, 0600);

static unsigned long node1_start_addr __read_mostly;
module_param(node1_start_addr, ulong, 0600);

static unsigned long node1_end_addr __read_mostly;
module_param(node1_end_addr, ulong, 0600);
26*82a08bdeSSeongJae Park 
static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp);

/*
 * Parameter ops for 'enable': a custom setter so that writing the parameter
 * starts or stops the DAMON contexts, paired with the stock bool getter.
 */
static const struct kernel_param_ops enable_param_ops = {
	.set = damon_sample_mtier_enable_store,
	.get = param_get_bool,
};
34*82a08bdeSSeongJae Park 
35*82a08bdeSSeongJae Park static bool enable __read_mostly;
36*82a08bdeSSeongJae Park module_param_cb(enable, &enable_param_ops, &enable, 0600);
37*82a08bdeSSeongJae Park MODULE_PARM_DESC(enable, "Enable of disable DAMON_SAMPLE_MTIER");
38*82a08bdeSSeongJae Park 
/* ctxs[0]: promotion context (node 1 -> 0); ctxs[1]: demotion (node 0 -> 1) */
static struct damon_ctx *ctxs[2];
40*82a08bdeSSeongJae Park 
/*
 * Build one DAMON context for a single migration direction.
 *
 * @promote: true to build the promotion context (monitor the node 1 address
 *	     range and migrate hot pages to node 0); false for the demotion
 *	     context (monitor node 0 and migrate cold pages to node 1).
 *
 * Returns the constructed context, or NULL on failure.  On any intermediate
 * failure the partially built context is torn down via damon_destroy_ctx(),
 * which presumably frees the already-attached target/region/scheme —
 * TODO confirm against DAMON core.
 */
static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	struct damon_region *region;
	struct damos *scheme;
	struct damos_quota_goal *quota_goal;
	struct damos_filter *filter;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
	/*
	 * Initial intervals: 5 ms sampling, 100 ms aggregation, 60 s ops
	 * update; 10-1000 monitoring regions.
	 */
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/*
	 * auto-tune sampling and aggregation interval aiming 4% DAMON-observed
	 * accesses ratio, keeping sampling interval in [5ms, 10s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;
	/* Physical address space monitoring operations. */
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	/* One target covering the monitored node's whole address range. */
	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	region = damon_new_region(
			promote ? node1_start_addr : node0_start_addr,
			promote ? node1_end_addr : node0_end_addr);
	if (!region)
		goto free_out;
	damon_add_region(region, target);

	scheme = damon_new_scheme(
			/*
			 * access pattern: any region accessed at least once
			 * per aggregation (promotion), or not accessed at all
			 * (demotion, min == max == 0 accesses).
			 */
			&(struct damos_access_pattern) {
				.min_sz_region = PAGE_SIZE,
				.max_sz_region = ULONG_MAX,
				.min_nr_accesses = promote ? 1 : 0,
				.max_nr_accesses = promote ? UINT_MAX : 0,
				.min_age_region = 0,
				.max_age_region = UINT_MAX},
			/* action */
			promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
			1000000,	/* apply interval (1s) */
			&(struct damos_quota){
				/* 200 MiB per sec by most */
				.reset_interval = 1000,
				.sz = 200 * 1024 * 1024,
				/* ignore size of region when prioritizing */
				.weight_sz = 0,
				.weight_nr_accesses = 100,
				.weight_age = 100,
			},
			&(struct damos_watermarks){},
			promote ? 0 : 1);	/* migrate target node id */
	if (!scheme)
		goto free_out;
	/* Hand the scheme to the context before tuning its quota/filters. */
	damon_set_schemes(ctx, &scheme, 1);
	/*
	 * Auto-tune the quota: aim 99.7 % (9970 bp) node 0 memory utilization
	 * for promotion, or 0.5 % (50 bp) node 0 free memory for demotion.
	 */
	quota_goal = damos_new_quota_goal(
			promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
			DAMOS_QUOTA_NODE_MEM_FREE_BP,
			promote ? 9970 : 50);
	if (!quota_goal)
		goto free_out;
	quota_goal->nid = 0;
	damos_add_quota_goal(&scheme->quota, quota_goal);
	/*
	 * Page-granularity young filter: for promotion, act only on pages
	 * found young; for demotion, skip young pages (allow == promote).
	 */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
	if (!filter)
		goto free_out;
	damos_add_filter(scheme, filter);
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}
129*82a08bdeSSeongJae Park 
damon_sample_mtier_start(void)130*82a08bdeSSeongJae Park static int damon_sample_mtier_start(void)
131*82a08bdeSSeongJae Park {
132*82a08bdeSSeongJae Park 	struct damon_ctx *ctx;
133*82a08bdeSSeongJae Park 
134*82a08bdeSSeongJae Park 	ctx = damon_sample_mtier_build_ctx(true);
135*82a08bdeSSeongJae Park 	if (!ctx)
136*82a08bdeSSeongJae Park 		return -ENOMEM;
137*82a08bdeSSeongJae Park 	ctxs[0] = ctx;
138*82a08bdeSSeongJae Park 	ctx = damon_sample_mtier_build_ctx(false);
139*82a08bdeSSeongJae Park 	if (!ctx) {
140*82a08bdeSSeongJae Park 		damon_destroy_ctx(ctxs[0]);
141*82a08bdeSSeongJae Park 		return -ENOMEM;
142*82a08bdeSSeongJae Park 	}
143*82a08bdeSSeongJae Park 	ctxs[1] = ctx;
144*82a08bdeSSeongJae Park 	return damon_start(ctxs, 2, true);
145*82a08bdeSSeongJae Park }
146*82a08bdeSSeongJae Park 
damon_sample_mtier_stop(void)147*82a08bdeSSeongJae Park static void damon_sample_mtier_stop(void)
148*82a08bdeSSeongJae Park {
149*82a08bdeSSeongJae Park 	damon_stop(ctxs, 2);
150*82a08bdeSSeongJae Park 	damon_destroy_ctx(ctxs[0]);
151*82a08bdeSSeongJae Park 	damon_destroy_ctx(ctxs[1]);
152*82a08bdeSSeongJae Park }
153*82a08bdeSSeongJae Park 
damon_sample_mtier_enable_store(const char * val,const struct kernel_param * kp)154*82a08bdeSSeongJae Park static int damon_sample_mtier_enable_store(
155*82a08bdeSSeongJae Park 		const char *val, const struct kernel_param *kp)
156*82a08bdeSSeongJae Park {
157*82a08bdeSSeongJae Park 	bool enabled = enable;
158*82a08bdeSSeongJae Park 	int err;
159*82a08bdeSSeongJae Park 
160*82a08bdeSSeongJae Park 	err = kstrtobool(val, &enable);
161*82a08bdeSSeongJae Park 	if (err)
162*82a08bdeSSeongJae Park 		return err;
163*82a08bdeSSeongJae Park 
164*82a08bdeSSeongJae Park 	if (enable == enabled)
165*82a08bdeSSeongJae Park 		return 0;
166*82a08bdeSSeongJae Park 
167*82a08bdeSSeongJae Park 	if (enable)
168*82a08bdeSSeongJae Park 		return damon_sample_mtier_start();
169*82a08bdeSSeongJae Park 	damon_sample_mtier_stop();
170*82a08bdeSSeongJae Park 	return 0;
171*82a08bdeSSeongJae Park }
172*82a08bdeSSeongJae Park 
damon_sample_mtier_init(void)173*82a08bdeSSeongJae Park static int __init damon_sample_mtier_init(void)
174*82a08bdeSSeongJae Park {
175*82a08bdeSSeongJae Park 	return 0;
176*82a08bdeSSeongJae Park }
177*82a08bdeSSeongJae Park 
178*82a08bdeSSeongJae Park module_init(damon_sample_mtier_init);
179