// SPDX-License-Identifier: GPL-2.0
/*
 * memory tiering: migrate cold pages in node 0 and hot pages in node 1 to
 * node 1 and node 0, respectively.  Adjust the hotness/coldness thresholds
 * aiming for a resulting node 0 utilization ratio of 99.6 %.
 */

#define pr_fmt(fmt) "damon_sample_mtier: " fmt

#include <linux/damon.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>

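/*
 * Physical address ranges of the fast (node 0) and slow (node 1) memory to
 * monitor.  Users should set these based on the system's physical memory
 * layout before enabling this module.
 */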
static unsigned long node0_start_addr __read_mostly;
module_param(node0_start_addr, ulong, 0600);

static unsigned long node0_end_addr __read_mostly;
module_param(node0_end_addr, ulong, 0600);

static unsigned long node1_start_addr __read_mostly;
module_param(node1_start_addr, ulong, 0600);

static unsigned long node1_end_addr __read_mostly;
module_param(node1_end_addr, ulong, 0600);

static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops enable_param_ops = {
	.set = damon_sample_mtier_enable_store,
	.get = param_get_bool,
};

static bool enable __read_mostly;
module_param_cb(enable, &enable_param_ops, &enable, 0600);
MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_MTIER");

static struct damon_ctx *ctxs[2];

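/*
 * Build a DAMON context for one migration direction.  If @promote is true,
 * monitor node 1 and migrate hot pages to node 0.  Otherwise, monitor node 0
 * and migrate cold pages to node 1.
 */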
static struct damon_ctx *damon_sample_mtier_build_ctx(bool promote)
{
	struct damon_ctx *ctx;
	struct damon_attrs attrs;
	struct damon_target *target;
	struct damon_region *region;
	struct damos *scheme;
	struct damos_quota_goal *quota_goal;
	struct damos_filter *filter;

	ctx = damon_new_ctx();
	if (!ctx)
		return NULL;
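	/*
	 * Start with a 5 ms sampling interval and a 100 ms aggregation
	 * interval, and update monitoring operations every 60 seconds.
	 */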
	attrs = (struct damon_attrs) {
		.sample_interval = 5 * USEC_PER_MSEC,
		.aggr_interval = 100 * USEC_PER_MSEC,
		.ops_update_interval = 60 * USEC_PER_MSEC * MSEC_PER_SEC,
		.min_nr_regions = 10,
		.max_nr_regions = 1000,
	};

	/*
	 * Auto-tune the sampling and aggregation intervals aiming for a 4 %
	 * DAMON-observed access ratio, keeping the sampling interval within
	 * the [5 ms, 10 s] range.
	 */
	attrs.intervals_goal = (struct damon_intervals_goal) {
		.access_bp = 400, .aggrs = 3,
		.min_sample_us = 5000, .max_sample_us = 10000000,
	};
	if (damon_set_attrs(ctx, &attrs))
		goto free_out;
	if (damon_select_ops(ctx, DAMON_OPS_PADDR))
		goto free_out;

	target = damon_new_target();
	if (!target)
		goto free_out;
	damon_add_target(ctx, target);
	region = damon_new_region(
			promote ? node1_start_addr : node0_start_addr,
			promote ? node1_end_addr : node0_end_addr);
	if (!region)
		goto free_out;
	damon_add_region(region, target);

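	/*
	 * The promotion scheme targets regions of node 1 that showed any
	 * access (nr_accesses >= 1), while the demotion scheme targets
	 * regions of node 0 that showed no access at all (nr_accesses == 0).
	 */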
	scheme = damon_new_scheme(
			/* access pattern */
			&(struct damos_access_pattern) {
				.min_sz_region = PAGE_SIZE,
				.max_sz_region = ULONG_MAX,
				.min_nr_accesses = promote ? 1 : 0,
				.max_nr_accesses = promote ? UINT_MAX : 0,
				.min_age_region = 0,
				.max_age_region = UINT_MAX},
			/* action */
			promote ? DAMOS_MIGRATE_HOT : DAMOS_MIGRATE_COLD,
			1000000,	/* apply interval: 1 second */
			&(struct damos_quota){
				/* migrate up to 200 MiB per second */
				.reset_interval = 1000,	/* 1 second */
				.sz = 200 * 1024 * 1024,
				/* ignore size of region when prioritizing */
				.weight_sz = 0,
				.weight_nr_accesses = 100,
				.weight_age = 100,
			},
			&(struct damos_watermarks){},
			promote ? 0 : 1);	/* migration target node id */
	if (!scheme)
		goto free_out;
	damon_set_schemes(ctx, &scheme, 1);
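	/*
	 * Auto-tune the quota: aim for 99.7 % node 0 memory utilization on
	 * promotion, and a 0.5 % node 0 free memory ratio on demotion, which
	 * together target the 99.6 % utilization ratio described above.
	 */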
	quota_goal = damos_new_quota_goal(
			promote ? DAMOS_QUOTA_NODE_MEM_USED_BP :
			DAMOS_QUOTA_NODE_MEM_FREE_BP,
			promote ? 9970 : 50);
	if (!quota_goal)
		goto free_out;
	quota_goal->nid = 0;
	damos_add_quota_goal(&scheme->quota, quota_goal);
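	/*
	 * Double-check access with the page level 'young' status: migrate
	 * only pages found accessed (young) when promoting, and skip such
	 * pages when demoting.
	 */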
	filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true, promote);
	if (!filter)
		goto free_out;
	damos_add_filter(scheme, filter);
	return ctx;
free_out:
	damon_destroy_ctx(ctx);
	return NULL;
}

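/*
 * Build the promotion and demotion contexts, and start them as DAMON worker
 * threads (kdamonds).
 */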
static int damon_sample_mtier_start(void)
{
	struct damon_ctx *ctx;

	ctx = damon_sample_mtier_build_ctx(true);
	if (!ctx)
		return -ENOMEM;
	ctxs[0] = ctx;
	ctx = damon_sample_mtier_build_ctx(false);
	if (!ctx) {
		damon_destroy_ctx(ctxs[0]);
		return -ENOMEM;
	}
	ctxs[1] = ctx;
	return damon_start(ctxs, 2, true);
}

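/* Stop the two running contexts and free them. */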
static void damon_sample_mtier_stop(void)
{
	damon_stop(ctxs, 2);
	damon_destroy_ctx(ctxs[0]);
	damon_destroy_ctx(ctxs[1]);
}

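/*
 * 'enable' parameter callback: start the sample when 'enable' is set to Y,
 * and stop it when set to N.
 */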
static int damon_sample_mtier_enable_store(
		const char *val, const struct kernel_param *kp)
{
	bool enabled = enable;
	int err;

	err = kstrtobool(val, &enable);
	if (err)
		return err;

	if (enable == enabled)
		return 0;

	if (enable) {
		err = damon_sample_mtier_start();
		/* keep the parameter consistent with the real state */
		if (err)
			enable = false;
		return err;
	}
	damon_sample_mtier_stop();
	return 0;
}

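/*
 * Nothing to do at module load time; monitoring starts only when the
 * 'enable' parameter is set.
 */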
static int __init damon_sample_mtier_init(void)
{
	return 0;
}

module_init(damon_sample_mtier_init);
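
/* module metadata; "GPL" matches the SPDX GPL-2.0 identifier above */
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DAMON sample module for memory tiering");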