xref: /linux/drivers/dax/kmem.c (revision 2d7f3d1a5866705be2393150e1ffdf67030ab88d)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory-tiers.h>
#include <linux/memory_hotplug.h>
#include "dax-private.h"
#include "bus.h"

/*
 * Default abstract distance assigned to the NUMA node onlined by
 * DAX/kmem if the low-level platform driver didn't initialize one
 * for this NUMA node.
 */
#define MEMTIER_DEFAULT_DAX_ADISTANCE	(MEMTIER_ADISTANCE_DRAM * 5)
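
/*
 * Note: in the memory-tiering scheme a larger abstract distance means
 * slower memory, so five times the DRAM distance places kmem-onlined
 * nodes in a tier below DRAM by default.
 */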

/* Memory resource name used for add_memory_driver_managed(). */
static const char *kmem_name;
/* Set if any memory will remain added when the driver is unloaded. */
static bool any_hotremove_failed;

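/*
 * Worked example (illustrative values, not taken from the driver):
 * with a 128MiB (0x8000000) memory block size, a device range of
 * [0x0a000000, 0x27ffffff] is trimmed to [0x10000000, 0x27ffffff]:
 * the start rounds up to the next block boundary while the end already
 * sits one byte short of a boundary. A range that does not cover at
 * least one full aligned block collapses and is rejected with -ENOSPC.
 */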
static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
{
	struct dev_dax_range *dax_range = &dev_dax->ranges[i];
	struct range *range = &dax_range->range;

	/* memory-block align the hotplug range */
	r->start = ALIGN(range->start, memory_block_size_bytes());
	r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
	if (r->start >= r->end) {
		r->start = range->start;
		r->end = range->end;
		return -ENOSPC;
	}
	return 0;
}

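/*
 * Per-device driver data: res_name backs the resource names registered
 * below, mgid is the id returned by memory_group_register_static(), and
 * res[] is a flexible array with one reserved region per dev_dax range
 * (sized with struct_size() in the probe path).
 */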
struct dax_kmem_data {
	const char *res_name;
	int mgid;
	struct resource *res[];
};

static DEFINE_MUTEX(kmem_memory_type_lock);
static LIST_HEAD(kmem_memory_types);

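/*
 * Look up the memory_dev_type for a given abstract distance, or
 * allocate one and track it on kmem_memory_types so that devices with
 * the same performance class share a single type. Entries are released
 * again in kmem_put_memory_types() at module unload.
 */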
static struct memory_dev_type *kmem_find_alloc_memory_type(int adist)
{
	bool found = false;
	struct memory_dev_type *mtype;

	mutex_lock(&kmem_memory_type_lock);
	list_for_each_entry(mtype, &kmem_memory_types, list) {
		if (mtype->adistance == adist) {
			found = true;
			break;
		}
	}
	if (!found) {
		mtype = alloc_memory_type(adist);
		if (!IS_ERR(mtype))
			list_add(&mtype->list, &kmem_memory_types);
	}
	mutex_unlock(&kmem_memory_type_lock);

	return mtype;
}

static void kmem_put_memory_types(void)
{
	struct memory_dev_type *mtype, *mtn;

	mutex_lock(&kmem_memory_type_lock);
	list_for_each_entry_safe(mtype, mtn, &kmem_memory_types, list) {
		list_del(&mtype->list);
		put_memory_type(mtype);
	}
	mutex_unlock(&kmem_memory_type_lock);
}

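/*
 * Probe path for handing a device-dax instance to the core MM as
 * system RAM. A hedged usage sketch (assumes the daxctl utility is
 * installed; it is not part of this driver):
 *
 *	# rebind dax0.0 from device_dax to kmem and online it as RAM
 *	daxctl reconfigure-device --mode=system-ram dax0.0
 */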
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	unsigned long total_len = 0;
	struct dax_kmem_data *data;
	struct memory_dev_type *mtype;
	int i, rc, mapped = 0;
	mhp_t mhp_flags;
	int numa_node;
	int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;

	/*
	 * Ensure good NUMA information for the persistent memory.
	 * Without this check, there is a risk that slow memory could be
	 * mixed into a node with faster memory, causing unavoidable
	 * performance issues.
	 */
	numa_node = dev_dax->target_node;
	if (numa_node < 0) {
		dev_warn(dev, "rejecting DAX region with invalid node: %d\n",
				numa_node);
		return -EINVAL;
	}

	mt_calc_adistance(numa_node, &adist);
	mtype = kmem_find_alloc_memory_type(adist);
	if (IS_ERR(mtype))
		return PTR_ERR(mtype);

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range range;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc) {
			dev_info(dev, "mapping%d: %#llx-%#llx too small after alignment\n",
					i, range.start, range.end);
			continue;
		}
		total_len += range_len(&range);
	}

	if (!total_len) {
		dev_warn(dev, "rejecting DAX region without any memory after alignment\n");
		return -EINVAL;
	}

	init_node_memory_type(numa_node, mtype);

	rc = -ENOMEM;
	data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
	if (!data)
		goto err_dax_kmem_data;

	data->res_name = kstrdup(dev_name(dev), GFP_KERNEL);
	if (!data->res_name)
		goto err_res_name;

	rc = memory_group_register_static(numa_node, PFN_UP(total_len));
	if (rc < 0)
		goto err_reg_mgid;
	data->mgid = rc;

	for (i = 0; i < dev_dax->nr_range; i++) {
		struct resource *res;
		struct range range;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc)
			continue;

		/* Region is permanently reserved if hotremove fails. */
		res = request_mem_region(range.start, range_len(&range), data->res_name);
		if (!res) {
			dev_warn(dev, "mapping%d: %#llx-%#llx could not reserve region\n",
					i, range.start, range.end);
			/*
			 * Once some memory has been onlined, we can't
			 * assume that it can be un-onlined safely.
			 */
			if (mapped)
				continue;
			rc = -EBUSY;
			goto err_request_mem;
		}
		data->res[i] = res;

		/*
		 * Set flags appropriate for System RAM.  Leave ..._BUSY clear
		 * so that add_memory() can add a child resource.  Do not
		 * inherit flags from the parent since it may set new flags
		 * unknown to us that will break add_memory() below.
		 */
		res->flags = IORESOURCE_SYSTEM_RAM;

		mhp_flags = MHP_NID_IS_MGID;
		if (dev_dax->memmap_on_memory)
			mhp_flags |= MHP_MEMMAP_ON_MEMORY;

		/*
		 * Ensure that future kexec'd kernels will not treat
		 * this as RAM automatically.
		 */
		rc = add_memory_driver_managed(data->mgid, range.start,
				range_len(&range), kmem_name, mhp_flags);

		if (rc) {
			dev_warn(dev, "mapping%d: %#llx-%#llx memory add failed\n",
					i, range.start, range.end);
			remove_resource(res);
			kfree(res);
			data->res[i] = NULL;
			if (mapped)
				continue;
			goto err_request_mem;
		}
		mapped++;
	}

	dev_set_drvdata(dev, data);

	return 0;

err_request_mem:
	memory_group_unregister(data->mgid);
err_reg_mgid:
	kfree(data->res_name);
err_res_name:
	kfree(data);
err_dax_kmem_data:
	clear_node_memory_type(numa_node, mtype);
	return rc;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
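/*
 * A hedged sketch of the expected administrative flow (the sysfs path
 * is the standard memory-hotplug interface, not defined here): every
 * memory block backed by the device must be offlined before unbind,
 * e.g.:
 *
 *	echo offline > /sys/devices/system/memory/memoryN/state
 *
 * otherwise remove_memory() below fails and the range stays pinned
 * until reboot.
 */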
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
	int i, success = 0;
	int node = dev_dax->target_node;
	struct device *dev = &dev_dax->dev;
	struct dax_kmem_data *data = dev_get_drvdata(dev);

	/*
	 * We have one shot at removing memory: if some memory blocks
	 * were not offlined prior to calling this function,
	 * remove_memory() will fail, and there is no way to hotremove
	 * this memory until reboot, because device unbind will succeed
	 * even if we return failure.
	 */
	for (i = 0; i < dev_dax->nr_range; i++) {
		struct range range;
		int rc;

		rc = dax_kmem_range(dev_dax, i, &range);
		if (rc)
			continue;

		rc = remove_memory(range.start, range_len(&range));
		if (rc == 0) {
			remove_resource(data->res[i]);
			kfree(data->res[i]);
			data->res[i] = NULL;
			success++;
			continue;
		}
		any_hotremove_failed = true;
		dev_err(dev,
			"mapping%d: %#llx-%#llx cannot be hotremoved until the next reboot\n",
			i, range.start, range.end);
	}

	if (success >= dev_dax->nr_range) {
		memory_group_unregister(data->mgid);
		kfree(data->res_name);
		kfree(data);
		dev_set_drvdata(dev, NULL);
		/*
		 * Clear the memtype association on successful unplug.
		 * Otherwise, memory blocks are left over that can be
		 * offlined/onlined later, and the memory_dev_type must
		 * be kept around for that; the reference then stays
		 * until the next reboot.
		 */
		clear_node_memory_type(node, NULL);
	}
}
#else
static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
{
	/*
	 * Without hotremove, purposely leak the request_mem_region()
	 * for the device-dax range. Removal of the device from the
	 * driver always succeeds, but the region is permanently pinned
	 * as reserved by the unreleased request_mem_region().
	 */
	any_hotremove_failed = true;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static struct dax_device_driver device_dax_kmem_driver = {
	.probe = dev_dax_kmem_probe,
	.remove = dev_dax_kmem_remove,
	.type = DAXDRV_KMEM_TYPE,
};

static int __init dax_kmem_init(void)
{
	int rc;

	/* Resource name is permanently allocated if any hotremove fails. */
	kmem_name = kstrdup_const("System RAM (kmem)", GFP_KERNEL);
	if (!kmem_name)
		return -ENOMEM;

	rc = dax_driver_register(&device_dax_kmem_driver);
	if (rc)
		goto error_dax_driver;

	return rc;

error_dax_driver:
	kmem_put_memory_types();
	kfree_const(kmem_name);
	return rc;
}

static void __exit dax_kmem_exit(void)
{
	dax_driver_unregister(&device_dax_kmem_driver);
	if (!any_hotremove_failed)
		kfree_const(kmem_name);
	kmem_put_memory_types();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
module_init(dax_kmem_init);
module_exit(dax_kmem_exit);
MODULE_ALIAS_DAX_DEVICE(0);