xref: /linux/drivers/dax/hmem/device.c (revision f2527d8f566a45fa00ee5abd04d1c9476d4d704f)
// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/dax.h>
#include <linux/mm.h>

static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);

static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};

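/*
 * Register an "hmem" platform device for a Soft Reserved memory range so
 * that a driver (e.g. dax_hmem) can claim it later. @target_nid is the
 * NUMA node the range is associated with; @r is the physical range.
 */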
void hmem_register_device(int target_nid, struct resource *r)
{
	/* define a clean / non-busy resource for the platform device */
	struct resource res = {
		.start = r->start,
		.end = r->end,
		.flags = IORESOURCE_MEM,
		.desc = IORES_DESC_SOFT_RESERVED,
	};
	struct platform_device *pdev;
	struct memregion_info info;
	int rc, id;

	if (nohmem)
		return;

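	/*
	 * Only proceed if the range is still exclusively described as
	 * Soft Reserved in the iomem resource tree.
	 */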
	rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
			IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return;

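	/* allocate a device id from the shared memregion allocator */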
	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		pr_err("memregion allocation failure for %pr\n", &res);
		return;
	}

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		pr_err("hmem device allocation failure for %pr\n", &res);
		goto out_pdev;
	}

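	/*
	 * Track the range in the local hmem_active tree; a failure here
	 * means the range was already registered, so bail quietly.
	 */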
	if (!__request_region(&hmem_active, res.start, resource_size(&res),
			      dev_name(&pdev->dev), 0)) {
		dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
		goto out_active;
	}

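	/*
	 * Record the target node twice: as the device's NUMA node (mapped
	 * to an online node) and, unmodified, in the memregion_info
	 * platform data for the driver to consume.
	 */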
	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		pr_err("hmem memregion_info allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add_resources(pdev, &res, 1);
	if (rc < 0) {
		pr_err("hmem resource allocation failure for %pr\n", &res);
		goto out_resource;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(&pdev->dev, "device add failed for %pr\n", &res);
		goto out_resource;
	}

	return;

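	/* unwind in reverse order of the setup above */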
out_resource:
	__release_region(&hmem_active, res.start, resource_size(&res));
out_active:
	platform_device_put(pdev);
out_pdev:
	memregion_free(id);
}

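/*
 * walk_iomem_res_desc() callback: derive the target node from the range's
 * start address and register a device for it.
 */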
static __init int hmem_register_one(struct resource *res, void *data)
{
	hmem_register_device(phys_to_target_node(res->start), res);

	return 0;
}

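/*
 * Scan the whole physical address space (0 to -1) for Soft Reserved
 * ranges and register a device for each one found.
 */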
static __init int hmem_init(void)
{
	walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
			IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
	return 0;
}

/*
 * As this is a fallback for address ranges unclaimed by the ACPI HMAT
 * parsing, it must be at an initcall level greater than hmat_init().
 */
late_initcall(hmem_init);