// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

void __init disable_hmat(void)
{
	hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct node_hmem_attrs hmem_attrs[2];
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
	bool has_cpu;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

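/*
 * Record an initiator proximity domain the first time it is seen,
 * skipping domains that SRAT did not map to a NUMA node.
 */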
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
	list_add_tail(&initiator->node, &initiators);
}

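/*
 * Record a memory target for a proximity domain, creating it on first
 * sight and accumulating each of its address ranges in the per-target
 * memregions resource tree.
 */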
static __init void alloc_memory_target(unsigned int mem_pxm,
		resource_size_t start, resource_size_t len)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
				IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
				start, start + len, mem_pxm);
}

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

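/*
 * Convert a raw matrix entry to common units: scale it by the entry
 * base unit and normalize per the table revision, returning 0 for
 * invalid or overflowing entries.
 */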
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / entry))
		return 0;

	/*
	 * Scale the raw entry by the base unit. Revision 1 values are
	 * then divided by 10; revision 2 latencies are converted from
	 * picoseconds to nanoseconds.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}

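/*
 * Record one performance attribute for a target. The combined
 * "Access" types update the read and write fields together.
 */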
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}

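/*
 * Keep a reference to the locality subtable and index it by the
 * attribute it describes, so the best-initiator search can walk the
 * attributes in priority order.
 */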
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

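/*
 * Parse a System Locality Latency and Bandwidth Information
 * Structure: validate its size, report every initiator/target entry,
 * and fold entries for directly attached initiators into the target's
 * access attributes.
 */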
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %u\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init]) {
					hmat_update_target_access(target, type, value, 0);
					/* If the node has a CPU, update access 1 */
					if (node_state(pxm_to_node(inits[init]), N_CPU))
						hmat_update_target_access(target, type, value, 1);
				}
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

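/*
 * Parse a Memory Side Cache Information Structure, unpacking its
 * attributes field into node_cache_attrs for later registration with
 * the target's node.
 */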
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %u\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

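/*
 * Parse a Memory Proximity Domain Attributes Structure, linking a
 * memory target to its directly attached processor domain when one is
 * reported as valid.
 */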
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %u\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}

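/*
 * Look up the matrix entry for an initiator/target pair in the given
 * locality subtable and return its normalized value, or 0 if either
 * proximity domain is not listed.
 */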
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

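/*
 * Track the best value seen for an attribute: lower wins for
 * latencies, higher wins for bandwidths. Returns true if *best was
 * updated.
 */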
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

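/*
 * Comparator for sorting initiators by proximity domain. As a side
 * effect it primes the candidate node mask, passed through @priv,
 * with every known initiator.
 */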
static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;
	unsigned long *p_nodes = priv;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	set_bit(ia->processor_pxm, p_nodes);
	set_bit(ib->processor_pxm, p_nodes);

	return ia->processor_pxm - ib->processor_pxm;
}

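/*
 * Link a target to the initiators that access it best. Access 0
 * considers all initiators; access 1 considers only initiators with
 * CPUs.
 */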
static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	bool access0done = false;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		access0done = true;
		if (node_state(cpu_nid, N_CPU)) {
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
			return;
		}
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	if (!access0done) {
		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
			loc = localities_types[i];
			if (!loc)
				continue;

			best = 0;
			list_for_each_entry(initiator, &initiators, node) {
				u32 value;

				if (!test_bit(initiator->processor_pxm, p_nodes))
					continue;

				value = hmat_initiator_perf(target, initiator,
							    loc->hmat_loc);
				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
				if (value != best)
					clear_bit(initiator->processor_pxm, p_nodes);
			}
			if (best)
				hmat_update_target_access(target, loc->hmat_loc->data_type,
							  best, 0);
		}

		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
			cpu_nid = pxm_to_node(i);
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		}
	}

	/* Access 1 ignores Generic Initiators */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(p_nodes, &initiators, initiator_cmp);
	best = 0;
	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!initiator->has_cpu) {
				clear_bit(initiator->processor_pxm, p_nodes);
				continue;
			}
			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
	}
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
	}
}

static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned int mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}

static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling) {
		int target_nid = pxm_to_node(target->memory_pxm);

		hmem_register_device(target_nid, res);
	}
}

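/*
 * Register a target's devices, caches, and performance attributes
 * with its node. Registration happens once, under target_lock, since
 * the init path can race with the memory hotplug notifier.
 */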
static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, 0);
		hmat_register_target_perf(target, 1);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

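/*
 * Memory hotplug notifier: register a target that was offline when
 * hmat_init() ran once its node comes online.
 */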
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

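/*
 * Tear down everything parsed from the tables, releasing the
 * per-target memregion reservations along the way.
 */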
static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

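/*
 * Walk SRAT for memory affinity, then parse the HMAT subtables and
 * register the discovered targets. The parsed structures are kept
 * only if the hotplug notifier may still need them.
 */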
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled() || hmat_disable)
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
device_initcall(hmat_init);