/* xref: /linux/tools/testing/cxl/test/cxl.c (revision 303d32843b831ba86c28aea188db95da65d88f31) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/memory_hotplug.h>
6 #include <linux/genalloc.h>
7 #include <linux/module.h>
8 #include <linux/mutex.h>
9 #include <linux/acpi.h>
10 #include <linux/pci.h>
11 #include <linux/mm.h>
12 #include <cxlmem.h>
13 
14 #include "../watermark.h"
15 #include "mock.h"
16 
17 static int interleave_arithmetic;
18 static bool extended_linear_cache;
19 static bool fail_autoassemble;
20 
21 #define FAKE_QTG_ID	42
22 
23 #define NR_CXL_HOST_BRIDGES 2
24 #define NR_CXL_SINGLE_HOST 1
25 #define NR_CXL_RCH 1
26 #define NR_CXL_ROOT_PORTS 2
27 #define NR_CXL_SWITCH_PORTS 2
28 #define NR_CXL_PORT_DECODERS 8
29 #define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
30 
31 #define MOCK_AUTO_REGION_SIZE_DEFAULT SZ_512M
32 static int mock_auto_region_size = MOCK_AUTO_REGION_SIZE_DEFAULT;
33 
34 static struct platform_device *cxl_acpi;
35 static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
36 #define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
37 static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
38 static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
39 #define NR_MEM_MULTI \
40 	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
41 static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
42 
43 static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
44 static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
45 static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
46 #define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
47 static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
48 
49 struct platform_device *cxl_mem[NR_MEM_MULTI];
50 struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
51 
52 static struct platform_device *cxl_rch[NR_CXL_RCH];
53 static struct platform_device *cxl_rcd[NR_CXL_RCH];
54 
55 /*
56  * Decoder registry
57  *
58  * Record decoder programming so that the topology can be reconstructed
59  * after cxl_acpi unbind/bind. This allows a user-created region config
60  * to be replayed as if firmware had provided the region at enumeration
61  * time.
62  *
63  * Entries are keyed by a stable port identity (port->uport_dev) combined
64  * with the decoder id. Decoder state is saved at initialization and
65  * updated on commit and reset.
66  *
67  * On re-enumeration mock_init_hdm_decoder() consults this registry to
68  * restore enabled decoders. Disabled decoders are reinitialized to a
69  * clean default state rather than replaying stale programming.
70  */
71 static DEFINE_XARRAY(decoder_registry);
72 
73 /*
74  * When set, decoder reset will not update the registry. This allows
75  * region destroy operations to reset live decoders without erasing
76  * the saved programming needed for replay after re-enumeration.
77  */
78 static bool decoder_reset_preserve_registry;
79 
80 static inline bool is_multi_bridge(struct device *dev)
81 {
82 	int i;
83 
84 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
85 		if (&cxl_host_bridge[i]->dev == dev)
86 			return true;
87 	return false;
88 }
89 
90 static inline bool is_single_bridge(struct device *dev)
91 {
92 	int i;
93 
94 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
95 		if (&cxl_hb_single[i]->dev == dev)
96 			return true;
97 	return false;
98 }
99 
100 static struct acpi_device acpi0017_mock;
101 static struct acpi_device host_bridge[NR_BRIDGES] = {
102 	[0] = {
103 		.handle = &host_bridge[0],
104 		.pnp.unique_id = "0",
105 	},
106 	[1] = {
107 		.handle = &host_bridge[1],
108 		.pnp.unique_id = "1",
109 	},
110 	[2] = {
111 		.handle = &host_bridge[2],
112 		.pnp.unique_id = "2",
113 	},
114 	[3] = {
115 		.handle = &host_bridge[3],
116 		.pnp.unique_id = "3",
117 	},
118 };
119 
120 static bool is_mock_dev(struct device *dev)
121 {
122 	int i;
123 
124 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
125 		if (dev == &cxl_mem[i]->dev)
126 			return true;
127 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
128 		if (dev == &cxl_mem_single[i]->dev)
129 			return true;
130 	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
131 		if (dev == &cxl_rcd[i]->dev)
132 			return true;
133 	if (dev == &cxl_acpi->dev)
134 		return true;
135 	return false;
136 }
137 
138 static bool is_mock_adev(struct acpi_device *adev)
139 {
140 	int i;
141 
142 	if (adev == &acpi0017_mock)
143 		return true;
144 
145 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
146 		if (adev == &host_bridge[i])
147 			return true;
148 
149 	return false;
150 }
151 
152 static struct {
153 	struct acpi_table_cedt cedt;
154 	struct acpi_cedt_chbs chbs[NR_BRIDGES];
155 	struct {
156 		struct acpi_cedt_cfmws cfmws;
157 		u32 target[1];
158 	} cfmws0;
159 	struct {
160 		struct acpi_cedt_cfmws cfmws;
161 		u32 target[2];
162 	} cfmws1;
163 	struct {
164 		struct acpi_cedt_cfmws cfmws;
165 		u32 target[1];
166 	} cfmws2;
167 	struct {
168 		struct acpi_cedt_cfmws cfmws;
169 		u32 target[2];
170 	} cfmws3;
171 	struct {
172 		struct acpi_cedt_cfmws cfmws;
173 		u32 target[1];
174 	} cfmws4;
175 	struct {
176 		struct acpi_cedt_cfmws cfmws;
177 		u32 target[1];
178 	} cfmws5;
179 	struct {
180 		struct acpi_cedt_cfmws cfmws;
181 		u32 target[1];
182 	} cfmws6;
183 	struct {
184 		struct acpi_cedt_cfmws cfmws;
185 		u32 target[2];
186 	} cfmws7;
187 	struct {
188 		struct acpi_cedt_cfmws cfmws;
189 		u32 target[3];
190 	} cfmws8;
191 	struct {
192 		struct acpi_cedt_cxims cxims;
193 		u64 xormap_list[2];
194 	} cxims0;
195 } __packed mock_cedt = {
196 	.cedt = {
197 		.header = {
198 			.signature = "CEDT",
199 			.length = sizeof(mock_cedt),
200 			.revision = 1,
201 		},
202 	},
203 	.chbs[0] = {
204 		.header = {
205 			.type = ACPI_CEDT_TYPE_CHBS,
206 			.length = sizeof(mock_cedt.chbs[0]),
207 		},
208 		.uid = 0,
209 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
210 	},
211 	.chbs[1] = {
212 		.header = {
213 			.type = ACPI_CEDT_TYPE_CHBS,
214 			.length = sizeof(mock_cedt.chbs[0]),
215 		},
216 		.uid = 1,
217 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
218 	},
219 	.chbs[2] = {
220 		.header = {
221 			.type = ACPI_CEDT_TYPE_CHBS,
222 			.length = sizeof(mock_cedt.chbs[0]),
223 		},
224 		.uid = 2,
225 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
226 	},
227 	.chbs[3] = {
228 		.header = {
229 			.type = ACPI_CEDT_TYPE_CHBS,
230 			.length = sizeof(mock_cedt.chbs[0]),
231 		},
232 		.uid = 3,
233 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
234 	},
235 	.cfmws0 = {
236 		.cfmws = {
237 			.header = {
238 				.type = ACPI_CEDT_TYPE_CFMWS,
239 				.length = sizeof(mock_cedt.cfmws0),
240 			},
241 			.interleave_ways = 0,
242 			.granularity = 4,
243 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
244 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
245 			.qtg_id = FAKE_QTG_ID,
246 			.window_size = SZ_256M * 4UL,
247 		},
248 		.target = { 0 },
249 	},
250 	.cfmws1 = {
251 		.cfmws = {
252 			.header = {
253 				.type = ACPI_CEDT_TYPE_CFMWS,
254 				.length = sizeof(mock_cedt.cfmws1),
255 			},
256 			.interleave_ways = 1,
257 			.granularity = 4,
258 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
259 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
260 			.qtg_id = FAKE_QTG_ID,
261 			.window_size = SZ_256M * 8UL,
262 		},
263 		.target = { 0, 1, },
264 	},
265 	.cfmws2 = {
266 		.cfmws = {
267 			.header = {
268 				.type = ACPI_CEDT_TYPE_CFMWS,
269 				.length = sizeof(mock_cedt.cfmws2),
270 			},
271 			.interleave_ways = 0,
272 			.granularity = 4,
273 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
274 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
275 			.qtg_id = FAKE_QTG_ID,
276 			.window_size = SZ_256M * 4UL,
277 		},
278 		.target = { 0 },
279 	},
280 	.cfmws3 = {
281 		.cfmws = {
282 			.header = {
283 				.type = ACPI_CEDT_TYPE_CFMWS,
284 				.length = sizeof(mock_cedt.cfmws3),
285 			},
286 			.interleave_ways = 1,
287 			.granularity = 4,
288 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
289 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
290 			.qtg_id = FAKE_QTG_ID,
291 			.window_size = SZ_256M * 8UL,
292 		},
293 		.target = { 0, 1, },
294 	},
295 	.cfmws4 = {
296 		.cfmws = {
297 			.header = {
298 				.type = ACPI_CEDT_TYPE_CFMWS,
299 				.length = sizeof(mock_cedt.cfmws4),
300 			},
301 			.interleave_ways = 0,
302 			.granularity = 4,
303 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
304 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
305 			.qtg_id = FAKE_QTG_ID,
306 			.window_size = SZ_256M * 4UL,
307 		},
308 		.target = { 2 },
309 	},
310 	.cfmws5 = {
311 		.cfmws = {
312 			.header = {
313 				.type = ACPI_CEDT_TYPE_CFMWS,
314 				.length = sizeof(mock_cedt.cfmws5),
315 			},
316 			.interleave_ways = 0,
317 			.granularity = 4,
318 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
319 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
320 			.qtg_id = FAKE_QTG_ID,
321 			.window_size = SZ_256M,
322 		},
323 		.target = { 3 },
324 	},
325 	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
326 	.cfmws6 = {
327 		.cfmws = {
328 			.header = {
329 				.type = ACPI_CEDT_TYPE_CFMWS,
330 				.length = sizeof(mock_cedt.cfmws6),
331 			},
332 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
333 			.interleave_ways = 0,
334 			.granularity = 4,
335 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
336 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
337 			.qtg_id = FAKE_QTG_ID,
338 			.window_size = SZ_256M * 8UL,
339 		},
340 		.target = { 0, },
341 	},
342 	.cfmws7 = {
343 		.cfmws = {
344 			.header = {
345 				.type = ACPI_CEDT_TYPE_CFMWS,
346 				.length = sizeof(mock_cedt.cfmws7),
347 			},
348 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
349 			.interleave_ways = 1,
350 			.granularity = 0,
351 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
352 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
353 			.qtg_id = FAKE_QTG_ID,
354 			.window_size = SZ_256M * 8UL,
355 		},
356 		.target = { 0, 1, },
357 	},
358 	.cfmws8 = {
359 		.cfmws = {
360 			.header = {
361 				.type = ACPI_CEDT_TYPE_CFMWS,
362 				.length = sizeof(mock_cedt.cfmws8),
363 			},
364 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
365 			.interleave_ways = 8,
366 			.granularity = 1,
367 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM |
368 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
369 			.qtg_id = FAKE_QTG_ID,
370 			.window_size = SZ_512M * 6UL,
371 		},
372 		.target = { 0, 1, 2, },
373 	},
374 	.cxims0 = {
375 		.cxims = {
376 			.header = {
377 				.type = ACPI_CEDT_TYPE_CXIMS,
378 				.length = sizeof(mock_cedt.cxims0),
379 			},
380 			.hbig = 0,
381 			.nr_xormaps = 2,
382 		},
383 		.xormap_list = { 0x404100, 0x808200, },
384 	},
385 };
386 
387 struct acpi_cedt_cfmws *mock_cfmws[] = {
388 	[0] = &mock_cedt.cfmws0.cfmws,
389 	[1] = &mock_cedt.cfmws1.cfmws,
390 	[2] = &mock_cedt.cfmws2.cfmws,
391 	[3] = &mock_cedt.cfmws3.cfmws,
392 	[4] = &mock_cedt.cfmws4.cfmws,
393 	[5] = &mock_cedt.cfmws5.cfmws,
394 	/* Modulo Math above, XOR Math below */
395 	[6] = &mock_cedt.cfmws6.cfmws,
396 	[7] = &mock_cedt.cfmws7.cfmws,
397 	[8] = &mock_cedt.cfmws8.cfmws,
398 };
399 
400 static int cfmws_start;
401 static int cfmws_end;
402 #define CFMWS_MOD_ARRAY_START 0
403 #define CFMWS_MOD_ARRAY_END   5
404 #define CFMWS_XOR_ARRAY_START 6
405 #define CFMWS_XOR_ARRAY_END   8
406 
407 struct acpi_cedt_cxims *mock_cxims[1] = {
408 	[0] = &mock_cedt.cxims0.cxims,
409 };
410 
411 struct cxl_mock_res {
412 	struct list_head list;
413 	struct range range;
414 };
415 
416 static LIST_HEAD(mock_res);
417 static DEFINE_MUTEX(mock_res_lock);
418 static struct gen_pool *cxl_mock_pool;
419 
/*
 * Tear down all mock address-space allocations: return every reserved
 * range to cxl_mock_pool and free its tracking node.
 */
static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	/* _safe variant: @_res caches the next node so @res can be freed */
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}
433 
434 static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
435 {
436 	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
437 	struct genpool_data_align data = {
438 		.align = align,
439 	};
440 	unsigned long phys;
441 
442 	INIT_LIST_HEAD(&res->list);
443 	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
444 				   gen_pool_first_fit_align, &data);
445 	if (!phys)
446 		return NULL;
447 
448 	res->range = (struct range) {
449 		.start = phys,
450 		.end = phys + size - 1,
451 	};
452 	mutex_lock(&mock_res_lock);
453 	list_add(&res->list, &mock_res);
454 	mutex_unlock(&mock_res_lock);
455 
456 	return res;
457 }
458 
459 /* Only update CFMWS0 as this is used by the auto region. */
460 static void cfmws_elc_update(struct acpi_cedt_cfmws *window, int index)
461 {
462 	if (!extended_linear_cache)
463 		return;
464 
465 	if (index != 0)
466 		return;
467 
468 	/*
469 	 * The window size should be 2x of the CXL region size where half is
470 	 * DRAM and half is CXL
471 	 */
472 	window->window_size = mock_auto_region_size * 2;
473 }
474 
475 static int populate_cedt(void)
476 {
477 	struct cxl_mock_res *res;
478 	int i;
479 
480 	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
481 		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
482 		resource_size_t size;
483 
484 		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
485 			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
486 		else
487 			size = ACPI_CEDT_CHBS_LENGTH_CXL11;
488 
489 		res = alloc_mock_res(size, size);
490 		if (!res)
491 			return -ENOMEM;
492 		chbs->base = res->range.start;
493 		chbs->length = size;
494 	}
495 
496 	for (i = cfmws_start; i <= cfmws_end; i++) {
497 		struct acpi_cedt_cfmws *window = mock_cfmws[i];
498 
499 		cfmws_elc_update(window, i);
500 		res = alloc_mock_res(window->window_size, SZ_256M);
501 		if (!res)
502 			return -ENOMEM;
503 		window->base_hpa = res->range.start;
504 	}
505 
506 	return 0;
507 }
508 
509 static bool is_mock_port(struct device *dev);
510 
511 /*
512  * WARNING, this hack assumes the format of 'struct cxl_cfmws_context'
513  * and 'struct cxl_chbs_context' share the property that the first
514  * struct member is a cxl_test device being probed by the cxl_acpi
515  * driver.
516  */
struct cxl_cedt_context {
	struct device *dev;	/* cxl_test device being probed by cxl_acpi */
};
520 
521 static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
522 				      acpi_tbl_entry_handler_arg handler_arg,
523 				      void *arg)
524 {
525 	struct cxl_cedt_context *ctx = arg;
526 	struct device *dev = ctx->dev;
527 	union acpi_subtable_headers *h;
528 	unsigned long end;
529 	int i;
530 
531 	if (!is_mock_port(dev) && !is_mock_dev(dev))
532 		return acpi_table_parse_cedt(id, handler_arg, arg);
533 
534 	if (id == ACPI_CEDT_TYPE_CHBS)
535 		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
536 			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
537 			end = (unsigned long)&mock_cedt.chbs[i + 1];
538 			handler_arg(h, arg, end);
539 		}
540 
541 	if (id == ACPI_CEDT_TYPE_CFMWS)
542 		for (i = cfmws_start; i <= cfmws_end; i++) {
543 			h = (union acpi_subtable_headers *) mock_cfmws[i];
544 			end = (unsigned long) h + mock_cfmws[i]->header.length;
545 			handler_arg(h, arg, end);
546 		}
547 
548 	if (id == ACPI_CEDT_TYPE_CXIMS)
549 		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
550 			h = (union acpi_subtable_headers *)mock_cxims[i];
551 			end = (unsigned long)h + mock_cxims[i]->header.length;
552 			handler_arg(h, arg, end);
553 		}
554 
555 	return 0;
556 }
557 
558 static bool is_mock_bridge(struct device *dev)
559 {
560 	int i;
561 
562 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
563 		if (dev == &cxl_host_bridge[i]->dev)
564 			return true;
565 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
566 		if (dev == &cxl_hb_single[i]->dev)
567 			return true;
568 	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
569 		if (dev == &cxl_rch[i]->dev)
570 			return true;
571 
572 	return false;
573 }
574 
575 static bool is_mock_port(struct device *dev)
576 {
577 	int i;
578 
579 	if (is_mock_bridge(dev))
580 		return true;
581 
582 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
583 		if (dev == &cxl_root_port[i]->dev)
584 			return true;
585 
586 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
587 		if (dev == &cxl_switch_uport[i]->dev)
588 			return true;
589 
590 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
591 		if (dev == &cxl_switch_dport[i]->dev)
592 			return true;
593 
594 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
595 		if (dev == &cxl_root_single[i]->dev)
596 			return true;
597 
598 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
599 		if (dev == &cxl_swu_single[i]->dev)
600 			return true;
601 
602 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
603 		if (dev == &cxl_swd_single[i]->dev)
604 			return true;
605 
606 	if (is_cxl_memdev(dev))
607 		return is_mock_dev(dev->parent);
608 
609 	return false;
610 }
611 
/* Map a mock bridge acpi_device back to its index in host_bridge[] */
static int host_bridge_index(struct acpi_device *adev)
{
	/* pointer arithmetic: @adev must point into host_bridge[] */
	return adev - host_bridge;
}
616 
617 static struct acpi_device *find_host_bridge(acpi_handle handle)
618 {
619 	int i;
620 
621 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
622 		if (handle == host_bridge[i].handle)
623 			return &host_bridge[i];
624 	return NULL;
625 }
626 
627 static acpi_status
628 mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
629 			   struct acpi_object_list *arguments,
630 			   unsigned long long *data)
631 {
632 	struct acpi_device *adev = find_host_bridge(handle);
633 
634 	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
635 		return acpi_evaluate_integer(handle, pathname, arguments, data);
636 
637 	*data = host_bridge_index(adev);
638 	return AE_OK;
639 }
640 
641 static int
642 mock_hmat_get_extended_linear_cache_size(struct resource *backing_res,
643 					 int nid, resource_size_t *cache_size)
644 {
645 	struct acpi_cedt_cfmws *window = mock_cfmws[0];
646 	struct resource cfmws0_res =
647 		DEFINE_RES_MEM(window->base_hpa, window->window_size);
648 
649 	if (!extended_linear_cache ||
650 	    !resource_contains(&cfmws0_res, backing_res)) {
651 		return hmat_get_extended_linear_cache_size(backing_res,
652 							   nid, cache_size);
653 	}
654 
655 	*cache_size = mock_auto_region_size;
656 
657 	return 0;
658 }
659 
660 static struct pci_bus mock_pci_bus[NR_BRIDGES];
661 static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
662 	[0] = {
663 		.bus = &mock_pci_bus[0],
664 	},
665 	[1] = {
666 		.bus = &mock_pci_bus[1],
667 	},
668 	[2] = {
669 		.bus = &mock_pci_bus[2],
670 	},
671 	[3] = {
672 		.bus = &mock_pci_bus[3],
673 	},
674 
675 };
676 
677 static bool is_mock_bus(struct pci_bus *bus)
678 {
679 	int i;
680 
681 	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
682 		if (bus == &mock_pci_bus[i])
683 			return true;
684 	return false;
685 }
686 
687 static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
688 {
689 	struct acpi_device *adev = find_host_bridge(handle);
690 
691 	if (!adev)
692 		return acpi_pci_find_root(handle);
693 	return &mock_pci_root[host_bridge_index(adev)];
694 }
695 
696 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
697 					  struct cxl_endpoint_dvsec_info *info)
698 {
699 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
700 	struct device *dev = &port->dev;
701 
702 	if (!cxlhdm)
703 		return ERR_PTR(-ENOMEM);
704 
705 	cxlhdm->port = port;
706 	cxlhdm->interleave_mask = ~0U;
707 	cxlhdm->iw_cap_mask = ~0UL;
708 	dev_set_drvdata(dev, cxlhdm);
709 	return cxlhdm;
710 }
711 
/* Iteration state for map_targets(): collects child platform-device ids */
struct target_map_ctx {
	u32 *target_map;	/* output array, one id per target */
	int index;		/* next slot to fill */
	int target_count;	/* capacity of target_map */
};
717 
718 static int map_targets(struct device *dev, void *data)
719 {
720 	struct platform_device *pdev = to_platform_device(dev);
721 	struct target_map_ctx *ctx = data;
722 
723 	ctx->target_map[ctx->index++] = pdev->id;
724 
725 	if (ctx->index > ctx->target_count) {
726 		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
727 		return -ENXIO;
728 	}
729 
730 	return 0;
731 }
732 
733 /*
734  * Build a stable registry key from the decoder's upstream port identity
735  * and decoder id.
736  *
737  * Decoder objects and cxl_port objects are reallocated on each enumeration,
738  * so their addresses cannot be used directly as replay keys. However,
739  * port->uport_dev is stable for a given topology across cxl_acpi unbind/bind
740  * in cxl_test, so use that as the port identity and pack the local decoder
741  * id into the low bits.
742  *
743  * The key is formed as:
744  *     ((unsigned long)port->uport_dev << 4) | cxld->id
745  *
746  * The low bits hold the decoder id (which must fit in 4 bits) while
747  * the remaining bits identify the upstream port. This key is only used
748  * within cxl_test to locate saved decoder state during replay.
749  */
static unsigned long cxld_registry_index(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	/* key layout: uport_dev pointer in the high bits, id in bits 0-3 */
	dev_WARN_ONCE(&port->dev, cxld->id >= 16,
		      "decoder id:%d out of range\n", cxld->id);
	return (((unsigned long)port->uport_dev) << 4) | cxld->id;
}
758 
/* Registry payload: saved decoder programming plus the endpoint DPA span */
struct cxl_test_decoder {
	union {
		struct cxl_switch_decoder cxlsd;	/* switch/root decoders */
		struct cxl_endpoint_decoder cxled;	/* endpoint decoders */
	};
	struct range dpa_range;	/* saved DPA reservation; empty if none */
};
766 
/* Look up saved state for @cxld; NULL if it was never registered */
static struct cxl_test_decoder *cxld_registry_find(struct cxl_decoder *cxld)
{
	return xa_load(&decoder_registry, cxld_registry_index(cxld));
}
771 
/* Emit a one-line debug summary of a decoder's current programming */
#define dbg_cxld(port, msg, cxld)                                                       \
	do {                                                                            \
		struct cxl_decoder *___d = (cxld);                                      \
		dev_dbg((port)->uport_dev,                                              \
			"decoder%d: %s range: %#llx-%#llx iw: %d ig: %d flags: %#lx\n", \
			___d->id, msg, ___d->hpa_range.start,                           \
			___d->hpa_range.end + 1, ___d->interleave_ways,                 \
			___d->interleave_granularity, ___d->flags);                     \
	} while (0)
781 
782 static int mock_decoder_commit(struct cxl_decoder *cxld);
783 static void mock_decoder_reset(struct cxl_decoder *cxld);
784 static void init_disabled_mock_decoder(struct cxl_decoder *cxld);
785 
786 static void cxld_copy(struct cxl_decoder *a, struct cxl_decoder *b)
787 {
788 	a->id = b->id;
789 	a->hpa_range = b->hpa_range;
790 	a->interleave_ways = b->interleave_ways;
791 	a->interleave_granularity = b->interleave_granularity;
792 	a->target_type = b->target_type;
793 	a->flags = b->flags;
794 	a->commit = mock_decoder_commit;
795 	a->reset = mock_decoder_reset;
796 }
797 
798 /*
799  * Restore decoder programming saved in the registry.
800  *
801  * Only decoders that were saved enabled are restored. Disabled decoders
802  * are left in their default inactive state so that stale programming is
803  * not resurrected after topology replay.
804  *
805  * For endpoint decoders this also restores the DPA reservation needed
806  * to reconstruct committed mappings.
807  */
static int cxld_registry_restore(struct cxl_decoder *cxld,
				 struct cxl_test_decoder *td)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int rc;

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);

		/* only decoders saved in the enabled state are replayed */
		if (!(td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE))
			return 0;

		dbg_cxld(port, "restore", &td->cxlsd.cxld);
		cxld_copy(cxld, &td->cxlsd.cxld);
		WARN_ON(cxlsd->nr_targets != td->cxlsd.nr_targets);

		/* Restore saved target intent; live dport binding happens later */
		for (int i = 0; i < cxlsd->nr_targets; i++) {
			cxlsd->target[i] = NULL;
			cxld->target_map[i] = td->cxlsd.cxld.target_map[i];
		}

		/* record this decoder as the highest committed on the port */
		port->commit_end = cxld->id;

	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);

		if (!(td->cxled.cxld.flags & CXL_DECODER_F_ENABLE))
			return 0;

		dbg_cxld(port, "restore", &td->cxled.cxld);
		cxld_copy(cxld, &td->cxled.cxld);
		cxled->state = td->cxled.state;
		cxled->skip = td->cxled.skip;
		if (range_len(&td->dpa_range)) {
			/* re-establish the DPA reservation saved at commit */
			rc = devm_cxl_dpa_reserve(cxled, td->dpa_range.start,
						  range_len(&td->dpa_range),
						  td->cxled.skip);
			if (rc) {
				/* replay failed: fall back to clean disabled state */
				init_disabled_mock_decoder(cxld);
				return rc;
			}
		}
		port->commit_end = cxld->id;
	}

	return 0;
}
858 
859 static void __cxld_registry_save(struct cxl_test_decoder *td,
860 				 struct cxl_decoder *cxld)
861 {
862 	if (is_switch_decoder(&cxld->dev)) {
863 		struct cxl_switch_decoder *cxlsd =
864 			to_cxl_switch_decoder(&cxld->dev);
865 
866 		cxld_copy(&td->cxlsd.cxld, cxld);
867 		td->cxlsd.nr_targets = cxlsd->nr_targets;
868 
869 		/* Save target port_id as a stable identify for the dport */
870 		for (int i = 0; i < cxlsd->nr_targets; i++) {
871 			struct cxl_dport *dport;
872 
873 			if (!cxlsd->target[i])
874 				continue;
875 
876 			dport = cxlsd->target[i];
877 			td->cxlsd.cxld.target_map[i] = dport->port_id;
878 		}
879 	} else {
880 		struct cxl_endpoint_decoder *cxled =
881 			to_cxl_endpoint_decoder(&cxld->dev);
882 
883 		cxld_copy(&td->cxled.cxld, cxld);
884 		td->cxled.state = cxled->state;
885 		td->cxled.skip = cxled->skip;
886 
887 		if (!(cxld->flags & CXL_DECODER_F_ENABLE)) {
888 			td->dpa_range.start = 0;
889 			td->dpa_range.end = -1;
890 		} else if (cxled->dpa_res) {
891 			td->dpa_range.start = cxled->dpa_res->start;
892 			td->dpa_range.end = cxled->dpa_res->end;
893 		} else {
894 			td->dpa_range.start = 0;
895 			td->dpa_range.end = -1;
896 		}
897 	}
898 }
899 
/* Save @cxld's state into @td, with a debug trace of what was captured */
static void cxld_registry_save(struct cxl_test_decoder *td,
			       struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	dbg_cxld(port, "save", cxld);
	__cxld_registry_save(td, cxld);
}
908 
/* Refresh the existing registry entry for @cxld after a commit or reset */
static void cxld_registry_update(struct cxl_decoder *cxld)
{
	struct cxl_test_decoder *td = cxld_registry_find(cxld);
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);

	/* every mock decoder registers at init; a missing entry is a bug */
	if (WARN_ON_ONCE(!td))
		return;

	dbg_cxld(port, "update", cxld);
	__cxld_registry_save(td, cxld);
}
920 
/*
 * Mock decoder commit operation: enforce in-order commit, flag the
 * decoder enabled, and record the new programming in the registry for
 * replay after re-enumeration.
 */
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	/* already enabled: nothing to do */
	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	/* decoders must be committed in id order */
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);

		cxled->state = CXL_DECODER_STATE_AUTO;
	}
	/* persist the committed state for later topology replay */
	cxld_registry_update(cxld);

	return 0;
}
950 
/*
 * Mock decoder reset operation: disable the decoder and, unless replay
 * preservation is active, record the disabled state in the registry.
 */
static void mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	/* already disabled: nothing to do */
	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);

		cxled->state = CXL_DECODER_STATE_MANUAL;
		cxled->skip = 0;
	}
	/*
	 * Region destroy resets live decoders but must keep the saved
	 * programming so the region can be replayed after re-enumeration.
	 */
	if (decoder_reset_preserve_registry)
		dev_dbg(port->uport_dev, "decoder%d: skip registry update\n",
			cxld->id);
	else
		cxld_registry_update(cxld);
}
981 
/*
 * Allocate a registry entry for @cxld, keyed by its stable port/decoder
 * identity, and save its initial programming. Returns the entry, or
 * NULL on allocation or (unexpected) duplicate-key failure.
 */
static struct cxl_test_decoder *cxld_registry_new(struct cxl_decoder *cxld)
{
	struct cxl_test_decoder *td __free(kfree) =
		kzalloc(sizeof(*td), GFP_KERNEL);
	unsigned long key = cxld_registry_index(cxld);

	if (!td)
		return NULL;

	/* a duplicate key means the same decoder was registered twice */
	if (xa_insert(&decoder_registry, key, td, GFP_KERNEL)) {
		WARN_ON(1);
		return NULL;
	}

	cxld_registry_save(td, cxld);
	/* ownership transferred to the xarray; disarm the auto-free */
	return no_free_ptr(td);
}
999 
1000 static void init_disabled_mock_decoder(struct cxl_decoder *cxld)
1001 {
1002 	cxld->hpa_range.start = 0;
1003 	cxld->hpa_range.end = -1;
1004 	cxld->interleave_ways = 1;
1005 	cxld->interleave_granularity = 0;
1006 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
1007 	cxld->flags = 0;
1008 	cxld->commit = mock_decoder_commit;
1009 	cxld->reset = mock_decoder_reset;
1010 
1011 	if (is_switch_decoder(&cxld->dev)) {
1012 		struct cxl_switch_decoder *cxlsd =
1013 			to_cxl_switch_decoder(&cxld->dev);
1014 
1015 		for (int i = 0; i < cxlsd->nr_targets; i++) {
1016 			cxlsd->target[i] = NULL;
1017 			cxld->target_map[i] = 0;
1018 		}
1019 	} else {
1020 		struct cxl_endpoint_decoder *cxled =
1021 			to_cxl_endpoint_decoder(&cxld->dev);
1022 
1023 		cxled->state = CXL_DECODER_STATE_MANUAL;
1024 		cxled->skip = 0;
1025 	}
1026 }
1027 
/*
 * Initialize a decoder with benign defaults (cleared HPA range, x1
 * interleave) and create its registry entry for later replay.
 */
static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/* failure here is an allocation or duplicate-key error */
	WARN_ON_ONCE(!cxld_registry_new(cxld));
}
1043 
1044 static int first_decoder(struct device *dev, const void *data)
1045 {
1046 	struct cxl_decoder *cxld;
1047 
1048 	if (!is_switch_decoder(dev))
1049 		return 0;
1050 	cxld = to_cxl_decoder(dev);
1051 	if (cxld->id == 0)
1052 		return 1;
1053 	return 0;
1054 }
1055 
1056 /*
1057  * Initialize a decoder during HDM enumeration.
1058  *
1059  * If a saved registry entry exists:
1060  *   - enabled decoders are restored from the saved programming
1061  *   - disabled decoders are initialized in a clean disabled state
1062  *
1063  * If no registry entry exists the decoder follows the normal mock
1064  * initialization path, including the special auto-region setup for
1065  * the first endpoints under host-bridge0.
1066  *
1067  * Returns true if decoder state was restored from the registry. In
1068  * that case the saved decode configuration (including target mapping)
1069  * has already been applied and the map_targets() is skipped.
1070  */
static bool mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	struct cxl_test_decoder *td;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		/* the walk above clobbered @port, reset to the endpoint's port */
		port = cxled_to_port(cxled);
	} else {
		port = to_cxl_port(cxld->dev.parent);
	}

	/*
	 * A registry hit means this decoder was previously torn down with
	 * its programming preserved; replay (or clear) the saved state
	 * instead of running the default mock initialization.
	 */
	td = cxld_registry_find(cxld);
	if (td) {
		bool enabled;

		if (is_switch_decoder(&cxld->dev))
			enabled = td->cxlsd.cxld.flags & CXL_DECODER_F_ENABLE;
		else
			enabled = td->cxled.cxld.flags & CXL_DECODER_F_ENABLE;

		/* cxld_registry_restore() returns 0 on success => "restored" */
		if (enabled)
			return !cxld_registry_restore(cxld, td);

		init_disabled_mock_decoder(cxld);
		return false;
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mock a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0, and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!is_endpoint_decoder(&cxld->dev) || !hb0 || pdev->id % 4 ||
	    pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return false;
	}

	/* Simulate missing cxl_mem.4 configuration */
	if (hb0 && pdev->id == 4 && cxld->id == 0 && fail_autoassemble) {
		default_mock_decoder(cxld);
		return false;
	}

	/*
	 * With extended_linear_cache the decode target is shifted up by one
	 * region size (the lower half presumably models the near-memory
	 * cache — see mock_hmat_get_extended_linear_cache_size).
	 */
	base = window->base_hpa;
	if (extended_linear_cache)
		base += mock_auto_region_size;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + mock_auto_region_size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0,
			     mock_auto_region_size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/* record the programming so a later re-enumeration can replay it */
	WARN_ON_ONCE(!cxld_registry_new(cxld));
	/*
	 * Now that endpoint decoder is set up, walk up the hierarchy
	 * and setup the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;

		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4) {
				cxlsd->target[1] = dport;
				cxlsd->cxld.target_map[1] = dport->port_id;
			} else {
				cxlsd->target[0] = dport;
				cxlsd->cxld.target_map[0] = dport->port_id;
			}
		} else {
			cxlsd->target[0] = dport;
			cxlsd->cxld.target_map[0] = dport->port_id;
		}
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * Switch targets 2 endpoints, while host bridge targets
		 * one root port
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + mock_auto_region_size - 1,
		};
		cxld->commit = mock_decoder_commit;
		cxld->reset = mock_decoder_reset;

		/* keep the registry in sync with the ancestor programming */
		cxld_registry_update(cxld);
		put_device(dev);
	}

	return false;
}
1221 
/*
 * Mock replacement for the HDM decoder enumeration: allocate
 * NR_CXL_PORT_DECODERS decoders on @port, initialize each via
 * mock_init_hdm_decoder(), and register them with the CXL core.
 * Switch/root ports get switch decoders sized by the expected number of
 * downstream ports; endpoint ports get endpoint decoders.
 *
 * Returns 0 on success or a negative errno.
 */
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;
	bool restored;

	/* endpoints have no downstream targets */
	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		struct target_map_ctx ctx = {
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);

			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		ctx.target_map = cxld->target_map;
		restored = mock_init_hdm_decoder(cxld);
		/*
		 * A restored decoder already carries its saved target map;
		 * only freshly initialized switch decoders need the dport
		 * walk to populate targets.
		 */
		if (target_count && !restored) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		/* decoder lifetime is now tied to @port's devres */
		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}
1293 
1294 static int __mock_cxl_decoders_setup(struct cxl_port *port)
1295 {
1296 	struct cxl_hdm *cxlhdm;
1297 
1298 	cxlhdm = mock_cxl_setup_hdm(port, NULL);
1299 	if (IS_ERR(cxlhdm)) {
1300 		if (PTR_ERR(cxlhdm) != -ENODEV)
1301 			dev_err(&port->dev, "Failed to map HDM decoder capability\n");
1302 		return PTR_ERR(cxlhdm);
1303 	}
1304 
1305 	return mock_cxl_enumerate_decoders(cxlhdm, NULL);
1306 }
1307 
1308 static int mock_cxl_switch_port_decoders_setup(struct cxl_port *port)
1309 {
1310 	if (is_cxl_root(port) || is_cxl_endpoint(port))
1311 		return -EOPNOTSUPP;
1312 
1313 	return __mock_cxl_decoders_setup(port);
1314 }
1315 
1316 static int mock_cxl_endpoint_decoders_setup(struct cxl_port *port)
1317 {
1318 	if (!is_cxl_endpoint(port))
1319 		return -EOPNOTSUPP;
1320 
1321 	return __mock_cxl_decoders_setup(port);
1322 }
1323 
1324 static int get_port_array(struct cxl_port *port,
1325 			  struct platform_device ***port_array,
1326 			  int *port_array_size)
1327 {
1328 	struct platform_device **array;
1329 	int array_size;
1330 
1331 	if (port->depth == 1) {
1332 		if (is_multi_bridge(port->uport_dev)) {
1333 			array_size = ARRAY_SIZE(cxl_root_port);
1334 			array = cxl_root_port;
1335 		} else if (is_single_bridge(port->uport_dev)) {
1336 			array_size = ARRAY_SIZE(cxl_root_single);
1337 			array = cxl_root_single;
1338 		} else {
1339 			dev_dbg(&port->dev, "%s: unknown bridge type\n",
1340 				dev_name(port->uport_dev));
1341 			return -ENXIO;
1342 		}
1343 	} else if (port->depth == 2) {
1344 		struct cxl_port *parent = to_cxl_port(port->dev.parent);
1345 
1346 		if (is_multi_bridge(parent->uport_dev)) {
1347 			array_size = ARRAY_SIZE(cxl_switch_dport);
1348 			array = cxl_switch_dport;
1349 		} else if (is_single_bridge(parent->uport_dev)) {
1350 			array_size = ARRAY_SIZE(cxl_swd_single);
1351 			array = cxl_swd_single;
1352 		} else {
1353 			dev_dbg(&port->dev, "%s: unknown bridge type\n",
1354 				dev_name(port->uport_dev));
1355 			return -ENXIO;
1356 		}
1357 	} else {
1358 		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
1359 			      port->depth);
1360 		return -ENXIO;
1361 	}
1362 
1363 	*port_array = array;
1364 	*port_array_size = array_size;
1365 
1366 	return 0;
1367 }
1368 
1369 static struct cxl_dport *mock_cxl_add_dport_by_dev(struct cxl_port *port,
1370 						   struct device *dport_dev)
1371 {
1372 	struct platform_device **array;
1373 	int rc, i, array_size;
1374 
1375 	rc = get_port_array(port, &array, &array_size);
1376 	if (rc)
1377 		return ERR_PTR(rc);
1378 
1379 	for (i = 0; i < array_size; i++) {
1380 		struct platform_device *pdev = array[i];
1381 
1382 		if (pdev->dev.parent != port->uport_dev) {
1383 			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
1384 				dev_name(port->uport_dev),
1385 				dev_name(pdev->dev.parent));
1386 			continue;
1387 		}
1388 
1389 		if (&pdev->dev != dport_dev)
1390 			continue;
1391 
1392 		return devm_cxl_add_dport(port, &pdev->dev, pdev->id,
1393 					  CXL_RESOURCE_NONE);
1394 	}
1395 
1396 	return ERR_PTR(-ENODEV);
1397 }
1398 
1399 /*
1400  * Faking the cxl_dpa_perf for the memdev when appropriate.
1401  */
1402 static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
1403 			   struct cxl_dpa_perf *dpa_perf)
1404 {
1405 	dpa_perf->qos_class = FAKE_QTG_ID;
1406 	dpa_perf->dpa_range = *range;
1407 	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
1408 		dpa_perf->coord[i].read_latency = 500;
1409 		dpa_perf->coord[i].write_latency = 500;
1410 		dpa_perf->coord[i].read_bandwidth = 1000;
1411 		dpa_perf->coord[i].write_bandwidth = 1000;
1412 	}
1413 }
1414 
/*
 * Mock CDAT parsing: install fake per-partition DPA performance data for
 * the memdev behind @port and push it into the perf machinery.
 */
static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	/* nothing to do when the port is not attached under a CXL root */
	if (!cxl_root)
		return;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here to only test the topology iterator. It serves
	 * no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}
1445 
1446 /*
1447  * Simulate that the first half of mock CXL Window 0 is "Soft Reserve" capacity
1448  */
1449 static int mock_walk_hmem_resources(struct device *host, walk_hmem_fn fn)
1450 {
1451 	struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
1452 	struct resource window =
1453 		DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
1454 
1455 	dev_dbg(host, "walk cxl_test resource: %pr\n", &window);
1456 	return fn(host, 0, &window);
1457 }
1458 
1459 /*
1460  * This should only be called by the dax_hmem case, treat mismatches (negative
1461  * result) as "fallback to base region_intersects()". Simulate that the first
1462  * half of mock CXL Window 0 is IORES_DESC_CXL capacity.
1463  */
1464 static int mock_region_intersects(resource_size_t start, size_t size,
1465 				  unsigned long flags, unsigned long desc)
1466 {
1467 	struct resource res = DEFINE_RES_MEM(start, size);
1468 	struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
1469 	struct resource window =
1470 		DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
1471 
1472 	if (resource_overlaps(&res, &window))
1473 		return REGION_INTERSECTS;
1474 	pr_debug("warning: no cxl_test CXL intersection for %pr\n", &res);
1475 	return -1;
1476 }
1477 
1478 
1479 static int
1480 mock_region_intersects_soft_reserve(resource_size_t start, size_t size)
1481 {
1482 	struct resource res = DEFINE_RES_MEM(start, size);
1483 	struct acpi_cedt_cfmws *cfmws = mock_cfmws[0];
1484 	struct resource window =
1485 		DEFINE_RES_MEM(cfmws->base_hpa, cfmws->window_size / 2);
1486 
1487 	if (resource_overlaps(&res, &window))
1488 		return REGION_INTERSECTS;
1489 	pr_debug("warning: no cxl_test soft reserve intersection for %pr\n", &res);
1490 	return -1;
1491 }
1492 
/*
 * Override table consumed by the cxl_test mock core: the cxl_acpi/cxl_port
 * drivers call through these hooks instead of the real ACPI, decoder, and
 * resource helpers when operating on mock devices.
 */
static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_switch_port_decoders_setup = mock_cxl_switch_port_decoders_setup,
	.devm_cxl_endpoint_decoders_setup = mock_cxl_endpoint_decoders_setup,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.devm_cxl_add_dport_by_dev = mock_cxl_add_dport_by_dev,
	.hmat_get_extended_linear_cache_size =
		mock_hmat_get_extended_linear_cache_size,
	.walk_hmem_resources = mock_walk_hmem_resources,
	.region_intersects = mock_region_intersects,
	.region_intersects_soft_reserve = mock_region_intersects_soft_reserve,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};
1513 
1514 static void mock_companion(struct acpi_device *adev, struct device *dev)
1515 {
1516 	device_initialize(&adev->dev);
1517 	fwnode_init(&adev->fwnode, NULL);
1518 	dev->fwnode = &adev->fwnode;
1519 	adev->fwnode.dev = dev;
1520 }
1521 
/* Some configs do not provide SZ_64G in <linux/sizes.h> */
#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif
1525 
1526 static __init int cxl_rch_topo_init(void)
1527 {
1528 	int rc, i;
1529 
1530 	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
1531 		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
1532 		struct acpi_device *adev = &host_bridge[idx];
1533 		struct platform_device *pdev;
1534 
1535 		pdev = platform_device_alloc("cxl_host_bridge", idx);
1536 		if (!pdev)
1537 			goto err_bridge;
1538 
1539 		mock_companion(adev, &pdev->dev);
1540 		rc = platform_device_add(pdev);
1541 		if (rc) {
1542 			platform_device_put(pdev);
1543 			goto err_bridge;
1544 		}
1545 
1546 		cxl_rch[i] = pdev;
1547 		mock_pci_bus[idx].bridge = &pdev->dev;
1548 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1549 				       "firmware_node");
1550 		if (rc)
1551 			goto err_bridge;
1552 	}
1553 
1554 	return 0;
1555 
1556 err_bridge:
1557 	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1558 		struct platform_device *pdev = cxl_rch[i];
1559 
1560 		if (!pdev)
1561 			continue;
1562 		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1563 		platform_device_unregister(cxl_rch[i]);
1564 	}
1565 
1566 	return rc;
1567 }
1568 
1569 static void cxl_rch_topo_exit(void)
1570 {
1571 	int i;
1572 
1573 	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1574 		struct platform_device *pdev = cxl_rch[i];
1575 
1576 		if (!pdev)
1577 			continue;
1578 		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1579 		platform_device_unregister(cxl_rch[i]);
1580 	}
1581 }
1582 
1583 static __init int cxl_single_topo_init(void)
1584 {
1585 	int i, rc;
1586 
1587 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
1588 		struct acpi_device *adev =
1589 			&host_bridge[NR_CXL_HOST_BRIDGES + i];
1590 		struct platform_device *pdev;
1591 
1592 		pdev = platform_device_alloc("cxl_host_bridge",
1593 					     NR_CXL_HOST_BRIDGES + i);
1594 		if (!pdev)
1595 			goto err_bridge;
1596 
1597 		mock_companion(adev, &pdev->dev);
1598 		rc = platform_device_add(pdev);
1599 		if (rc) {
1600 			platform_device_put(pdev);
1601 			goto err_bridge;
1602 		}
1603 
1604 		cxl_hb_single[i] = pdev;
1605 		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
1606 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1607 				       "physical_node");
1608 		if (rc)
1609 			goto err_bridge;
1610 	}
1611 
1612 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
1613 		struct platform_device *bridge =
1614 			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
1615 		struct platform_device *pdev;
1616 
1617 		pdev = platform_device_alloc("cxl_root_port",
1618 					     NR_MULTI_ROOT + i);
1619 		if (!pdev)
1620 			goto err_port;
1621 		pdev->dev.parent = &bridge->dev;
1622 
1623 		rc = platform_device_add(pdev);
1624 		if (rc) {
1625 			platform_device_put(pdev);
1626 			goto err_port;
1627 		}
1628 		cxl_root_single[i] = pdev;
1629 	}
1630 
1631 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
1632 		struct platform_device *root_port = cxl_root_single[i];
1633 		struct platform_device *pdev;
1634 
1635 		pdev = platform_device_alloc("cxl_switch_uport",
1636 					     NR_MULTI_ROOT + i);
1637 		if (!pdev)
1638 			goto err_uport;
1639 		pdev->dev.parent = &root_port->dev;
1640 
1641 		rc = platform_device_add(pdev);
1642 		if (rc) {
1643 			platform_device_put(pdev);
1644 			goto err_uport;
1645 		}
1646 		cxl_swu_single[i] = pdev;
1647 	}
1648 
1649 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
1650 		struct platform_device *uport =
1651 			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
1652 		struct platform_device *pdev;
1653 
1654 		pdev = platform_device_alloc("cxl_switch_dport",
1655 					     i + NR_MEM_MULTI);
1656 		if (!pdev)
1657 			goto err_dport;
1658 		pdev->dev.parent = &uport->dev;
1659 
1660 		rc = platform_device_add(pdev);
1661 		if (rc) {
1662 			platform_device_put(pdev);
1663 			goto err_dport;
1664 		}
1665 		cxl_swd_single[i] = pdev;
1666 	}
1667 
1668 	return 0;
1669 
1670 err_dport:
1671 	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1672 		platform_device_unregister(cxl_swd_single[i]);
1673 err_uport:
1674 	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1675 		platform_device_unregister(cxl_swu_single[i]);
1676 err_port:
1677 	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1678 		platform_device_unregister(cxl_root_single[i]);
1679 err_bridge:
1680 	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1681 		struct platform_device *pdev = cxl_hb_single[i];
1682 
1683 		if (!pdev)
1684 			continue;
1685 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1686 		platform_device_unregister(cxl_hb_single[i]);
1687 	}
1688 
1689 	return rc;
1690 }
1691 
1692 static void cxl_single_topo_exit(void)
1693 {
1694 	int i;
1695 
1696 	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1697 		platform_device_unregister(cxl_swd_single[i]);
1698 	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1699 		platform_device_unregister(cxl_swu_single[i]);
1700 	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1701 		platform_device_unregister(cxl_root_single[i]);
1702 	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1703 		struct platform_device *pdev = cxl_hb_single[i];
1704 
1705 		if (!pdev)
1706 			continue;
1707 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1708 		platform_device_unregister(cxl_hb_single[i]);
1709 	}
1710 }
1711 
1712 static void cxl_mem_exit(void)
1713 {
1714 	int i;
1715 
1716 	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1717 		platform_device_unregister(cxl_rcd[i]);
1718 	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1719 		platform_device_unregister(cxl_mem_single[i]);
1720 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1721 		platform_device_unregister(cxl_mem[i]);
1722 }
1723 
1724 static int cxl_mem_init(void)
1725 {
1726 	int i, rc;
1727 
1728 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
1729 		struct platform_device *dport = cxl_switch_dport[i];
1730 		struct platform_device *pdev;
1731 
1732 		pdev = platform_device_alloc("cxl_mem", i);
1733 		if (!pdev)
1734 			goto err_mem;
1735 		pdev->dev.parent = &dport->dev;
1736 		set_dev_node(&pdev->dev, i % 2);
1737 
1738 		rc = platform_device_add(pdev);
1739 		if (rc) {
1740 			platform_device_put(pdev);
1741 			goto err_mem;
1742 		}
1743 		cxl_mem[i] = pdev;
1744 	}
1745 
1746 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
1747 		struct platform_device *dport = cxl_swd_single[i];
1748 		struct platform_device *pdev;
1749 
1750 		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
1751 		if (!pdev)
1752 			goto err_single;
1753 		pdev->dev.parent = &dport->dev;
1754 		set_dev_node(&pdev->dev, i % 2);
1755 
1756 		rc = platform_device_add(pdev);
1757 		if (rc) {
1758 			platform_device_put(pdev);
1759 			goto err_single;
1760 		}
1761 		cxl_mem_single[i] = pdev;
1762 	}
1763 
1764 	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
1765 		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
1766 		struct platform_device *rch = cxl_rch[i];
1767 		struct platform_device *pdev;
1768 
1769 		pdev = platform_device_alloc("cxl_rcd", idx);
1770 		if (!pdev)
1771 			goto err_rcd;
1772 		pdev->dev.parent = &rch->dev;
1773 		set_dev_node(&pdev->dev, i % 2);
1774 
1775 		rc = platform_device_add(pdev);
1776 		if (rc) {
1777 			platform_device_put(pdev);
1778 			goto err_rcd;
1779 		}
1780 		cxl_rcd[i] = pdev;
1781 	}
1782 
1783 	return 0;
1784 
1785 err_rcd:
1786 	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1787 		platform_device_unregister(cxl_rcd[i]);
1788 err_single:
1789 	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1790 		platform_device_unregister(cxl_mem_single[i]);
1791 err_mem:
1792 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1793 		platform_device_unregister(cxl_mem[i]);
1794 	return rc;
1795 }
1796 
/* Expose the decoder_reset_preserve_registry flag via sysfs as 0/1 */
static ssize_t
decoder_reset_preserve_registry_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", decoder_reset_preserve_registry);
}
1803 
1804 static ssize_t
1805 decoder_reset_preserve_registry_store(struct device *dev,
1806 				      struct device_attribute *attr,
1807 				      const char *buf, size_t count)
1808 {
1809 	int rc;
1810 
1811 	rc = kstrtobool(buf, &decoder_reset_preserve_registry);
1812 	if (rc)
1813 		return rc;
1814 	return count;
1815 }
1816 
static DEVICE_ATTR_RW(decoder_reset_preserve_registry);

/* Extra sysfs knobs attached to the cxl_acpi platform device */
static struct attribute *cxl_acpi_attrs[] = {
	&dev_attr_decoder_reset_preserve_registry.attr, NULL
};
ATTRIBUTE_GROUPS(cxl_acpi);
1823 
1824 static __init int cxl_test_init(void)
1825 {
1826 	int rc, i;
1827 	struct range mappable;
1828 
1829 	cxl_acpi_test();
1830 	cxl_core_test();
1831 	cxl_mem_test();
1832 	cxl_pmem_test();
1833 	cxl_port_test();
1834 
1835 	register_cxl_mock_ops(&cxl_mock_ops);
1836 
1837 	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
1838 	if (!cxl_mock_pool) {
1839 		rc = -ENOMEM;
1840 		goto err_gen_pool_create;
1841 	}
1842 	mappable = mhp_get_pluggable_range(true);
1843 
1844 	rc = gen_pool_add(cxl_mock_pool,
1845 			  min(iomem_resource.end + 1 - SZ_64G,
1846 			      mappable.end + 1 - SZ_64G),
1847 			  SZ_64G, NUMA_NO_NODE);
1848 	if (rc)
1849 		goto err_gen_pool_add;
1850 
1851 	if (interleave_arithmetic == 1) {
1852 		cfmws_start = CFMWS_XOR_ARRAY_START;
1853 		cfmws_end = CFMWS_XOR_ARRAY_END;
1854 	} else {
1855 		cfmws_start = CFMWS_MOD_ARRAY_START;
1856 		cfmws_end = CFMWS_MOD_ARRAY_END;
1857 	}
1858 
1859 	rc = populate_cedt();
1860 	if (rc)
1861 		goto err_populate;
1862 
1863 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
1864 		struct acpi_device *adev = &host_bridge[i];
1865 		struct platform_device *pdev;
1866 
1867 		pdev = platform_device_alloc("cxl_host_bridge", i);
1868 		if (!pdev)
1869 			goto err_bridge;
1870 
1871 		mock_companion(adev, &pdev->dev);
1872 		rc = platform_device_add(pdev);
1873 		if (rc) {
1874 			platform_device_put(pdev);
1875 			goto err_bridge;
1876 		}
1877 
1878 		cxl_host_bridge[i] = pdev;
1879 		mock_pci_bus[i].bridge = &pdev->dev;
1880 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1881 				       "physical_node");
1882 		if (rc)
1883 			goto err_bridge;
1884 	}
1885 
1886 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
1887 		struct platform_device *bridge =
1888 			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
1889 		struct platform_device *pdev;
1890 
1891 		pdev = platform_device_alloc("cxl_root_port", i);
1892 		if (!pdev)
1893 			goto err_port;
1894 		pdev->dev.parent = &bridge->dev;
1895 
1896 		rc = platform_device_add(pdev);
1897 		if (rc) {
1898 			platform_device_put(pdev);
1899 			goto err_port;
1900 		}
1901 		cxl_root_port[i] = pdev;
1902 	}
1903 
1904 	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
1905 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
1906 		struct platform_device *root_port = cxl_root_port[i];
1907 		struct platform_device *pdev;
1908 
1909 		pdev = platform_device_alloc("cxl_switch_uport", i);
1910 		if (!pdev)
1911 			goto err_uport;
1912 		pdev->dev.parent = &root_port->dev;
1913 
1914 		rc = platform_device_add(pdev);
1915 		if (rc) {
1916 			platform_device_put(pdev);
1917 			goto err_uport;
1918 		}
1919 		cxl_switch_uport[i] = pdev;
1920 	}
1921 
1922 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
1923 		struct platform_device *uport =
1924 			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
1925 		struct platform_device *pdev;
1926 
1927 		pdev = platform_device_alloc("cxl_switch_dport", i);
1928 		if (!pdev)
1929 			goto err_dport;
1930 		pdev->dev.parent = &uport->dev;
1931 
1932 		rc = platform_device_add(pdev);
1933 		if (rc) {
1934 			platform_device_put(pdev);
1935 			goto err_dport;
1936 		}
1937 		cxl_switch_dport[i] = pdev;
1938 	}
1939 
1940 	rc = cxl_single_topo_init();
1941 	if (rc)
1942 		goto err_dport;
1943 
1944 	rc = cxl_rch_topo_init();
1945 	if (rc)
1946 		goto err_single;
1947 
1948 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1949 	if (!cxl_acpi)
1950 		goto err_rch;
1951 
1952 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1953 	acpi0017_mock.dev.bus = &platform_bus_type;
1954 	cxl_acpi->dev.groups = cxl_acpi_groups;
1955 
1956 	rc = platform_device_add(cxl_acpi);
1957 	if (rc)
1958 		goto err_root;
1959 
1960 	rc = cxl_mem_init();
1961 	if (rc)
1962 		goto err_root;
1963 
1964 	rc = hmem_test_init();
1965 	if (rc)
1966 		goto err_mem;
1967 
1968 	return 0;
1969 
1970 err_mem:
1971 	cxl_mem_exit();
1972 err_root:
1973 	platform_device_put(cxl_acpi);
1974 err_rch:
1975 	cxl_rch_topo_exit();
1976 err_single:
1977 	cxl_single_topo_exit();
1978 err_dport:
1979 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1980 		platform_device_unregister(cxl_switch_dport[i]);
1981 err_uport:
1982 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1983 		platform_device_unregister(cxl_switch_uport[i]);
1984 err_port:
1985 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1986 		platform_device_unregister(cxl_root_port[i]);
1987 err_bridge:
1988 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1989 		struct platform_device *pdev = cxl_host_bridge[i];
1990 
1991 		if (!pdev)
1992 			continue;
1993 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1994 		platform_device_unregister(cxl_host_bridge[i]);
1995 	}
1996 err_populate:
1997 	depopulate_all_mock_resources();
1998 err_gen_pool_add:
1999 	gen_pool_destroy(cxl_mock_pool);
2000 err_gen_pool_create:
2001 	unregister_cxl_mock_ops(&cxl_mock_ops);
2002 	return rc;
2003 }
2004 
2005 static void free_decoder_registry(void)
2006 {
2007 	unsigned long index;
2008 	void *entry;
2009 
2010 	xa_for_each(&decoder_registry, index, entry) {
2011 		xa_erase(&decoder_registry, index);
2012 		kfree(entry);
2013 	}
2014 }
2015 
/* Tear down the entire mock topology in reverse order of cxl_test_init() */
static __exit void cxl_test_exit(void)
{
	int i;

	hmem_test_exit();
	cxl_mem_exit();
	platform_device_unregister(cxl_acpi);
	cxl_rch_topo_exit();
	cxl_single_topo_exit();
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
	/* drop any decoder state saved via decoder_reset_preserve_registry */
	free_decoder_registry();
	xa_destroy(&decoder_registry);
}
2045 
/* Module knobs; all are read-only (0444) after module load */
module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_param(extended_linear_cache, bool, 0444);
MODULE_PARM_DESC(extended_linear_cache, "Enable extended linear cache support");
module_param(fail_autoassemble, bool, 0444);
MODULE_PARM_DESC(fail_autoassemble, "Simulate missing member of an auto-region");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: setup module");
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");
2058