xref: /linux/tools/testing/cxl/test/cxl.c (revision 6ab1f766a80a6f46c7196f588e867cef51f4f26a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/genalloc.h>
6 #include <linux/module.h>
7 #include <linux/mutex.h>
8 #include <linux/acpi.h>
9 #include <linux/pci.h>
10 #include <linux/mm.h>
11 #include <cxlmem.h>
12 
13 #include "../watermark.h"
14 #include "mock.h"
15 
16 static int interleave_arithmetic;
17 
18 #define NR_CXL_HOST_BRIDGES 2
19 #define NR_CXL_SINGLE_HOST 1
20 #define NR_CXL_RCH 1
21 #define NR_CXL_ROOT_PORTS 2
22 #define NR_CXL_SWITCH_PORTS 2
23 #define NR_CXL_PORT_DECODERS 8
24 #define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)
25 
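/*
 * Mock topology implied by the constants above: 2 multi-port host bridges
 * x 2 root ports x 2 switch ports => 8 endpoints (cxl_mem[]), plus 1
 * single-port host bridge behind one switch => 2 endpoints
 * (cxl_mem_single[]), plus 1 CXL 1.1 host bridge (RCH) => 1 RCD.
 */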
26 static struct platform_device *cxl_acpi;
27 static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
28 #define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
29 static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
30 static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
31 #define NR_MEM_MULTI \
32 	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
33 static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];
34 
35 static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
36 static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
37 static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
38 #define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
39 static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];
40 
41 struct platform_device *cxl_mem[NR_MEM_MULTI];
42 struct platform_device *cxl_mem_single[NR_MEM_SINGLE];
43 
44 static struct platform_device *cxl_rch[NR_CXL_RCH];
45 static struct platform_device *cxl_rcd[NR_CXL_RCH];
46 
47 static inline bool is_multi_bridge(struct device *dev)
48 {
49 	int i;
50 
51 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
52 		if (&cxl_host_bridge[i]->dev == dev)
53 			return true;
54 	return false;
55 }
56 
57 static inline bool is_single_bridge(struct device *dev)
58 {
59 	int i;
60 
61 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
62 		if (&cxl_hb_single[i]->dev == dev)
63 			return true;
64 	return false;
65 }
66 
67 static struct acpi_device acpi0017_mock;
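/*
 * host_bridge[] index map: [0..1] are the multi-port host bridges, [2] is
 * the single-port host bridge, and [3] is the CXL 1.1 RCH (see the CHBS
 * entries in mock_cedt and cxl_rch_init() below).
 */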
68 static struct acpi_device host_bridge[NR_BRIDGES] = {
69 	[0] = {
70 		.handle = &host_bridge[0],
71 		.pnp.unique_id = "0",
72 	},
73 	[1] = {
74 		.handle = &host_bridge[1],
75 		.pnp.unique_id = "1",
76 	},
77 	[2] = {
78 		.handle = &host_bridge[2],
79 		.pnp.unique_id = "2",
80 	},
81 	[3] = {
82 		.handle = &host_bridge[3],
83 		.pnp.unique_id = "3",
84 	},
85 };
86 
87 static bool is_mock_dev(struct device *dev)
88 {
89 	int i;
90 
91 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
92 		if (dev == &cxl_mem[i]->dev)
93 			return true;
94 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
95 		if (dev == &cxl_mem_single[i]->dev)
96 			return true;
97 	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
98 		if (dev == &cxl_rcd[i]->dev)
99 			return true;
100 	if (dev == &cxl_acpi->dev)
101 		return true;
102 	return false;
103 }
104 
105 static bool is_mock_adev(struct acpi_device *adev)
106 {
107 	int i;
108 
109 	if (adev == &acpi0017_mock)
110 		return true;
111 
112 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
113 		if (adev == &host_bridge[i])
114 			return true;
115 
116 	return false;
117 }
118 
119 static struct {
120 	struct acpi_table_cedt cedt;
121 	struct acpi_cedt_chbs chbs[NR_BRIDGES];
122 	struct {
123 		struct acpi_cedt_cfmws cfmws;
124 		u32 target[1];
125 	} cfmws0;
126 	struct {
127 		struct acpi_cedt_cfmws cfmws;
128 		u32 target[2];
129 	} cfmws1;
130 	struct {
131 		struct acpi_cedt_cfmws cfmws;
132 		u32 target[1];
133 	} cfmws2;
134 	struct {
135 		struct acpi_cedt_cfmws cfmws;
136 		u32 target[2];
137 	} cfmws3;
138 	struct {
139 		struct acpi_cedt_cfmws cfmws;
140 		u32 target[1];
141 	} cfmws4;
142 	struct {
143 		struct acpi_cedt_cfmws cfmws;
144 		u32 target[1];
145 	} cfmws5;
146 	struct {
147 		struct acpi_cedt_cfmws cfmws;
148 		u32 target[1];
149 	} cfmws6;
150 	struct {
151 		struct acpi_cedt_cfmws cfmws;
152 		u32 target[2];
153 	} cfmws7;
154 	struct {
155 		struct acpi_cedt_cfmws cfmws;
156 		u32 target[4];
157 	} cfmws8;
158 	struct {
159 		struct acpi_cedt_cxims cxims;
160 		u64 xormap_list[2];
161 	} cxims0;
162 } __packed mock_cedt = {
163 	.cedt = {
164 		.header = {
165 			.signature = "CEDT",
166 			.length = sizeof(mock_cedt),
167 			.revision = 1,
168 		},
169 	},
170 	.chbs[0] = {
171 		.header = {
172 			.type = ACPI_CEDT_TYPE_CHBS,
173 			.length = sizeof(mock_cedt.chbs[0]),
174 		},
175 		.uid = 0,
176 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
177 	},
178 	.chbs[1] = {
179 		.header = {
180 			.type = ACPI_CEDT_TYPE_CHBS,
181 			.length = sizeof(mock_cedt.chbs[0]),
182 		},
183 		.uid = 1,
184 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
185 	},
186 	.chbs[2] = {
187 		.header = {
188 			.type = ACPI_CEDT_TYPE_CHBS,
189 			.length = sizeof(mock_cedt.chbs[0]),
190 		},
191 		.uid = 2,
192 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
193 	},
194 	.chbs[3] = {
195 		.header = {
196 			.type = ACPI_CEDT_TYPE_CHBS,
197 			.length = sizeof(mock_cedt.chbs[0]),
198 		},
199 		.uid = 3,
200 		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
201 	},
202 	.cfmws0 = {
203 		.cfmws = {
204 			.header = {
205 				.type = ACPI_CEDT_TYPE_CFMWS,
206 				.length = sizeof(mock_cedt.cfmws0),
207 			},
208 			.interleave_ways = 0,
209 			.granularity = 4,
210 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
211 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
212 			.qtg_id = 0,
213 			.window_size = SZ_256M * 4UL,
214 		},
215 		.target = { 0 },
216 	},
217 	.cfmws1 = {
218 		.cfmws = {
219 			.header = {
220 				.type = ACPI_CEDT_TYPE_CFMWS,
221 				.length = sizeof(mock_cedt.cfmws1),
222 			},
223 			.interleave_ways = 1,
224 			.granularity = 4,
225 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
226 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
227 			.qtg_id = 1,
228 			.window_size = SZ_256M * 8UL,
229 		},
230 		.target = { 0, 1, },
231 	},
232 	.cfmws2 = {
233 		.cfmws = {
234 			.header = {
235 				.type = ACPI_CEDT_TYPE_CFMWS,
236 				.length = sizeof(mock_cedt.cfmws2),
237 			},
238 			.interleave_ways = 0,
239 			.granularity = 4,
240 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
241 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
242 			.qtg_id = 2,
243 			.window_size = SZ_256M * 4UL,
244 		},
245 		.target = { 0 },
246 	},
247 	.cfmws3 = {
248 		.cfmws = {
249 			.header = {
250 				.type = ACPI_CEDT_TYPE_CFMWS,
251 				.length = sizeof(mock_cedt.cfmws3),
252 			},
253 			.interleave_ways = 1,
254 			.granularity = 4,
255 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
256 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
257 			.qtg_id = 3,
258 			.window_size = SZ_256M * 8UL,
259 		},
260 		.target = { 0, 1, },
261 	},
262 	.cfmws4 = {
263 		.cfmws = {
264 			.header = {
265 				.type = ACPI_CEDT_TYPE_CFMWS,
266 				.length = sizeof(mock_cedt.cfmws4),
267 			},
268 			.interleave_ways = 0,
269 			.granularity = 4,
270 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
271 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
272 			.qtg_id = 4,
273 			.window_size = SZ_256M * 4UL,
274 		},
275 		.target = { 2 },
276 	},
277 	.cfmws5 = {
278 		.cfmws = {
279 			.header = {
280 				.type = ACPI_CEDT_TYPE_CFMWS,
281 				.length = sizeof(mock_cedt.cfmws5),
282 			},
283 			.interleave_ways = 0,
284 			.granularity = 4,
285 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
286 					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
287 			.qtg_id = 5,
288 			.window_size = SZ_256M,
289 		},
290 		.target = { 3 },
291 	},
292 	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
293 	.cfmws6 = {
294 		.cfmws = {
295 			.header = {
296 				.type = ACPI_CEDT_TYPE_CFMWS,
297 				.length = sizeof(mock_cedt.cfmws6),
298 			},
299 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
300 			.interleave_ways = 0,
301 			.granularity = 4,
302 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
303 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
304 			.qtg_id = 0,
305 			.window_size = SZ_256M * 8UL,
306 		},
307 		.target = { 0, },
308 	},
309 	.cfmws7 = {
310 		.cfmws = {
311 			.header = {
312 				.type = ACPI_CEDT_TYPE_CFMWS,
313 				.length = sizeof(mock_cedt.cfmws7),
314 			},
315 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
316 			.interleave_ways = 1,
317 			.granularity = 0,
318 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
319 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
320 			.qtg_id = 1,
321 			.window_size = SZ_256M * 8UL,
322 		},
323 		.target = { 0, 1, },
324 	},
325 	.cfmws8 = {
326 		.cfmws = {
327 			.header = {
328 				.type = ACPI_CEDT_TYPE_CFMWS,
329 				.length = sizeof(mock_cedt.cfmws8),
330 			},
331 			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
332 			.interleave_ways = 2,
333 			.granularity = 0,
334 			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
335 					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
336 			.qtg_id = 0,
337 			.window_size = SZ_256M * 16UL,
338 		},
339 		.target = { 0, 1, 0, 1, },
340 	},
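	/*
	 * CXIMS backing the XOR windows above: per the ACPI CEDT CXIMS
	 * definition, each 64-bit xormap selects the host physical address
	 * bits that are XOR-reduced to produce one bit of the target index.
	 */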
341 	.cxims0 = {
342 		.cxims = {
343 			.header = {
344 				.type = ACPI_CEDT_TYPE_CXIMS,
345 				.length = sizeof(mock_cedt.cxims0),
346 			},
347 			.hbig = 0,
348 			.nr_xormaps = 2,
349 		},
350 		.xormap_list = { 0x404100, 0x808200, },
351 	},
352 };
353 
354 struct acpi_cedt_cfmws *mock_cfmws[] = {
355 	[0] = &mock_cedt.cfmws0.cfmws,
356 	[1] = &mock_cedt.cfmws1.cfmws,
357 	[2] = &mock_cedt.cfmws2.cfmws,
358 	[3] = &mock_cedt.cfmws3.cfmws,
359 	[4] = &mock_cedt.cfmws4.cfmws,
360 	[5] = &mock_cedt.cfmws5.cfmws,
361 	/* Modulo Math above, XOR Math below */
362 	[6] = &mock_cedt.cfmws6.cfmws,
363 	[7] = &mock_cedt.cfmws7.cfmws,
364 	[8] = &mock_cedt.cfmws8.cfmws,
365 };
366 
367 static int cfmws_start;
368 static int cfmws_end;
369 #define CFMWS_MOD_ARRAY_START 0
370 #define CFMWS_MOD_ARRAY_END   5
371 #define CFMWS_XOR_ARRAY_START 6
372 #define CFMWS_XOR_ARRAY_END   8
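/*
 * cfmws_start/cfmws_end bound the slice of mock_cfmws[] that gets
 * published: the modulo set by default, or the XOR set when the
 * interleave_arithmetic module parameter is 1 (see cxl_test_init()).
 */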
373 
374 struct acpi_cedt_cxims *mock_cxims[1] = {
375 	[0] = &mock_cedt.cxims0.cxims,
376 };
377 
378 struct cxl_mock_res {
379 	struct list_head list;
380 	struct range range;
381 };
382 
383 static LIST_HEAD(mock_res);
384 static DEFINE_MUTEX(mock_res_lock);
385 static struct gen_pool *cxl_mock_pool;
386 
387 static void depopulate_all_mock_resources(void)
388 {
389 	struct cxl_mock_res *res, *_res;
390 
391 	mutex_lock(&mock_res_lock);
392 	list_for_each_entry_safe(res, _res, &mock_res, list) {
393 		gen_pool_free(cxl_mock_pool, res->range.start,
394 			      range_len(&res->range));
395 		list_del(&res->list);
396 		kfree(res);
397 	}
398 	mutex_unlock(&mock_res_lock);
399 }
400 
401 static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
402 {
403 	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
404 	struct genpool_data_align data = {
405 		.align = align,
406 	};
407 	unsigned long phys;
408 
	if (!res)
		return NULL;

409 	INIT_LIST_HEAD(&res->list);
410 	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
411 				   gen_pool_first_fit_align, &data);
412 	if (!phys) {
		kfree(res);
413 		return NULL;
	}
414 
415 	res->range = (struct range) {
416 		.start = phys,
417 		.end = phys + size - 1,
418 	};
419 	mutex_lock(&mock_res_lock);
420 	list_add(&res->list, &mock_res);
421 	mutex_unlock(&mock_res_lock);
422 
423 	return res;
424 }
425 
426 static int populate_cedt(void)
427 {
428 	struct cxl_mock_res *res;
429 	int i;
430 
431 	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
432 		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
433 		resource_size_t size;
434 
435 		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
436 			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
437 		else
438 			size = ACPI_CEDT_CHBS_LENGTH_CXL11;
439 
440 		res = alloc_mock_res(size, size);
441 		if (!res)
442 			return -ENOMEM;
443 		chbs->base = res->range.start;
444 		chbs->length = size;
445 	}
446 
447 	for (i = cfmws_start; i <= cfmws_end; i++) {
448 		struct acpi_cedt_cfmws *window = mock_cfmws[i];
449 
450 		res = alloc_mock_res(window->window_size, SZ_256M);
451 		if (!res)
452 			return -ENOMEM;
453 		window->base_hpa = res->range.start;
454 	}
455 
456 	return 0;
457 }
458 
459 static bool is_mock_port(struct device *dev);
460 
461 /*
462  * WARNING: this hack assumes that 'struct cxl_cfmws_context' and
463  * 'struct cxl_chbs_context' share the property that their first
464  * struct member is the cxl_test device being probed by the cxl_acpi
465  * driver.
466  */
467 struct cxl_cedt_context {
468 	struct device *dev;
469 };
470 
471 static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
472 				      acpi_tbl_entry_handler_arg handler_arg,
473 				      void *arg)
474 {
475 	struct cxl_cedt_context *ctx = arg;
476 	struct device *dev = ctx->dev;
477 	union acpi_subtable_headers *h;
478 	unsigned long end;
479 	int i;
480 
481 	if (!is_mock_port(dev) && !is_mock_dev(dev))
482 		return acpi_table_parse_cedt(id, handler_arg, arg);
483 
484 	if (id == ACPI_CEDT_TYPE_CHBS)
485 		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
486 			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
487 			end = (unsigned long)&mock_cedt.chbs[i + 1];
488 			handler_arg(h, arg, end);
489 		}
490 
491 	if (id == ACPI_CEDT_TYPE_CFMWS)
492 		for (i = cfmws_start; i <= cfmws_end; i++) {
493 			h = (union acpi_subtable_headers *) mock_cfmws[i];
494 			end = (unsigned long) h + mock_cfmws[i]->header.length;
495 			handler_arg(h, arg, end);
496 		}
497 
498 	if (id == ACPI_CEDT_TYPE_CXIMS)
499 		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
500 			h = (union acpi_subtable_headers *)mock_cxims[i];
501 			end = (unsigned long)h + mock_cxims[i]->header.length;
502 			handler_arg(h, arg, end);
503 		}
504 
505 	return 0;
506 }
507 
508 static bool is_mock_bridge(struct device *dev)
509 {
510 	int i;
511 
512 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
513 		if (dev == &cxl_host_bridge[i]->dev)
514 			return true;
515 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
516 		if (dev == &cxl_hb_single[i]->dev)
517 			return true;
518 	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
519 		if (dev == &cxl_rch[i]->dev)
520 			return true;
521 
522 	return false;
523 }
524 
525 static bool is_mock_port(struct device *dev)
526 {
527 	int i;
528 
529 	if (is_mock_bridge(dev))
530 		return true;
531 
532 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
533 		if (dev == &cxl_root_port[i]->dev)
534 			return true;
535 
536 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
537 		if (dev == &cxl_switch_uport[i]->dev)
538 			return true;
539 
540 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
541 		if (dev == &cxl_switch_dport[i]->dev)
542 			return true;
543 
544 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
545 		if (dev == &cxl_root_single[i]->dev)
546 			return true;
547 
548 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
549 		if (dev == &cxl_swu_single[i]->dev)
550 			return true;
551 
552 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
553 		if (dev == &cxl_swd_single[i]->dev)
554 			return true;
555 
556 	if (is_cxl_memdev(dev))
557 		return is_mock_dev(dev->parent);
558 
559 	return false;
560 }
561 
562 static int host_bridge_index(struct acpi_device *adev)
563 {
564 	return adev - host_bridge;
565 }
566 
567 static struct acpi_device *find_host_bridge(acpi_handle handle)
568 {
569 	int i;
570 
571 	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
572 		if (handle == host_bridge[i].handle)
573 			return &host_bridge[i];
574 	return NULL;
575 }
576 
577 static acpi_status
578 mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
579 			   struct acpi_object_list *arguments,
580 			   unsigned long long *data)
581 {
582 	struct acpi_device *adev = find_host_bridge(handle);
583 
584 	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
585 		return acpi_evaluate_integer(handle, pathname, arguments, data);
586 
587 	*data = host_bridge_index(adev);
588 	return AE_OK;
589 }
590 
591 static struct pci_bus mock_pci_bus[NR_BRIDGES];
592 static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
593 	[0] = {
594 		.bus = &mock_pci_bus[0],
595 	},
596 	[1] = {
597 		.bus = &mock_pci_bus[1],
598 	},
599 	[2] = {
600 		.bus = &mock_pci_bus[2],
601 	},
602 	[3] = {
603 		.bus = &mock_pci_bus[3],
604 	},
606 };
607 
608 static bool is_mock_bus(struct pci_bus *bus)
609 {
610 	int i;
611 
612 	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
613 		if (bus == &mock_pci_bus[i])
614 			return true;
615 	return false;
616 }
617 
618 static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
619 {
620 	struct acpi_device *adev = find_host_bridge(handle);
621 
622 	if (!adev)
623 		return acpi_pci_find_root(handle);
624 	return &mock_pci_root[host_bridge_index(adev)];
625 }
626 
627 static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
628 					  struct cxl_endpoint_dvsec_info *info)
629 {
630 	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
631 
632 	if (!cxlhdm)
633 		return ERR_PTR(-ENOMEM);
634 
635 	cxlhdm->port = port;
636 	return cxlhdm;
637 }
638 
639 static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
640 {
641 	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
642 	return -EOPNOTSUPP;
643 }
644
646 struct target_map_ctx {
647 	int *target_map;
648 	int index;
649 	int target_count;
650 };
651 
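/*
 * Record each child platform device id as a decoder target, in the order
 * device_for_each_child() walks the children of the port's uport device.
 */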
652 static int map_targets(struct device *dev, void *data)
653 {
654 	struct platform_device *pdev = to_platform_device(dev);
655 	struct target_map_ctx *ctx = data;
656 
657 	ctx->target_map[ctx->index++] = pdev->id;
658 
659 	if (ctx->index > ctx->target_count) {
660 		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
661 		return -ENXIO;
662 	}
663 
664 	return 0;
665 }
666 
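/*
 * Emulate ordered HDM decoder commit/reset: decoders commit in ascending
 * id order and reset in descending order, tracked via port->commit_end.
 */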
667 static int mock_decoder_commit(struct cxl_decoder *cxld)
668 {
669 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
670 	int id = cxld->id;
671 
672 	if (cxld->flags & CXL_DECODER_F_ENABLE)
673 		return 0;
674 
675 	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
676 	if (cxl_num_decoders_committed(port) != id) {
677 		dev_dbg(&port->dev,
678 			"%s: out of order commit, expected decoder%d.%d\n",
679 			dev_name(&cxld->dev), port->id,
680 			cxl_num_decoders_committed(port));
681 		return -EBUSY;
682 	}
683 
684 	port->commit_end++;
685 	cxld->flags |= CXL_DECODER_F_ENABLE;
686 
687 	return 0;
688 }
689 
690 static int mock_decoder_reset(struct cxl_decoder *cxld)
691 {
692 	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
693 	int id = cxld->id;
694 
695 	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
696 		return 0;
697 
698 	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
699 	if (port->commit_end != id) {
700 		dev_dbg(&port->dev,
701 			"%s: out of order reset, expected decoder%d.%d\n",
702 			dev_name(&cxld->dev), port->id, port->commit_end);
703 		return -EBUSY;
704 	}
705 
706 	port->commit_end--;
707 	cxld->flags &= ~CXL_DECODER_F_ENABLE;
708 
709 	return 0;
710 }
711 
712 static void default_mock_decoder(struct cxl_decoder *cxld)
713 {
714 	cxld->hpa_range = (struct range){
715 		.start = 0,
716 		.end = -1,
717 	};
718 
719 	cxld->interleave_ways = 1;
720 	cxld->interleave_granularity = 256;
721 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
722 	cxld->commit = mock_decoder_commit;
723 	cxld->reset = mock_decoder_reset;
724 }
725 
726 static int first_decoder(struct device *dev, void *data)
727 {
728 	struct cxl_decoder *cxld;
729 
730 	if (!is_switch_decoder(dev))
731 		return 0;
732 	cxld = to_cxl_decoder(dev);
733 	if (cxld->id == 0)
734 		return 1;
735 	return 0;
736 }
737 
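/*
 * Emulate decoders committed by platform firmware before the driver
 * loaded: the static RAM region described in the comment below gets an
 * enabled, CXL_DECODER_STATE_AUTO endpoint decoder plus matching switch
 * and root port decoders; all other decoders fall back to
 * default_mock_decoder().
 */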
738 static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
739 {
740 	struct acpi_cedt_cfmws *window = mock_cfmws[0];
741 	struct platform_device *pdev = NULL;
742 	struct cxl_endpoint_decoder *cxled;
743 	struct cxl_switch_decoder *cxlsd;
744 	struct cxl_port *port, *iter;
745 	const int size = SZ_512M;
746 	struct cxl_memdev *cxlmd;
747 	struct cxl_dport *dport;
748 	struct device *dev;
749 	bool hb0 = false;
750 	u64 base;
751 	int i;
752 
753 	if (is_endpoint_decoder(&cxld->dev)) {
754 		cxled = to_cxl_endpoint_decoder(&cxld->dev);
755 		cxlmd = cxled_to_memdev(cxled);
756 		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
757 		pdev = to_platform_device(cxlmd->dev.parent);
758 
759 		/* check if the endpoint is attached to host-bridge0 */
760 		port = cxled_to_port(cxled);
761 		do {
762 			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
763 				hb0 = true;
764 				break;
765 			}
766 			if (is_cxl_port(port->dev.parent))
767 				port = to_cxl_port(port->dev.parent);
768 			else
769 				port = NULL;
770 		} while (port);
771 		port = cxled_to_port(cxled);
772 	}
773 
774 	/*
775 	 * The first decoder on each of the first 2 devices on the first
776 	 * switch attached to host-bridge0 mocks a fake / static RAM region.
777 	 * All other decoders are disabled by default. Given the round-robin
778 	 * assignment those devices are named cxl_mem.0 and cxl_mem.4.
779 	 *
780 	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
781 	 */
782 	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
783 		default_mock_decoder(cxld);
784 		return;
785 	}
786 
787 	base = window->base_hpa;
788 	cxld->hpa_range = (struct range) {
789 		.start = base,
790 		.end = base + size - 1,
791 	};
792 
793 	cxld->interleave_ways = 2;
794 	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
795 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
796 	cxld->flags = CXL_DECODER_F_ENABLE;
797 	cxled->state = CXL_DECODER_STATE_AUTO;
798 	port->commit_end = cxld->id;
799 	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
800 	cxld->commit = mock_decoder_commit;
801 	cxld->reset = mock_decoder_reset;
802 
803 	/*
804 	 * Now that the endpoint decoder is set up, walk up the hierarchy
805 	 * and set up the switch and root port decoders targeting @cxlmd.
806 	 */
807 	iter = port;
808 	for (i = 0; i < 2; i++) {
809 		dport = iter->parent_dport;
810 		iter = dport->port;
811 		dev = device_find_child(&iter->dev, NULL, first_decoder);
812 		/*
813 		 * Ancestor ports are guaranteed to be enumerated before
814 		 * @port, and all ports have at least one decoder.
815 		 */
816 		if (WARN_ON(!dev))
817 			continue;
818 		cxlsd = to_cxl_switch_decoder(dev);
819 		if (i == 0) {
820 			/* put cxl_mem.4 second in the decode order */
821 			if (pdev->id == 4)
822 				cxlsd->target[1] = dport;
823 			else
824 				cxlsd->target[0] = dport;
825 		} else
826 			cxlsd->target[0] = dport;
827 		cxld = &cxlsd->cxld;
828 		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
829 		cxld->flags = CXL_DECODER_F_ENABLE;
830 		iter->commit_end = 0;
831 		/*
832 		 * The switch targets 2 endpoints, while the host bridge
833 		 * targets one root port.
834 		 */
835 		if (i == 0)
836 			cxld->interleave_ways = 2;
837 		else
838 			cxld->interleave_ways = 1;
839 		cxld->interleave_granularity = 4096;
840 		cxld->hpa_range = (struct range) {
841 			.start = base,
842 			.end = base + size - 1,
843 		};
844 		put_device(dev);
845 	}
846 }
847 
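/*
 * Populate NR_CXL_PORT_DECODERS decoders per port: switch decoders (with
 * targets mapped from the uport's child platform devices) for root and
 * switch ports, endpoint decoders for memdev endpoints.
 */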
848 static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
849 				       struct cxl_endpoint_dvsec_info *info)
850 {
851 	struct cxl_port *port = cxlhdm->port;
852 	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
853 	int target_count, i;
854 
855 	if (is_cxl_endpoint(port))
856 		target_count = 0;
857 	else if (is_cxl_root(parent_port))
858 		target_count = NR_CXL_ROOT_PORTS;
859 	else
860 		target_count = NR_CXL_SWITCH_PORTS;
861 
862 	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
863 		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
864 		struct target_map_ctx ctx = {
865 			.target_map = target_map,
866 			.target_count = target_count,
867 		};
868 		struct cxl_decoder *cxld;
869 		int rc;
870 
871 		if (target_count) {
872 			struct cxl_switch_decoder *cxlsd;
873 
874 			cxlsd = cxl_switch_decoder_alloc(port, target_count);
875 			if (IS_ERR(cxlsd)) {
876 				dev_warn(&port->dev,
877 					 "Failed to allocate the decoder\n");
878 				return PTR_ERR(cxlsd);
879 			}
880 			cxld = &cxlsd->cxld;
881 		} else {
882 			struct cxl_endpoint_decoder *cxled;
883 
884 			cxled = cxl_endpoint_decoder_alloc(port);
885 
886 			if (IS_ERR(cxled)) {
887 				dev_warn(&port->dev,
888 					 "Failed to allocate the decoder\n");
889 				return PTR_ERR(cxled);
890 			}
891 			cxld = &cxled->cxld;
892 		}
893 
894 		mock_init_hdm_decoder(cxld);
895 
896 		if (target_count) {
897 			rc = device_for_each_child(port->uport_dev, &ctx,
898 						   map_targets);
899 			if (rc) {
900 				put_device(&cxld->dev);
901 				return rc;
902 			}
903 		}
904 
905 		rc = cxl_decoder_add_locked(cxld, target_map);
906 		if (rc) {
907 			put_device(&cxld->dev);
908 			dev_err(&port->dev, "Failed to add decoder\n");
909 			return rc;
910 		}
911 
912 		rc = cxl_decoder_autoremove(&port->dev, cxld);
913 		if (rc)
914 			return rc;
915 		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
916 	}
917 
918 	return 0;
919 }
920 
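/*
 * Register dports for @port from the platform devices created for the
 * matching topology level: root ports at depth 1, switch dports at
 * depth 2.
 */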
921 static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
922 {
923 	struct platform_device **array;
924 	int i, array_size;
925 
926 	if (port->depth == 1) {
927 		if (is_multi_bridge(port->uport_dev)) {
928 			array_size = ARRAY_SIZE(cxl_root_port);
929 			array = cxl_root_port;
930 		} else if (is_single_bridge(port->uport_dev)) {
931 			array_size = ARRAY_SIZE(cxl_root_single);
932 			array = cxl_root_single;
933 		} else {
934 			dev_dbg(&port->dev, "%s: unknown bridge type\n",
935 				dev_name(port->uport_dev));
936 			return -ENXIO;
937 		}
938 	} else if (port->depth == 2) {
939 		struct cxl_port *parent = to_cxl_port(port->dev.parent);
940 
941 		if (is_multi_bridge(parent->uport_dev)) {
942 			array_size = ARRAY_SIZE(cxl_switch_dport);
943 			array = cxl_switch_dport;
944 		} else if (is_single_bridge(parent->uport_dev)) {
945 			array_size = ARRAY_SIZE(cxl_swd_single);
946 			array = cxl_swd_single;
947 		} else {
948 			dev_dbg(&port->dev, "%s: unknown bridge type\n",
949 				dev_name(port->uport_dev));
950 			return -ENXIO;
951 		}
952 	} else {
953 		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
954 			      port->depth);
955 		return -ENXIO;
956 	}
957 
958 	for (i = 0; i < array_size; i++) {
959 		struct platform_device *pdev = array[i];
960 		struct cxl_dport *dport;
961 
962 		if (pdev->dev.parent != port->uport_dev) {
963 			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
964 				dev_name(port->uport_dev),
965 				dev_name(pdev->dev.parent));
966 			continue;
967 		}
968 
969 		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
970 					   CXL_RESOURCE_NONE);
971 
972 		if (IS_ERR(dport))
973 			return PTR_ERR(dport);
974 	}
975 
976 	return 0;
977 }
978 
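/*
 * Ops registered with the cxl_test mock framework (see
 * register_cxl_mock_ops() in cxl_test_init()): calls are intercepted for
 * mock devices and fall through to the real implementations otherwise.
 */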
979 static struct cxl_mock_ops cxl_mock_ops = {
980 	.is_mock_adev = is_mock_adev,
981 	.is_mock_bridge = is_mock_bridge,
982 	.is_mock_bus = is_mock_bus,
983 	.is_mock_port = is_mock_port,
984 	.is_mock_dev = is_mock_dev,
985 	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
986 	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
987 	.acpi_pci_find_root = mock_acpi_pci_find_root,
988 	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
989 	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
990 	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
991 	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
992 	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
993 };
994 
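/*
 * Mimic ACPI_COMPANION() linkage: point the platform device's fwnode at
 * the mock acpi_device so firmware-node lookups resolve to the mock.
 */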
995 static void mock_companion(struct acpi_device *adev, struct device *dev)
996 {
997 	device_initialize(&adev->dev);
998 	fwnode_init(&adev->fwnode, NULL);
999 	dev->fwnode = &adev->fwnode;
1000 	adev->fwnode.dev = dev;
1001 }
1002 
1003 #ifndef SZ_64G
1004 #define SZ_64G (SZ_32G * 2)
1005 #endif
1006 
1007 static __init int cxl_rch_init(void)
1008 {
1009 	int rc, i;
1010 
1011 	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
1012 		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
1013 		struct acpi_device *adev = &host_bridge[idx];
1014 		struct platform_device *pdev;
1015 
1016 		pdev = platform_device_alloc("cxl_host_bridge", idx);
1017 		if (!pdev) {
			rc = -ENOMEM;
1018 			goto err_bridge;
		}
1019 
1020 		mock_companion(adev, &pdev->dev);
1021 		rc = platform_device_add(pdev);
1022 		if (rc) {
1023 			platform_device_put(pdev);
1024 			goto err_bridge;
1025 		}
1026 
1027 		cxl_rch[i] = pdev;
1028 		mock_pci_bus[idx].bridge = &pdev->dev;
1029 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1030 				       "firmware_node");
1031 		if (rc)
1032 			goto err_bridge;
1033 	}
1034 
1035 	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
1036 		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
1037 		struct platform_device *rch = cxl_rch[i];
1038 		struct platform_device *pdev;
1039 
1040 		pdev = platform_device_alloc("cxl_rcd", idx);
1041 		if (!pdev)
1042 			goto err_mem;
1043 		pdev->dev.parent = &rch->dev;
1044 		set_dev_node(&pdev->dev, i % 2);
1045 
1046 		rc = platform_device_add(pdev);
1047 		if (rc) {
1048 			platform_device_put(pdev);
1049 			goto err_mem;
1050 		}
1051 		cxl_rcd[i] = pdev;
1052 	}
1053 
1054 	return 0;
1055 
1056 err_mem:
1057 	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1058 		platform_device_unregister(cxl_rcd[i]);
1059 err_bridge:
1060 	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1061 		struct platform_device *pdev = cxl_rch[i];
1062 
1063 		if (!pdev)
1064 			continue;
1065 		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1066 		platform_device_unregister(cxl_rch[i]);
1067 	}
1068 
1069 	return rc;
1070 }
1071 
1072 static void cxl_rch_exit(void)
1073 {
1074 	int i;
1075 
1076 	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
1077 		platform_device_unregister(cxl_rcd[i]);
1078 	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
1079 		struct platform_device *pdev = cxl_rch[i];
1080 
1081 		if (!pdev)
1082 			continue;
1083 		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
1084 		platform_device_unregister(cxl_rch[i]);
1085 	}
1086 }
1087 
1088 static __init int cxl_single_init(void)
1089 {
1090 	int i, rc;
1091 
1092 	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
1093 		struct acpi_device *adev =
1094 			&host_bridge[NR_CXL_HOST_BRIDGES + i];
1095 		struct platform_device *pdev;
1096 
1097 		pdev = platform_device_alloc("cxl_host_bridge",
1098 					     NR_CXL_HOST_BRIDGES + i);
1099 		if (!pdev) {
			rc = -ENOMEM;
1100 			goto err_bridge;
		}
1101 
1102 		mock_companion(adev, &pdev->dev);
1103 		rc = platform_device_add(pdev);
1104 		if (rc) {
1105 			platform_device_put(pdev);
1106 			goto err_bridge;
1107 		}
1108 
1109 		cxl_hb_single[i] = pdev;
1110 		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
1111 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1112 				       "physical_node");
1113 		if (rc)
1114 			goto err_bridge;
1115 	}
1116 
1117 	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
1118 		struct platform_device *bridge =
1119 			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
1120 		struct platform_device *pdev;
1121 
1122 		pdev = platform_device_alloc("cxl_root_port",
1123 					     NR_MULTI_ROOT + i);
1124 		if (!pdev)
1125 			goto err_port;
1126 		pdev->dev.parent = &bridge->dev;
1127 
1128 		rc = platform_device_add(pdev);
1129 		if (rc) {
1130 			platform_device_put(pdev);
1131 			goto err_port;
1132 		}
1133 		cxl_root_single[i] = pdev;
1134 	}
1135 
1136 	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
1137 		struct platform_device *root_port = cxl_root_single[i];
1138 		struct platform_device *pdev;
1139 
1140 		pdev = platform_device_alloc("cxl_switch_uport",
1141 					     NR_MULTI_ROOT + i);
1142 		if (!pdev)
1143 			goto err_uport;
1144 		pdev->dev.parent = &root_port->dev;
1145 
1146 		rc = platform_device_add(pdev);
1147 		if (rc) {
1148 			platform_device_put(pdev);
1149 			goto err_uport;
1150 		}
1151 		cxl_swu_single[i] = pdev;
1152 	}
1153 
1154 	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
1155 		struct platform_device *uport =
1156 			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
1157 		struct platform_device *pdev;
1158 
1159 		pdev = platform_device_alloc("cxl_switch_dport",
1160 					     i + NR_MEM_MULTI);
1161 		if (!pdev)
1162 			goto err_dport;
1163 		pdev->dev.parent = &uport->dev;
1164 
1165 		rc = platform_device_add(pdev);
1166 		if (rc) {
1167 			platform_device_put(pdev);
1168 			goto err_dport;
1169 		}
1170 		cxl_swd_single[i] = pdev;
1171 	}
1172 
1173 	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
1174 		struct platform_device *dport = cxl_swd_single[i];
1175 		struct platform_device *pdev;
1176 
1177 		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
1178 		if (!pdev)
1179 			goto err_mem;
1180 		pdev->dev.parent = &dport->dev;
1181 		set_dev_node(&pdev->dev, i % 2);
1182 
1183 		rc = platform_device_add(pdev);
1184 		if (rc) {
1185 			platform_device_put(pdev);
1186 			goto err_mem;
1187 		}
1188 		cxl_mem_single[i] = pdev;
1189 	}
1190 
1191 	return 0;
1192 
1193 err_mem:
1194 	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1195 		platform_device_unregister(cxl_mem_single[i]);
1196 err_dport:
1197 	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1198 		platform_device_unregister(cxl_swd_single[i]);
1199 err_uport:
1200 	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1201 		platform_device_unregister(cxl_swu_single[i]);
1202 err_port:
1203 	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1204 		platform_device_unregister(cxl_root_single[i]);
1205 err_bridge:
1206 	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1207 		struct platform_device *pdev = cxl_hb_single[i];
1208 
1209 		if (!pdev)
1210 			continue;
1211 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1212 		platform_device_unregister(cxl_hb_single[i]);
1213 	}
1214 
1215 	return rc;
1216 }
1217 
1218 static void cxl_single_exit(void)
1219 {
1220 	int i;
1221 
1222 	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
1223 		platform_device_unregister(cxl_mem_single[i]);
1224 	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
1225 		platform_device_unregister(cxl_swd_single[i]);
1226 	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
1227 		platform_device_unregister(cxl_swu_single[i]);
1228 	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
1229 		platform_device_unregister(cxl_root_single[i]);
1230 	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
1231 		struct platform_device *pdev = cxl_hb_single[i];
1232 
1233 		if (!pdev)
1234 			continue;
1235 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1236 		platform_device_unregister(cxl_hb_single[i]);
1237 	}
1238 }
1239 
1240 static __init int cxl_test_init(void)
1241 {
1242 	int rc, i;
1243 
1244 	cxl_acpi_test();
1245 	cxl_core_test();
1246 	cxl_mem_test();
1247 	cxl_pmem_test();
1248 	cxl_port_test();
1249 
1250 	register_cxl_mock_ops(&cxl_mock_ops);
1251 
1252 	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
1253 	if (!cxl_mock_pool) {
1254 		rc = -ENOMEM;
1255 		goto err_gen_pool_create;
1256 	}
1257 
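	/*
	 * Back the mock CHBS and CFMWS ranges with the top 64GB of the
	 * iomem resource range (allocated in populate_cedt()).
	 */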
1258 	rc = gen_pool_add(cxl_mock_pool, iomem_resource.end + 1 - SZ_64G,
1259 			  SZ_64G, NUMA_NO_NODE);
1260 	if (rc)
1261 		goto err_gen_pool_add;
1262 
1263 	if (interleave_arithmetic == 1) {
1264 		cfmws_start = CFMWS_XOR_ARRAY_START;
1265 		cfmws_end = CFMWS_XOR_ARRAY_END;
1266 	} else {
1267 		cfmws_start = CFMWS_MOD_ARRAY_START;
1268 		cfmws_end = CFMWS_MOD_ARRAY_END;
1269 	}
1270 
1271 	rc = populate_cedt();
1272 	if (rc)
1273 		goto err_populate;
1274 
1275 	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
1276 		struct acpi_device *adev = &host_bridge[i];
1277 		struct platform_device *pdev;
1278 
1279 		pdev = platform_device_alloc("cxl_host_bridge", i);
1280 		if (!pdev)
1281 			goto err_bridge;
1282 
1283 		mock_companion(adev, &pdev->dev);
1284 		rc = platform_device_add(pdev);
1285 		if (rc) {
1286 			platform_device_put(pdev);
1287 			goto err_bridge;
1288 		}
1289 
1290 		cxl_host_bridge[i] = pdev;
1291 		mock_pci_bus[i].bridge = &pdev->dev;
1292 		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
1293 				       "physical_node");
1294 		if (rc)
1295 			goto err_bridge;
1296 	}
1297 
1298 	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
1299 		struct platform_device *bridge =
1300 			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
1301 		struct platform_device *pdev;
1302 
1303 		pdev = platform_device_alloc("cxl_root_port", i);
1304 		if (!pdev)
1305 			goto err_port;
1306 		pdev->dev.parent = &bridge->dev;
1307 
1308 		rc = platform_device_add(pdev);
1309 		if (rc) {
1310 			platform_device_put(pdev);
1311 			goto err_port;
1312 		}
1313 		cxl_root_port[i] = pdev;
1314 	}
1315 
1316 	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
1317 	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
1318 		struct platform_device *root_port = cxl_root_port[i];
1319 		struct platform_device *pdev;
1320 
1321 		pdev = platform_device_alloc("cxl_switch_uport", i);
1322 		if (!pdev)
1323 			goto err_uport;
1324 		pdev->dev.parent = &root_port->dev;
1325 
1326 		rc = platform_device_add(pdev);
1327 		if (rc) {
1328 			platform_device_put(pdev);
1329 			goto err_uport;
1330 		}
1331 		cxl_switch_uport[i] = pdev;
1332 	}
1333 
1334 	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
1335 		struct platform_device *uport =
1336 			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
1337 		struct platform_device *pdev;
1338 
1339 		pdev = platform_device_alloc("cxl_switch_dport", i);
1340 		if (!pdev)
1341 			goto err_dport;
1342 		pdev->dev.parent = &uport->dev;
1343 
1344 		rc = platform_device_add(pdev);
1345 		if (rc) {
1346 			platform_device_put(pdev);
1347 			goto err_dport;
1348 		}
1349 		cxl_switch_dport[i] = pdev;
1350 	}
1351 
1352 	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
1353 		struct platform_device *dport = cxl_switch_dport[i];
1354 		struct platform_device *pdev;
1355 
1356 		pdev = platform_device_alloc("cxl_mem", i);
1357 		if (!pdev)
1358 			goto err_mem;
1359 		pdev->dev.parent = &dport->dev;
1360 		set_dev_node(&pdev->dev, i % 2);
1361 
1362 		rc = platform_device_add(pdev);
1363 		if (rc) {
1364 			platform_device_put(pdev);
1365 			goto err_mem;
1366 		}
1367 		cxl_mem[i] = pdev;
1368 	}
1369 
1370 	rc = cxl_single_init();
1371 	if (rc)
1372 		goto err_mem;
1373 
1374 	rc = cxl_rch_init();
1375 	if (rc)
1376 		goto err_single;
1377 
1378 	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
1379 	if (!cxl_acpi)
1380 		goto err_rch;
1381 
1382 	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
1383 	acpi0017_mock.dev.bus = &platform_bus_type;
1384 
1385 	rc = platform_device_add(cxl_acpi);
1386 	if (rc)
1387 		goto err_add;
1388 
1389 	return 0;
1390 
1391 err_add:
1392 	platform_device_put(cxl_acpi);
1393 err_rch:
1394 	cxl_rch_exit();
1395 err_single:
1396 	cxl_single_exit();
1397 err_mem:
1398 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1399 		platform_device_unregister(cxl_mem[i]);
1400 err_dport:
1401 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1402 		platform_device_unregister(cxl_switch_dport[i]);
1403 err_uport:
1404 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1405 		platform_device_unregister(cxl_switch_uport[i]);
1406 err_port:
1407 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1408 		platform_device_unregister(cxl_root_port[i]);
1409 err_bridge:
1410 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1411 		struct platform_device *pdev = cxl_host_bridge[i];
1412 
1413 		if (!pdev)
1414 			continue;
1415 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1416 		platform_device_unregister(cxl_host_bridge[i]);
1417 	}
1418 err_populate:
1419 	depopulate_all_mock_resources();
1420 err_gen_pool_add:
1421 	gen_pool_destroy(cxl_mock_pool);
1422 err_gen_pool_create:
1423 	unregister_cxl_mock_ops(&cxl_mock_ops);
1424 	return rc;
1425 }
1426 
1427 static __exit void cxl_test_exit(void)
1428 {
1429 	int i;
1430 
1431 	platform_device_unregister(cxl_acpi);
1432 	cxl_rch_exit();
1433 	cxl_single_exit();
1434 	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
1435 		platform_device_unregister(cxl_mem[i]);
1436 	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
1437 		platform_device_unregister(cxl_switch_dport[i]);
1438 	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
1439 		platform_device_unregister(cxl_switch_uport[i]);
1440 	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
1441 		platform_device_unregister(cxl_root_port[i]);
1442 	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
1443 		struct platform_device *pdev = cxl_host_bridge[i];
1444 
1445 		if (!pdev)
1446 			continue;
1447 		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
1448 		platform_device_unregister(cxl_host_bridge[i]);
1449 	}
1450 	depopulate_all_mock_resources();
1451 	gen_pool_destroy(cxl_mock_pool);
1452 	unregister_cxl_mock_ops(&cxl_mock_ops);
1453 }
1454 
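/*
 * Example usage (assuming this file builds as the cxl_test module):
 *   modprobe cxl_test interleave_arithmetic=1   # publish the XOR CFMWS set
 */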
1455 module_param(interleave_arithmetic, int, 0444);
1456 MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
1457 module_init(cxl_test_init);
1458 module_exit(cxl_test_exit);
1459 MODULE_LICENSE("GPL v2");
1460 MODULE_IMPORT_NS(ACPI);
1461 MODULE_IMPORT_NS(CXL);
1462