xref: /linux/drivers/cxl/core/region.c (revision b37042b2bb7cd751f03b73afb90364a418d870f4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */
3 #include <linux/memregion.h>
4 #include <linux/genalloc.h>
5 #include <linux/device.h>
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/uuid.h>
9 #include <linux/idr.h>
10 #include <cxlmem.h>
11 #include <cxl.h>
12 #include "core.h"
13 
14 /**
15  * DOC: cxl core region
16  *
17  * CXL Regions represent mapped memory capacity in system physical address
18  * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
19  * Memory ranges, Regions represent the capacity actively mapped by the HDM
20  * Decoder Capability structures throughout the Host Bridges, Switches, and
21  * Endpoints in the topology.
22  *
23  * Region configuration has ordering constraints:
24  * 1. Interleave granularity and ways
25  * 2. Region size
26  * 3. Decoder targets
27  * The UUID may be set at any time, but is only visible for persistent regions.
28  */
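
/*
 * Example (a sketch; decoder and region names, and the sizes, are
 * illustrative): provisioning a 2-way interleaved pmem region via sysfs
 * follows the ordering above:
 *
 *	echo region0 > .../decoder0.0/create_pmem_region
 *	uuidgen > .../region0/uuid
 *	echo 256 > .../region0/interleave_granularity
 *	echo 2 > .../region0/interleave_ways
 *	echo $((512 << 20)) > .../region0/size
 *	echo decoder2.0 > .../region0/target0
 *	echo decoder3.0 > .../region0/target1
 *	echo 1 > .../region0/commit
 */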
29 
30 /*
31  * All changes to the interleave configuration occur with this lock held
32  * for write.
33  */
34 static DECLARE_RWSEM(cxl_region_rwsem);
35 
36 static struct cxl_region *to_cxl_region(struct device *dev);
37 
38 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
39 			 char *buf)
40 {
41 	struct cxl_region *cxlr = to_cxl_region(dev);
42 	struct cxl_region_params *p = &cxlr->params;
43 	ssize_t rc;
44 
45 	rc = down_read_interruptible(&cxl_region_rwsem);
46 	if (rc)
47 		return rc;
48 	rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
49 	up_read(&cxl_region_rwsem);
50 
51 	return rc;
52 }
53 
54 static int is_dup(struct device *match, void *data)
55 {
56 	struct cxl_region_params *p;
57 	struct cxl_region *cxlr;
58 	uuid_t *uuid = data;
59 
60 	if (!is_cxl_region(match))
61 		return 0;
62 
63 	lockdep_assert_held(&cxl_region_rwsem);
64 	cxlr = to_cxl_region(match);
65 	p = &cxlr->params;
66 
67 	if (uuid_equal(&p->uuid, uuid)) {
68 		dev_dbg(match, "already has uuid: %pUb\n", uuid);
69 		return -EBUSY;
70 	}
71 
72 	return 0;
73 }
74 
75 static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
76 			  const char *buf, size_t len)
77 {
78 	struct cxl_region *cxlr = to_cxl_region(dev);
79 	struct cxl_region_params *p = &cxlr->params;
80 	uuid_t temp;
81 	ssize_t rc;
82 
83 	if (len != UUID_STRING_LEN + 1)
84 		return -EINVAL;
85 
86 	rc = uuid_parse(buf, &temp);
87 	if (rc)
88 		return rc;
89 
90 	if (uuid_is_null(&temp))
91 		return -EINVAL;
92 
93 	rc = down_write_killable(&cxl_region_rwsem);
94 	if (rc)
95 		return rc;
96 
97 	if (uuid_equal(&p->uuid, &temp))
98 		goto out;
99 
100 	rc = -EBUSY;
101 	if (p->state >= CXL_CONFIG_ACTIVE)
102 		goto out;
103 
104 	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
105 	if (rc < 0)
106 		goto out;
107 
108 	uuid_copy(&p->uuid, &temp);
109 out:
110 	up_write(&cxl_region_rwsem);
111 
112 	if (rc)
113 		return rc;
114 	return len;
115 }
116 static DEVICE_ATTR_RW(uuid);
117 
118 static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
119 					  struct cxl_region *cxlr)
120 {
121 	return xa_load(&port->regions, (unsigned long)cxlr);
122 }
123 
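/*
 * Reset the HDM decoders for the first @count targets of @cxlr, walking each
 * target's decode chain from the topmost port below the CXL root down to the
 * endpoint decoder itself.
 */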
124 static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
125 {
126 	struct cxl_region_params *p = &cxlr->params;
127 	int i;
128 
129 	for (i = count - 1; i >= 0; i--) {
130 		struct cxl_endpoint_decoder *cxled = p->targets[i];
131 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
132 		struct cxl_port *iter = cxled_to_port(cxled);
133 		struct cxl_ep *ep;
134 		int rc;
135 
136 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
137 			iter = to_cxl_port(iter->dev.parent);
138 
139 		for (ep = cxl_ep_load(iter, cxlmd); iter;
140 		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
141 			struct cxl_region_ref *cxl_rr;
142 			struct cxl_decoder *cxld;
143 
144 			cxl_rr = cxl_rr_load(iter, cxlr);
145 			cxld = cxl_rr->decoder;
146 			rc = cxld->reset(cxld);
147 			if (rc)
148 				return rc;
149 		}
150 
151 		rc = cxled->cxld.reset(&cxled->cxld);
152 		if (rc)
153 			return rc;
154 	}
155 
156 	return 0;
157 }
158 
159 static int cxl_region_decode_commit(struct cxl_region *cxlr)
160 {
161 	struct cxl_region_params *p = &cxlr->params;
162 	int i, rc = 0;
163 
164 	for (i = 0; i < p->nr_targets; i++) {
165 		struct cxl_endpoint_decoder *cxled = p->targets[i];
166 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
167 		struct cxl_region_ref *cxl_rr;
168 		struct cxl_decoder *cxld;
169 		struct cxl_port *iter;
170 		struct cxl_ep *ep;
171 
172 		/* commit bottom up */
173 		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
174 		     iter = to_cxl_port(iter->dev.parent)) {
175 			cxl_rr = cxl_rr_load(iter, cxlr);
176 			cxld = cxl_rr->decoder;
177 			rc = cxld->commit(cxld);
178 			if (rc)
179 				break;
180 		}
181 
182 		if (rc) {
183 			/* programming @iter failed, teardown */
184 			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
185 			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
186 				cxl_rr = cxl_rr_load(iter, cxlr);
187 				cxld = cxl_rr->decoder;
188 				cxld->reset(cxld);
189 			}
190 
191 			cxled->cxld.reset(&cxled->cxld);
192 			goto err;
193 		}
194 	}
195 
196 	return 0;
197 
198 err:
199 	/* undo the targets that were successfully committed */
200 	cxl_region_decode_reset(cxlr, i);
201 	return rc;
202 }
203 
204 static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
205 			    const char *buf, size_t len)
206 {
207 	struct cxl_region *cxlr = to_cxl_region(dev);
208 	struct cxl_region_params *p = &cxlr->params;
209 	bool commit;
210 	ssize_t rc;
211 
212 	rc = kstrtobool(buf, &commit);
213 	if (rc)
214 		return rc;
215 
216 	rc = down_write_killable(&cxl_region_rwsem);
217 	if (rc)
218 		return rc;
219 
220 	/* Already in the requested state? */
221 	if (commit && p->state >= CXL_CONFIG_COMMIT)
222 		goto out;
223 	if (!commit && p->state < CXL_CONFIG_COMMIT)
224 		goto out;
225 
226 	/* Not ready to commit? */
227 	if (commit && p->state < CXL_CONFIG_ACTIVE) {
228 		rc = -ENXIO;
229 		goto out;
230 	}
231 
232 	if (commit)
233 		rc = cxl_region_decode_commit(cxlr);
234 	else {
235 		p->state = CXL_CONFIG_RESET_PENDING;
236 		up_write(&cxl_region_rwsem);
237 		device_release_driver(&cxlr->dev);
238 		down_write(&cxl_region_rwsem);
239 
240 		/*
241 		 * The lock was dropped, so need to revalidate that the reset is
242 		 * still pending.
243 		 */
244 		if (p->state == CXL_CONFIG_RESET_PENDING)
245 			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
246 	}
247 
248 	if (rc)
249 		goto out;
250 
251 	if (commit)
252 		p->state = CXL_CONFIG_COMMIT;
253 	else if (p->state == CXL_CONFIG_RESET_PENDING)
254 		p->state = CXL_CONFIG_ACTIVE;
255 
256 out:
257 	up_write(&cxl_region_rwsem);
258 
259 	if (rc)
260 		return rc;
261 	return len;
262 }
263 
264 static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
265 			   char *buf)
266 {
267 	struct cxl_region *cxlr = to_cxl_region(dev);
268 	struct cxl_region_params *p = &cxlr->params;
269 	ssize_t rc;
270 
271 	rc = down_read_interruptible(&cxl_region_rwsem);
272 	if (rc)
273 		return rc;
274 	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
275 	up_read(&cxl_region_rwsem);
276 
277 	return rc;
278 }
279 static DEVICE_ATTR_RW(commit);
280 
281 static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
282 				  int n)
283 {
284 	struct device *dev = kobj_to_dev(kobj);
285 	struct cxl_region *cxlr = to_cxl_region(dev);
286 
287 	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
288 		return 0;
289 	return a->mode;
290 }
291 
292 static ssize_t interleave_ways_show(struct device *dev,
293 				    struct device_attribute *attr, char *buf)
294 {
295 	struct cxl_region *cxlr = to_cxl_region(dev);
296 	struct cxl_region_params *p = &cxlr->params;
297 	ssize_t rc;
298 
299 	rc = down_read_interruptible(&cxl_region_rwsem);
300 	if (rc)
301 		return rc;
302 	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
303 	up_read(&cxl_region_rwsem);
304 
305 	return rc;
306 }
307 
308 static const struct attribute_group *get_cxl_region_target_group(void);
309 
310 static ssize_t interleave_ways_store(struct device *dev,
311 				     struct device_attribute *attr,
312 				     const char *buf, size_t len)
313 {
314 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
315 	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
316 	struct cxl_region *cxlr = to_cxl_region(dev);
317 	struct cxl_region_params *p = &cxlr->params;
318 	unsigned int val, save;
319 	int rc;
320 	u8 iw;
321 
322 	rc = kstrtouint(buf, 0, &val);
323 	if (rc)
324 		return rc;
325 
326 	rc = ways_to_cxl(val, &iw);
327 	if (rc)
328 		return rc;
329 
330 	/*
331 	 * Even for x3, x6, and x12 interleaves the region interleave must be a
332 	 * power of 2 multiple of the host bridge interleave.
333 	 */
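	/*
	 * For example (illustrative values): a x2 host bridge interleave
	 * permits x2, x4, x8, ... region interleaves, while a x3 host bridge
	 * interleave permits x3, x6, x12, ... region interleaves.
	 */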
334 	if (!is_power_of_2(val / cxld->interleave_ways) ||
335 	    (val % cxld->interleave_ways)) {
336 		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
337 		return -EINVAL;
338 	}
339 
340 	rc = down_write_killable(&cxl_region_rwsem);
341 	if (rc)
342 		return rc;
343 	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
344 		rc = -EBUSY;
345 		goto out;
346 	}
347 
348 	save = p->interleave_ways;
349 	p->interleave_ways = val;
350 	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
351 	if (rc)
352 		p->interleave_ways = save;
353 out:
354 	up_write(&cxl_region_rwsem);
355 	if (rc)
356 		return rc;
357 	return len;
358 }
359 static DEVICE_ATTR_RW(interleave_ways);
360 
361 static ssize_t interleave_granularity_show(struct device *dev,
362 					   struct device_attribute *attr,
363 					   char *buf)
364 {
365 	struct cxl_region *cxlr = to_cxl_region(dev);
366 	struct cxl_region_params *p = &cxlr->params;
367 	ssize_t rc;
368 
369 	rc = down_read_interruptible(&cxl_region_rwsem);
370 	if (rc)
371 		return rc;
372 	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
373 	up_read(&cxl_region_rwsem);
374 
375 	return rc;
376 }
377 
378 static ssize_t interleave_granularity_store(struct device *dev,
379 					    struct device_attribute *attr,
380 					    const char *buf, size_t len)
381 {
382 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
383 	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
384 	struct cxl_region *cxlr = to_cxl_region(dev);
385 	struct cxl_region_params *p = &cxlr->params;
386 	int rc, val;
387 	u16 ig;
388 
389 	rc = kstrtoint(buf, 0, &val);
390 	if (rc)
391 		return rc;
392 
393 	rc = granularity_to_cxl(val, &ig);
394 	if (rc)
395 		return rc;
396 
397 	/*
398 	 * When the host-bridge is interleaved, disallow region granularity !=
399 	 * root granularity. Regions with a granularity less than the root
400 	 * interleave result in needing multiple endpoints to support a single
401 	 * slot in the interleave (possible to support in the future). Regions
402 	 * with a granularity greater than the root interleave result in invalid
403 	 * DPA translations (invalid to support).
404 	 */
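	/*
	 * For example (illustrative values): with a x2 root interleave at
	 * 256B granularity, a 512B region granularity would ask each endpoint
	 * to decode 512B spans of which half is routed to the other host
	 * bridge, while a 128B region granularity would place two interleave
	 * positions inside a single 256B host bridge slot.
	 */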
405 	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
406 		return -EINVAL;
407 
408 	rc = down_write_killable(&cxl_region_rwsem);
409 	if (rc)
410 		return rc;
411 	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
412 		rc = -EBUSY;
413 		goto out;
414 	}
415 
416 	p->interleave_granularity = val;
417 out:
418 	up_write(&cxl_region_rwsem);
419 	if (rc)
420 		return rc;
421 	return len;
422 }
423 static DEVICE_ATTR_RW(interleave_granularity);
424 
425 static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
426 			     char *buf)
427 {
428 	struct cxl_region *cxlr = to_cxl_region(dev);
429 	struct cxl_region_params *p = &cxlr->params;
430 	u64 resource = -1ULL;
431 	ssize_t rc;
432 
433 	rc = down_read_interruptible(&cxl_region_rwsem);
434 	if (rc)
435 		return rc;
436 	if (p->res)
437 		resource = p->res->start;
438 	rc = sysfs_emit(buf, "%#llx\n", resource);
439 	up_read(&cxl_region_rwsem);
440 
441 	return rc;
442 }
443 static DEVICE_ATTR_RO(resource);
444 
445 static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
446 {
447 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
448 	struct cxl_region_params *p = &cxlr->params;
449 	struct resource *res;
450 	u32 remainder = 0;
451 
452 	lockdep_assert_held_write(&cxl_region_rwsem);
453 
454 	/* Nothing to do... */
455 	if (p->res && resource_size(p->res) == size)
456 		return 0;
457 
458 	/* To change the size, the current allocation must be freed first */
459 	if (p->res)
460 		return -EBUSY;
461 
462 	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
463 		return -EBUSY;
464 
465 	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
466 	if (!p->interleave_ways || !p->interleave_granularity ||
467 	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
468 		return -ENXIO;
469 
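	/*
	 * The HPA allocation must be a whole multiple of SZ_256M times the
	 * interleave ways, e.g. (illustrative) a x4 region can only be sized
	 * in 1GB increments.
	 */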
470 	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
471 	if (remainder)
472 		return -EINVAL;
473 
474 	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
475 				    dev_name(&cxlr->dev));
476 	if (IS_ERR(res)) {
477 		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
478 			PTR_ERR(res));
479 		return PTR_ERR(res);
480 	}
481 
482 	p->res = res;
483 	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
484 
485 	return 0;
486 }
487 
488 static void cxl_region_iomem_release(struct cxl_region *cxlr)
489 {
490 	struct cxl_region_params *p = &cxlr->params;
491 
492 	if (device_is_registered(&cxlr->dev))
493 		lockdep_assert_held_write(&cxl_region_rwsem);
494 	if (p->res) {
495 		remove_resource(p->res);
496 		kfree(p->res);
497 		p->res = NULL;
498 	}
499 }
500 
501 static int free_hpa(struct cxl_region *cxlr)
502 {
503 	struct cxl_region_params *p = &cxlr->params;
504 
505 	lockdep_assert_held_write(&cxl_region_rwsem);
506 
507 	if (!p->res)
508 		return 0;
509 
510 	if (p->state >= CXL_CONFIG_ACTIVE)
511 		return -EBUSY;
512 
513 	cxl_region_iomem_release(cxlr);
514 	p->state = CXL_CONFIG_IDLE;
515 	return 0;
516 }
517 
518 static ssize_t size_store(struct device *dev, struct device_attribute *attr,
519 			  const char *buf, size_t len)
520 {
521 	struct cxl_region *cxlr = to_cxl_region(dev);
522 	u64 val;
523 	int rc;
524 
525 	rc = kstrtou64(buf, 0, &val);
526 	if (rc)
527 		return rc;
528 
529 	rc = down_write_killable(&cxl_region_rwsem);
530 	if (rc)
531 		return rc;
532 
533 	if (val)
534 		rc = alloc_hpa(cxlr, val);
535 	else
536 		rc = free_hpa(cxlr);
537 	up_write(&cxl_region_rwsem);
538 
539 	if (rc)
540 		return rc;
541 
542 	return len;
543 }
544 
545 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
546 			 char *buf)
547 {
548 	struct cxl_region *cxlr = to_cxl_region(dev);
549 	struct cxl_region_params *p = &cxlr->params;
550 	u64 size = 0;
551 	ssize_t rc;
552 
553 	rc = down_read_interruptible(&cxl_region_rwsem);
554 	if (rc)
555 		return rc;
556 	if (p->res)
557 		size = resource_size(p->res);
558 	rc = sysfs_emit(buf, "%#llx\n", size);
559 	up_read(&cxl_region_rwsem);
560 
561 	return rc;
562 }
563 static DEVICE_ATTR_RW(size);
564 
565 static struct attribute *cxl_region_attrs[] = {
566 	&dev_attr_uuid.attr,
567 	&dev_attr_commit.attr,
568 	&dev_attr_interleave_ways.attr,
569 	&dev_attr_interleave_granularity.attr,
570 	&dev_attr_resource.attr,
571 	&dev_attr_size.attr,
572 	NULL,
573 };
574 
575 static const struct attribute_group cxl_region_group = {
576 	.attrs = cxl_region_attrs,
577 	.is_visible = cxl_region_visible,
578 };
579 
580 static ssize_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
581 {
582 	struct cxl_region_params *p = &cxlr->params;
583 	struct cxl_endpoint_decoder *cxled;
584 	int rc;
585 
586 	rc = down_read_interruptible(&cxl_region_rwsem);
587 	if (rc)
588 		return rc;
589 
590 	if (pos >= p->interleave_ways) {
591 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
592 			p->interleave_ways);
593 		rc = -ENXIO;
594 		goto out;
595 	}
596 
597 	cxled = p->targets[pos];
598 	if (!cxled)
599 		rc = sysfs_emit(buf, "\n");
600 	else
601 		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
602 out:
603 	up_read(&cxl_region_rwsem);
604 
605 	return rc;
606 }
607 
608 static int match_free_decoder(struct device *dev, void *data)
609 {
610 	struct cxl_decoder *cxld;
611 	int *id = data;
612 
613 	if (!is_switch_decoder(dev))
614 		return 0;
615 
616 	cxld = to_cxl_decoder(dev);
617 
618 	/* enforce ordered allocation */
619 	if (cxld->id != *id)
620 		return 0;
621 
622 	if (!cxld->region)
623 		return 1;
624 
625 	(*id)++;
626 
627 	return 0;
628 }
629 
630 static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
631 						   struct cxl_region *cxlr)
632 {
633 	struct device *dev;
634 	int id = 0;
635 
636 	dev = device_find_child(&port->dev, &id, match_free_decoder);
637 	if (!dev)
638 		return NULL;
639 	/*
640 	 * This decoder is pinned: it stays registered as long as the endpoint
641 	 * decoder is registered, and endpoint decoder unregistration holds the
642 	 * cxl_region_rwsem over unregister events, so there is no need to hold
643 	 * on to this extra reference.
644 	 */
645 	put_device(dev);
646 	return to_cxl_decoder(dev);
647 }
648 
649 static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
650 					       struct cxl_region *cxlr)
651 {
652 	struct cxl_region_params *p = &cxlr->params;
653 	struct cxl_region_ref *cxl_rr, *iter;
654 	unsigned long index;
655 	int rc;
656 
657 	xa_for_each(&port->regions, index, iter) {
658 		struct cxl_region_params *ip = &iter->region->params;
659 
660 		if (ip->res->start > p->res->start) {
661 			dev_dbg(&cxlr->dev,
662 				"%s: HPA order violation %s:%pr vs %pr\n",
663 				dev_name(&port->dev),
664 				dev_name(&iter->region->dev), ip->res, p->res);
665 			return ERR_PTR(-EBUSY);
666 		}
667 	}
668 
669 	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
670 	if (!cxl_rr)
671 		return ERR_PTR(-ENOMEM);
672 	cxl_rr->port = port;
673 	cxl_rr->region = cxlr;
674 	cxl_rr->nr_targets = 1;
675 	xa_init(&cxl_rr->endpoints);
676 
677 	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
678 	if (rc) {
679 		dev_dbg(&cxlr->dev,
680 			"%s: failed to track region reference: %d\n",
681 			dev_name(&port->dev), rc);
682 		kfree(cxl_rr);
683 		return ERR_PTR(rc);
684 	}
685 
686 	return cxl_rr;
687 }
688 
689 static void free_region_ref(struct cxl_region_ref *cxl_rr)
690 {
691 	struct cxl_port *port = cxl_rr->port;
692 	struct cxl_region *cxlr = cxl_rr->region;
693 	struct cxl_decoder *cxld = cxl_rr->decoder;
694 
695 	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
696 	if (cxld->region == cxlr) {
697 		cxld->region = NULL;
698 		put_device(&cxlr->dev);
699 	}
700 
701 	xa_erase(&port->regions, (unsigned long)cxlr);
702 	xa_destroy(&cxl_rr->endpoints);
703 	kfree(cxl_rr);
704 }
705 
706 static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
707 			 struct cxl_endpoint_decoder *cxled)
708 {
709 	int rc;
710 	struct cxl_port *port = cxl_rr->port;
711 	struct cxl_region *cxlr = cxl_rr->region;
712 	struct cxl_decoder *cxld = cxl_rr->decoder;
713 	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));
714 
715 	if (ep) {
716 		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
717 			       GFP_KERNEL);
718 		if (rc)
719 			return rc;
720 	}
721 	cxl_rr->nr_eps++;
722 
723 	if (!cxld->region) {
724 		cxld->region = cxlr;
725 		get_device(&cxlr->dev);
726 	}
727 
728 	return 0;
729 }
730 
731 /**
732  * cxl_port_attach_region() - track a region's interest in a port by endpoint
733  * @port: port to add a new region reference 'struct cxl_region_ref'
734  * @cxlr: region to attach to @port
735  * @cxled: endpoint decoder used to create or further pin a region reference
736  * @pos: interleave position of @cxled in @cxlr
737  *
738  * The attach event is an opportunity to validate CXL decode setup
739  * constraints and record metadata needed for programming HDM decoders,
740  * in particular decoder target lists.
741  *
742  * The steps are:
743  *
744  * - validate that there are no other regions with a higher HPA already
745  *   associated with @port
746  * - establish a region reference if one is not already present
747  *
748  *   - additionally allocate a decoder instance that will host @cxlr on
749  *     @port
750  *
751  * - pin the region reference by the endpoint
752  * - account for how many entries in @port's target list are needed to
753  *   cover all of the added endpoints.
754  */
755 static int cxl_port_attach_region(struct cxl_port *port,
756 				  struct cxl_region *cxlr,
757 				  struct cxl_endpoint_decoder *cxled, int pos)
758 {
759 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
760 	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
761 	struct cxl_region_ref *cxl_rr;
762 	bool nr_targets_inc = false;
763 	struct cxl_decoder *cxld;
764 	unsigned long index;
765 	int rc = -EBUSY;
766 
767 	lockdep_assert_held_write(&cxl_region_rwsem);
768 
769 	cxl_rr = cxl_rr_load(port, cxlr);
770 	if (cxl_rr) {
771 		struct cxl_ep *ep_iter;
772 		int found = 0;
773 
774 		/*
775 		 * Walk the existing endpoints that have been attached to
776 		 * @cxlr at @port and see if they share the same 'next' port
777 		 * in the downstream direction, i.e. endpoints that share a
778 		 * common upstream switch.
779 		 */
780 		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
781 			if (ep_iter == ep)
782 				continue;
783 			if (ep_iter->next == ep->next) {
784 				found++;
785 				break;
786 			}
787 		}
788 
789 		/*
790 		 * New target port, or @port is an endpoint port that always
791 		 * accounts its own local decode as a target.
792 		 */
793 		if (!found || !ep->next) {
794 			cxl_rr->nr_targets++;
795 			nr_targets_inc = true;
796 		}
797 
798 		/*
799 		 * The decoder for @cxlr was allocated when the region was first
800 		 * attached to @port.
801 		 */
802 		cxld = cxl_rr->decoder;
803 	} else {
804 		cxl_rr = alloc_region_ref(port, cxlr);
805 		if (IS_ERR(cxl_rr)) {
806 			dev_dbg(&cxlr->dev,
807 				"%s: failed to allocate region reference\n",
808 				dev_name(&port->dev));
809 			return PTR_ERR(cxl_rr);
810 		}
811 		nr_targets_inc = true;
812 
813 		if (port == cxled_to_port(cxled))
814 			cxld = &cxled->cxld;
815 		else
816 			cxld = cxl_region_find_decoder(port, cxlr);
817 		if (!cxld) {
818 			dev_dbg(&cxlr->dev, "%s: no decoder available\n",
819 				dev_name(&port->dev));
820 			goto out_erase;
821 		}
822 
823 		if (cxld->region) {
824 			dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
825 				dev_name(&port->dev), dev_name(&cxld->dev),
826 				dev_name(&cxld->region->dev));
827 			rc = -EBUSY;
828 			goto out_erase;
829 		}
830 
831 		cxl_rr->decoder = cxld;
832 	}
833 
834 	rc = cxl_rr_ep_add(cxl_rr, cxled);
835 	if (rc) {
836 		dev_dbg(&cxlr->dev,
837 			"%s: failed to track endpoint %s:%s reference\n",
838 			dev_name(&port->dev), dev_name(&cxlmd->dev),
839 			dev_name(&cxld->dev));
840 		goto out_erase;
841 	}
842 
843 	dev_dbg(&cxlr->dev,
844 		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
845 		dev_name(port->uport), dev_name(&port->dev),
846 		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
847 		dev_name(&cxled->cxld.dev), pos,
848 		ep ? ep->next ? dev_name(ep->next->uport) :
849 				      dev_name(&cxlmd->dev) :
850 			   "none",
851 		cxl_rr->nr_eps, cxl_rr->nr_targets);
852 
853 	return 0;
854 out_erase:
855 	if (nr_targets_inc)
856 		cxl_rr->nr_targets--;
857 	if (cxl_rr->nr_eps == 0)
858 		free_region_ref(cxl_rr);
859 	return rc;
860 }
861 
862 static void cxl_port_detach_region(struct cxl_port *port,
863 				   struct cxl_region *cxlr,
864 				   struct cxl_endpoint_decoder *cxled)
865 {
866 	struct cxl_region_ref *cxl_rr;
867 	struct cxl_ep *ep = NULL;
868 
869 	lockdep_assert_held_write(&cxl_region_rwsem);
870 
871 	cxl_rr = cxl_rr_load(port, cxlr);
872 	if (!cxl_rr)
873 		return;
874 
875 	/*
876 	 * Endpoint ports do not carry cxl_ep references, and they
877 	 * never target more than one endpoint by definition
878 	 */
879 	if (cxl_rr->decoder == &cxled->cxld)
880 		cxl_rr->nr_eps--;
881 	else
882 		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
883 	if (ep) {
884 		struct cxl_ep *ep_iter;
885 		unsigned long index;
886 		int found = 0;
887 
888 		cxl_rr->nr_eps--;
889 		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
890 			if (ep_iter->next == ep->next) {
891 				found++;
892 				break;
893 			}
894 		}
895 		if (!found)
896 			cxl_rr->nr_targets--;
897 	}
898 
899 	if (cxl_rr->nr_eps == 0)
900 		free_region_ref(cxl_rr);
901 }
902 
903 static int check_last_peer(struct cxl_endpoint_decoder *cxled,
904 			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
905 			   int distance)
906 {
907 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
908 	struct cxl_region *cxlr = cxl_rr->region;
909 	struct cxl_region_params *p = &cxlr->params;
910 	struct cxl_endpoint_decoder *cxled_peer;
911 	struct cxl_port *port = cxl_rr->port;
912 	struct cxl_memdev *cxlmd_peer;
913 	struct cxl_ep *ep_peer;
914 	int pos = cxled->pos;
915 
916 	/*
917 	 * If this position wants to share a dport with the last endpoint mapped
918 	 * then that endpoint, at index 'position - distance', must also be
919 	 * mapped by this dport.
920 	 */
921 	if (pos < distance) {
922 		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
923 			dev_name(port->uport), dev_name(&port->dev),
924 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
925 		return -ENXIO;
926 	}
927 	cxled_peer = p->targets[pos - distance];
928 	cxlmd_peer = cxled_to_memdev(cxled_peer);
929 	ep_peer = cxl_ep_load(port, cxlmd_peer);
930 	if (ep->dport != ep_peer->dport) {
931 		dev_dbg(&cxlr->dev,
932 			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
933 			dev_name(port->uport), dev_name(&port->dev),
934 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
935 			dev_name(&cxlmd_peer->dev),
936 			dev_name(&cxled_peer->cxld.dev));
937 		return -ENXIO;
938 	}
939 
940 	return 0;
941 }
942 
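/*
 * Program the switch decoder that @port contributes to @cxlr: size the port's
 * interleave ways by its number of downstream targets, derive the interleave
 * granularity from the parent port's settings, and record the dport that
 * reaches @cxled in the decoder's target list.
 */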
943 static int cxl_port_setup_targets(struct cxl_port *port,
944 				  struct cxl_region *cxlr,
945 				  struct cxl_endpoint_decoder *cxled)
946 {
947 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
948 	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
949 	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
950 	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
951 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
952 	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
953 	struct cxl_region_params *p = &cxlr->params;
954 	struct cxl_decoder *cxld = cxl_rr->decoder;
955 	struct cxl_switch_decoder *cxlsd;
956 	u16 eig, peig;
957 	u8 eiw, peiw;
958 
959 	/*
960 	 * While root level decoders support x3, x6, x12, switch level
961 	 * decoders only support powers of 2 up to x16.
962 	 */
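	/*
	 * For example (illustrative topology): a x6 region can be built from
	 * a x3 root interleave with two targets per host bridge, but a port
	 * that would need to interleave x3 itself is rejected here.
	 */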
963 	if (!is_power_of_2(cxl_rr->nr_targets)) {
964 		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
965 			dev_name(port->uport), dev_name(&port->dev),
966 			cxl_rr->nr_targets);
967 		return -EINVAL;
968 	}
969 
970 	cxlsd = to_cxl_switch_decoder(&cxld->dev);
971 	if (cxl_rr->nr_targets_set) {
972 		int i, distance;
973 
974 		distance = p->nr_targets / cxl_rr->nr_targets;
975 		for (i = 0; i < cxl_rr->nr_targets_set; i++)
976 			if (ep->dport == cxlsd->target[i]) {
977 				rc = check_last_peer(cxled, ep, cxl_rr,
978 						     distance);
979 				if (rc)
980 					return rc;
981 				goto out_target_set;
982 			}
983 		goto add_target;
984 	}
985 
986 	if (is_cxl_root(parent_port)) {
987 		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
988 		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
989 		/*
990 		 * For purposes of address bit routing, use power-of-2 math for
991 		 * switch ports.
992 		 */
993 		if (!is_power_of_2(parent_iw))
994 			parent_iw /= 3;
995 	} else {
996 		struct cxl_region_ref *parent_rr;
997 		struct cxl_decoder *parent_cxld;
998 
999 		parent_rr = cxl_rr_load(parent_port, cxlr);
1000 		parent_cxld = parent_rr->decoder;
1001 		parent_ig = parent_cxld->interleave_granularity;
1002 		parent_iw = parent_cxld->interleave_ways;
1003 	}
1004 
1005 	rc = granularity_to_cxl(parent_ig, &peig);
1006 	if (rc) {
1007 		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
1008 			dev_name(parent_port->uport),
1009 			dev_name(&parent_port->dev), parent_ig);
1010 		return rc;
1011 	}
1012 
1013 	rc = ways_to_cxl(parent_iw, &peiw);
1014 	if (rc) {
1015 		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
1016 			dev_name(parent_port->uport),
1017 			dev_name(&parent_port->dev), parent_iw);
1018 		return rc;
1019 	}
1020 
1021 	iw = cxl_rr->nr_targets;
1022 	rc = ways_to_cxl(iw, &eiw);
1023 	if (rc) {
1024 		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
1025 			dev_name(port->uport), dev_name(&port->dev), iw);
1026 		return rc;
1027 	}
1028 
1029 	/*
1030 	 * If @parent_port is masking address bits, pick the next unused address
1031 	 * bit to route @port's targets.
1032 	 */
1033 	if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
1034 		u32 address_bit = max(peig + peiw, eiw + peig);
1035 
1036 		eig = address_bit - eiw + 1;
1037 	} else {
1038 		eiw = peiw;
1039 		eig = peig;
1040 	}
1041 
1042 	rc = cxl_to_granularity(eig, &ig);
1043 	if (rc) {
1044 		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
1045 			dev_name(port->uport), dev_name(&port->dev),
1046 			256 << eig);
1047 		return rc;
1048 	}
1049 
1050 	cxld->interleave_ways = iw;
1051 	cxld->interleave_granularity = ig;
1052 	cxld->hpa_range = (struct range) {
1053 		.start = p->res->start,
1054 		.end = p->res->end,
1055 	};
1056 	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport),
1057 		dev_name(&port->dev), iw, ig);
1058 add_target:
1059 	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
1060 		dev_dbg(&cxlr->dev,
1061 			"%s:%s: targets full trying to add %s:%s at %d\n",
1062 			dev_name(port->uport), dev_name(&port->dev),
1063 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1064 		return -ENXIO;
1065 	}
1066 	cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
1067 	inc = 1;
1068 out_target_set:
1069 	cxl_rr->nr_targets_set += inc;
1070 	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
1071 		dev_name(port->uport), dev_name(&port->dev),
1072 		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport),
1073 		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
1074 
1075 	return 0;
1076 }
1077 
1078 static void cxl_port_reset_targets(struct cxl_port *port,
1079 				   struct cxl_region *cxlr)
1080 {
1081 	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
1082 	struct cxl_decoder *cxld;
1083 
1084 	/*
1085 	 * After the last endpoint has been detached the entire cxl_rr may now
1086 	 * be gone.
1087 	 */
1088 	if (!cxl_rr)
1089 		return;
1090 	cxl_rr->nr_targets_set = 0;
1091 
1092 	cxld = cxl_rr->decoder;
1093 	cxld->hpa_range = (struct range) {
1094 		.start = 0,
1095 		.end = -1,
1096 	};
1097 }
1098 
1099 static void cxl_region_teardown_targets(struct cxl_region *cxlr)
1100 {
1101 	struct cxl_region_params *p = &cxlr->params;
1102 	struct cxl_endpoint_decoder *cxled;
1103 	struct cxl_memdev *cxlmd;
1104 	struct cxl_port *iter;
1105 	struct cxl_ep *ep;
1106 	int i;
1107 
1108 	for (i = 0; i < p->nr_targets; i++) {
1109 		cxled = p->targets[i];
1110 		cxlmd = cxled_to_memdev(cxled);
1111 
1112 		iter = cxled_to_port(cxled);
1113 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1114 			iter = to_cxl_port(iter->dev.parent);
1115 
1116 		for (ep = cxl_ep_load(iter, cxlmd); iter;
1117 		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
1118 			cxl_port_reset_targets(iter, cxlr);
1119 	}
1120 }
1121 
1122 static int cxl_region_setup_targets(struct cxl_region *cxlr)
1123 {
1124 	struct cxl_region_params *p = &cxlr->params;
1125 	struct cxl_endpoint_decoder *cxled;
1126 	struct cxl_memdev *cxlmd;
1127 	struct cxl_port *iter;
1128 	struct cxl_ep *ep;
1129 	int i, rc;
1130 
1131 	for (i = 0; i < p->nr_targets; i++) {
1132 		cxled = p->targets[i];
1133 		cxlmd = cxled_to_memdev(cxled);
1134 
1135 		iter = cxled_to_port(cxled);
1136 		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
1137 			iter = to_cxl_port(iter->dev.parent);
1138 
1139 		/*
1140 		 * Descend the topology tree programming targets while
1141 		 * looking for conflicts.
1142 		 */
1143 		for (ep = cxl_ep_load(iter, cxlmd); iter;
1144 		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
1145 			rc = cxl_port_setup_targets(iter, cxlr, cxled);
1146 			if (rc) {
1147 				cxl_region_teardown_targets(cxlr);
1148 				return rc;
1149 			}
1150 		}
1151 	}
1152 
1153 	return 0;
1154 }
1155 
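/*
 * Validate that @cxled can occupy interleave position @pos in @cxlr (mode,
 * host bridge routing, and DPA size must all line up), then establish region
 * references at each port from the endpoint up to the CXL root.
 */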
1156 static int cxl_region_attach(struct cxl_region *cxlr,
1157 			     struct cxl_endpoint_decoder *cxled, int pos)
1158 {
1159 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
1160 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1161 	struct cxl_port *ep_port, *root_port, *iter;
1162 	struct cxl_region_params *p = &cxlr->params;
1163 	struct cxl_dport *dport;
1164 	int i, rc = -ENXIO;
1165 
1166 	if (cxled->mode == CXL_DECODER_DEAD) {
1167 		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
1168 		return -ENODEV;
1169 	}
1170 
1171 	/* already full of members, or interleave config not established? */
1172 	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
1173 		dev_dbg(&cxlr->dev, "region already active\n");
1174 		return -EBUSY;
1175 	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
1176 		dev_dbg(&cxlr->dev, "interleave config missing\n");
1177 		return -ENXIO;
1178 	}
1179 
1180 	if (pos < 0 || pos >= p->interleave_ways) {
1181 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1182 			p->interleave_ways);
1183 		return -ENXIO;
1184 	}
1185 
1186 	if (p->targets[pos] == cxled)
1187 		return 0;
1188 
1189 	if (p->targets[pos]) {
1190 		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
1191 		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);
1192 
1193 		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
1194 			pos, dev_name(&cxlmd_target->dev),
1195 			dev_name(&cxled_target->cxld.dev));
1196 		return -EBUSY;
1197 	}
1198 
1199 	for (i = 0; i < p->interleave_ways; i++) {
1200 		struct cxl_endpoint_decoder *cxled_target;
1201 		struct cxl_memdev *cxlmd_target;
1202 
1203 		cxled_target = p->targets[i];
1204 		if (!cxled_target)
1205 			continue;
1206 
1207 		cxlmd_target = cxled_to_memdev(cxled_target);
1208 		if (cxlmd_target == cxlmd) {
1209 			dev_dbg(&cxlr->dev,
1210 				"%s already specified at position %d via: %s\n",
1211 				dev_name(&cxlmd->dev), pos,
1212 				dev_name(&cxled_target->cxld.dev));
1213 			return -EBUSY;
1214 		}
1215 	}
1216 
1217 	ep_port = cxled_to_port(cxled);
1218 	root_port = cxlrd_to_port(cxlrd);
1219 	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
1220 	if (!dport) {
1221 		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
1222 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1223 			dev_name(cxlr->dev.parent));
1224 		return -ENXIO;
1225 	}
1226 
1227 	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
1228 		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
1229 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1230 			dev_name(&cxlrd->cxlsd.cxld.dev));
1231 		return -ENXIO;
1232 	}
1233 
1234 	if (cxled->cxld.target_type != cxlr->type) {
1235 		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
1236 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1237 			cxled->cxld.target_type, cxlr->type);
1238 		return -ENXIO;
1239 	}
1240 
1241 	if (!cxled->dpa_res) {
1242 		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
1243 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
1244 		return -ENXIO;
1245 	}
1246 
1247 	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
1248 	    resource_size(p->res)) {
1249 		dev_dbg(&cxlr->dev,
1250 			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
1251 			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1252 			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
1253 			(u64)resource_size(p->res));
1254 		return -EINVAL;
1255 	}
1256 
1257 	for (iter = ep_port; !is_cxl_root(iter);
1258 	     iter = to_cxl_port(iter->dev.parent)) {
1259 		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
1260 		if (rc)
1261 			goto err;
1262 	}
1263 
1264 	p->targets[pos] = cxled;
1265 	cxled->pos = pos;
1266 	p->nr_targets++;
1267 
1268 	if (p->nr_targets == p->interleave_ways) {
1269 		rc = cxl_region_setup_targets(cxlr);
1270 		if (rc)
1271 			goto err_decrement;
1272 		p->state = CXL_CONFIG_ACTIVE;
1273 	}
1274 
1275 	cxled->cxld.interleave_ways = p->interleave_ways;
1276 	cxled->cxld.interleave_granularity = p->interleave_granularity;
1277 	cxled->cxld.hpa_range = (struct range) {
1278 		.start = p->res->start,
1279 		.end = p->res->end,
1280 	};
1281 
1282 	return 0;
1283 
1284 err_decrement:
1285 	p->nr_targets--;
1286 err:
1287 	for (iter = ep_port; !is_cxl_root(iter);
1288 	     iter = to_cxl_port(iter->dev.parent))
1289 		cxl_port_detach_region(iter, cxlr, cxled);
1290 	return rc;
1291 }
1292 
1293 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
1294 {
1295 	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
1296 	struct cxl_region *cxlr = cxled->cxld.region;
1297 	struct cxl_region_params *p;
1298 	int rc = 0;
1299 
1300 	lockdep_assert_held_write(&cxl_region_rwsem);
1301 
1302 	if (!cxlr)
1303 		return 0;
1304 
1305 	p = &cxlr->params;
1306 	get_device(&cxlr->dev);
1307 
1308 	if (p->state > CXL_CONFIG_ACTIVE) {
1309 		/*
1310 		 * TODO: tear down all impacted regions if a device is
1311 		 * removed out of order
1312 		 */
1313 		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
1314 		if (rc)
1315 			goto out;
1316 		p->state = CXL_CONFIG_ACTIVE;
1317 	}
1318 
1319 	for (iter = ep_port; !is_cxl_root(iter);
1320 	     iter = to_cxl_port(iter->dev.parent))
1321 		cxl_port_detach_region(iter, cxlr, cxled);
1322 
1323 	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
1324 	    p->targets[cxled->pos] != cxled) {
1325 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1326 
1327 		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
1328 			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
1329 			      cxled->pos);
1330 		goto out;
1331 	}
1332 
1333 	if (p->state == CXL_CONFIG_ACTIVE) {
1334 		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
1335 		cxl_region_teardown_targets(cxlr);
1336 	}
1337 	p->targets[cxled->pos] = NULL;
1338 	p->nr_targets--;
1339 	cxled->cxld.hpa_range = (struct range) {
1340 		.start = 0,
1341 		.end = -1,
1342 	};
1343 
1344 	/* notify the region driver that one of its targets has departed */
1345 	up_write(&cxl_region_rwsem);
1346 	device_release_driver(&cxlr->dev);
1347 	down_write(&cxl_region_rwsem);
1348 out:
1349 	put_device(&cxlr->dev);
1350 	return rc;
1351 }
1352 
1353 void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
1354 {
1355 	down_write(&cxl_region_rwsem);
1356 	cxled->mode = CXL_DECODER_DEAD;
1357 	cxl_region_detach(cxled);
1358 	up_write(&cxl_region_rwsem);
1359 }
1360 
1361 static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos)
1362 {
1363 	struct device *dev;
1364 	int rc;
1365 
1366 	dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder);
1367 	if (!dev)
1368 		return -ENODEV;
1369 
1370 	if (!is_endpoint_decoder(dev)) {
1371 		put_device(dev);
1372 		return -EINVAL;
1373 	}
1374 
1375 	rc = down_write_killable(&cxl_region_rwsem);
1376 	if (rc)
1377 		goto out;
1378 	down_read(&cxl_dpa_rwsem);
1379 	rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos);
1380 	up_read(&cxl_dpa_rwsem);
1381 	up_write(&cxl_region_rwsem);
1382 out:
1383 	put_device(dev);
1384 	return rc;
1385 }
1386 
1387 static int detach_target(struct cxl_region *cxlr, int pos)
1388 {
1389 	struct cxl_region_params *p = &cxlr->params;
1390 	int rc;
1391 
1392 	rc = down_write_killable(&cxl_region_rwsem);
1393 	if (rc)
1394 		return rc;
1395 
1396 	if (pos >= p->interleave_ways) {
1397 		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
1398 			p->interleave_ways);
1399 		rc = -ENXIO;
1400 		goto out;
1401 	}
1402 
1403 	if (!p->targets[pos]) {
1404 		rc = 0;
1405 		goto out;
1406 	}
1407 
1408 	rc = cxl_region_detach(p->targets[pos]);
1409 out:
1410 	up_write(&cxl_region_rwsem);
1411 	return rc;
1412 }
1413 
1414 static ssize_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
1415 			    size_t len)
1416 {
1417 	int rc;
1418 
1419 	if (sysfs_streq(buf, "\n"))
1420 		rc = detach_target(cxlr, pos);
1421 	else
1422 		rc = attach_target(cxlr, buf, pos);
1423 
1424 	if (rc < 0)
1425 		return rc;
1426 	return len;
1427 }
1428 
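/*
 * Example usage (hypothetical decoder name): writing "decoder3.2" to a
 * region's target0 attribute attaches that endpoint decoder at interleave
 * position 0, and writing an empty line detaches whatever occupies the
 * position.
 */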
1429 #define TARGET_ATTR_RW(n)                                              \
1430 static ssize_t target##n##_show(                                       \
1431 	struct device *dev, struct device_attribute *attr, char *buf)  \
1432 {                                                                      \
1433 	return show_targetN(to_cxl_region(dev), buf, (n));             \
1434 }                                                                      \
1435 static ssize_t target##n##_store(struct device *dev,                   \
1436 				 struct device_attribute *attr,        \
1437 				 const char *buf, size_t len)          \
1438 {                                                                      \
1439 	return store_targetN(to_cxl_region(dev), buf, (n), len);       \
1440 }                                                                      \
1441 static DEVICE_ATTR_RW(target##n)
1442 
1443 TARGET_ATTR_RW(0);
1444 TARGET_ATTR_RW(1);
1445 TARGET_ATTR_RW(2);
1446 TARGET_ATTR_RW(3);
1447 TARGET_ATTR_RW(4);
1448 TARGET_ATTR_RW(5);
1449 TARGET_ATTR_RW(6);
1450 TARGET_ATTR_RW(7);
1451 TARGET_ATTR_RW(8);
1452 TARGET_ATTR_RW(9);
1453 TARGET_ATTR_RW(10);
1454 TARGET_ATTR_RW(11);
1455 TARGET_ATTR_RW(12);
1456 TARGET_ATTR_RW(13);
1457 TARGET_ATTR_RW(14);
1458 TARGET_ATTR_RW(15);
1459 
1460 static struct attribute *target_attrs[] = {
1461 	&dev_attr_target0.attr,
1462 	&dev_attr_target1.attr,
1463 	&dev_attr_target2.attr,
1464 	&dev_attr_target3.attr,
1465 	&dev_attr_target4.attr,
1466 	&dev_attr_target5.attr,
1467 	&dev_attr_target6.attr,
1468 	&dev_attr_target7.attr,
1469 	&dev_attr_target8.attr,
1470 	&dev_attr_target9.attr,
1471 	&dev_attr_target10.attr,
1472 	&dev_attr_target11.attr,
1473 	&dev_attr_target12.attr,
1474 	&dev_attr_target13.attr,
1475 	&dev_attr_target14.attr,
1476 	&dev_attr_target15.attr,
1477 	NULL,
1478 };
1479 
1480 static umode_t cxl_region_target_visible(struct kobject *kobj,
1481 					 struct attribute *a, int n)
1482 {
1483 	struct device *dev = kobj_to_dev(kobj);
1484 	struct cxl_region *cxlr = to_cxl_region(dev);
1485 	struct cxl_region_params *p = &cxlr->params;
1486 
1487 	if (n < p->interleave_ways)
1488 		return a->mode;
1489 	return 0;
1490 }
1491 
1492 static const struct attribute_group cxl_region_target_group = {
1493 	.attrs = target_attrs,
1494 	.is_visible = cxl_region_target_visible,
1495 };
1496 
1497 static const struct attribute_group *get_cxl_region_target_group(void)
1498 {
1499 	return &cxl_region_target_group;
1500 }
1501 
1502 static const struct attribute_group *region_groups[] = {
1503 	&cxl_base_attribute_group,
1504 	&cxl_region_group,
1505 	&cxl_region_target_group,
1506 	NULL,
1507 };
1508 
1509 static void cxl_region_release(struct device *dev)
1510 {
1511 	struct cxl_region *cxlr = to_cxl_region(dev);
1512 
1513 	memregion_free(cxlr->id);
1514 	kfree(cxlr);
1515 }
1516 
1517 const struct device_type cxl_region_type = {
1518 	.name = "cxl_region",
1519 	.release = cxl_region_release,
1520 	.groups = region_groups
1521 };
1522 
1523 bool is_cxl_region(struct device *dev)
1524 {
1525 	return dev->type == &cxl_region_type;
1526 }
1527 EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);
1528 
1529 static struct cxl_region *to_cxl_region(struct device *dev)
1530 {
1531 	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
1532 			  "not a cxl_region device\n"))
1533 		return NULL;
1534 
1535 	return container_of(dev, struct cxl_region, dev);
1536 }
1537 
1538 static void unregister_region(void *dev)
1539 {
1540 	struct cxl_region *cxlr = to_cxl_region(dev);
1541 
1542 	device_del(dev);
1543 	cxl_region_iomem_release(cxlr);
1544 	put_device(dev);
1545 }
1546 
1547 static struct lock_class_key cxl_region_key;
1548 
1549 static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
1550 {
1551 	struct cxl_region *cxlr;
1552 	struct device *dev;
1553 
1554 	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
1555 	if (!cxlr) {
1556 		memregion_free(id);
1557 		return ERR_PTR(-ENOMEM);
1558 	}
1559 
1560 	dev = &cxlr->dev;
1561 	device_initialize(dev);
1562 	lockdep_set_class(&dev->mutex, &cxl_region_key);
1563 	dev->parent = &cxlrd->cxlsd.cxld.dev;
1564 	device_set_pm_not_required(dev);
1565 	dev->bus = &cxl_bus_type;
1566 	dev->type = &cxl_region_type;
1567 	cxlr->id = id;
1568 
1569 	return cxlr;
1570 }
1571 
1572 /**
1573  * devm_cxl_add_region - Adds a region to a decoder
1574  * @cxlrd: root decoder
1575  * @id: memregion id to create, memregion_free()'d if region creation fails
1576  * @mode: mode for the endpoint decoders of this region
1577  * @type: select whether this is an expander (type-3) or accelerator (type-2)
1578  *
1579  * This is the second step of region initialization. Regions exist within an
1580  * address space which is mapped by a @cxlrd.
1581  *
1582  * Return: the new region on success, else an ERR_PTR() encoded error code. The
1583  * region will be named "regionZ" where Z is the unique region number.
1584  */
1585 static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
1586 					      int id,
1587 					      enum cxl_decoder_mode mode,
1588 					      enum cxl_decoder_type type)
1589 {
1590 	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
1591 	struct cxl_region *cxlr;
1592 	struct device *dev;
1593 	int rc;
1594 
1595 	cxlr = cxl_region_alloc(cxlrd, id);
1596 	if (IS_ERR(cxlr))
1597 		return cxlr;
1598 	cxlr->mode = mode;
1599 	cxlr->type = type;
1600 
1601 	dev = &cxlr->dev;
1602 	rc = dev_set_name(dev, "region%d", id);
1603 	if (rc)
1604 		goto err;
1605 
1606 	rc = device_add(dev);
1607 	if (rc)
1608 		goto err;
1609 
1610 	rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr);
1611 	if (rc)
1612 		return ERR_PTR(rc);
1613 
1614 	dev_dbg(port->uport, "%s: created %s\n",
1615 		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
1616 	return cxlr;
1617 
1618 err:
1619 	put_device(dev);
1620 	return ERR_PTR(rc);
1621 }
1622 
1623 static ssize_t create_pmem_region_show(struct device *dev,
1624 				       struct device_attribute *attr, char *buf)
1625 {
1626 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
1627 
1628 	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
1629 }
1630 
1631 static ssize_t create_pmem_region_store(struct device *dev,
1632 					struct device_attribute *attr,
1633 					const char *buf, size_t len)
1634 {
1635 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
1636 	struct cxl_region *cxlr;
1637 	int id, rc;
1638 
1639 	rc = sscanf(buf, "region%d\n", &id);
1640 	if (rc != 1)
1641 		return -EINVAL;
1642 
1643 	rc = memregion_alloc(GFP_KERNEL);
1644 	if (rc < 0)
1645 		return rc;
1646 
1647 	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
1648 		memregion_free(rc);
1649 		return -EBUSY;
1650 	}
1651 
1652 	cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM,
1653 				   CXL_DECODER_EXPANDER);
1654 	if (IS_ERR(cxlr))
1655 		return PTR_ERR(cxlr);
1656 
1657 	return len;
1658 }
1659 DEVICE_ATTR_RW(create_pmem_region);
1660 
1661 static ssize_t region_show(struct device *dev, struct device_attribute *attr,
1662 			   char *buf)
1663 {
1664 	struct cxl_decoder *cxld = to_cxl_decoder(dev);
1665 	ssize_t rc;
1666 
1667 	rc = down_read_interruptible(&cxl_region_rwsem);
1668 	if (rc)
1669 		return rc;
1670 
1671 	if (cxld->region)
1672 		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
1673 	else
1674 		rc = sysfs_emit(buf, "\n");
1675 	up_read(&cxl_region_rwsem);
1676 
1677 	return rc;
1678 }
1679 DEVICE_ATTR_RO(region);
1680 
1681 static struct cxl_region *
1682 cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
1683 {
1684 	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
1685 	struct device *region_dev;
1686 
1687 	region_dev = device_find_child_by_name(&cxld->dev, name);
1688 	if (!region_dev)
1689 		return ERR_PTR(-ENODEV);
1690 
1691 	return to_cxl_region(region_dev);
1692 }
1693 
1694 static ssize_t delete_region_store(struct device *dev,
1695 				   struct device_attribute *attr,
1696 				   const char *buf, size_t len)
1697 {
1698 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
1699 	struct cxl_port *port = to_cxl_port(dev->parent);
1700 	struct cxl_region *cxlr;
1701 
1702 	cxlr = cxl_find_region_by_name(cxlrd, buf);
1703 	if (IS_ERR(cxlr))
1704 		return PTR_ERR(cxlr);
1705 
1706 	devm_release_action(port->uport, unregister_region, cxlr);
1707 	put_device(&cxlr->dev);
1708 
1709 	return len;
1710 }
1711 DEVICE_ATTR_WO(delete_region);
1712 
1713 static void cxl_pmem_region_release(struct device *dev)
1714 {
1715 	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
1716 	int i;
1717 
1718 	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
1719 		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;
1720 
1721 		put_device(&cxlmd->dev);
1722 	}
1723 
1724 	kfree(cxlr_pmem);
1725 }
1726 
1727 static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
1728 	&cxl_base_attribute_group,
1729 	NULL,
1730 };
1731 
1732 const struct device_type cxl_pmem_region_type = {
1733 	.name = "cxl_pmem_region",
1734 	.release = cxl_pmem_region_release,
1735 	.groups = cxl_pmem_region_attribute_groups,
1736 };
1737 
1738 bool is_cxl_pmem_region(struct device *dev)
1739 {
1740 	return dev->type == &cxl_pmem_region_type;
1741 }
1742 EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);
1743 
1744 struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
1745 {
1746 	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
1747 			  "not a cxl_pmem_region device\n"))
1748 		return NULL;
1749 	return container_of(dev, struct cxl_pmem_region, dev);
1750 }
1751 EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);
1752 
1753 static struct lock_class_key cxl_pmem_region_key;
1754 
1755 static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
1756 {
1757 	struct cxl_region_params *p = &cxlr->params;
1758 	struct cxl_pmem_region *cxlr_pmem;
1759 	struct device *dev;
1760 	int i;
1761 
1762 	down_read(&cxl_region_rwsem);
1763 	if (p->state != CXL_CONFIG_COMMIT) {
1764 		cxlr_pmem = ERR_PTR(-ENXIO);
1765 		goto out;
1766 	}
1767 
1768 	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
1769 			    GFP_KERNEL);
1770 	if (!cxlr_pmem) {
1771 		cxlr_pmem = ERR_PTR(-ENOMEM);
1772 		goto out;
1773 	}
1774 
1775 	cxlr_pmem->hpa_range.start = p->res->start;
1776 	cxlr_pmem->hpa_range.end = p->res->end;
1777 
1778 	/* Snapshot the region configuration underneath the cxl_region_rwsem */
1779 	cxlr_pmem->nr_mappings = p->nr_targets;
1780 	for (i = 0; i < p->nr_targets; i++) {
1781 		struct cxl_endpoint_decoder *cxled = p->targets[i];
1782 		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
1783 		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
1784 
1785 		m->cxlmd = cxlmd;
1786 		get_device(&cxlmd->dev);
1787 		m->start = cxled->dpa_res->start;
1788 		m->size = resource_size(cxled->dpa_res);
1789 		m->position = i;
1790 	}
1791 
1792 	dev = &cxlr_pmem->dev;
1793 	cxlr_pmem->cxlr = cxlr;
1794 	device_initialize(dev);
1795 	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
1796 	device_set_pm_not_required(dev);
1797 	dev->parent = &cxlr->dev;
1798 	dev->bus = &cxl_bus_type;
1799 	dev->type = &cxl_pmem_region_type;
1800 out:
1801 	up_read(&cxl_region_rwsem);
1802 
1803 	return cxlr_pmem;
1804 }
1805 
1806 static void cxlr_pmem_unregister(void *dev)
1807 {
1808 	device_unregister(dev);
1809 }
1810 
1811 /**
1812  * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
1813  * @cxlr: parent CXL region for this pmem region bridge device
1814  *
1815  * Return: 0 on success, negative error code on failure.
1816  */
1817 static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
1818 {
1819 	struct cxl_pmem_region *cxlr_pmem;
1820 	struct device *dev;
1821 	int rc;
1822 
1823 	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
1824 	if (IS_ERR(cxlr_pmem))
1825 		return PTR_ERR(cxlr_pmem);
1826 
1827 	dev = &cxlr_pmem->dev;
1828 	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
1829 	if (rc)
1830 		goto err;
1831 
1832 	rc = device_add(dev);
1833 	if (rc)
1834 		goto err;
1835 
1836 	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
1837 		dev_name(dev));
1838 
1839 	return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev);
1840 
1841 err:
1842 	put_device(dev);
1843 	return rc;
1844 }
1845 
1846 static int cxl_region_probe(struct device *dev)
1847 {
1848 	struct cxl_region *cxlr = to_cxl_region(dev);
1849 	struct cxl_region_params *p = &cxlr->params;
1850 	int rc;
1851 
1852 	rc = down_read_interruptible(&cxl_region_rwsem);
1853 	if (rc) {
1854 		dev_dbg(&cxlr->dev, "probe interrupted\n");
1855 		return rc;
1856 	}
1857 
1858 	if (p->state < CXL_CONFIG_COMMIT) {
1859 		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
1860 		up_read(&cxl_region_rwsem);
1861 		return -ENXIO;
1862 	}
1863 	/*
1864 	 * From this point on any path that changes the region's state away from
1865 	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
1866 	 */
1867 	up_read(&cxl_region_rwsem);
1868 
1869 	switch (cxlr->mode) {
1870 	case CXL_DECODER_PMEM:
1871 		return devm_cxl_add_pmem_region(cxlr);
1872 	default:
1873 		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
1874 			cxlr->mode);
1875 		return -ENXIO;
1876 	}
1877 }
1878 
1879 static struct cxl_driver cxl_region_driver = {
1880 	.name = "cxl_region",
1881 	.probe = cxl_region_probe,
1882 	.id = CXL_DEVICE_REGION,
1883 };
1884 
1885 int cxl_region_init(void)
1886 {
1887 	return cxl_driver_register(&cxl_region_driver);
1888 }
1889 
1890 void cxl_region_exit(void)
1891 {
1892 	cxl_driver_unregister(&cxl_region_driver);
1893 }
1894 
1895 MODULE_IMPORT_NS(CXL);
1896 MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
1897