Lines matching "interleave" and "mode" in drivers/cxl/core/region.c

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/memory-tiers.h>
28 * 1. Interleave granularity
29 * 2. Interleave size
36 .attr = { .name = __stringify(_name), .mode = 0444 }, \
50 if (cxlr->coord[level].attrib == 0) \
51 return -ENOENT; \
53 return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib); \
94 cxlr->coord[level].read_latency == 0) \
98 cxlr->coord[level].write_latency == 0) \
102 cxlr->coord[level].read_bandwidth == 0) \
106 cxlr->coord[level].write_bandwidth == 0) \
109 return a->mode; \
141 struct cxl_region_params *p = &cxlr->params; in uuid_show()
147 if (cxlr->mode != CXL_DECODER_PMEM) in uuid_show()
150 rc = sysfs_emit(buf, "%pUb\n", &p->uuid); in uuid_show()
167 p = &cxlr->params; in is_dup()
169 if (uuid_equal(&p->uuid, uuid)) { in is_dup()
171 return -EBUSY; in is_dup()
181 struct cxl_region_params *p = &cxlr->params; in uuid_store()
186 return -EINVAL; in uuid_store()
193 return -EINVAL; in uuid_store()
199 if (uuid_equal(&p->uuid, &temp)) in uuid_store()
202 rc = -EBUSY; in uuid_store()
203 if (p->state >= CXL_CONFIG_ACTIVE) in uuid_store()
210 uuid_copy(&p->uuid, &temp); in uuid_store()
223 return xa_load(&port->regions, (unsigned long)cxlr); in cxl_rr_load()
231 &cxlr->dev, in cxl_region_invalidate_memregion()
235 dev_WARN(&cxlr->dev, in cxl_region_invalidate_memregion()
237 return -ENXIO; in cxl_region_invalidate_memregion()
247 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_reset()
257 for (i = count - 1; i >= 0; i--) { in cxl_region_decode_reset()
258 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_reset()
261 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_region_decode_reset()
264 if (cxlds->rcd) in cxl_region_decode_reset()
267 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_decode_reset()
268 iter = to_cxl_port(iter->dev.parent); in cxl_region_decode_reset()
271 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_reset()
276 cxld = cxl_rr->decoder; in cxl_region_decode_reset()
277 if (cxld->reset) in cxl_region_decode_reset()
278 cxld->reset(cxld); in cxl_region_decode_reset()
279 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
283 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_reset()
284 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
288 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
295 if (cxld->commit) in commit_decoder()
296 return cxld->commit(cxld); in commit_decoder()
298 if (is_switch_decoder(&cxld->dev)) in commit_decoder()
299 cxlsd = to_cxl_switch_decoder(&cxld->dev); in commit_decoder()
301 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1, in commit_decoder()
302 "->commit() is required\n")) in commit_decoder()
303 return -ENXIO; in commit_decoder()
309 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_commit()
312 for (i = 0; i < p->nr_targets; i++) { in cxl_region_decode_commit()
313 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_commit()
322 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_decode_commit()
324 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
333 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_commit()
335 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
336 if (cxld->reset) in cxl_region_decode_commit()
337 cxld->reset(cxld); in cxl_region_decode_commit()
340 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_commit()
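The two functions above walk the same decoder chain in opposite directions: cxl_region_decode_commit() ascends from the endpoint's port toward the CXL root (commit bottom-up), while cxl_region_decode_reset() first climbs to the port just below the root and then descends back toward the endpoint through each port's cached cxl_ep->next link (reset top-down), finishing with the endpoint decoder itself. A minimal sketch of the two traversal idioms, with locking, RCD handling, and error paths elided; commit_one()/reset_one() are hypothetical stand-ins for the per-port decoder operations:

	/* sketch: commit bottom-up, endpoint-attached port first */
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		commit_one(cxl_rr_load(iter, cxlr)->decoder);

	/* sketch: reset top-down, climb to just below the root... */
	iter = cxled_to_port(cxled);
	while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
		iter = to_cxl_port(iter->dev.parent);

	/* ...then walk back down via the per-memdev cxl_ep links */
	for (ep = cxl_ep_load(iter, cxlmd); iter;
	     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
		reset_one(cxl_rr_load(iter, cxlr)->decoder);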
357 struct cxl_region_params *p = &cxlr->params; in commit_store()
370 if (commit && p->state >= CXL_CONFIG_COMMIT) in commit_store()
372 if (!commit && p->state < CXL_CONFIG_COMMIT) in commit_store()
376 if (commit && p->state < CXL_CONFIG_ACTIVE) { in commit_store()
377 rc = -ENXIO; in commit_store()
392 p->state = CXL_CONFIG_COMMIT; in commit_store()
394 p->state = CXL_CONFIG_RESET_PENDING; in commit_store()
396 device_release_driver(&cxlr->dev); in commit_store()
403 if (p->state == CXL_CONFIG_RESET_PENDING) { in commit_store()
404 cxl_region_decode_reset(cxlr, p->interleave_ways); in commit_store()
405 p->state = CXL_CONFIG_ACTIVE; in commit_store()
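In sysfs terms (a usage sketch, paths per the CXL sysfs ABI): echo 1 > /sys/bus/cxl/devices/regionX/commit requires the region to have reached CXL_CONFIG_ACTIVE, runs cxl_region_decode_commit(), and on success advances the state to CXL_CONFIG_COMMIT. echo 0 moves a committed region to CXL_CONFIG_RESET_PENDING and releases the region driver; then, after revalidating the state under the lock (line 403), it performs the decode reset and returns the region to CXL_CONFIG_ACTIVE so it can be reconfigured or re-committed.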
421 struct cxl_region_params *p = &cxlr->params; in commit_show()
427 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); in commit_show()
442 * regions regardless of mode. in cxl_region_visible()
444 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) in cxl_region_visible()
446 return a->mode; in cxl_region_visible()
453 struct cxl_region_params *p = &cxlr->params; in interleave_ways_show()
459 rc = sysfs_emit(buf, "%d\n", p->interleave_ways); in interleave_ways_show()
471 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_ways_store()
472 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_ways_store()
474 struct cxl_region_params *p = &cxlr->params; in interleave_ways_store()
488 * Even for x3, x6, and x12 interleaves the region interleave must be a in interleave_ways_store()
489 * power of 2 multiple of the host bridge interleave. in interleave_ways_store()
491 if (!is_power_of_2(val / cxld->interleave_ways) || in interleave_ways_store()
492 (val % cxld->interleave_ways)) { in interleave_ways_store()
493 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); in interleave_ways_store()
494 return -EINVAL; in interleave_ways_store()
500 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_ways_store()
501 rc = -EBUSY; in interleave_ways_store()
505 save = p->interleave_ways; in interleave_ways_store()
506 p->interleave_ways = val; in interleave_ways_store()
507 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in interleave_ways_store()
509 p->interleave_ways = save; in interleave_ways_store()
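The power-of-2 check above enforces the rule stated in the comment at line 488: a region's interleave must be the host bridge interleave times a power-of-2 factor. A standalone sketch of the same predicate (is_power_of_2() is the kernel helper from linux/log2.h; the full store handler additionally bounds the value to the CXL-encodable set of ways):

	/* sketch: mirrors the validation in interleave_ways_store() */
	static bool region_ways_valid(unsigned int region_iw, unsigned int hb_iw)
	{
		return hb_iw && region_iw &&
		       region_iw % hb_iw == 0 &&
		       is_power_of_2(region_iw / hb_iw);
	}

	/*
	 * e.g. hb_iw == 3 admits region_iw of 3, 6, and 12 (x1/x2/x4
	 * multiples) but rejects 9, since 9 / 3 == 3 is not a power of 2.
	 */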
523 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_show()
529 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); in interleave_granularity_show()
539 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_granularity_store()
540 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_granularity_store()
542 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_store()
555 * When the host-bridge is interleaved, disallow region granularity != in interleave_granularity_store()
556 * root granularity. Regions with a granularity less than the root in interleave_granularity_store()
557 * interleave result in needing multiple endpoints to support a single in interleave_granularity_store()
558 * slot in the interleave (possible to support in the future). Regions in interleave_granularity_store()
559 * with a granularity greater than the root interleave result in invalid in interleave_granularity_store()
560 * DPA-to-HPA translations. in interleave_granularity_store()
562 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) in interleave_granularity_store()
563 return -EINVAL; in interleave_granularity_store()
568 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_granularity_store()
569 rc = -EBUSY; in interleave_granularity_store()
573 p->interleave_granularity = val; in interleave_granularity_store()
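Worked example for the check at line 562: if the root decoder interleaves x2 across host bridges at 256 B granularity, any region under it must also use exactly 256 B granularity; only when the root decoder is x1 (not interleaved across host bridges) is the region free to choose its own granularity.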
586 struct cxl_region_params *p = &cxlr->params; in resource_show()
587 u64 resource = -1ULL; in resource_show()
593 if (p->res) in resource_show()
594 resource = p->res->start; in resource_show()
607 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode)); in mode_show()
609 static DEVICE_ATTR_RO(mode);
613 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in alloc_hpa()
614 struct cxl_region_params *p = &cxlr->params; in alloc_hpa()
621 if (p->res && resource_size(p->res) == size) in alloc_hpa()
625 if (p->res) in alloc_hpa()
626 return -EBUSY; in alloc_hpa()
628 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) in alloc_hpa()
629 return -EBUSY; in alloc_hpa()
632 if (!p->interleave_ways || !p->interleave_granularity || in alloc_hpa()
633 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) in alloc_hpa()
634 return -ENXIO; in alloc_hpa()
636 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder); in alloc_hpa()
638 return -EINVAL; in alloc_hpa()
640 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, in alloc_hpa()
641 dev_name(&cxlr->dev)); in alloc_hpa()
643 dev_dbg(&cxlr->dev, in alloc_hpa()
645 PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res); in alloc_hpa()
649 p->res = res; in alloc_hpa()
650 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in alloc_hpa()
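alloc_hpa() therefore rejects any size that is not a whole multiple of SZ_256M times the interleave ways, and carves the region out of the root decoder's window at 256 MB alignment (the alloc_free_mem_region() call at line 640). A sketch of the size rule, assuming the same constant:

	#define SZ_256M	(256ULL << 20)

	/* sketch: a region is sized in (256M * ways) increments */
	static bool region_size_valid(unsigned long long size, unsigned int ways)
	{
		return size && size % (SZ_256M * ways) == 0;
	}
	/* e.g. a x4 region grows in 1 GiB steps */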
657 struct cxl_region_params *p = &cxlr->params; in cxl_region_iomem_release()
659 if (device_is_registered(&cxlr->dev)) in cxl_region_iomem_release()
661 if (p->res) { in cxl_region_iomem_release()
666 if (p->res->parent) in cxl_region_iomem_release()
667 remove_resource(p->res); in cxl_region_iomem_release()
668 kfree(p->res); in cxl_region_iomem_release()
669 p->res = NULL; in cxl_region_iomem_release()
675 struct cxl_region_params *p = &cxlr->params; in free_hpa()
679 if (!p->res) in free_hpa()
682 if (p->state >= CXL_CONFIG_ACTIVE) in free_hpa()
683 return -EBUSY; in free_hpa()
686 p->state = CXL_CONFIG_IDLE; in free_hpa()
721 struct cxl_region_params *p = &cxlr->params; in size_show()
728 if (p->res) in size_show()
729 size = resource_size(p->res); in size_show()
755 struct cxl_region_params *p = &cxlr->params; in show_targetN()
763 if (pos >= p->interleave_ways) { in show_targetN()
764 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in show_targetN()
765 p->interleave_ways); in show_targetN()
766 rc = -ENXIO; in show_targetN()
770 cxled = p->targets[pos]; in show_targetN()
774 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); in show_targetN()
786 * if port->commit_end is not the only free decoder, then out of in check_commit_order()
790 if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) in check_commit_order()
791 return -EBUSY; in check_commit_order()
797 struct cxl_port *port = to_cxl_port(dev->parent); in match_free_decoder()
806 if (cxld->id != port->commit_end + 1) in match_free_decoder()
809 if (cxld->region) { in match_free_decoder()
810 dev_dbg(dev->parent, in match_free_decoder()
812 dev_name(dev), dev_name(&cxld->region->dev)); in match_free_decoder()
816 rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, in match_free_decoder()
819 dev_dbg(dev->parent, in match_free_decoder()
837 r = &cxld->hpa_range; in match_auto_decoder()
839 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_auto_decoder()
853 return &cxled->cxld; in cxl_region_find_decoder()
855 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_find_decoder()
856 dev = device_find_child(&port->dev, &cxlr->params, in cxl_region_find_decoder()
859 dev = device_find_child(&port->dev, NULL, match_free_decoder); in cxl_region_find_decoder()
876 struct cxl_decoder *cxld_iter = rr->decoder; in auto_order_ok()
879 * Allow the out of order assembly of auto-discovered regions. in auto_order_ok()
884 dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n", in auto_order_ok()
885 dev_name(&cxld->dev), cxld->id, in auto_order_ok()
886 dev_name(&cxld_iter->dev), cxld_iter->id); in auto_order_ok()
888 if (cxld_iter->id > cxld->id) in auto_order_ok()
898 struct cxl_region_params *p = &cxlr->params; in alloc_region_ref()
903 xa_for_each(&port->regions, index, iter) { in alloc_region_ref()
904 struct cxl_region_params *ip = &iter->region->params; in alloc_region_ref()
906 if (!ip->res || ip->res->start < p->res->start) in alloc_region_ref()
909 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in alloc_region_ref()
913 if (auto_order_ok(port, iter->region, cxld)) in alloc_region_ref()
916 dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n", in alloc_region_ref()
917 dev_name(&port->dev), in alloc_region_ref()
918 dev_name(&iter->region->dev), ip->res, p->res); in alloc_region_ref()
920 return ERR_PTR(-EBUSY); in alloc_region_ref()
925 return ERR_PTR(-ENOMEM); in alloc_region_ref()
926 cxl_rr->port = port; in alloc_region_ref()
927 cxl_rr->region = cxlr; in alloc_region_ref()
928 cxl_rr->nr_targets = 1; in alloc_region_ref()
929 xa_init(&cxl_rr->endpoints); in alloc_region_ref()
931 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); in alloc_region_ref()
933 dev_dbg(&cxlr->dev, in alloc_region_ref()
935 dev_name(&port->dev), rc); in alloc_region_ref()
945 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_free_decoder()
946 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_free_decoder()
951 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); in cxl_rr_free_decoder()
952 if (cxld->region == cxlr) { in cxl_rr_free_decoder()
953 cxld->region = NULL; in cxl_rr_free_decoder()
954 put_device(&cxlr->dev); in cxl_rr_free_decoder()
960 struct cxl_port *port = cxl_rr->port; in free_region_ref()
961 struct cxl_region *cxlr = cxl_rr->region; in free_region_ref()
964 xa_erase(&port->regions, (unsigned long)cxlr); in free_region_ref()
965 xa_destroy(&cxl_rr->endpoints); in free_region_ref()
973 struct cxl_port *port = cxl_rr->port; in cxl_rr_ep_add()
974 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_ep_add()
975 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_ep_add()
979 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, in cxl_rr_ep_add()
984 cxl_rr->nr_eps++; in cxl_rr_ep_add()
986 if (!cxld->region) { in cxl_rr_ep_add()
987 cxld->region = cxlr; in cxl_rr_ep_add()
988 get_device(&cxlr->dev); in cxl_rr_ep_add()
1002 dev_dbg(&cxlr->dev, "%s: no decoder available\n", in cxl_rr_alloc_decoder()
1003 dev_name(&port->dev)); in cxl_rr_alloc_decoder()
1004 return -EBUSY; in cxl_rr_alloc_decoder()
1007 if (cxld->region) { in cxl_rr_alloc_decoder()
1008 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", in cxl_rr_alloc_decoder()
1009 dev_name(&port->dev), dev_name(&cxld->dev), in cxl_rr_alloc_decoder()
1010 dev_name(&cxld->region->dev)); in cxl_rr_alloc_decoder()
1011 return -EBUSY; in cxl_rr_alloc_decoder()
1016 * assumption with an assertion. Switch-decoders change mapping-type in cxl_rr_alloc_decoder()
1019 dev_WARN_ONCE(&cxlr->dev, in cxl_rr_alloc_decoder()
1021 cxld->target_type != cxlr->type, in cxl_rr_alloc_decoder()
1022 "%s:%s mismatch decoder type %d -> %d\n", in cxl_rr_alloc_decoder()
1023 dev_name(&cxled_to_memdev(cxled)->dev), in cxl_rr_alloc_decoder()
1024 dev_name(&cxld->dev), cxld->target_type, cxlr->type); in cxl_rr_alloc_decoder()
1025 cxld->target_type = cxlr->type; in cxl_rr_alloc_decoder()
1026 cxl_rr->decoder = cxld; in cxl_rr_alloc_decoder()
1031 * cxl_port_attach_region() - track a region's interest in a port by endpoint
1035 * @pos: interleave position of @cxled in @cxlr
1043 * - validate that there are no other regions with a higher HPA already
1045 * - establish a region reference if one is not already present
1047 * - additionally allocate a decoder instance that will host @cxlr on
1050 * - pin the region reference by the endpoint
1051 * - account for how many entries in @port's target list are needed to
1064 int rc = -EBUSY; in cxl_port_attach_region()
1079 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_attach_region()
1082 if (ep_iter->next == ep->next) { in cxl_port_attach_region()
1092 if (!found || !ep->next) { in cxl_port_attach_region()
1093 cxl_rr->nr_targets++; in cxl_port_attach_region()
1099 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1101 dev_name(&port->dev)); in cxl_port_attach_region()
1110 cxld = cxl_rr->decoder; in cxl_port_attach_region()
1116 if (is_switch_decoder(&cxld->dev)) { in cxl_port_attach_region()
1119 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_attach_region()
1120 if (cxl_rr->nr_targets > cxlsd->nr_targets) { in cxl_port_attach_region()
1121 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1123 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1124 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1125 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1126 cxlsd->nr_targets); in cxl_port_attach_region()
1127 rc = -ENXIO; in cxl_port_attach_region()
1134 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1136 dev_name(&port->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1137 dev_name(&cxld->dev)); in cxl_port_attach_region()
1141 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1143 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1144 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1145 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1146 ep ? ep->next ? dev_name(ep->next->uport_dev) : in cxl_port_attach_region()
1147 dev_name(&cxlmd->dev) : in cxl_port_attach_region()
1149 cxl_rr->nr_eps, cxl_rr->nr_targets); in cxl_port_attach_region()
1154 cxl_rr->nr_targets--; in cxl_port_attach_region()
1155 if (cxl_rr->nr_eps == 0) in cxl_port_attach_region()
1177 if (cxl_rr->decoder == &cxled->cxld) in cxl_port_detach_region()
1178 cxl_rr->nr_eps--; in cxl_port_detach_region()
1180 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); in cxl_port_detach_region()
1186 cxl_rr->nr_eps--; in cxl_port_detach_region()
1187 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_detach_region()
1188 if (ep_iter->next == ep->next) { in cxl_port_detach_region()
1194 cxl_rr->nr_targets--; in cxl_port_detach_region()
1197 if (cxl_rr->nr_eps == 0) in cxl_port_detach_region()
1206 struct cxl_region *cxlr = cxl_rr->region; in check_last_peer()
1207 struct cxl_region_params *p = &cxlr->params; in check_last_peer()
1209 struct cxl_port *port = cxl_rr->port; in check_last_peer()
1212 int pos = cxled->pos; in check_last_peer()
1216 * then that endpoint, at index 'position - distance', must also be in check_last_peer()
1220 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", in check_last_peer()
1221 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1222 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in check_last_peer()
1223 return -ENXIO; in check_last_peer()
1225 cxled_peer = p->targets[pos - distance]; in check_last_peer()
1228 if (ep->dport != ep_peer->dport) { in check_last_peer()
1229 dev_dbg(&cxlr->dev, in check_last_peer()
1231 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1232 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, in check_last_peer()
1233 dev_name(&cxlmd_peer->dev), in check_last_peer()
1234 dev_name(&cxled_peer->cxld.dev)); in check_last_peer()
1235 return -ENXIO; in check_last_peer()
1243 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in check_interleave_cap()
1244 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in check_interleave_cap()
1250 if (!test_bit(iw, &cxlhdm->iw_cap_mask)) in check_interleave_cap()
1251 return -ENXIO; in check_interleave_cap()
1259 * interleave bits are none. in check_interleave_cap()
1266 * interleave bits are none. in check_interleave_cap()
1274 high_pos = eiw + eig - 1; in check_interleave_cap()
1279 if (interleave_mask & ~cxlhdm->interleave_mask) in check_interleave_cap()
1280 return -ENXIO; in check_interleave_cap()
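check_interleave_cap() operates on the encoded forms: per the CXL specification, granularity encodes as eig with granularity = 256 << eig, and ways encodes as eiw, with powers of two at eiw = log2(ways) (0..4) and the 3/6/12-way modes at eiw = 8 + log2(ways / 3). That encoding is why "eiw < 8" serves throughout this file as the power-of-2-interleave test, and why the interleave bit-field sits just above the low eig + 8 offset bits. A sketch of the two conversions (the in-kernel ways_to_eiw()/granularity_to_eig() helpers in cxl.h do the equivalent with range checks):

	/* sketch: granularity must be a power of 2 in [256, 16K] */
	static int eig_from_granularity(unsigned int gran, u16 *eig)
	{
		if (gran < 256 || gran > SZ_16K || !is_power_of_2(gran))
			return -EINVAL;
		*eig = ilog2(gran) - 8;		/* 256 -> 0, 512 -> 1, ... */
		return 0;
	}

	/* sketch: ways is 1, 2, 4, 8, 16 or 3, 6, 12 */
	static int eiw_from_ways(unsigned int ways, u8 *eiw)
	{
		if (ways && ways <= 16 && is_power_of_2(ways))
			*eiw = ilog2(ways);		/* 1 -> 0 ... 16 -> 4 */
		else if (ways && ways <= 12 && ways % 3 == 0 &&
			 is_power_of_2(ways / 3))
			*eiw = ilog2(ways / 3) + 8;	/* 3 -> 8, 6 -> 9, 12 -> 10 */
		else
			return -EINVAL;
		return 0;
	}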
1289 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_port_setup_targets()
1290 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; in cxl_port_setup_targets()
1291 struct cxl_port *parent_port = to_cxl_port(port->dev.parent); in cxl_port_setup_targets()
1295 struct cxl_region_params *p = &cxlr->params; in cxl_port_setup_targets()
1296 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_port_setup_targets()
1306 if (!is_power_of_2(cxl_rr->nr_targets)) { in cxl_port_setup_targets()
1307 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", in cxl_port_setup_targets()
1308 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1309 cxl_rr->nr_targets); in cxl_port_setup_targets()
1310 return -EINVAL; in cxl_port_setup_targets()
1313 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_setup_targets()
1314 if (cxl_rr->nr_targets_set) { in cxl_port_setup_targets()
1320 * endpoint positions in the region interleave a given port can in cxl_port_setup_targets()
1324 * always 1 as every index targets a different host-bridge. At in cxl_port_setup_targets()
1330 distance *= cxl_rr_iter->nr_targets; in cxl_port_setup_targets()
1331 iter = to_cxl_port(iter->dev.parent); in cxl_port_setup_targets()
1333 distance *= cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1335 for (i = 0; i < cxl_rr->nr_targets_set; i++) in cxl_port_setup_targets()
1336 if (ep->dport == cxlsd->target[i]) { in cxl_port_setup_targets()
1351 * does not allow interleaved host-bridges with in cxl_port_setup_targets()
1354 parent_ig = p->interleave_granularity; in cxl_port_setup_targets()
1355 parent_iw = cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1357 * For purposes of address bit routing, use power-of-2 math for in cxl_port_setup_targets()
1367 parent_cxld = parent_rr->decoder; in cxl_port_setup_targets()
1368 parent_ig = parent_cxld->interleave_granularity; in cxl_port_setup_targets()
1369 parent_iw = parent_cxld->interleave_ways; in cxl_port_setup_targets()
1374 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", in cxl_port_setup_targets()
1375 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1376 dev_name(&parent_port->dev), parent_ig); in cxl_port_setup_targets()
1382 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", in cxl_port_setup_targets()
1383 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1384 dev_name(&parent_port->dev), parent_iw); in cxl_port_setup_targets()
1388 iw = cxl_rr->nr_targets; in cxl_port_setup_targets()
1391 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", in cxl_port_setup_targets()
1392 dev_name(port->uport_dev), dev_name(&port->dev), iw); in cxl_port_setup_targets()
1397 * Interleave granularity is a multiple of @parent_port granularity. in cxl_port_setup_targets()
1398 * Multiplier is the parent port interleave ways. in cxl_port_setup_targets()
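Concretely: if the parent decodes x2 at 512 B granularity, consecutive 512 B chunks alternate between its two targets, so each target only sees every other chunk, and this port's decoder must be programmed with ig = parent_ig * parent_iw = 1024 B. The elided line here feeds that product to granularity_to_eig(), per the error report shown at line 1402.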
1402 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1404 dev_name(&parent_port->dev), parent_ig, parent_iw); in cxl_port_setup_targets()
1410 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", in cxl_port_setup_targets()
1411 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1416 if (iw > 8 || iw > cxlsd->nr_targets) { in cxl_port_setup_targets()
1417 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1419 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1420 dev_name(&cxld->dev), iw, cxlsd->nr_targets); in cxl_port_setup_targets()
1421 return -ENXIO; in cxl_port_setup_targets()
1424 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1425 if (cxld->interleave_ways != iw || in cxl_port_setup_targets()
1426 cxld->interleave_granularity != ig || in cxl_port_setup_targets()
1427 cxld->hpa_range.start != p->res->start || in cxl_port_setup_targets()
1428 cxld->hpa_range.end != p->res->end || in cxl_port_setup_targets()
1429 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { in cxl_port_setup_targets()
1430 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1432 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1433 __func__, iw, ig, p->res); in cxl_port_setup_targets()
1434 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1436 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1437 __func__, cxld->interleave_ways, in cxl_port_setup_targets()
1438 cxld->interleave_granularity, in cxl_port_setup_targets()
1439 (cxld->flags & CXL_DECODER_F_ENABLE) ? in cxl_port_setup_targets()
1442 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_port_setup_targets()
1443 return -ENXIO; in cxl_port_setup_targets()
1448 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1450 dev_name(port->uport_dev), in cxl_port_setup_targets()
1451 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1455 cxld->interleave_ways = iw; in cxl_port_setup_targets()
1456 cxld->interleave_granularity = ig; in cxl_port_setup_targets()
1457 cxld->hpa_range = (struct range) { in cxl_port_setup_targets()
1458 .start = p->res->start, in cxl_port_setup_targets()
1459 .end = p->res->end, in cxl_port_setup_targets()
1462 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev), in cxl_port_setup_targets()
1463 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1465 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { in cxl_port_setup_targets()
1466 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1468 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1469 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1470 return -ENXIO; in cxl_port_setup_targets()
1472 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1473 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) { in cxl_port_setup_targets()
1474 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n", in cxl_port_setup_targets()
1475 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1476 dev_name(&cxlsd->cxld.dev), in cxl_port_setup_targets()
1477 dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1478 cxl_rr->nr_targets_set); in cxl_port_setup_targets()
1479 return -ENXIO; in cxl_port_setup_targets()
1482 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; in cxl_port_setup_targets()
1485 cxl_rr->nr_targets_set += inc; in cxl_port_setup_targets()
1486 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", in cxl_port_setup_targets()
1487 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1488 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1489 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1506 cxl_rr->nr_targets_set = 0; in cxl_port_reset_targets()
1508 cxld = cxl_rr->decoder; in cxl_port_reset_targets()
1509 cxld->hpa_range = (struct range) { in cxl_port_reset_targets()
1511 .end = -1, in cxl_port_reset_targets()
1517 struct cxl_region_params *p = &cxlr->params; in cxl_region_teardown_targets()
1526 * In the auto-discovery case skip automatic teardown since the in cxl_region_teardown_targets()
1529 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_teardown_targets()
1532 for (i = 0; i < p->nr_targets; i++) { in cxl_region_teardown_targets()
1533 cxled = p->targets[i]; in cxl_region_teardown_targets()
1535 cxlds = cxlmd->cxlds; in cxl_region_teardown_targets()
1537 if (cxlds->rcd) in cxl_region_teardown_targets()
1541 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_teardown_targets()
1542 iter = to_cxl_port(iter->dev.parent); in cxl_region_teardown_targets()
1545 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) in cxl_region_teardown_targets()
1552 struct cxl_region_params *p = &cxlr->params; in cxl_region_setup_targets()
1560 for (i = 0; i < p->nr_targets; i++) { in cxl_region_setup_targets()
1561 cxled = p->targets[i]; in cxl_region_setup_targets()
1563 cxlds = cxlmd->cxlds; in cxl_region_setup_targets()
1566 if (!cxlds->rcd) { in cxl_region_setup_targets()
1574 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_setup_targets()
1575 iter = to_cxl_port(iter->dev.parent); in cxl_region_setup_targets()
1582 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_setup_targets()
1592 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n"); in cxl_region_setup_targets()
1594 return -ENXIO; in cxl_region_setup_targets()
1605 struct cxl_region_params *p = &cxlr->params; in cxl_region_validate_position()
1608 if (pos < 0 || pos >= p->interleave_ways) { in cxl_region_validate_position()
1609 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in cxl_region_validate_position()
1610 p->interleave_ways); in cxl_region_validate_position()
1611 return -ENXIO; in cxl_region_validate_position()
1614 if (p->targets[pos] == cxled) in cxl_region_validate_position()
1617 if (p->targets[pos]) { in cxl_region_validate_position()
1618 struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; in cxl_region_validate_position()
1621 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", in cxl_region_validate_position()
1622 pos, dev_name(&cxlmd_target->dev), in cxl_region_validate_position()
1623 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1624 return -EBUSY; in cxl_region_validate_position()
1627 for (i = 0; i < p->interleave_ways; i++) { in cxl_region_validate_position()
1631 cxled_target = p->targets[i]; in cxl_region_validate_position()
1637 dev_dbg(&cxlr->dev, in cxl_region_validate_position()
1639 dev_name(&cxlmd->dev), pos, in cxl_region_validate_position()
1640 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1641 return -EBUSY; in cxl_region_validate_position()
1654 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; in cxl_region_attach_position()
1655 struct cxl_decoder *cxld = &cxlsd->cxld; in cxl_region_attach_position()
1656 int iw = cxld->interleave_ways; in cxl_region_attach_position()
1660 if (dport != cxlrd->cxlsd.target[pos % iw]) { in cxl_region_attach_position()
1661 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", in cxl_region_attach_position()
1662 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach_position()
1663 dev_name(&cxlrd->cxlsd.cxld.dev)); in cxl_region_attach_position()
1664 return -ENXIO; in cxl_region_attach_position()
1668 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_attach_position()
1678 iter = to_cxl_port(iter->dev.parent)) in cxl_region_attach_position()
1686 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach_auto()
1688 if (cxled->state != CXL_DECODER_STATE_AUTO) { in cxl_region_attach_auto()
1689 dev_err(&cxlr->dev, in cxl_region_attach_auto()
1691 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1692 return -EINVAL; in cxl_region_attach_auto()
1696 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n", in cxl_region_attach_auto()
1697 dev_name(&cxled->cxld.dev), pos); in cxl_region_attach_auto()
1698 return -EINVAL; in cxl_region_attach_auto()
1701 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach_auto()
1702 dev_err(&cxlr->dev, "%s: no more target slots available\n", in cxl_region_attach_auto()
1703 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1704 return -ENXIO; in cxl_region_attach_auto()
1713 pos = p->nr_targets; in cxl_region_attach_auto()
1714 p->targets[pos] = cxled; in cxl_region_attach_auto()
1715 cxled->pos = pos; in cxl_region_attach_auto()
1716 p->nr_targets++; in cxl_region_attach_auto()
1726 return cxled_a->pos - cxled_b->pos; in cmp_interleave_pos()
1731 if (!port->parent_dport) in next_port()
1733 return port->parent_dport->port; in next_port()
1745 r1 = &cxlsd->cxld.hpa_range; in match_switch_decoder_by_range()
1749 return (r1->start == r2->start && r1->end == r2->end); in match_switch_decoder_by_range()
1758 int rc = -ENXIO; in find_pos_and_ways()
1764 dev = device_find_child(&parent->dev, range, in find_pos_and_ways()
1767 dev_err(port->uport_dev, in find_pos_and_ways()
1768 "failed to find decoder mapping %#llx-%#llx\n", in find_pos_and_ways()
1769 range->start, range->end); in find_pos_and_ways()
1773 *ways = cxlsd->cxld.interleave_ways; in find_pos_and_ways()
1776 if (cxlsd->target[i] == port->parent_dport) { in find_pos_and_ways()
1788 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1800 * -ENXIO on failure
1806 struct range *range = &cxled->cxld.hpa_range; in cxl_calc_interleave_pos()
1811 * Example: the expected interleave order of the 4-way region shown in cxl_calc_interleave_pos()
1821 * uses the mem position in the host-bridge and the ways of the host- in cxl_calc_interleave_pos()
1823 * iteration uses the host-bridge position in the root_port and the ways in cxl_calc_interleave_pos()
1851 dev_dbg(&cxlmd->dev, in cxl_calc_interleave_pos()
1852 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n", in cxl_calc_interleave_pos()
1853 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent), in cxl_calc_interleave_pos()
1854 dev_name(&port->dev), range->start, range->end, pos); in cxl_calc_interleave_pos()
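The walk the kernel-doc describes is a mixed-radix accumulation, folding one topology level per step as pos = pos * parent_ways + parent_pos from the endpoint up to the root. A sketch with the kernel-doc's 4-way example (two host bridges interleaved x2 at the root, two endpoints interleaved x2 under each host bridge):

	/* sketch: fold one level's (ways, position) into the running pos */
	static int refine_pos(int pos, int parent_ways, int parent_pos)
	{
		return pos * parent_ways + parent_pos;
	}

	/*
	 * mem1 (slot 1 under host_bridge_0; host_bridge_0 at root slot 0):
	 *   host-bridge level: pos = 0 * 2 + 1 = 1
	 *   root level:        pos = 1 * 2 + 0 = 2
	 * mem2 (slot 0 under host_bridge_1; host_bridge_1 at root slot 1):
	 *   host-bridge level: pos = 0 * 2 + 0 = 0
	 *   root level:        pos = 0 * 2 + 1 = 1
	 * yielding the region interleave order mem0, mem2, mem1, mem3.
	 */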
1861 struct cxl_region_params *p = &cxlr->params; in cxl_region_sort_targets()
1864 for (i = 0; i < p->nr_targets; i++) { in cxl_region_sort_targets()
1865 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_sort_targets()
1867 cxled->pos = cxl_calc_interleave_pos(cxled); in cxl_region_sort_targets()
1870 * cxled->pos so that follow-on code paths can reliably in cxl_region_sort_targets()
1871 * do p->targets[cxled->pos] to self-reference their entry. in cxl_region_sort_targets()
1873 if (cxled->pos < 0) in cxl_region_sort_targets()
1874 rc = -ENXIO; in cxl_region_sort_targets()
1876 /* Keep the cxlr target list in interleave position order */ in cxl_region_sort_targets()
1877 sort(p->targets, p->nr_targets, sizeof(p->targets[0]), in cxl_region_sort_targets()
1880 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful"); in cxl_region_sort_targets()
1887 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_region_attach()
1889 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach()
1892 int rc = -ENXIO; in cxl_region_attach()
1894 rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, in cxl_region_attach()
1895 p->interleave_granularity); in cxl_region_attach()
1897 dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", in cxl_region_attach()
1898 dev_name(&cxled->cxld.dev), p->interleave_ways, in cxl_region_attach()
1899 p->interleave_granularity); in cxl_region_attach()
1903 if (cxled->mode != cxlr->mode) { in cxl_region_attach()
1904 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", in cxl_region_attach()
1905 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); in cxl_region_attach()
1906 return -EINVAL; in cxl_region_attach()
1909 if (cxled->mode == CXL_DECODER_DEAD) { in cxl_region_attach()
1910 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1911 return -ENODEV; in cxl_region_attach()
1914 /* all full of members, or interleave config not established? */ in cxl_region_attach()
1915 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1916 dev_dbg(&cxlr->dev, "region already active\n"); in cxl_region_attach()
1917 return -EBUSY; in cxl_region_attach()
1918 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1919 dev_dbg(&cxlr->dev, "interleave config missing\n"); in cxl_region_attach()
1920 return -ENXIO; in cxl_region_attach()
1923 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach()
1924 dev_dbg(&cxlr->dev, "region already has %d endpoints\n", in cxl_region_attach()
1925 p->nr_targets); in cxl_region_attach()
1926 return -EINVAL; in cxl_region_attach()
1931 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); in cxl_region_attach()
1933 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", in cxl_region_attach()
1934 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1935 dev_name(cxlr->dev.parent)); in cxl_region_attach()
1936 return -ENXIO; in cxl_region_attach()
1939 if (cxled->cxld.target_type != cxlr->type) { in cxl_region_attach()
1940 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", in cxl_region_attach()
1941 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1942 cxled->cxld.target_type, cxlr->type); in cxl_region_attach()
1943 return -ENXIO; in cxl_region_attach()
1946 if (!cxled->dpa_res) { in cxl_region_attach()
1947 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", in cxl_region_attach()
1948 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1949 return -ENXIO; in cxl_region_attach()
1952 if (resource_size(cxled->dpa_res) * p->interleave_ways != in cxl_region_attach()
1953 resource_size(p->res)) { in cxl_region_attach()
1954 dev_dbg(&cxlr->dev, in cxl_region_attach()
1955 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", in cxl_region_attach()
1956 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1957 (u64)resource_size(cxled->dpa_res), p->interleave_ways, in cxl_region_attach()
1958 (u64)resource_size(p->res)); in cxl_region_attach()
1959 return -EINVAL; in cxl_region_attach()
1964 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_region_attach()
1972 if (p->nr_targets < p->interleave_ways) in cxl_region_attach()
1984 for (i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
1985 cxled = p->targets[i]; in cxl_region_attach()
1988 ep_port->host_bridge); in cxl_region_attach()
2003 p->state = CXL_CONFIG_COMMIT; in cxl_region_attach()
2017 p->targets[pos] = cxled; in cxl_region_attach()
2018 cxled->pos = pos; in cxl_region_attach()
2019 p->nr_targets++; in cxl_region_attach()
2021 if (p->nr_targets == p->interleave_ways) { in cxl_region_attach()
2025 p->state = CXL_CONFIG_ACTIVE; in cxl_region_attach()
2029 cxled->cxld.interleave_ways = p->interleave_ways; in cxl_region_attach()
2030 cxled->cxld.interleave_granularity = p->interleave_granularity; in cxl_region_attach()
2031 cxled->cxld.hpa_range = (struct range) { in cxl_region_attach()
2032 .start = p->res->start, in cxl_region_attach()
2033 .end = p->res->end, in cxl_region_attach()
2036 if (p->nr_targets != p->interleave_ways) in cxl_region_attach()
2040 * Test the auto-discovery position calculator function in cxl_region_attach()
2041 * against this successfully created user-defined region. in cxl_region_attach()
2042 * A fail message here means that this interleave config in cxl_region_attach()
2045 for (int i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
2046 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_attach()
2050 dev_dbg(&cxled->cxld.dev, in cxl_region_attach()
2051 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n", in cxl_region_attach()
2052 (test_pos == cxled->pos) ? "success" : "fail", in cxl_region_attach()
2053 test_pos, cxled->pos); in cxl_region_attach()
2062 struct cxl_region *cxlr = cxled->cxld.region; in cxl_region_detach()
2071 p = &cxlr->params; in cxl_region_detach()
2072 get_device(&cxlr->dev); in cxl_region_detach()
2074 if (p->state > CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2075 cxl_region_decode_reset(cxlr, p->interleave_ways); in cxl_region_detach()
2076 p->state = CXL_CONFIG_ACTIVE; in cxl_region_detach()
2080 iter = to_cxl_port(iter->dev.parent)) in cxl_region_detach()
2083 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || in cxl_region_detach()
2084 p->targets[cxled->pos] != cxled) { in cxl_region_detach()
2087 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", in cxl_region_detach()
2088 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_detach()
2089 cxled->pos); in cxl_region_detach()
2093 if (p->state == CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2094 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in cxl_region_detach()
2097 p->targets[cxled->pos] = NULL; in cxl_region_detach()
2098 p->nr_targets--; in cxl_region_detach()
2099 cxled->cxld.hpa_range = (struct range) { in cxl_region_detach()
2101 .end = -1, in cxl_region_detach()
2106 device_release_driver(&cxlr->dev); in cxl_region_detach()
2109 put_device(&cxlr->dev); in cxl_region_detach()
2116 cxled->mode = CXL_DECODER_DEAD; in cxl_decoder_kill_region()
2143 struct cxl_region_params *p = &cxlr->params; in detach_target()
2150 if (pos >= p->interleave_ways) { in detach_target()
2151 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in detach_target()
2152 p->interleave_ways); in detach_target()
2153 rc = -ENXIO; in detach_target()
2157 if (!p->targets[pos]) { in detach_target()
2162 rc = cxl_region_detach(p->targets[pos]); in detach_target()
2180 return -ENODEV; in store_targetN()
2183 rc = -EINVAL; in store_targetN()
2254 struct cxl_region_params *p = &cxlr->params; in cxl_region_target_visible()
2256 if (n < p->interleave_ways) in cxl_region_target_visible()
2257 return a->mode; in cxl_region_target_visible()
2282 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in cxl_region_release()
2284 int id = atomic_read(&cxlrd->region_id); in cxl_region_release()
2291 if (cxlr->id < id) in cxl_region_release()
2292 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { in cxl_region_release()
2297 memregion_free(cxlr->id); in cxl_region_release()
2299 put_device(dev->parent); in cxl_region_release()
2311 return dev->type == &cxl_region_type; in is_cxl_region()
2317 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, in to_cxl_region()
2327 struct cxl_region_params *p = &cxlr->params; in unregister_region()
2330 device_del(&cxlr->dev); in unregister_region()
2334 * read-only, so no need to hold the region rwsem to access the in unregister_region()
2337 for (i = 0; i < p->interleave_ways; i++) in unregister_region()
2341 put_device(&cxlr->dev); in unregister_region()
2354 return ERR_PTR(-ENOMEM); in cxl_region_alloc()
2357 dev = &cxlr->dev; in cxl_region_alloc()
2359 lockdep_set_class(&dev->mutex, &cxl_region_key); in cxl_region_alloc()
2360 dev->parent = &cxlrd->cxlsd.cxld.dev; in cxl_region_alloc()
2365 get_device(dev->parent); in cxl_region_alloc()
2367 dev->bus = &cxl_bus_type; in cxl_region_alloc()
2368 dev->type = &cxl_region_type; in cxl_region_alloc()
2369 cxlr->id = id; in cxl_region_alloc()
2380 if (cxlr->coord[i].read_bandwidth) { in cxl_region_update_coordinates()
2383 node_set_perf_attrs(nid, &cxlr->coord[i], i); in cxl_region_update_coordinates()
2395 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group()); in cxl_region_update_coordinates()
2397 dev_dbg(&cxlr->dev, "Failed to update access0 group\n"); in cxl_region_update_coordinates()
2399 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group()); in cxl_region_update_coordinates()
2401 dev_dbg(&cxlr->dev, "Failed to update access1 group\n"); in cxl_region_update_coordinates()
2412 int nid = mnb->status_change_nid; in cxl_region_perf_attrs_callback()
2422 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_perf_attrs_callback()
2445 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_calculate_adistance()
2449 perf = &cxlr->coord[ACCESS_COORDINATE_CPU]; in cxl_region_calculate_adistance()
2458 * devm_cxl_add_region - Adds a region to a decoder
2461 * @mode: mode for the endpoint decoders of this region
2462 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2472 enum cxl_decoder_mode mode, in devm_cxl_add_region() argument
2475 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); in devm_cxl_add_region()
2483 cxlr->mode = mode; in devm_cxl_add_region()
2484 cxlr->type = type; in devm_cxl_add_region()
2486 dev = &cxlr->dev; in devm_cxl_add_region()
2495 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); in devm_cxl_add_region()
2499 dev_dbg(port->uport_dev, "%s: created %s\n", in devm_cxl_add_region()
2500 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); in devm_cxl_add_region()
2510 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); in __create_region_show()
2526 enum cxl_decoder_mode mode, int id) in __create_region() argument
2530 switch (mode) { in __create_region()
2535 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); in __create_region()
2536 return ERR_PTR(-EINVAL); in __create_region()
2543 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { in __create_region()
2545 return ERR_PTR(-EBUSY); in __create_region()
2548 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); in __create_region()
2552 size_t len, enum cxl_decoder_mode mode) in create_region_store() argument
2560 return -EINVAL; in create_region_store()
2562 cxlr = __create_region(cxlrd, mode, id); in create_region_store()
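Usage sketch per the CXL sysfs ABI: reading a root decoder's create_pmem_region (or create_ram_region) attribute returns the next available name, e.g. "region0" (__create_region_show() above), and writing that same name back allocates the region; a stale or repeated write loses the atomic_cmpxchg() id race in __create_region() and fails with -EBUSY.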
2595 if (cxld->region) in region_show()
2596 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); in region_show()
2608 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in cxl_find_region_by_name()
2611 region_dev = device_find_child_by_name(&cxld->dev, name); in cxl_find_region_by_name()
2613 return ERR_PTR(-ENODEV); in cxl_find_region_by_name()
2623 struct cxl_port *port = to_cxl_port(dev->parent); in delete_region_store()
2630 devm_release_action(port->uport_dev, unregister_region, cxlr); in delete_region_store()
2631 put_device(&cxlr->dev); in delete_region_store()
2642 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { in cxl_pmem_region_release()
2643 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; in cxl_pmem_region_release()
2645 put_device(&cxlmd->dev); in cxl_pmem_region_release()
2664 return dev->type == &cxl_pmem_region_type; in is_cxl_pmem_region()
2679 enum cxl_decoder_mode mode; member
2686 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_get_poison_unmapped()
2695 * for unmapped resources based on the last decoder's mode: in cxl_get_poison_unmapped()
2700 if (ctx->mode == CXL_DECODER_RAM) { in cxl_get_poison_unmapped()
2701 offset = ctx->offset; in cxl_get_poison_unmapped()
2702 length = resource_size(&cxlds->ram_res) - offset; in cxl_get_poison_unmapped()
2704 if (rc == -EFAULT) in cxl_get_poison_unmapped()
2709 if (ctx->mode == CXL_DECODER_PMEM) { in cxl_get_poison_unmapped()
2710 offset = ctx->offset; in cxl_get_poison_unmapped()
2711 length = resource_size(&cxlds->dpa_res) - offset; in cxl_get_poison_unmapped()
2714 } else if (resource_size(&cxlds->pmem_res)) { in cxl_get_poison_unmapped()
2715 offset = cxlds->pmem_res.start; in cxl_get_poison_unmapped()
2716 length = resource_size(&cxlds->pmem_res); in cxl_get_poison_unmapped()
2736 if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) in poison_by_decoder()
2740 * Regions are only created with single mode decoders: pmem or ram. in poison_by_decoder()
2741 * Linux does not support mixed mode decoders. This means that in poison_by_decoder()
2746 if (cxled->mode == CXL_DECODER_MIXED) { in poison_by_decoder()
2747 dev_dbg(dev, "poison list read unsupported in mixed mode\n"); in poison_by_decoder()
2752 if (cxled->skip) { in poison_by_decoder()
2753 offset = cxled->dpa_res->start - cxled->skip; in poison_by_decoder()
2754 length = cxled->skip; in poison_by_decoder()
2756 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2762 offset = cxled->dpa_res->start; in poison_by_decoder()
2763 length = cxled->dpa_res->end - offset + 1; in poison_by_decoder()
2764 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); in poison_by_decoder()
2765 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2771 if (cxled->cxld.id == ctx->port->commit_end) { in poison_by_decoder()
2772 ctx->offset = cxled->dpa_res->end + 1; in poison_by_decoder()
2773 ctx->mode = cxled->mode; in poison_by_decoder()
2789 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); in cxl_get_poison_by_endpoint()
2791 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), in cxl_get_poison_by_endpoint()
2807 u64 dpa = ctx->dpa; in __cxl_dpa_to_region()
2813 if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) in __cxl_dpa_to_region()
2816 if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) in __cxl_dpa_to_region()
2824 cxlr = cxled->cxld.region; in __cxl_dpa_to_region()
2827 dev_name(&cxlr->dev)); in __cxl_dpa_to_region()
2832 ctx->cxlr = cxlr; in __cxl_dpa_to_region()
2845 port = cxlmd->endpoint; in cxl_dpa_to_region()
2847 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); in cxl_dpa_to_region()
2854 struct cxl_region_params *p = &cxlr->params; in cxl_is_hpa_in_chunk()
2855 int gran = p->interleave_granularity; in cxl_is_hpa_in_chunk()
2856 int ways = p->interleave_ways; in cxl_is_hpa_in_chunk()
2859 /* Is the hpa in an expected chunk for its pos(-ition) */ in cxl_is_hpa_in_chunk()
2860 offset = hpa - p->res->start; in cxl_is_hpa_in_chunk()
2865 dev_dbg(&cxlr->dev, in cxl_is_hpa_in_chunk()
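The chunk test reduces to round-robin arithmetic: HPA offsets are dealt to positions in granularity-sized chunks, so position pos owns an offset exactly when its chunk index modulo the ways equals pos. A sketch of that predicate (illustrating the arithmetic, not the literal kernel check; 64-bit division kept plain for the sketch):

	static bool hpa_offset_in_expected_chunk(u64 offset, unsigned int gran,
						 unsigned int ways, int pos)
	{
		/* chunk index within the region, dealt round-robin */
		return (offset / gran) % ways == pos;
	}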
2874 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_dpa_to_hpa()
2876 struct cxl_region_params *p = &cxlr->params; in cxl_dpa_to_hpa()
2882 for (int i = 0; i < p->nr_targets; i++) { in cxl_dpa_to_hpa()
2883 cxled = p->targets[i]; in cxl_dpa_to_hpa()
2890 pos = cxled->pos; in cxl_dpa_to_hpa()
2891 ways_to_eiw(p->interleave_ways, &eiw); in cxl_dpa_to_hpa()
2892 granularity_to_eig(p->interleave_granularity, &eig); in cxl_dpa_to_hpa()
2895 * The device position in the region interleave set was removed in cxl_dpa_to_hpa()
2896 * from the offset at HPA->DPA translation. To reconstruct the in cxl_dpa_to_hpa()
2899 * The placement of 'pos' in the HPA is determined by interleave in cxl_dpa_to_hpa()
2905 dpa_offset = dpa - cxl_dpa_resource_start(cxled); in cxl_dpa_to_hpa()
2915 hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); in cxl_dpa_to_hpa()
2922 hpa = hpa_offset + p->res->start; in cxl_dpa_to_hpa()
2925 if (cxlrd->hpa_to_spa) in cxl_dpa_to_hpa()
2926 hpa = cxlrd->hpa_to_spa(cxlrd, hpa); in cxl_dpa_to_hpa()
2928 if (hpa < p->res->start || hpa > p->res->end) { in cxl_dpa_to_hpa()
2929 dev_dbg(&cxlr->dev, in cxl_dpa_to_hpa()
2935 if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos))) in cxl_dpa_to_hpa()
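The reconstruction inverts the device decode logic: the low eig + 8 offset bits pass through unchanged, pos is inserted at bit eig + 8, and the remaining upper DPA bits shift up past the interleave field. The branch visible at line 2915 is the eiw >= 8 (3/6/12-way) variant, which also multiplies the upper bits by 3. A sketch of the simpler power-of-2 case, assuming eiw < 8 and mirroring the masks used above:

	/* sketch: rebuild an HPA offset from a DPA offset, eiw < 8 only */
	static u64 dpa_to_hpa_offset_po2(u64 dpa_offset, int pos, u8 eiw, u16 eig)
	{
		u64 upper = dpa_offset & GENMASK_ULL(51, eig + 8);
		u64 lower = dpa_offset & GENMASK_ULL(eig + 7, 0);

		/* open an eiw-bit hole above bit eig+8, drop pos into it */
		return (upper << eiw) | ((u64)pos << (eig + 8)) | lower;
	}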
2945 struct cxl_region_params *p = &cxlr->params; in cxl_pmem_region_alloc()
2951 if (p->state != CXL_CONFIG_COMMIT) in cxl_pmem_region_alloc()
2952 return -ENXIO; in cxl_pmem_region_alloc()
2955 kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); in cxl_pmem_region_alloc()
2957 return -ENOMEM; in cxl_pmem_region_alloc()
2959 cxlr_pmem->hpa_range.start = p->res->start; in cxl_pmem_region_alloc()
2960 cxlr_pmem->hpa_range.end = p->res->end; in cxl_pmem_region_alloc()
2963 cxlr_pmem->nr_mappings = p->nr_targets; in cxl_pmem_region_alloc()
2964 for (i = 0; i < p->nr_targets; i++) { in cxl_pmem_region_alloc()
2965 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_pmem_region_alloc()
2967 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; in cxl_pmem_region_alloc()
2974 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); in cxl_pmem_region_alloc()
2976 return -ENODEV; in cxl_pmem_region_alloc()
2977 cxlr->cxl_nvb = cxl_nvb; in cxl_pmem_region_alloc()
2979 m->cxlmd = cxlmd; in cxl_pmem_region_alloc()
2980 get_device(&cxlmd->dev); in cxl_pmem_region_alloc()
2981 m->start = cxled->dpa_res->start; in cxl_pmem_region_alloc()
2982 m->size = resource_size(cxled->dpa_res); in cxl_pmem_region_alloc()
2983 m->position = i; in cxl_pmem_region_alloc()
2986 dev = &cxlr_pmem->dev; in cxl_pmem_region_alloc()
2988 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); in cxl_pmem_region_alloc()
2990 dev->parent = &cxlr->dev; in cxl_pmem_region_alloc()
2991 dev->bus = &cxl_bus_type; in cxl_pmem_region_alloc()
2992 dev->type = &cxl_pmem_region_type; in cxl_pmem_region_alloc()
2993 cxlr_pmem->cxlr = cxlr; in cxl_pmem_region_alloc()
2994 cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); in cxl_pmem_region_alloc()
3019 return dev->type == &cxl_dax_region_type; in is_cxl_dax_region()
3035 struct cxl_region_params *p = &cxlr->params; in cxl_dax_region_alloc()
3040 if (p->state != CXL_CONFIG_COMMIT) { in cxl_dax_region_alloc()
3041 cxlr_dax = ERR_PTR(-ENXIO); in cxl_dax_region_alloc()
3047 cxlr_dax = ERR_PTR(-ENOMEM); in cxl_dax_region_alloc()
3051 cxlr_dax->hpa_range.start = p->res->start; in cxl_dax_region_alloc()
3052 cxlr_dax->hpa_range.end = p->res->end; in cxl_dax_region_alloc()
3054 dev = &cxlr_dax->dev; in cxl_dax_region_alloc()
3055 cxlr_dax->cxlr = cxlr; in cxl_dax_region_alloc()
3057 lockdep_set_class(&dev->mutex, &cxl_dax_region_key); in cxl_dax_region_alloc()
3059 dev->parent = &cxlr->dev; in cxl_dax_region_alloc()
3060 dev->bus = &cxl_bus_type; in cxl_dax_region_alloc()
3061 dev->type = &cxl_dax_region_type; in cxl_dax_region_alloc()
3071 struct cxl_region *cxlr = cxlr_pmem->cxlr; in cxlr_pmem_unregister()
3072 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_pmem_unregister()
3075 * Either the bridge is in ->remove() context under the device_lock(), in cxlr_pmem_unregister()
3080 device_lock_assert(&cxl_nvb->dev); in cxlr_pmem_unregister()
3081 cxlr->cxlr_pmem = NULL; in cxlr_pmem_unregister()
3082 cxlr_pmem->cxlr = NULL; in cxlr_pmem_unregister()
3083 device_unregister(&cxlr_pmem->dev); in cxlr_pmem_unregister()
3089 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_release_nvdimm()
3091 scoped_guard(device, &cxl_nvb->dev) { in cxlr_release_nvdimm()
3092 if (cxlr->cxlr_pmem) in cxlr_release_nvdimm()
3093 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, in cxlr_release_nvdimm()
3094 cxlr->cxlr_pmem); in cxlr_release_nvdimm()
3096 cxlr->cxl_nvb = NULL; in cxlr_release_nvdimm()
3097 put_device(&cxl_nvb->dev); in cxlr_release_nvdimm()
3101 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
3116 cxlr_pmem = cxlr->cxlr_pmem; in devm_cxl_add_pmem_region()
3117 cxl_nvb = cxlr->cxl_nvb; in devm_cxl_add_pmem_region()
3119 dev = &cxlr_pmem->dev; in devm_cxl_add_pmem_region()
3120 rc = dev_set_name(dev, "pmem_region%d", cxlr->id); in devm_cxl_add_pmem_region()
3128 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_pmem_region()
3131 scoped_guard(device, &cxl_nvb->dev) { in devm_cxl_add_pmem_region()
3132 if (cxl_nvb->dev.driver) in devm_cxl_add_pmem_region()
3133 rc = devm_add_action_or_reset(&cxl_nvb->dev, in devm_cxl_add_pmem_region()
3137 rc = -ENXIO; in devm_cxl_add_pmem_region()
3144 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr); in devm_cxl_add_pmem_region()
3149 put_device(&cxl_nvb->dev); in devm_cxl_add_pmem_region()
3150 cxlr->cxl_nvb = NULL; in devm_cxl_add_pmem_region()
3158 device_unregister(&cxlr_dax->dev); in cxlr_dax_unregister()
3171 dev = &cxlr_dax->dev; in devm_cxl_add_dax_region()
3172 rc = dev_set_name(dev, "dax_region%d", cxlr->id); in devm_cxl_add_dax_region()
3180 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_dax_region()
3183 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister, in devm_cxl_add_dax_region()
3199 r1 = &cxlrd->cxlsd.cxld.hpa_range; in match_root_decoder_by_range()
3214 p = &cxlr->params; in match_region_by_range()
3217 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_region_by_range()
3230 struct range *hpa = &cxled->cxld.hpa_range; in construct_region()
3237 cxlr = __create_region(cxlrd, cxled->mode, in construct_region()
3238 atomic_read(&cxlrd->region_id)); in construct_region()
3239 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); in construct_region()
3242 dev_err(cxlmd->dev.parent, in construct_region()
3244 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3250 p = &cxlr->params; in construct_region()
3251 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in construct_region()
3252 dev_err(cxlmd->dev.parent, in construct_region()
3254 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3256 rc = -EBUSY; in construct_region()
3260 set_bit(CXL_REGION_F_AUTO, &cxlr->flags); in construct_region()
3264 rc = -ENOMEM; in construct_region()
3268 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa), in construct_region()
3269 dev_name(&cxlr->dev)); in construct_region()
3270 rc = insert_resource(cxlrd->res, res); in construct_region()
3273 * Platform-firmware may not have split resources like "System in construct_region()
3276 dev_warn(cxlmd->dev.parent, in construct_region()
3278 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3279 __func__, dev_name(&cxlr->dev)); in construct_region()
3282 p->res = res; in construct_region()
3283 p->interleave_ways = cxled->cxld.interleave_ways; in construct_region()
3284 p->interleave_granularity = cxled->cxld.interleave_granularity; in construct_region()
3285 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in construct_region()
3287 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in construct_region()
3291 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n", in construct_region()
3292 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__, in construct_region()
3293 dev_name(&cxlr->dev), p->res, p->interleave_ways, in construct_region()
3294 p->interleave_granularity); in construct_region()
3297 get_device(&cxlr->dev); in construct_region()
3304 devm_release_action(port->uport_dev, unregister_region, cxlr); in construct_region()
3311 struct range *hpa = &cxled->cxld.hpa_range; in cxl_add_to_region()
3312 struct cxl_decoder *cxld = &cxled->cxld; in cxl_add_to_region()
3320 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, in cxl_add_to_region()
3323 dev_err(cxlmd->dev.parent, in cxl_add_to_region()
3325 dev_name(&cxlmd->dev), dev_name(&cxld->dev), in cxl_add_to_region()
3326 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_add_to_region()
3327 return -ENXIO; in cxl_add_to_region()
3336 mutex_lock(&cxlrd->range_lock); in cxl_add_to_region()
3337 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, in cxl_add_to_region()
3341 region_dev = &cxlr->dev; in cxl_add_to_region()
3344 mutex_unlock(&cxlrd->range_lock); in cxl_add_to_region()
3350 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); in cxl_add_to_region()
3353 p = &cxlr->params; in cxl_add_to_region()
3354 attach = p->state == CXL_CONFIG_COMMIT; in cxl_add_to_region()
3360 * the platform-firmware memory map, otherwise the driver for in cxl_add_to_region()
3363 if (device_attach(&cxlr->dev) < 0) in cxl_add_to_region()
3364 dev_err(&cxlr->dev, "failed to enable, range: %pr\n", in cxl_add_to_region()
3365 p->res); in cxl_add_to_region()
3378 struct cxl_region_params *p = &cxlr->params; in is_system_ram()
3380 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res); in is_system_ram()
3388 unregister_memory_notifier(&cxlr->memory_notifier); in shutdown_notifiers()
3389 unregister_mt_adistance_algorithm(&cxlr->adist_notifier); in shutdown_notifiers()
3395 struct cxl_region_params *p = &cxlr->params; in cxl_region_probe()
3400 dev_dbg(&cxlr->dev, "probe interrupted\n"); in cxl_region_probe()
3404 if (p->state < CXL_CONFIG_COMMIT) { in cxl_region_probe()
3405 dev_dbg(&cxlr->dev, "config state: %d\n", p->state); in cxl_region_probe()
3406 rc = -ENXIO; in cxl_region_probe()
3410 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) { in cxl_region_probe()
3411 dev_err(&cxlr->dev, in cxl_region_probe()
3412 "failed to activate, re-commit region and retry\n"); in cxl_region_probe()
3413 rc = -ENXIO; in cxl_region_probe()
3427 cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; in cxl_region_probe()
3428 cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; in cxl_region_probe()
3429 register_memory_notifier(&cxlr->memory_notifier); in cxl_region_probe()
3431 cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; in cxl_region_probe()
3432 cxlr->adist_notifier.priority = 100; in cxl_region_probe()
3433 register_mt_adistance_algorithm(&cxlr->adist_notifier); in cxl_region_probe()
3435 rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr); in cxl_region_probe()
3439 switch (cxlr->mode) { in cxl_region_probe()
3449 p->res->start, p->res->end, cxlr, in cxl_region_probe()
3454 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n", in cxl_region_probe()
3455 cxlr->mode); in cxl_region_probe()
3456 return -ENXIO; in cxl_region_probe()