1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ 3 #include <linux/seq_file.h> 4 #include <linux/device.h> 5 #include <linux/delay.h> 6 7 #include "cxlmem.h" 8 #include "core.h" 9 10 /** 11 * DOC: cxl core hdm 12 * 13 * Compute Express Link Host Managed Device Memory, starting with the 14 * CXL 2.0 specification, is managed by an array of HDM Decoder register 15 * instances per CXL port and per CXL endpoint. Define common helpers 16 * for enumerating these registers and capabilities. 17 */ 18 19 DECLARE_RWSEM(cxl_dpa_rwsem); 20 21 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, 22 int *target_map) 23 { 24 int rc; 25 26 rc = cxl_decoder_add_locked(cxld, target_map); 27 if (rc) { 28 put_device(&cxld->dev); 29 dev_err(&port->dev, "Failed to add decoder\n"); 30 return rc; 31 } 32 33 rc = cxl_decoder_autoremove(&port->dev, cxld); 34 if (rc) 35 return rc; 36 37 dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev)); 38 39 return 0; 40 } 41 42 /* 43 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure) 44 * single ported host-bridges need not publish a decoder capability when a 45 * passthrough decode can be assumed, i.e. all transactions that the uport sees 46 * are claimed and passed to the single dport. Disable the range until the first 47 * CXL region is enumerated / activated. 48 */ 49 int devm_cxl_add_passthrough_decoder(struct cxl_port *port) 50 { 51 struct cxl_switch_decoder *cxlsd; 52 struct cxl_dport *dport = NULL; 53 int single_port_map[1]; 54 unsigned long index; 55 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 56 57 /* 58 * Capability checks are moot for passthrough decoders, support 59 * any and all possibilities. 60 */ 61 cxlhdm->interleave_mask = ~0U; 62 cxlhdm->iw_cap_mask = ~0UL; 63 64 cxlsd = cxl_switch_decoder_alloc(port, 1); 65 if (IS_ERR(cxlsd)) 66 return PTR_ERR(cxlsd); 67 68 device_lock_assert(&port->dev); 69 70 xa_for_each(&port->dports, index, dport) 71 break; 72 single_port_map[0] = dport->port_id; 73 74 return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); 75 } 76 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL); 77 78 static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm) 79 { 80 u32 hdm_cap; 81 82 hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET); 83 cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap); 84 cxlhdm->target_count = 85 FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap); 86 if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap)) 87 cxlhdm->interleave_mask |= GENMASK(11, 8); 88 if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap)) 89 cxlhdm->interleave_mask |= GENMASK(14, 12); 90 cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8); 91 if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_3_6_12_WAY, hdm_cap)) 92 cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12); 93 if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_16_WAY, hdm_cap)) 94 cxlhdm->iw_cap_mask |= BIT(16); 95 } 96 97 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info) 98 { 99 struct cxl_hdm *cxlhdm; 100 void __iomem *hdm; 101 u32 ctrl; 102 int i; 103 104 if (!info) 105 return false; 106 107 cxlhdm = dev_get_drvdata(&info->port->dev); 108 hdm = cxlhdm->regs.hdm_decoder; 109 110 if (!hdm) 111 return true; 112 113 /* 114 * If HDM decoders are present and the driver is in control of 115 * Mem_Enable skip DVSEC based emulation 116 */ 117 if (!info->mem_enabled) 118 return false; 119 120 /* 121 * If any decoders are committed already, there should not be any 122 * emulated DVSEC decoders. 123 */ 124 for (i = 0; i < cxlhdm->decoder_count; i++) { 125 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i)); 126 dev_dbg(&info->port->dev, 127 "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n", 128 info->port->id, i, 129 FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl), 130 readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)), 131 readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)), 132 readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)), 133 readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i))); 134 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) 135 return false; 136 } 137 138 return true; 139 } 140 141 /** 142 * devm_cxl_setup_hdm - map HDM decoder component registers 143 * @port: cxl_port to map 144 * @info: cached DVSEC range register info 145 */ 146 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, 147 struct cxl_endpoint_dvsec_info *info) 148 { 149 struct cxl_register_map *reg_map = &port->reg_map; 150 struct device *dev = &port->dev; 151 struct cxl_hdm *cxlhdm; 152 int rc; 153 154 cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); 155 if (!cxlhdm) 156 return ERR_PTR(-ENOMEM); 157 cxlhdm->port = port; 158 dev_set_drvdata(dev, cxlhdm); 159 160 /* Memory devices can configure device HDM using DVSEC range regs. */ 161 if (reg_map->resource == CXL_RESOURCE_NONE) { 162 if (!info || !info->mem_enabled) { 163 dev_err(dev, "No component registers mapped\n"); 164 return ERR_PTR(-ENXIO); 165 } 166 167 cxlhdm->decoder_count = info->ranges; 168 return cxlhdm; 169 } 170 171 if (!reg_map->component_map.hdm_decoder.valid) { 172 dev_dbg(&port->dev, "HDM decoder registers not implemented\n"); 173 /* unique error code to indicate no HDM decoder capability */ 174 return ERR_PTR(-ENODEV); 175 } 176 177 rc = cxl_map_component_regs(reg_map, &cxlhdm->regs, 178 BIT(CXL_CM_CAP_CAP_ID_HDM)); 179 if (rc) { 180 dev_err(dev, "Failed to map HDM capability.\n"); 181 return ERR_PTR(rc); 182 } 183 184 parse_hdm_decoder_caps(cxlhdm); 185 if (cxlhdm->decoder_count == 0) { 186 dev_err(dev, "Spec violation. Caps invalid\n"); 187 return ERR_PTR(-ENXIO); 188 } 189 190 /* 191 * Now that the hdm capability is parsed, decide if range 192 * register emulation is needed and fixup cxlhdm accordingly. 193 */ 194 if (should_emulate_decoders(info)) { 195 dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges, 196 info->ranges > 1 ? "s" : ""); 197 cxlhdm->decoder_count = info->ranges; 198 } 199 200 return cxlhdm; 201 } 202 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL); 203 204 static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth) 205 { 206 unsigned long long start = r->start, end = r->end; 207 208 seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end, 209 r->name); 210 } 211 212 void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds) 213 { 214 struct resource *p1, *p2; 215 216 down_read(&cxl_dpa_rwsem); 217 for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { 218 __cxl_dpa_debug(file, p1, 0); 219 for (p2 = p1->child; p2; p2 = p2->sibling) 220 __cxl_dpa_debug(file, p2, 1); 221 } 222 up_read(&cxl_dpa_rwsem); 223 } 224 EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL); 225 226 /* 227 * Must be called in a context that synchronizes against this decoder's 228 * port ->remove() callback (like an endpoint decoder sysfs attribute) 229 */ 230 static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) 231 { 232 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 233 struct cxl_port *port = cxled_to_port(cxled); 234 struct cxl_dev_state *cxlds = cxlmd->cxlds; 235 struct resource *res = cxled->dpa_res; 236 resource_size_t skip_start; 237 238 lockdep_assert_held_write(&cxl_dpa_rwsem); 239 240 /* save @skip_start, before @res is released */ 241 skip_start = res->start - cxled->skip; 242 __release_region(&cxlds->dpa_res, res->start, resource_size(res)); 243 if (cxled->skip) 244 __release_region(&cxlds->dpa_res, skip_start, cxled->skip); 245 cxled->skip = 0; 246 cxled->dpa_res = NULL; 247 put_device(&cxled->cxld.dev); 248 port->hdm_end--; 249 } 250 251 static void cxl_dpa_release(void *cxled) 252 { 253 down_write(&cxl_dpa_rwsem); 254 __cxl_dpa_release(cxled); 255 up_write(&cxl_dpa_rwsem); 256 } 257 258 /* 259 * Must be called from context that will not race port device 260 * unregistration, like decoder sysfs attribute methods 261 */ 262 static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled) 263 { 264 struct cxl_port *port = cxled_to_port(cxled); 265 266 lockdep_assert_held_write(&cxl_dpa_rwsem); 267 devm_remove_action(&port->dev, cxl_dpa_release, cxled); 268 __cxl_dpa_release(cxled); 269 } 270 271 static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, 272 resource_size_t base, resource_size_t len, 273 resource_size_t skipped) 274 { 275 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 276 struct cxl_port *port = cxled_to_port(cxled); 277 struct cxl_dev_state *cxlds = cxlmd->cxlds; 278 struct device *dev = &port->dev; 279 struct resource *res; 280 281 lockdep_assert_held_write(&cxl_dpa_rwsem); 282 283 if (!len) { 284 dev_warn(dev, "decoder%d.%d: empty reservation attempted\n", 285 port->id, cxled->cxld.id); 286 return -EINVAL; 287 } 288 289 if (cxled->dpa_res) { 290 dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n", 291 port->id, cxled->cxld.id, cxled->dpa_res); 292 return -EBUSY; 293 } 294 295 if (port->hdm_end + 1 != cxled->cxld.id) { 296 /* 297 * Assumes alloc and commit order is always in hardware instance 298 * order per expectations from 8.2.5.12.20 Committing Decoder 299 * Programming that enforce decoder[m] committed before 300 * decoder[m+1] commit start. 301 */ 302 dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id, 303 cxled->cxld.id, port->id, port->hdm_end + 1); 304 return -EBUSY; 305 } 306 307 if (skipped) { 308 res = __request_region(&cxlds->dpa_res, base - skipped, skipped, 309 dev_name(&cxled->cxld.dev), 0); 310 if (!res) { 311 dev_dbg(dev, 312 "decoder%d.%d: failed to reserve skipped space\n", 313 port->id, cxled->cxld.id); 314 return -EBUSY; 315 } 316 } 317 res = __request_region(&cxlds->dpa_res, base, len, 318 dev_name(&cxled->cxld.dev), 0); 319 if (!res) { 320 dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n", 321 port->id, cxled->cxld.id); 322 if (skipped) 323 __release_region(&cxlds->dpa_res, base - skipped, 324 skipped); 325 return -EBUSY; 326 } 327 cxled->dpa_res = res; 328 cxled->skip = skipped; 329 330 if (resource_contains(&cxlds->pmem_res, res)) 331 cxled->mode = CXL_DECODER_PMEM; 332 else if (resource_contains(&cxlds->ram_res, res)) 333 cxled->mode = CXL_DECODER_RAM; 334 else { 335 dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n", 336 port->id, cxled->cxld.id, cxled->dpa_res); 337 cxled->mode = CXL_DECODER_MIXED; 338 } 339 340 port->hdm_end++; 341 get_device(&cxled->cxld.dev); 342 return 0; 343 } 344 345 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, 346 resource_size_t base, resource_size_t len, 347 resource_size_t skipped) 348 { 349 struct cxl_port *port = cxled_to_port(cxled); 350 int rc; 351 352 down_write(&cxl_dpa_rwsem); 353 rc = __cxl_dpa_reserve(cxled, base, len, skipped); 354 up_write(&cxl_dpa_rwsem); 355 356 if (rc) 357 return rc; 358 359 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); 360 } 361 EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL); 362 363 resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled) 364 { 365 resource_size_t size = 0; 366 367 down_read(&cxl_dpa_rwsem); 368 if (cxled->dpa_res) 369 size = resource_size(cxled->dpa_res); 370 up_read(&cxl_dpa_rwsem); 371 372 return size; 373 } 374 375 resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled) 376 { 377 resource_size_t base = -1; 378 379 lockdep_assert_held(&cxl_dpa_rwsem); 380 if (cxled->dpa_res) 381 base = cxled->dpa_res->start; 382 383 return base; 384 } 385 386 int cxl_dpa_free(struct cxl_endpoint_decoder *cxled) 387 { 388 struct cxl_port *port = cxled_to_port(cxled); 389 struct device *dev = &cxled->cxld.dev; 390 int rc; 391 392 down_write(&cxl_dpa_rwsem); 393 if (!cxled->dpa_res) { 394 rc = 0; 395 goto out; 396 } 397 if (cxled->cxld.region) { 398 dev_dbg(dev, "decoder assigned to: %s\n", 399 dev_name(&cxled->cxld.region->dev)); 400 rc = -EBUSY; 401 goto out; 402 } 403 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { 404 dev_dbg(dev, "decoder enabled\n"); 405 rc = -EBUSY; 406 goto out; 407 } 408 if (cxled->cxld.id != port->hdm_end) { 409 dev_dbg(dev, "expected decoder%d.%d\n", port->id, 410 port->hdm_end); 411 rc = -EBUSY; 412 goto out; 413 } 414 devm_cxl_dpa_release(cxled); 415 rc = 0; 416 out: 417 up_write(&cxl_dpa_rwsem); 418 return rc; 419 } 420 421 int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, 422 enum cxl_decoder_mode mode) 423 { 424 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 425 struct cxl_dev_state *cxlds = cxlmd->cxlds; 426 struct device *dev = &cxled->cxld.dev; 427 int rc; 428 429 switch (mode) { 430 case CXL_DECODER_RAM: 431 case CXL_DECODER_PMEM: 432 break; 433 default: 434 dev_dbg(dev, "unsupported mode: %d\n", mode); 435 return -EINVAL; 436 } 437 438 down_write(&cxl_dpa_rwsem); 439 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { 440 rc = -EBUSY; 441 goto out; 442 } 443 444 /* 445 * Only allow modes that are supported by the current partition 446 * configuration 447 */ 448 if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { 449 dev_dbg(dev, "no available pmem capacity\n"); 450 rc = -ENXIO; 451 goto out; 452 } 453 if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { 454 dev_dbg(dev, "no available ram capacity\n"); 455 rc = -ENXIO; 456 goto out; 457 } 458 459 cxled->mode = mode; 460 rc = 0; 461 out: 462 up_write(&cxl_dpa_rwsem); 463 464 return rc; 465 } 466 467 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size) 468 { 469 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 470 resource_size_t free_ram_start, free_pmem_start; 471 struct cxl_port *port = cxled_to_port(cxled); 472 struct cxl_dev_state *cxlds = cxlmd->cxlds; 473 struct device *dev = &cxled->cxld.dev; 474 resource_size_t start, avail, skip; 475 struct resource *p, *last; 476 int rc; 477 478 down_write(&cxl_dpa_rwsem); 479 if (cxled->cxld.region) { 480 dev_dbg(dev, "decoder attached to %s\n", 481 dev_name(&cxled->cxld.region->dev)); 482 rc = -EBUSY; 483 goto out; 484 } 485 486 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { 487 dev_dbg(dev, "decoder enabled\n"); 488 rc = -EBUSY; 489 goto out; 490 } 491 492 for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling) 493 last = p; 494 if (last) 495 free_ram_start = last->end + 1; 496 else 497 free_ram_start = cxlds->ram_res.start; 498 499 for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling) 500 last = p; 501 if (last) 502 free_pmem_start = last->end + 1; 503 else 504 free_pmem_start = cxlds->pmem_res.start; 505 506 if (cxled->mode == CXL_DECODER_RAM) { 507 start = free_ram_start; 508 avail = cxlds->ram_res.end - start + 1; 509 skip = 0; 510 } else if (cxled->mode == CXL_DECODER_PMEM) { 511 resource_size_t skip_start, skip_end; 512 513 start = free_pmem_start; 514 avail = cxlds->pmem_res.end - start + 1; 515 skip_start = free_ram_start; 516 517 /* 518 * If some pmem is already allocated, then that allocation 519 * already handled the skip. 520 */ 521 if (cxlds->pmem_res.child && 522 skip_start == cxlds->pmem_res.child->start) 523 skip_end = skip_start - 1; 524 else 525 skip_end = start - 1; 526 skip = skip_end - skip_start + 1; 527 } else { 528 dev_dbg(dev, "mode not set\n"); 529 rc = -EINVAL; 530 goto out; 531 } 532 533 if (size > avail) { 534 dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, 535 cxl_decoder_mode_name(cxled->mode), &avail); 536 rc = -ENOSPC; 537 goto out; 538 } 539 540 rc = __cxl_dpa_reserve(cxled, start, size, skip); 541 out: 542 up_write(&cxl_dpa_rwsem); 543 544 if (rc) 545 return rc; 546 547 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); 548 } 549 550 static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl) 551 { 552 u16 eig; 553 u8 eiw; 554 555 /* 556 * Input validation ensures these warns never fire, but otherwise 557 * suppress unititalized variable usage warnings. 558 */ 559 if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw), 560 "invalid interleave_ways: %d\n", cxld->interleave_ways)) 561 return; 562 if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig), 563 "invalid interleave_granularity: %d\n", 564 cxld->interleave_granularity)) 565 return; 566 567 u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK); 568 u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK); 569 *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT; 570 } 571 572 static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl) 573 { 574 u32p_replace_bits(ctrl, 575 !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM), 576 CXL_HDM_DECODER0_CTRL_HOSTONLY); 577 } 578 579 static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) 580 { 581 struct cxl_dport **t = &cxlsd->target[0]; 582 int ways = cxlsd->cxld.interleave_ways; 583 584 *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); 585 if (ways > 1) 586 *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); 587 if (ways > 2) 588 *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id); 589 if (ways > 3) 590 *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id); 591 if (ways > 4) 592 *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id); 593 if (ways > 5) 594 *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id); 595 if (ways > 6) 596 *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); 597 if (ways > 7) 598 *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); 599 } 600 601 /* 602 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set 603 * committed or error within 10ms, but just be generous with 20ms to account for 604 * clock skew and other marginal behavior 605 */ 606 #define COMMIT_TIMEOUT_MS 20 607 static int cxld_await_commit(void __iomem *hdm, int id) 608 { 609 u32 ctrl; 610 int i; 611 612 for (i = 0; i < COMMIT_TIMEOUT_MS; i++) { 613 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 614 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) { 615 ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; 616 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 617 return -EIO; 618 } 619 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) 620 return 0; 621 fsleep(1000); 622 } 623 624 return -ETIMEDOUT; 625 } 626 627 static int cxl_decoder_commit(struct cxl_decoder *cxld) 628 { 629 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 630 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 631 void __iomem *hdm = cxlhdm->regs.hdm_decoder; 632 int id = cxld->id, rc; 633 u64 base, size; 634 u32 ctrl; 635 636 if (cxld->flags & CXL_DECODER_F_ENABLE) 637 return 0; 638 639 if (cxl_num_decoders_committed(port) != id) { 640 dev_dbg(&port->dev, 641 "%s: out of order commit, expected decoder%d.%d\n", 642 dev_name(&cxld->dev), port->id, 643 cxl_num_decoders_committed(port)); 644 return -EBUSY; 645 } 646 647 /* 648 * For endpoint decoders hosted on CXL memory devices that 649 * support the sanitize operation, make sure sanitize is not in-flight. 650 */ 651 if (is_endpoint_decoder(&cxld->dev)) { 652 struct cxl_endpoint_decoder *cxled = 653 to_cxl_endpoint_decoder(&cxld->dev); 654 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 655 struct cxl_memdev_state *mds = 656 to_cxl_memdev_state(cxlmd->cxlds); 657 658 if (mds && mds->security.sanitize_active) { 659 dev_dbg(&cxlmd->dev, 660 "attempted to commit %s during sanitize\n", 661 dev_name(&cxld->dev)); 662 return -EBUSY; 663 } 664 } 665 666 down_read(&cxl_dpa_rwsem); 667 /* common decoder settings */ 668 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); 669 cxld_set_interleave(cxld, &ctrl); 670 cxld_set_type(cxld, &ctrl); 671 base = cxld->hpa_range.start; 672 size = range_len(&cxld->hpa_range); 673 674 writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); 675 writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); 676 writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); 677 writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); 678 679 if (is_switch_decoder(&cxld->dev)) { 680 struct cxl_switch_decoder *cxlsd = 681 to_cxl_switch_decoder(&cxld->dev); 682 void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id); 683 void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id); 684 u64 targets; 685 686 cxlsd_set_targets(cxlsd, &targets); 687 writel(upper_32_bits(targets), tl_hi); 688 writel(lower_32_bits(targets), tl_lo); 689 } else { 690 struct cxl_endpoint_decoder *cxled = 691 to_cxl_endpoint_decoder(&cxld->dev); 692 void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id); 693 void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id); 694 695 writel(upper_32_bits(cxled->skip), sk_hi); 696 writel(lower_32_bits(cxled->skip), sk_lo); 697 } 698 699 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 700 up_read(&cxl_dpa_rwsem); 701 702 port->commit_end++; 703 rc = cxld_await_commit(hdm, cxld->id); 704 if (rc) { 705 dev_dbg(&port->dev, "%s: error %d committing decoder\n", 706 dev_name(&cxld->dev), rc); 707 cxld->reset(cxld); 708 return rc; 709 } 710 cxld->flags |= CXL_DECODER_F_ENABLE; 711 712 return 0; 713 } 714 715 static int commit_reap(struct device *dev, const void *data) 716 { 717 struct cxl_port *port = to_cxl_port(dev->parent); 718 struct cxl_decoder *cxld; 719 720 if (!is_switch_decoder(dev) && !is_endpoint_decoder(dev)) 721 return 0; 722 723 cxld = to_cxl_decoder(dev); 724 if (port->commit_end == cxld->id && 725 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { 726 port->commit_end--; 727 dev_dbg(&port->dev, "reap: %s commit_end: %d\n", 728 dev_name(&cxld->dev), port->commit_end); 729 } 730 731 return 0; 732 } 733 734 void cxl_port_commit_reap(struct cxl_decoder *cxld) 735 { 736 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 737 738 lockdep_assert_held_write(&cxl_region_rwsem); 739 740 /* 741 * Once the highest committed decoder is disabled, free any other 742 * decoders that were pinned allocated by out-of-order release. 743 */ 744 port->commit_end--; 745 dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev), 746 port->commit_end); 747 device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL, 748 commit_reap); 749 } 750 EXPORT_SYMBOL_NS_GPL(cxl_port_commit_reap, CXL); 751 752 static void cxl_decoder_reset(struct cxl_decoder *cxld) 753 { 754 struct cxl_port *port = to_cxl_port(cxld->dev.parent); 755 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); 756 void __iomem *hdm = cxlhdm->regs.hdm_decoder; 757 int id = cxld->id; 758 u32 ctrl; 759 760 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) 761 return; 762 763 if (port->commit_end == id) 764 cxl_port_commit_reap(cxld); 765 else 766 dev_dbg(&port->dev, 767 "%s: out of order reset, expected decoder%d.%d\n", 768 dev_name(&cxld->dev), port->id, port->commit_end); 769 770 down_read(&cxl_dpa_rwsem); 771 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 772 ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; 773 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); 774 775 writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); 776 writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); 777 writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); 778 writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); 779 up_read(&cxl_dpa_rwsem); 780 781 cxld->flags &= ~CXL_DECODER_F_ENABLE; 782 783 /* Userspace is now responsible for reconfiguring this decoder */ 784 if (is_endpoint_decoder(&cxld->dev)) { 785 struct cxl_endpoint_decoder *cxled; 786 787 cxled = to_cxl_endpoint_decoder(&cxld->dev); 788 cxled->state = CXL_DECODER_STATE_MANUAL; 789 } 790 } 791 792 static int cxl_setup_hdm_decoder_from_dvsec( 793 struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base, 794 int which, struct cxl_endpoint_dvsec_info *info) 795 { 796 struct cxl_endpoint_decoder *cxled; 797 u64 len; 798 int rc; 799 800 if (!is_cxl_endpoint(port)) 801 return -EOPNOTSUPP; 802 803 cxled = to_cxl_endpoint_decoder(&cxld->dev); 804 len = range_len(&info->dvsec_range[which]); 805 if (!len) 806 return -ENOENT; 807 808 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 809 cxld->commit = NULL; 810 cxld->reset = NULL; 811 cxld->hpa_range = info->dvsec_range[which]; 812 813 /* 814 * Set the emulated decoder as locked pending additional support to 815 * change the range registers at run time. 816 */ 817 cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; 818 port->commit_end = cxld->id; 819 820 rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0); 821 if (rc) { 822 dev_err(&port->dev, 823 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", 824 port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc); 825 return rc; 826 } 827 *dpa_base += len; 828 cxled->state = CXL_DECODER_STATE_AUTO; 829 830 return 0; 831 } 832 833 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, 834 int *target_map, void __iomem *hdm, int which, 835 u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) 836 { 837 struct cxl_endpoint_decoder *cxled = NULL; 838 u64 size, base, skip, dpa_size, lo, hi; 839 bool committed; 840 u32 remainder; 841 int i, rc; 842 u32 ctrl; 843 union { 844 u64 value; 845 unsigned char target_id[8]; 846 } target_list; 847 848 if (should_emulate_decoders(info)) 849 return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base, 850 which, info); 851 852 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); 853 lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); 854 hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which)); 855 base = (hi << 32) + lo; 856 lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which)); 857 hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which)); 858 size = (hi << 32) + lo; 859 committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED); 860 cxld->commit = cxl_decoder_commit; 861 cxld->reset = cxl_decoder_reset; 862 863 if (!committed) 864 size = 0; 865 if (base == U64_MAX || size == U64_MAX) { 866 dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n", 867 port->id, cxld->id); 868 return -ENXIO; 869 } 870 871 if (info) 872 cxled = to_cxl_endpoint_decoder(&cxld->dev); 873 cxld->hpa_range = (struct range) { 874 .start = base, 875 .end = base + size - 1, 876 }; 877 878 /* decoders are enabled if committed */ 879 if (committed) { 880 cxld->flags |= CXL_DECODER_F_ENABLE; 881 if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK) 882 cxld->flags |= CXL_DECODER_F_LOCK; 883 if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl)) 884 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 885 else 886 cxld->target_type = CXL_DECODER_DEVMEM; 887 888 guard(rwsem_write)(&cxl_region_rwsem); 889 if (cxld->id != cxl_num_decoders_committed(port)) { 890 dev_warn(&port->dev, 891 "decoder%d.%d: Committed out of order\n", 892 port->id, cxld->id); 893 return -ENXIO; 894 } 895 896 if (size == 0) { 897 dev_warn(&port->dev, 898 "decoder%d.%d: Committed with zero size\n", 899 port->id, cxld->id); 900 return -ENXIO; 901 } 902 port->commit_end = cxld->id; 903 } else { 904 if (cxled) { 905 struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); 906 struct cxl_dev_state *cxlds = cxlmd->cxlds; 907 908 /* 909 * Default by devtype until a device arrives that needs 910 * more precision. 911 */ 912 if (cxlds->type == CXL_DEVTYPE_CLASSMEM) 913 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 914 else 915 cxld->target_type = CXL_DECODER_DEVMEM; 916 } else { 917 /* To be overridden by region type at commit time */ 918 cxld->target_type = CXL_DECODER_HOSTONLYMEM; 919 } 920 921 if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) && 922 cxld->target_type == CXL_DECODER_HOSTONLYMEM) { 923 ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY; 924 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); 925 } 926 } 927 rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl), 928 &cxld->interleave_ways); 929 if (rc) { 930 dev_warn(&port->dev, 931 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n", 932 port->id, cxld->id, ctrl); 933 return rc; 934 } 935 rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), 936 &cxld->interleave_granularity); 937 if (rc) { 938 dev_warn(&port->dev, 939 "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", 940 port->id, cxld->id, ctrl); 941 return rc; 942 } 943 944 dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", 945 port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, 946 cxld->interleave_ways, cxld->interleave_granularity); 947 948 if (!cxled) { 949 lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which)); 950 hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which)); 951 target_list.value = (hi << 32) + lo; 952 for (i = 0; i < cxld->interleave_ways; i++) 953 target_map[i] = target_list.target_id[i]; 954 955 return 0; 956 } 957 958 if (!committed) 959 return 0; 960 961 dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder); 962 if (remainder) { 963 dev_err(&port->dev, 964 "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n", 965 port->id, cxld->id, size, cxld->interleave_ways); 966 return -ENXIO; 967 } 968 lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which)); 969 hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which)); 970 skip = (hi << 32) + lo; 971 rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip); 972 if (rc) { 973 dev_err(&port->dev, 974 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", 975 port->id, cxld->id, *dpa_base, 976 *dpa_base + dpa_size + skip - 1, rc); 977 return rc; 978 } 979 *dpa_base += dpa_size + skip; 980 981 cxled->state = CXL_DECODER_STATE_AUTO; 982 983 return 0; 984 } 985 986 static void cxl_settle_decoders(struct cxl_hdm *cxlhdm) 987 { 988 void __iomem *hdm = cxlhdm->regs.hdm_decoder; 989 int committed, i; 990 u32 ctrl; 991 992 if (!hdm) 993 return; 994 995 /* 996 * Since the register resource was recently claimed via request_region() 997 * be careful about trusting the "not-committed" status until the commit 998 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0 999 * 8.2.5.12.20), but double it to be tolerant of any clock skew between 1000 * host and target. 1001 */ 1002 for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) { 1003 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i)); 1004 if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) 1005 committed++; 1006 } 1007 1008 /* ensure that future checks of committed can be trusted */ 1009 if (committed != cxlhdm->decoder_count) 1010 msleep(20); 1011 } 1012 1013 /** 1014 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set 1015 * @cxlhdm: Structure to populate with HDM capabilities 1016 * @info: cached DVSEC range register info 1017 */ 1018 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm, 1019 struct cxl_endpoint_dvsec_info *info) 1020 { 1021 void __iomem *hdm = cxlhdm->regs.hdm_decoder; 1022 struct cxl_port *port = cxlhdm->port; 1023 int i; 1024 u64 dpa_base = 0; 1025 1026 cxl_settle_decoders(cxlhdm); 1027 1028 for (i = 0; i < cxlhdm->decoder_count; i++) { 1029 int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; 1030 int rc, target_count = cxlhdm->target_count; 1031 struct cxl_decoder *cxld; 1032 1033 if (is_cxl_endpoint(port)) { 1034 struct cxl_endpoint_decoder *cxled; 1035 1036 cxled = cxl_endpoint_decoder_alloc(port); 1037 if (IS_ERR(cxled)) { 1038 dev_warn(&port->dev, 1039 "Failed to allocate decoder%d.%d\n", 1040 port->id, i); 1041 return PTR_ERR(cxled); 1042 } 1043 cxld = &cxled->cxld; 1044 } else { 1045 struct cxl_switch_decoder *cxlsd; 1046 1047 cxlsd = cxl_switch_decoder_alloc(port, target_count); 1048 if (IS_ERR(cxlsd)) { 1049 dev_warn(&port->dev, 1050 "Failed to allocate decoder%d.%d\n", 1051 port->id, i); 1052 return PTR_ERR(cxlsd); 1053 } 1054 cxld = &cxlsd->cxld; 1055 } 1056 1057 rc = init_hdm_decoder(port, cxld, target_map, hdm, i, 1058 &dpa_base, info); 1059 if (rc) { 1060 dev_warn(&port->dev, 1061 "Failed to initialize decoder%d.%d\n", 1062 port->id, i); 1063 put_device(&cxld->dev); 1064 return rc; 1065 } 1066 rc = add_hdm_decoder(port, cxld, target_map); 1067 if (rc) { 1068 dev_warn(&port->dev, 1069 "Failed to add decoder%d.%d\n", port->id, i); 1070 return rc; 1071 } 1072 } 1073 1074 return 0; 1075 } 1076 EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL); 1077