// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
				struct cxl_component_regs *regs)
{
	struct cxl_register_map map = {
		.resource = port->component_reg_phys,
		.base = crb,
		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
	};

	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
	if (!map.component_map.hdm_decoder.valid) {
		dev_err(&port->dev, "HDM decoder registers invalid\n");
		return -ENXIO;
	}

	return cxl_map_component_regs(&port->dev, regs, &map,
				      BIT(CXL_CM_CAP_CAP_ID_HDM));
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port)
{
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	void __iomem *crb;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
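	/*
	 * Note: the full component register block is only mapped transiently
	 * here; map_hdm_decoder_regs() establishes the long-lived mapping of
	 * just the HDM decoder capability registers, and the block mapping is
	 * dropped again via iounmap() below.
	 */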
	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
	if (!crb) {
		dev_err(dev, "No component registers mapped\n");
		return ERR_PTR(-ENXIO);
	}

	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
	iounmap(crb);
	if (rc)
		return ERR_PTR(rc);

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	dev_set_drvdata(dev, cxlhdm);

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

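	/*
	 * A zero-length reservation performs no resource bookkeeping, but
	 * still takes a reference on the decoder and advances ->hdm_end via
	 * the success path below.
	 */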
	if (!len)
		goto success;

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming, which requires decoder[m] to be committed before
		 * decoder[m+1] may begin its commit.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

success:
	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

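	/*
	 * cxled->mode is consulted under cxl_dpa_rwsem by the DPA allocation
	 * path, so update it under the same lock.
	 */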
	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

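	/*
	 * Example of the skip calculation above (hypothetical capacities):
	 * with ram_res spanning DPA 0x0-0xffffffff, pmem_res starting at
	 * 0x100000000, and no prior allocations, a first pmem allocation
	 * computes start = 0x100000000 and skip = 0x100000000 so that the
	 * unallocated volatile capacity is reserved as "skipped" space
	 * alongside the pmem reservation.
	 */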
	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl, !!(cxld->target_type == CXL_DECODER_EXPANDER),
			  CXL_HDM_DECODER0_CTRL_TYPE);
}

static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	if (dev_WARN_ONCE(&cxlsd->cxld.dev,
			  ways > 8 || ways > cxlsd->nr_targets,
			  "ways: %d overflows targets: %d\n", ways,
			  cxlsd->nr_targets))
		return -ENXIO;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);

	return 0;
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account for
 * clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (port->commit_end + 1 != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end + 1);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

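	/*
	 * Remaining per-type programming: switch decoders carry a target port
	 * list, endpoint decoders carry a DPA skip value. Both must be in
	 * place before the control register write below sets COMMIT.
	 */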
	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		rc = cxlsd_set_targets(cxlsd, &targets);
		if (rc) {
			dev_dbg(&port->dev, "%s: target configuration error\n",
				dev_name(&cxld->dev));
			goto err;
		}

		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
err:
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (is_endpoint_decoder(&cxld->dev))
		cxled = to_cxl_endpoint_decoder(&cxld->dev);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
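	/*
	 * An all-ones base or size most likely indicates that the register
	 * reads did not complete (e.g. the device disappeared), so treat the
	 * decoder as unusable rather than carrying a bogus range forward.
	 */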
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl))
			cxld->target_type = CXL_DECODER_EXPANDER;
		else
			cxld->target_type = CXL_DECODER_ACCELERATOR;
		if (cxld->id != port->commit_end + 1) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		/* unless / until type-2 drivers arrive, assume type-3 */
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) {
			ctrl |= CXL_HDM_DECODER0_CTRL_TYPE;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
		cxld->target_type = CXL_DECODER_EXPANDER;
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	if (!cxled) {
		target_list.value =
			ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i, committed;
	u64 dpa_base = 0;
	u32 ctrl;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
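	/* First pass: count how many decoders are already committed */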
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);