// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			   int *target_map)
{
	int rc;

	rc = cxl_decoder_add_locked(cxld, target_map);
	if (rc) {
		put_device(&cxld->dev);
		dev_err(&port->dev, "Failed to add decoder\n");
		return rc;
	}

	rc = cxl_decoder_autoremove(&port->dev, cxld);
	if (rc)
		return rc;

	dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

	return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure)
 * single ported host-bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the
 * first CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_dport *dport = NULL;
	int single_port_map[1];
	unsigned long index;

	cxlsd = cxl_switch_decoder_alloc(port, 1);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	device_lock_assert(&port->dev);

	xa_for_each(&port->dports, index, dport)
		break;
	single_port_map[0] = dport->port_id;

	return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
	u32 hdm_cap;

	hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
	cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
	cxlhdm->target_count =
		FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(11, 8);
	if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
		cxlhdm->interleave_mask |= GENMASK(14, 12);
}

static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm;
	void __iomem *hdm;
	u32 ctrl;
	int i;

	if (!info)
		return false;

	cxlhdm = dev_get_drvdata(&info->port->dev);
	hdm = cxlhdm->regs.hdm_decoder;

	if (!hdm)
		return true;

	/*
	 * If HDM decoders are present and the driver is in control of
	 * Mem_Enable, skip DVSEC based emulation
	 */
	if (!info->mem_enabled)
		return false;

	/*
	 * If any decoders are committed already, there should not be any
	 * emulated DVSEC decoders.
	 */
	for (i = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		dev_dbg(&info->port->dev,
			"decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
			info->port->id, i,
			FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
			readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
			readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return false;
	}

	return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
				   struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_register_map *reg_map = &port->reg_map;
	struct device *dev = &port->dev;
	struct cxl_hdm *cxlhdm;
	int rc;

	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);
	cxlhdm->port = port;
	dev_set_drvdata(dev, cxlhdm);

	/* Memory devices can configure device HDM using DVSEC range regs. */
	if (reg_map->resource == CXL_RESOURCE_NONE) {
		if (!info || !info->mem_enabled) {
			dev_err(dev, "No component registers mapped\n");
			return ERR_PTR(-ENXIO);
		}

		cxlhdm->decoder_count = info->ranges;
		return cxlhdm;
	}

	if (!reg_map->component_map.hdm_decoder.valid) {
		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
		/* unique error code to indicate no HDM decoder capability */
		return ERR_PTR(-ENODEV);
	}

	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
				    BIT(CXL_CM_CAP_CAP_ID_HDM));
	if (rc) {
		dev_err(dev, "Failed to map HDM capability.\n");
		return ERR_PTR(rc);
	}

	parse_hdm_decoder_caps(cxlhdm);
	if (cxlhdm->decoder_count == 0) {
		dev_err(dev, "Spec violation. Caps invalid\n");
		return ERR_PTR(-ENXIO);
	}

	/*
	 * Now that the hdm capability is parsed, decide if range
	 * register emulation is needed and fixup cxlhdm accordingly.
	 */
	if (should_emulate_decoders(info)) {
		dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
			info->ranges > 1 ? "s" : "");
		cxlhdm->decoder_count = info->ranges;
	}

	return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
	unsigned long long start = r->start, end = r->end;

	seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
		   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
	struct resource *p1, *p2;

	down_read(&cxl_dpa_rwsem);
	for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
		__cxl_dpa_debug(file, p1, 0);
		for (p2 = p1->child; p2; p2 = p2->sibling)
			__cxl_dpa_debug(file, p2, 1);
	}
	up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct resource *res = cxled->dpa_res;
	resource_size_t skip_start;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	/* save @skip_start, before @res is released */
	skip_start = res->start - cxled->skip;
	__release_region(&cxlds->dpa_res, res->start, resource_size(res));
	if (cxled->skip)
		__release_region(&cxlds->dpa_res, skip_start, cxled->skip);
	cxled->skip = 0;
	cxled->dpa_res = NULL;
	put_device(&cxled->cxld.dev);
	port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
	down_write(&cxl_dpa_rwsem);
	__cxl_dpa_release(cxled);
	up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);

	lockdep_assert_held_write(&cxl_dpa_rwsem);
	devm_remove_action(&port->dev, cxl_dpa_release, cxled);
	__cxl_dpa_release(cxled);
}

static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			     resource_size_t base, resource_size_t len,
			     resource_size_t skipped)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &port->dev;
	struct resource *res;

	lockdep_assert_held_write(&cxl_dpa_rwsem);

	if (!len) {
		dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
			 port->id, cxled->cxld.id);
		return -EINVAL;
	}

	if (cxled->dpa_res) {
		dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
			port->id, cxled->cxld.id, cxled->dpa_res);
		return -EBUSY;
	}

	if (port->hdm_end + 1 != cxled->cxld.id) {
		/*
		 * Assumes alloc and commit order is always in hardware instance
		 * order per expectations from 8.2.5.12.20 Committing Decoder
		 * Programming, which enforces that decoder[m] is committed
		 * before decoder[m+1] commit starts.
		 */
		dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
			cxled->cxld.id, port->id, port->hdm_end + 1);
		return -EBUSY;
	}

	if (skipped) {
		res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
				       dev_name(&cxled->cxld.dev), 0);
		if (!res) {
			dev_dbg(dev,
				"decoder%d.%d: failed to reserve skipped space\n",
				port->id, cxled->cxld.id);
			return -EBUSY;
		}
	}
	res = __request_region(&cxlds->dpa_res, base, len,
			       dev_name(&cxled->cxld.dev), 0);
	if (!res) {
		dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
			port->id, cxled->cxld.id);
		if (skipped)
			__release_region(&cxlds->dpa_res, base - skipped,
					 skipped);
		return -EBUSY;
	}
	cxled->dpa_res = res;
	cxled->skip = skipped;

	if (resource_contains(&cxlds->pmem_res, res))
		cxled->mode = CXL_DECODER_PMEM;
	else if (resource_contains(&cxlds->ram_res, res))
		cxled->mode = CXL_DECODER_RAM;
	else {
		dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
			cxled->cxld.id, cxled->dpa_res);
		cxled->mode = CXL_DECODER_MIXED;
	}

	port->hdm_end++;
	get_device(&cxled->cxld.dev);
	return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
			 resource_size_t base, resource_size_t len,
			 resource_size_t skipped)
{
	struct cxl_port *port = cxled_to_port(cxled);
	int rc;

	down_write(&cxl_dpa_rwsem);
	rc = __cxl_dpa_reserve(cxled, base, len, skipped);
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t size = 0;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		size = resource_size(cxled->dpa_res);
	up_read(&cxl_dpa_rwsem);

	return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
	resource_size_t base = -1;

	down_read(&cxl_dpa_rwsem);
	if (cxled->dpa_res)
		base = cxled->dpa_res->start;
	up_read(&cxl_dpa_rwsem);

	return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *port = cxled_to_port(cxled);
	struct device *dev = &cxled->cxld.dev;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (!cxled->dpa_res) {
		rc = 0;
		goto out;
	}
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder assigned to: %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}
	if (cxled->cxld.id != port->hdm_end) {
		dev_dbg(dev, "expected decoder%d.%d\n", port->id,
			port->hdm_end);
		rc = -EBUSY;
		goto out;
	}
	devm_cxl_dpa_release(cxled);
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);
	return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
		     enum cxl_decoder_mode mode)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_dbg(dev, "unsupported mode: %d\n", mode);
		return -EINVAL;
	}

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Only allow modes that are supported by the current partition
	 * configuration
	 */
	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
		dev_dbg(dev, "no available pmem capacity\n");
		rc = -ENXIO;
		goto out;
	}
	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
		dev_dbg(dev, "no available ram capacity\n");
		rc = -ENXIO;
		goto out;
	}

	cxled->mode = mode;
	rc = 0;
out:
	up_write(&cxl_dpa_rwsem);

	return rc;
}

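/*
 * Added descriptive comment: pick the next free DPA for @cxled.
 * Allocations grow upward from the end of the last reservation in the
 * requested partition, and a pmem allocation must "skip" over any
 * still-unallocated tail of the volatile (ram) partition so that DPA
 * ordering is preserved.
 */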
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	resource_size_t free_ram_start, free_pmem_start;
	struct cxl_port *port = cxled_to_port(cxled);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxled->cxld.dev;
	resource_size_t start, avail, skip;
	struct resource *p, *last;
	int rc;

	down_write(&cxl_dpa_rwsem);
	if (cxled->cxld.region) {
		dev_dbg(dev, "decoder attached to %s\n",
			dev_name(&cxled->cxld.region->dev));
		rc = -EBUSY;
		goto out;
	}

	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
		dev_dbg(dev, "decoder enabled\n");
		rc = -EBUSY;
		goto out;
	}

	for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_ram_start = last->end + 1;
	else
		free_ram_start = cxlds->ram_res.start;

	for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
		last = p;
	if (last)
		free_pmem_start = last->end + 1;
	else
		free_pmem_start = cxlds->pmem_res.start;

	if (cxled->mode == CXL_DECODER_RAM) {
		start = free_ram_start;
		avail = cxlds->ram_res.end - start + 1;
		skip = 0;
	} else if (cxled->mode == CXL_DECODER_PMEM) {
		resource_size_t skip_start, skip_end;

		start = free_pmem_start;
		avail = cxlds->pmem_res.end - start + 1;
		skip_start = free_ram_start;

		/*
		 * If some pmem is already allocated, then that allocation
		 * already handled the skip.
		 */
		if (cxlds->pmem_res.child &&
		    skip_start == cxlds->pmem_res.child->start)
			skip_end = skip_start - 1;
		else
			skip_end = start - 1;
		skip = skip_end - skip_start + 1;
	} else {
		dev_dbg(dev, "mode not set\n");
		rc = -EINVAL;
		goto out;
	}

	if (size > avail) {
		dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
			cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
			&avail);
		rc = -ENOSPC;
		goto out;
	}

	rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
	up_write(&cxl_dpa_rwsem);

	if (rc)
		return rc;

	return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
	u16 eig;
	u8 eiw;

	/*
	 * Input validation ensures these warns never fire, but otherwise
	 * suppress uninitialized variable usage warnings.
	 */
	if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
		      "invalid interleave_ways: %d\n", cxld->interleave_ways))
		return;
	if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
		      "invalid interleave_granularity: %d\n",
		      cxld->interleave_granularity))
		return;

	u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
	u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
	*ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
	u32p_replace_bits(ctrl,
			  !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
			  CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

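/*
 * Added descriptive comment: pack the switch decoder's dport ids into
 * the 64-bit target list image, one byte per interleave way. Targets
 * 0-3 land in the low 32 bits (written to the Target List Low
 * register) and targets 4-7 in the high 32 bits (Target List High).
 */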
static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
	struct cxl_dport **t = &cxlsd->target[0];
	int ways = cxlsd->cxld.interleave_ways;

	*tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
	if (ways > 1)
		*tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
	if (ways > 2)
		*tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
	if (ways > 3)
		*tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
	if (ways > 4)
		*tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
	if (ways > 5)
		*tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
	if (ways > 6)
		*tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
	if (ways > 7)
		*tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but just be generous with 20ms to account
 * for clock skew and other marginal behavior
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
	u32 ctrl;
	int i;

	for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
			ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
			return -EIO;
		}
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
			return 0;
		fsleep(1000);
	}

	return -ETIMEDOUT;
}

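/*
 * Added descriptive comment: program and commit a decoder. Write the
 * interleave settings, target type, HPA base/size, and either the
 * target list (switch decoders) or the DPA skip (endpoint decoders),
 * then write the control register with COMMIT set and poll for the
 * hardware to report COMMITTED or an error.
 */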
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id, rc;
	u64 base, size;
	u32 ctrl;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	/*
	 * For endpoint decoders hosted on CXL memory devices that
	 * support the sanitize operation, make sure sanitize is not in-flight.
	 */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_memdev_state *mds =
			to_cxl_memdev_state(cxlmd->cxlds);

		if (mds && mds->security.sanitize_active) {
			dev_dbg(&cxlmd->dev,
				"attempted to commit %s during sanitize\n",
				dev_name(&cxld->dev));
			return -EBUSY;
		}
	}

	down_read(&cxl_dpa_rwsem);
	/* common decoder settings */
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
	cxld_set_interleave(cxld, &ctrl);
	cxld_set_type(cxld, &ctrl);
	base = cxld->hpa_range.start;
	size = range_len(&cxld->hpa_range);

	writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

	if (is_switch_decoder(&cxld->dev)) {
		struct cxl_switch_decoder *cxlsd =
			to_cxl_switch_decoder(&cxld->dev);
		void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
		void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
		u64 targets;

		cxlsd_set_targets(cxlsd, &targets);
		writel(upper_32_bits(targets), tl_hi);
		writel(lower_32_bits(targets), tl_lo);
	} else {
		struct cxl_endpoint_decoder *cxled =
			to_cxl_endpoint_decoder(&cxld->dev);
		void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
		void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

		writel(upper_32_bits(cxled->skip), sk_hi);
		writel(lower_32_bits(cxled->skip), sk_lo);
	}

	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end++;
	rc = cxld_await_commit(hdm, cxld->id);
	if (rc) {
		dev_dbg(&port->dev, "%s: error %d committing decoder\n",
			dev_name(&cxld->dev), rc);
		cxld->reset(cxld);
		return rc;
	}
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int id = cxld->id;
	u32 ctrl;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return 0;

	if (port->commit_end != id) {
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
		return -EBUSY;
	}

	down_read(&cxl_dpa_rwsem);
	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
	ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
	writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

	writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
	writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
	up_read(&cxl_dpa_rwsem);

	port->commit_end--;
	cxld->flags &= ~CXL_DECODER_F_ENABLE;

	/* Userspace is now responsible for reconfiguring this decoder */
	if (is_endpoint_decoder(&cxld->dev)) {
		struct cxl_endpoint_decoder *cxled;

		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxled->state = CXL_DECODER_STATE_MANUAL;
	}

	return 0;
}

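/*
 * Added descriptive comment: when HDM decoder programming cannot be
 * used (see should_emulate_decoders()), stand up an emulated endpoint
 * decoder from the cached DVSEC range registers. The DVSEC range
 * becomes the HPA window, the decoder is marked enabled and locked,
 * and the corresponding DPA is reserved.
 */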
static int cxl_setup_hdm_decoder_from_dvsec(
	struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
	int which, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled;
	u64 len;
	int rc;

	if (!is_cxl_endpoint(port))
		return -EOPNOTSUPP;

	cxled = to_cxl_endpoint_decoder(&cxld->dev);
	len = range_len(&info->dvsec_range[which]);
	if (!len)
		return -ENOENT;

	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = NULL;
	cxld->reset = NULL;
	cxld->hpa_range = info->dvsec_range[which];

	/*
	 * Set the emulated decoder as locked pending additional support to
	 * change the range registers at run time.
	 */
	cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
	port->commit_end = cxld->id;

	rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
		return rc;
	}
	*dpa_base += len;
	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
			    int *target_map, void __iomem *hdm, int which,
			    u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_endpoint_decoder *cxled = NULL;
	u64 size, base, skip, dpa_size, lo, hi;
	bool committed;
	u32 remainder;
	int i, rc;
	u32 ctrl;
	union {
		u64 value;
		unsigned char target_id[8];
	} target_list;

	if (should_emulate_decoders(info))
		return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
							which, info);

	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
	lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
	base = (hi << 32) + lo;
	lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
	size = (hi << 32) + lo;
	committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
	cxld->commit = cxl_decoder_commit;
	cxld->reset = cxl_decoder_reset;

	if (!committed)
		size = 0;
	if (base == U64_MAX || size == U64_MAX) {
		dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
			 port->id, cxld->id);
		return -ENXIO;
	}

	if (info)
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	/* decoders are enabled if committed */
	if (committed) {
		cxld->flags |= CXL_DECODER_F_ENABLE;
		if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
			cxld->flags |= CXL_DECODER_F_LOCK;
		if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		else
			cxld->target_type = CXL_DECODER_DEVMEM;
		if (cxld->id != cxl_num_decoders_committed(port)) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed out of order\n",
				 port->id, cxld->id);
			return -ENXIO;
		}

		if (size == 0) {
			dev_warn(&port->dev,
				 "decoder%d.%d: Committed with zero size\n",
				 port->id, cxld->id);
			return -ENXIO;
		}
		port->commit_end = cxld->id;
	} else {
		if (cxled) {
			struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
			struct cxl_dev_state *cxlds = cxlmd->cxlds;

			/*
			 * Default by devtype until a device arrives that needs
			 * more precision.
			 */
			if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
				cxld->target_type = CXL_DECODER_HOSTONLYMEM;
			else
				cxld->target_type = CXL_DECODER_DEVMEM;
		} else {
			/* To be overridden by region type at commit time */
			cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		}

		if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
		    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
			ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
			writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
		}
	}
	rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
			 &cxld->interleave_ways);
	if (rc) {
		dev_warn(&port->dev,
			 "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
			 port->id, cxld->id, ctrl);
		return rc;
	}
	rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
				&cxld->interleave_granularity);
	if (rc)
		return rc;

	dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
		port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
		cxld->interleave_ways, cxld->interleave_granularity);

	if (!cxled) {
		lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
		hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
		target_list.value = (hi << 32) + lo;
		for (i = 0; i < cxld->interleave_ways; i++)
			target_map[i] = target_list.target_id[i];

		return 0;
	}

	if (!committed)
		return 0;

	dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
	if (remainder) {
		dev_err(&port->dev,
			"decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
			port->id, cxld->id, size, cxld->interleave_ways);
		return -ENXIO;
	}
	lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
	hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
	skip = (hi << 32) + lo;
	rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
	if (rc) {
		dev_err(&port->dev,
			"decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
			port->id, cxld->id, *dpa_base,
			*dpa_base + dpa_size + skip - 1, rc);
		return rc;
	}
	*dpa_base += dpa_size + skip;

	cxled->state = CXL_DECODER_STATE_AUTO;

	return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	int committed, i;
	u32 ctrl;

	if (!hdm)
		return;

	/*
	 * Since the register resource was recently claimed via request_region(),
	 * be careful about trusting the "not-committed" status until the commit
	 * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
	 * 8.2.5.12.20), but double it to be tolerant of any clock skew between
	 * host and target.
	 */
	for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
		ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
		if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
			committed++;
	}

	/* ensure that future checks of committed can be trusted */
	if (committed != cxlhdm->decoder_count)
		msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				struct cxl_endpoint_dvsec_info *info)
{
	void __iomem *hdm = cxlhdm->regs.hdm_decoder;
	struct cxl_port *port = cxlhdm->port;
	int i;
	u64 dpa_base = 0;

	cxl_settle_decoders(cxlhdm);

	for (i = 0; i < cxlhdm->decoder_count; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		int rc, target_count = cxlhdm->target_count;
		struct cxl_decoder *cxld;

		if (is_cxl_endpoint(port)) {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		} else {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate decoder%d.%d\n",
					 port->id, i);
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		}

		rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
				      &dpa_base, info);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to initialize decoder%d.%d\n",
				 port->id, i);
			put_device(&cxld->dev);
			return rc;
		}
		rc = add_hdm_decoder(port, cxld, target_map);
		if (rc) {
			dev_warn(&port->dev,
				 "Failed to add decoder%d.%d\n", port->id, i);
			return rc;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);