// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/delay.h>

#include "cxlmem.h"
#include "core.h"

/**
 * DOC: cxl core hdm
 *
 * Compute Express Link Host Managed Device Memory, starting with the
 * CXL 2.0 specification, is managed by an array of HDM Decoder register
 * instances per CXL port and per CXL endpoint. Define common helpers
 * for enumerating these registers and capabilities.
 */

DECLARE_RWSEM(cxl_dpa_rwsem);

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                           int *target_map)
{
        int rc;

        rc = cxl_decoder_add_locked(cxld, target_map);
        if (rc) {
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
                return rc;
        }

        rc = cxl_decoder_autoremove(&port->dev, cxld);
        if (rc)
                return rc;

        dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

        return 0;
}

/*
 * Per the CXL specification (8.2.5.12 CXL HDM Decoder Capability Structure),
 * single-ported host bridges need not publish a decoder capability when a
 * passthrough decode can be assumed, i.e. all transactions that the uport sees
 * are claimed and passed to the single dport. Disable the range until the first
 * CXL region is enumerated / activated.
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
        struct cxl_switch_decoder *cxlsd;
        struct cxl_dport *dport = NULL;
        int single_port_map[1];
        unsigned long index;

        cxlsd = cxl_switch_decoder_alloc(port, 1);
        if (IS_ERR(cxlsd))
                return PTR_ERR(cxlsd);

        device_lock_assert(&port->dev);

        xa_for_each(&port->dports, index, dport)
                break;
        single_port_map[0] = dport->port_id;

        return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL);

static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
        u32 hdm_cap;

        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                FIELD_GET(CXL_HDM_DECODER_TARGET_COUNT_MASK, hdm_cap);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_11_8, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(11, 8);
        if (FIELD_GET(CXL_HDM_DECODER_INTERLEAVE_14_12, hdm_cap))
                cxlhdm->interleave_mask |= GENMASK(14, 12);
}

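/*
 * Emulated DVSEC range-register decoders only apply when endpoint DVSEC
 * info is provided: emulate if no HDM decoder registers are mapped, or if
 * Mem_Enable was set before the driver took control and no decoder has
 * been committed yet.
 */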
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_hdm *cxlhdm;
        void __iomem *hdm;
        u32 ctrl;
        int i;

        if (!info)
                return false;

        cxlhdm = dev_get_drvdata(&info->port->dev);
        hdm = cxlhdm->regs.hdm_decoder;

        if (!hdm)
                return true;

        /*
         * If HDM decoders are present and the driver is in control of
         * Mem_Enable, skip DVSEC-based emulation.
         */
        if (!info->mem_enabled)
                return false;

        /*
         * If any decoders are committed already, there should not be any
         * emulated DVSEC decoders.
         */
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                dev_dbg(&info->port->dev,
                        "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n",
                        info->port->id, i,
                        FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl),
                        readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)),
                        readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i)));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return false;
        }

        return true;
}

/**
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 * @info: cached DVSEC range register info
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_register_map *reg_map = &port->reg_map;
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
        int rc;

        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
        if (!cxlhdm)
                return ERR_PTR(-ENOMEM);
        cxlhdm->port = port;
        dev_set_drvdata(dev, cxlhdm);

        /* Memory devices can configure device HDM using DVSEC range regs. */
        if (reg_map->resource == CXL_RESOURCE_NONE) {
                if (!info || !info->mem_enabled) {
                        dev_err(dev, "No component registers mapped\n");
                        return ERR_PTR(-ENXIO);
                }

                cxlhdm->decoder_count = info->ranges;
                return cxlhdm;
        }

        if (!reg_map->component_map.hdm_decoder.valid) {
                dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
                /* unique error code to indicate no HDM decoder capability */
                return ERR_PTR(-ENODEV);
        }

        rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
                                    BIT(CXL_CM_CAP_CAP_ID_HDM));
        if (rc) {
                dev_err(dev, "Failed to map HDM capability.\n");
                return ERR_PTR(rc);
        }

        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
                dev_err(dev, "Spec violation. Caps invalid\n");
                return ERR_PTR(-ENXIO);
        }

        /*
         * Now that the HDM capability is parsed, decide if range
         * register emulation is needed and fix up cxlhdm accordingly.
         */
        if (should_emulate_decoders(info)) {
                dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
                        info->ranges > 1 ? "s" : "");
                cxlhdm->decoder_count = info->ranges;
        }

        return cxlhdm;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL);

static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
        unsigned long long start = r->start, end = r->end;

        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);
}

void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds)
{
        struct resource *p1, *p2;

        down_read(&cxl_dpa_rwsem);
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                __cxl_dpa_debug(file, p1, 0);
                for (p2 = p1->child; p2; p2 = p2->sibling)
                        __cxl_dpa_debug(file, p2, 1);
        }
        up_read(&cxl_dpa_rwsem);
}
EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL);

/*
 * Must be called in a context that synchronizes against this decoder's
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        resource_size_t skip_start;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        /* save @skip_start, before @res is released */
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;
}

static void cxl_dpa_release(void *cxled)
{
        down_write(&cxl_dpa_rwsem);
        __cxl_dpa_release(cxled);
        up_write(&cxl_dpa_rwsem);
}

/*
 * Must be called from context that will not race port device
 * unregistration, like decoder sysfs attribute methods
 */
static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);

        lockdep_assert_held_write(&cxl_dpa_rwsem);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);
        __cxl_dpa_release(cxled);
}

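/*
 * Reserve @len bytes of device physical address (DPA) space at @base for
 * @cxled, optionally reserving @skipped bytes of unallocated capacity
 * preceding @base. Holds a reference on the decoder device until the
 * reservation is released. Caller must hold cxl_dpa_rwsem for write.
 */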
static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                             resource_size_t base, resource_size_t len,
                             resource_size_t skipped)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
        struct resource *res;

        lockdep_assert_held_write(&cxl_dpa_rwsem);

        if (!len) {
                dev_warn(dev, "decoder%d.%d: empty reservation attempted\n",
                         port->id, cxled->cxld.id);
                return -EINVAL;
        }

        if (cxled->dpa_res) {
                dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n",
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        }

        if (port->hdm_end + 1 != cxled->cxld.id) {
                /*
                 * Assume that allocation and commit order always follows
                 * hardware instance order, per the expectations of CXL 2.0
                 * 8.2.5.12.20 Committing Decoder Programming, which requires
                 * decoder[m] to be committed before decoder[m+1] commit
                 * starts.
                 */
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
        }

        if (skipped) {
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
                if (!res) {
                        dev_dbg(dev,
                                "decoder%d.%d: failed to reserve skipped space\n",
                                port->id, cxled->cxld.id);
                        return -EBUSY;
                }
        }
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
        if (!res) {
                dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n",
                        port->id, cxled->cxld.id);
                if (skipped)
                        __release_region(&cxlds->dpa_res, base - skipped,
                                         skipped);
                return -EBUSY;
        }
        cxled->dpa_res = res;
        cxled->skip = skipped;

        if (resource_contains(&cxlds->pmem_res, res))
                cxled->mode = CXL_DECODER_PMEM;
        else if (resource_contains(&cxlds->ram_res, res))
                cxled->mode = CXL_DECODER_RAM;
        else {
                dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
                         port->id, cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        }

        port->hdm_end++;
        get_device(&cxled->cxld.dev);
        return 0;
}

int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped)
{
        struct cxl_port *port = cxled_to_port(cxled);
        int rc;

        down_write(&cxl_dpa_rwsem);
        rc = __cxl_dpa_reserve(cxled, base, len, skipped);
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_dpa_reserve, CXL);

resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t size = 0;

        down_read(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                size = resource_size(cxled->dpa_res);
        up_read(&cxl_dpa_rwsem);

        return size;
}

resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled)
{
        resource_size_t base = -1;

        lockdep_assert_held(&cxl_dpa_rwsem);
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;

        return base;
}

int cxl_dpa_free(struct cxl_endpoint_decoder *cxled)
{
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (!cxled->dpa_res) {
                rc = 0;
                goto out;
        }
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder assigned to: %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                rc = -EBUSY;
                goto out;
        }
        devm_cxl_dpa_release(cxled);
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);
        return rc;
}

int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
                     enum cxl_decoder_mode mode)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        int rc;

        switch (mode) {
        case CXL_DECODER_RAM:
        case CXL_DECODER_PMEM:
                break;
        default:
                dev_dbg(dev, "unsupported mode: %d\n", mode);
                return -EINVAL;
        }

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
                goto out;
        }

        /*
         * Only allow modes that are supported by the current partition
         * configuration
         */
        if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
                dev_dbg(dev, "no available pmem capacity\n");
                rc = -ENXIO;
                goto out;
        }
        if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
                dev_dbg(dev, "no available ram capacity\n");
                rc = -ENXIO;
                goto out;
        }

        cxled->mode = mode;
        rc = 0;
out:
        up_write(&cxl_dpa_rwsem);

        return rc;
}

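/*
 * Allocate @size bytes immediately after the last allocation in the
 * partition selected by the decoder's mode. A pmem allocation records any
 * unallocated ram capacity it jumps over as "skip" so that it can be
 * programmed into the decoder's skip registers at commit time.
 */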
int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
{
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
        resource_size_t free_ram_start, free_pmem_start;
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        resource_size_t start, avail, skip;
        struct resource *p, *last;
        int rc;

        down_write(&cxl_dpa_rwsem);
        if (cxled->cxld.region) {
                dev_dbg(dev, "decoder attached to %s\n",
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
                goto out;
        }

        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                dev_dbg(dev, "decoder enabled\n");
                rc = -EBUSY;
                goto out;
        }

        for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_ram_start = last->end + 1;
        else
                free_ram_start = cxlds->ram_res.start;

        for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
                last = p;
        if (last)
                free_pmem_start = last->end + 1;
        else
                free_pmem_start = cxlds->pmem_res.start;

        if (cxled->mode == CXL_DECODER_RAM) {
                start = free_ram_start;
                avail = cxlds->ram_res.end - start + 1;
                skip = 0;
        } else if (cxled->mode == CXL_DECODER_PMEM) {
                resource_size_t skip_start, skip_end;

                start = free_pmem_start;
                avail = cxlds->pmem_res.end - start + 1;
                skip_start = free_ram_start;

                /*
                 * If some pmem is already allocated, then that allocation
                 * already handled the skip.
                 */
                if (cxlds->pmem_res.child &&
                    skip_start == cxlds->pmem_res.child->start)
                        skip_end = skip_start - 1;
                else
                        skip_end = start - 1;
                skip = skip_end - skip_start + 1;
        } else {
                dev_dbg(dev, "mode not set\n");
                rc = -EINVAL;
                goto out;
        }

        if (size > avail) {
                dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
                        cxl_decoder_mode_name(cxled->mode), &avail);
                rc = -ENOSPC;
                goto out;
        }

        rc = __cxl_dpa_reserve(cxled, start, size, skip);
out:
        up_write(&cxl_dpa_rwsem);

        if (rc)
                return rc;

        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
}

static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl)
{
        u16 eig;
        u8 eiw;

        /*
         * Input validation ensures these warns never fire, but otherwise
         * suppress uninitialized variable usage warnings.
         */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
                return;
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      "invalid interleave_granularity: %d\n",
                      cxld->interleave_granularity))
                return;

        u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK);
        u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK);
        *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT;
}

static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
{
        u32p_replace_bits(ctrl,
                          !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),
                          CXL_HDM_DECODER0_CTRL_HOSTONLY);
}

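/*
 * Build the Target List register image for a switch decoder: one byte of
 * dport port_id per interleave way, up to the 8 targets that fit in a
 * 64-bit register pair.
 */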
static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
{
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;

        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
        if (ways > 2)
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
        if (ways > 3)
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
        if (ways > 4)
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
        if (ways > 5)
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
        if (ways > 6)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
}

/*
 * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set
 * committed or error within 10ms, but be generous and allow 20ms to account
 * for clock skew and other marginal behavior.
 */
#define COMMIT_TIMEOUT_MS 20
static int cxld_await_commit(void __iomem *hdm, int id)
{
        u32 ctrl;
        int i;

        for (i = 0; i < COMMIT_TIMEOUT_MS; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) {
                        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
                }
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl))
                        return 0;
                fsleep(1000);
        }

        return -ETIMEDOUT;
}

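/*
 * Program a decoder's HPA range, interleave settings, and target list
 * (switch decoders) or DPA skip (endpoint decoders), then set the commit
 * bit and wait for hardware to report the decoder as committed.
 */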
static int cxl_decoder_commit(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;
        u64 base, size;
        u32 ctrl;

        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;

        if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id,
                        cxl_num_decoders_committed(port));
                return -EBUSY;
        }

        /*
         * For endpoint decoders hosted on CXL memory devices that
         * support the sanitize operation, make sure sanitize is not in-flight.
         */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                struct cxl_memdev_state *mds =
                        to_cxl_memdev_state(cxlmd->cxlds);

                if (mds && mds->security.sanitize_active) {
                        dev_dbg(&cxlmd->dev,
                                "attempted to commit %s during sanitize\n",
                                dev_name(&cxld->dev));
                        return -EBUSY;
                }
        }

        down_read(&cxl_dpa_rwsem);
        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        cxld_set_interleave(cxld, &ctrl);
        cxld_set_type(cxld, &ctrl);
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);

        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));

        if (is_switch_decoder(&cxld->dev)) {
                struct cxl_switch_decoder *cxlsd =
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;

                cxlsd_set_targets(cxlsd, &targets);
                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
                struct cxl_endpoint_decoder *cxled =
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);

                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        }

        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
                return rc;
        }
        cxld->flags |= CXL_DECODER_F_ENABLE;

        return 0;
}

static int cxl_decoder_reset(struct cxl_decoder *cxld)
{
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        u32 ctrl;

        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
                return 0;

        if (port->commit_end != id) {
                dev_dbg(&port->dev,
                        "%s: out of order reset, expected decoder%d.%d\n",
                        dev_name(&cxld->dev), port->id, port->commit_end);
                return -EBUSY;
        }

        down_read(&cxl_dpa_rwsem);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT;
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));

        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        up_read(&cxl_dpa_rwsem);

        port->commit_end--;
        cxld->flags &= ~CXL_DECODER_F_ENABLE;

        /* Userspace is now responsible for reconfiguring this decoder */
        if (is_endpoint_decoder(&cxld->dev)) {
                struct cxl_endpoint_decoder *cxled;

                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;
        }

        return 0;
}

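/*
 * Construct an emulated decoder for an endpoint that is already actively
 * decoding via DVSEC range registers: mark it enabled and locked, and
 * reserve the corresponding DPA capacity.
 */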
static int cxl_setup_hdm_decoder_from_dvsec(
        struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
        int which, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled;
        u64 len;
        int rc;

        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;

        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        len = range_len(&info->dvsec_range[which]);
        if (!len)
                return -ENOENT;

        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];

        /*
         * Set the emulated decoder as locked pending additional support to
         * change the range registers at run time.
         */
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;

        rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
                return rc;
        }
        *dpa_base += len;
        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

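/*
 * Initialize a decoder from its HDM register state (or from DVSEC info
 * when emulation is required): recover the HPA range, interleave
 * settings, the target list for switch decoders, and any committed DPA
 * allocation for endpoint decoders.
 */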
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                            int *target_map, void __iomem *hdm, int which,
                            u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
        struct cxl_endpoint_decoder *cxled = NULL;
        u64 size, base, skip, dpa_size, lo, hi;
        bool committed;
        u32 remainder;
        int i, rc;
        u32 ctrl;
        union {
                u64 value;
                unsigned char target_id[8];
        } target_list;

        if (should_emulate_decoders(info))
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
                                                        which, info);

        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
        lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which));
        base = (hi << 32) + lo;
        lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which));
        size = (hi << 32) + lo;
        committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED);
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;

        if (!committed)
                size = 0;
        if (base == U64_MAX || size == U64_MAX) {
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        }

        if (info)
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
        cxld->hpa_range = (struct range) {
                .start = base,
                .end = base + size - 1,
        };

        /* decoders are enabled if committed */
        if (committed) {
                cxld->flags |= CXL_DECODER_F_ENABLE;
                if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK)
                        cxld->flags |= CXL_DECODER_F_LOCK;
                if (FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl))
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;

                guard(rwsem_write)(&cxl_region_rwsem);
                if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }

                if (size == 0) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed with zero size\n",
                                 port->id, cxld->id);
                        return -ENXIO;
                }
                port->commit_end = cxld->id;
        } else {
                if (cxled) {
                        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
                        struct cxl_dev_state *cxlds = cxlmd->cxlds;

                        /*
                         * Default by devtype until a device arrives that needs
                         * more precision.
                         */
                        if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
                                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                        else
                                cxld->target_type = CXL_DECODER_DEVMEM;
                } else {
                        /* To be overridden by region type at commit time */
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                }

                if (!FIELD_GET(CXL_HDM_DECODER0_CTRL_HOSTONLY, ctrl) &&
                    cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
                        ctrl |= CXL_HDM_DECODER0_CTRL_HOSTONLY;
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which));
                }
        }
        rc = eiw_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl),
                         &cxld->interleave_ways);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }
        rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
                                &cxld->interleave_granularity);
        if (rc) {
                dev_warn(&port->dev,
                         "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
                         port->id, cxld->id, ctrl);
                return rc;
        }

        dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
                port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
                cxld->interleave_ways, cxld->interleave_granularity);

        if (!cxled) {
                lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which));
                hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
                target_list.value = (hi << 32) + lo;
                for (i = 0; i < cxld->interleave_ways; i++)
                        target_map[i] = target_list.target_id[i];

                return 0;
        }

        if (!committed)
                return 0;

        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
        if (remainder) {
                dev_err(&port->dev,
                        "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n",
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
        }
        lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which));
        hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which));
        skip = (hi << 32) + lo;
        rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip);
        if (rc) {
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
                return rc;
        }
        *dpa_base += dpa_size + skip;

        cxled->state = CXL_DECODER_STATE_AUTO;

        return 0;
}

static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int committed, i;
        u32 ctrl;

        if (!hdm)
                return;

        /*
         * Since the register resource was recently claimed via request_region(),
         * be careful about trusting the "not-committed" status until the commit
         * timeout has elapsed. The commit timeout is 10ms (CXL 2.0
         * 8.2.5.12.20), but double it to be tolerant of any clock skew between
         * host and target.
         */
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i));
                if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)
                        committed++;
        }

        /* ensure that future checks of committed can be trusted */
        if (committed != cxlhdm->decoder_count)
                msleep(20);
}

/**
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 * @cxlhdm: Structure to populate with HDM capabilities
 * @info: cached DVSEC range register info
 */
int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info)
{
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        int i;
        u64 dpa_base = 0;

        cxl_settle_decoders(cxlhdm);

        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
                int rc, target_count = cxlhdm->target_count;
                struct cxl_decoder *cxld;

                if (is_cxl_endpoint(port)) {
                        struct cxl_endpoint_decoder *cxled;

                        cxled = cxl_endpoint_decoder_alloc(port);
                        if (IS_ERR(cxled)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxled);
                        }
                        cxld = &cxled->cxld;
                } else {
                        struct cxl_switch_decoder *cxlsd;

                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                        if (IS_ERR(cxlsd)) {
                                dev_warn(&port->dev,
                                         "Failed to allocate decoder%d.%d\n",
                                         port->id, i);
                                return PTR_ERR(cxlsd);
                        }
                        cxld = &cxlsd->cxld;
                }

                rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
                                      &dpa_base, info);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to initialize decoder%d.%d\n",
                                 port->id, i);
                        put_device(&cxld->dev);
                        return rc;
                }
                rc = add_hdm_decoder(port, cxld, target_map);
                if (rc) {
                        dev_warn(&port->dev,
                                 "Failed to add decoder%d.%d\n", port->id, i);
                        return rc;
                }
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL);