// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "trace.h"
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
}

static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
                                kgid_t *gid)
{
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%zu\n", mds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
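/*
 * The attributes above land in the memdev's sysfs directory, and the
 * ->devnode() callback above places the character device under /dev/cxl/.
 * Illustrative shell usage, assuming a device named "mem0":
 *
 *   $ cat /sys/bus/cxl/devices/mem0/firmware_version
 *   $ cat /sys/bus/cxl/devices/mem0/payload_max
 *   $ ls /dev/cxl/mem0
 */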
static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
{
        /* Static RAM is only expected at partition 0. */
        if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
                return 0;
        return resource_size(&cxlds->part[0].res);
}

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = cxl_ram_size(cxlds);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
        __ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = cxl_pmem_size(cxlds);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
        __ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t security_state_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        unsigned long state = mds->security.state;
        int rc = 0;

        /* sync with latest submission state */
        mutex_lock(&cxl_mbox->mbox_mutex);
        if (mds->security.sanitize_active)
                rc = sysfs_emit(buf, "sanitize\n");
        mutex_unlock(&cxl_mbox->mbox_mutex);
        if (rc)
                return rc;

        if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
                return sysfs_emit(buf, "disabled\n");
        if (state & CXL_PMEM_SEC_STATE_FROZEN ||
            state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
            state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
                return sysfs_emit(buf, "frozen\n");
        if (state & CXL_PMEM_SEC_STATE_LOCKED)
                return sysfs_emit(buf, "locked\n");

        return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
        __ATTR(state, 0444, security_state_show, NULL);

static ssize_t security_sanitize_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        bool sanitize;
        ssize_t rc;

        if (kstrtobool(buf, &sanitize) || !sanitize)
                return -EINVAL;

        rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
        if (rc)
                return rc;

        return len;
}
static struct device_attribute dev_attr_security_sanitize =
        __ATTR(sanitize, 0200, NULL, security_sanitize_store);

static ssize_t security_erase_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t len)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        ssize_t rc;
        bool erase;

        if (kstrtobool(buf, &erase) || !erase)
                return -EINVAL;

        rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
        if (rc)
                return rc;

        return len;
}
static struct device_attribute dev_attr_security_erase =
        __ATTR(erase, 0200, NULL, security_erase_store);
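/*
 * Sketch of the resulting security ABI, assuming a device named "mem0":
 * reads of security/state report one of "disabled", "frozen", "locked",
 * "unlocked", or "sanitize"; writing "1" to security/sanitize or
 * security/erase kicks off the corresponding mailbox operation:
 *
 *   $ cat /sys/bus/cxl/devices/mem0/security/state
 *   $ echo 1 > /sys/bus/cxl/devices/mem0/security/sanitize
 */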
bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
                               enum poison_cmd_enabled_bits cmd)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

        return test_bit(cmd, mds->poison.enabled_cmds);
}

static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        u64 offset, length;
        int rc = 0;

        /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
        for (int i = 0; i < cxlds->nr_partitions; i++) {
                const struct resource *res = &cxlds->part[i].res;

                offset = res->start;
                length = resource_size(res);
                rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
                /*
                 * Invalid Physical Address is not an error for
                 * volatile addresses. Device support is optional.
                 */
                if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
                        rc = 0;
        }
        return rc;
}

int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
        struct cxl_port *port;
        int rc;

        port = cxlmd->endpoint;
        if (!port || !is_cxl_endpoint(port))
                return -EINVAL;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        if (cxl_num_decoders_committed(port) == 0) {
                /* No regions mapped to this memdev */
                rc = cxl_get_poison_by_memdev(cxlmd);
        } else {
                /* Regions mapped, collect poison by endpoint */
                rc = cxl_get_poison_by_endpoint(port);
        }

        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");

static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        if (!resource_size(&cxlds->dpa_res)) {
                dev_dbg(cxlds->dev, "device has no dpa resource\n");
                return -EINVAL;
        }
        if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
                dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
                        dpa, &cxlds->dpa_res);
                return -EINVAL;
        }
        if (!IS_ALIGNED(dpa, 64)) {
                dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
                return -EINVAL;
        }

        return 0;
}

int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
        struct cxl_mbox_inject_poison inject;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_region *cxlr;
        int rc;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        lockdep_assert_held(&cxl_rwsem.dpa);
        lockdep_assert_held(&cxl_rwsem.region);

        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                return rc;

        inject.address = cpu_to_le64(dpa);
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_INJECT_POISON,
                .size_in = sizeof(inject),
                .payload_in = &inject,
        };
        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
                dev_warn_once(cxl_mbox->host,
                              "poison inject dpa:%#llx region: %s\n", dpa,
                              dev_name(&cxlr->dev));

        record = (struct cxl_poison_record) {
                .address = cpu_to_le64(dpa),
                .length = cpu_to_le32(1),
        };
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);

        return 0;
}
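/*
 * Lock-acquiring wrapper for cxl_inject_poison_locked(): takes the region
 * and DPA rwsems (interruptibly) so the DPA-to-region lookup and the
 * inject stay consistent with region topology changes.
 */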
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
        int rc;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        return cxl_inject_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");

int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
        struct cxl_mbox_clear_poison clear;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_region *cxlr;
        int rc;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        lockdep_assert_held(&cxl_rwsem.dpa);
        lockdep_assert_held(&cxl_rwsem.region);

        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                return rc;

        /*
         * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
         * is defined to accept 64 bytes of write-data, along with the
         * address to clear. This driver uses zeroes as write-data.
         */
        clear = (struct cxl_mbox_clear_poison) {
                .address = cpu_to_le64(dpa)
        };

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_CLEAR_POISON,
                .size_in = sizeof(clear),
                .payload_in = &clear,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
                dev_warn_once(cxl_mbox->host,
                              "poison clear dpa:%#llx region: %s\n", dpa,
                              dev_name(&cxlr->dev));

        record = (struct cxl_poison_record) {
                .address = cpu_to_le64(dpa),
                .length = cpu_to_le32(1),
        };
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);

        return 0;
}

int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
        int rc;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        return cxl_clear_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");

static struct attribute *cxl_memdev_attributes[] = {
        &dev_attr_serial.attr,
        &dev_attr_firmware_version.attr,
        &dev_attr_payload_max.attr,
        &dev_attr_label_storage_size.attr,
        &dev_attr_numa_node.attr,
        NULL,
};

static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
{
        for (int i = 0; i < cxlds->nr_partitions; i++)
                if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
                        return &cxlds->part[i].perf;
        return NULL;
}

static ssize_t pmem_qos_class_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
}

static struct device_attribute dev_attr_pmem_qos_class =
        __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);

static struct attribute *cxl_memdev_pmem_attributes[] = {
        &dev_attr_pmem_size.attr,
        &dev_attr_pmem_qos_class.attr,
        NULL,
};
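/*
 * Mirrors the assumption in cxl_ram_size() above: volatile capacity, and
 * therefore its perf data, is only expected at partition 0.
 */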
static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
{
        if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
                return NULL;
        return &cxlds->part[0].perf;
}

static ssize_t ram_qos_class_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
}

static struct device_attribute dev_attr_ram_qos_class =
        __ATTR(qos_class, 0444, ram_qos_class_show, NULL);

static struct attribute *cxl_memdev_ram_attributes[] = {
        &dev_attr_ram_size.attr,
        &dev_attr_ram_qos_class.attr,
        NULL,
};

static struct attribute *cxl_memdev_security_attributes[] = {
        &dev_attr_security_state.attr,
        &dev_attr_security_sanitize.attr,
        &dev_attr_security_erase.attr,
        NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
{
        if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
                return 0;
        return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
        .attrs = cxl_memdev_attributes,
        .is_visible = cxl_memdev_visible,
};

static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);

        if (a == &dev_attr_ram_qos_class.attr &&
            (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_ram_attribute_group = {
        .name = "ram",
        .attrs = cxl_memdev_ram_attributes,
        .is_visible = cxl_ram_visible,
};

static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);

        if (a == &dev_attr_pmem_qos_class.attr &&
            (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .name = "pmem",
        .attrs = cxl_memdev_pmem_attributes,
        .is_visible = cxl_pmem_visible,
};

static umode_t cxl_memdev_security_visible(struct kobject *kobj,
                                           struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

        if (a == &dev_attr_security_sanitize.attr &&
            !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
                return 0;

        if (a == &dev_attr_security_erase.attr &&
            !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_security_attribute_group = {
        .name = "security",
        .attrs = cxl_memdev_security_attributes,
        .is_visible = cxl_memdev_security_visible,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        &cxl_memdev_attribute_group,
        &cxl_memdev_ram_attribute_group,
        &cxl_memdev_pmem_attribute_group,
        &cxl_memdev_security_attribute_group,
        NULL,
};
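/*
 * Re-evaluate the ram/pmem group visibility once perf data (qos_class)
 * arrives; the is_visible() callbacks above hide qos_class until then.
 */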
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
{
        sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
        sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");

static const struct device_type cxl_memdev_type = {
        .name = "cxl_memdev",
        .release = cxl_memdev_release,
        .devnode = cxl_memdev_devnode,
        .groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(const struct device *dev)
{
        return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @mds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
                                unsigned long *cmds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        guard(rwsem_write)(&cxl_memdev_rwsem);
        bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
                  cmds, CXL_MEM_COMMAND_ID_MAX);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @mds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
                                  unsigned long *cmds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        guard(rwsem_write)(&cxl_memdev_rwsem);
        bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
                      cmds, CXL_MEM_COMMAND_ID_MAX);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");

static void cxl_memdev_shutdown(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        guard(rwsem_write)(&cxl_memdev_rwsem);
        cxlmd->cxlds = NULL;
}

static void cxl_memdev_unregister(void *_cxlmd)
{
        struct cxl_memdev *cxlmd = _cxlmd;
        struct device *dev = &cxlmd->dev;

        cdev_device_del(&cxlmd->cdev, dev);
        cxl_memdev_shutdown(dev);
        put_device(dev);
}
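/*
 * Teardown order matters here: the cdev is deleted first so that no new
 * ioctl users can enter, then cxl_memdev_shutdown() clears ->cxlds under
 * cxl_memdev_rwsem to synchronize against in-flight ioctls.
 */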
649 */ 650 if (cxlmd->attach) 651 device_release_driver(cxlmd->dev.parent); 652 else 653 device_release_driver(&cxlmd->dev); 654 put_device(&cxlmd->dev); 655 } 656 657 static struct lock_class_key cxl_memdev_key; 658 659 struct cxl_dev_state *_devm_cxl_dev_state_create(struct device *dev, 660 enum cxl_devtype type, 661 u64 serial, u16 dvsec, 662 size_t size, bool has_mbox) 663 { 664 struct cxl_dev_state *cxlds = devm_kzalloc(dev, size, GFP_KERNEL); 665 666 if (!cxlds) 667 return NULL; 668 669 cxlds->dev = dev; 670 cxlds->type = type; 671 cxlds->serial = serial; 672 cxlds->cxl_dvsec = dvsec; 673 cxlds->reg_map.host = dev; 674 cxlds->reg_map.resource = CXL_RESOURCE_NONE; 675 676 if (has_mbox) 677 cxlds->cxl_mbox.host = dev; 678 679 return cxlds; 680 } 681 EXPORT_SYMBOL_NS_GPL(_devm_cxl_dev_state_create, "CXL"); 682 683 static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds, 684 const struct file_operations *fops, 685 const struct cxl_memdev_attach *attach) 686 { 687 struct cxl_memdev *cxlmd; 688 struct device *dev; 689 struct cdev *cdev; 690 int rc; 691 692 cxlmd = kzalloc_obj(*cxlmd); 693 if (!cxlmd) 694 return ERR_PTR(-ENOMEM); 695 696 rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL); 697 if (rc < 0) 698 goto err; 699 cxlmd->id = rc; 700 cxlmd->depth = -1; 701 cxlmd->attach = attach; 702 cxlmd->endpoint = ERR_PTR(-ENXIO); 703 704 dev = &cxlmd->dev; 705 device_initialize(dev); 706 lockdep_set_class(&dev->mutex, &cxl_memdev_key); 707 dev->parent = cxlds->dev; 708 dev->bus = &cxl_bus_type; 709 dev->devt = MKDEV(cxl_mem_major, cxlmd->id); 710 dev->type = &cxl_memdev_type; 711 device_set_pm_not_required(dev); 712 INIT_WORK(&cxlmd->detach_work, detach_memdev); 713 714 cdev = &cxlmd->cdev; 715 cdev_init(cdev, fops); 716 return cxlmd; 717 718 err: 719 kfree(cxlmd); 720 return ERR_PTR(rc); 721 } 722 723 static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd, 724 unsigned long arg) 725 { 726 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); 727 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 728 729 switch (cmd) { 730 case CXL_MEM_QUERY_COMMANDS: 731 return cxl_query_cmd(cxl_mbox, (void __user *)arg); 732 case CXL_MEM_SEND_COMMAND: 733 return cxl_send_cmd(cxl_mbox, (void __user *)arg); 734 default: 735 return -ENOTTY; 736 } 737 } 738 739 static long cxl_memdev_ioctl(struct file *file, unsigned int cmd, 740 unsigned long arg) 741 { 742 struct cxl_memdev *cxlmd = file->private_data; 743 struct cxl_dev_state *cxlds; 744 745 guard(rwsem_read)(&cxl_memdev_rwsem); 746 cxlds = cxlmd->cxlds; 747 if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM) 748 return __cxl_memdev_ioctl(cxlmd, cmd, arg); 749 750 return -ENXIO; 751 } 752 753 static int cxl_memdev_open(struct inode *inode, struct file *file) 754 { 755 struct cxl_memdev *cxlmd = 756 container_of(inode->i_cdev, typeof(*cxlmd), cdev); 757 758 get_device(&cxlmd->dev); 759 file->private_data = cxlmd; 760 761 return 0; 762 } 763 764 static int cxl_memdev_release_file(struct inode *inode, struct file *file) 765 { 766 struct cxl_memdev *cxlmd = 767 container_of(inode->i_cdev, typeof(*cxlmd), cdev); 768 769 put_device(&cxlmd->dev); 770 771 return 0; 772 } 773 774 /** 775 * cxl_mem_get_fw_info - Get Firmware info 776 * @mds: The device data for the operation 777 * 778 * Retrieve firmware info for the device specified. 779 * 780 * Return: 0 if no error: or the result of the mailbox command. 
781 * 782 * See CXL-3.0 8.2.9.3.1 Get FW Info 783 */ 784 static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds) 785 { 786 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 787 struct cxl_mbox_get_fw_info info; 788 struct cxl_mbox_cmd mbox_cmd; 789 int rc; 790 791 mbox_cmd = (struct cxl_mbox_cmd) { 792 .opcode = CXL_MBOX_OP_GET_FW_INFO, 793 .size_out = sizeof(info), 794 .payload_out = &info, 795 }; 796 797 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); 798 if (rc < 0) 799 return rc; 800 801 mds->fw.num_slots = info.num_slots; 802 mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK, 803 info.slot_info); 804 805 return 0; 806 } 807 808 /** 809 * cxl_mem_activate_fw - Activate Firmware 810 * @mds: The device data for the operation 811 * @slot: slot number to activate 812 * 813 * Activate firmware in a given slot for the device specified. 814 * 815 * Return: 0 if no error: or the result of the mailbox command. 816 * 817 * See CXL-3.0 8.2.9.3.3 Activate FW 818 */ 819 static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot) 820 { 821 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 822 struct cxl_mbox_activate_fw activate; 823 struct cxl_mbox_cmd mbox_cmd; 824 825 if (slot == 0 || slot > mds->fw.num_slots) 826 return -EINVAL; 827 828 mbox_cmd = (struct cxl_mbox_cmd) { 829 .opcode = CXL_MBOX_OP_ACTIVATE_FW, 830 .size_in = sizeof(activate), 831 .payload_in = &activate, 832 }; 833 834 /* Only offline activation supported for now */ 835 activate.action = CXL_FW_ACTIVATE_OFFLINE; 836 activate.slot = slot; 837 838 return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); 839 } 840 841 /** 842 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer 843 * @mds: The device data for the operation 844 * 845 * Abort an in-progress firmware transfer for the device specified. 846 * 847 * Return: 0 if no error: or the result of the mailbox command. 
848 * 849 * See CXL-3.0 8.2.9.3.2 Transfer FW 850 */ 851 static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds) 852 { 853 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 854 struct cxl_mbox_transfer_fw *transfer; 855 struct cxl_mbox_cmd mbox_cmd; 856 int rc; 857 858 transfer = kzalloc_flex(*transfer, data, 0); 859 if (!transfer) 860 return -ENOMEM; 861 862 /* Set a 1s poll interval and a total wait time of 30s */ 863 mbox_cmd = (struct cxl_mbox_cmd) { 864 .opcode = CXL_MBOX_OP_TRANSFER_FW, 865 .size_in = sizeof(*transfer), 866 .payload_in = transfer, 867 .poll_interval_ms = 1000, 868 .poll_count = 30, 869 }; 870 871 transfer->action = CXL_FW_TRANSFER_ACTION_ABORT; 872 873 rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd); 874 kfree(transfer); 875 return rc; 876 } 877 878 static void cxl_fw_cleanup(struct fw_upload *fwl) 879 { 880 struct cxl_memdev_state *mds = fwl->dd_handle; 881 882 mds->fw.next_slot = 0; 883 } 884 885 static int cxl_fw_do_cancel(struct fw_upload *fwl) 886 { 887 struct cxl_memdev_state *mds = fwl->dd_handle; 888 struct cxl_dev_state *cxlds = &mds->cxlds; 889 struct cxl_memdev *cxlmd = cxlds->cxlmd; 890 int rc; 891 892 rc = cxl_mem_abort_fw_xfer(mds); 893 if (rc < 0) 894 dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc); 895 896 return FW_UPLOAD_ERR_CANCELED; 897 } 898 899 static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data, 900 u32 size) 901 { 902 struct cxl_memdev_state *mds = fwl->dd_handle; 903 struct cxl_mbox_transfer_fw *transfer; 904 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 905 906 if (!size) 907 return FW_UPLOAD_ERR_INVALID_SIZE; 908 909 mds->fw.oneshot = struct_size(transfer, data, size) < 910 cxl_mbox->payload_size; 911 912 if (cxl_mem_get_fw_info(mds)) 913 return FW_UPLOAD_ERR_HW_ERROR; 914 915 /* 916 * So far no state has been changed, hence no other cleanup is 917 * necessary. Simply return the cancelled status. 918 */ 919 if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 920 return FW_UPLOAD_ERR_CANCELED; 921 922 return FW_UPLOAD_ERR_NONE; 923 } 924 925 static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data, 926 u32 offset, u32 size, u32 *written) 927 { 928 struct cxl_memdev_state *mds = fwl->dd_handle; 929 struct cxl_dev_state *cxlds = &mds->cxlds; 930 struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; 931 struct cxl_memdev *cxlmd = cxlds->cxlmd; 932 struct cxl_mbox_transfer_fw *transfer; 933 struct cxl_mbox_cmd mbox_cmd; 934 u32 cur_size, remaining; 935 size_t size_in; 936 int rc; 937 938 *written = 0; 939 940 /* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */ 941 if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) { 942 dev_err(&cxlmd->dev, 943 "misaligned offset for FW transfer slice (%u)\n", 944 offset); 945 return FW_UPLOAD_ERR_RW_ERROR; 946 } 947 948 /* 949 * Pick transfer size based on mds->payload_size @size must bw 128-byte 950 * aligned, ->payload_size is a power of 2 starting at 256 bytes, and 951 * sizeof(*transfer) is 128. These constraints imply that @cur_size 952 * will always be 128b aligned. 953 */ 954 cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer)); 955 956 remaining = size - cur_size; 957 size_in = struct_size(transfer, data, cur_size); 958 959 if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 960 return cxl_fw_do_cancel(fwl); 961 962 /* 963 * Slot numbers are 1-indexed 964 * cur_slot is the 0-indexed next_slot (i.e. 
        /*
         * Slot numbers are 1-indexed
         * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1')
         * Check for rollover using modulo, and 1-index it by adding 1
         */
        mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;

        /* Do the transfer via mailbox cmd */
        transfer = kzalloc(size_in, GFP_KERNEL);
        if (!transfer)
                return FW_UPLOAD_ERR_RW_ERROR;

        transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
        memcpy(transfer->data, data + offset, cur_size);
        if (mds->fw.oneshot) {
                transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
                transfer->slot = mds->fw.next_slot;
        } else {
                if (offset == 0) {
                        transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
                } else if (remaining == 0) {
                        transfer->action = CXL_FW_TRANSFER_ACTION_END;
                        transfer->slot = mds->fw.next_slot;
                } else {
                        transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
                }
        }

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_TRANSFER_FW,
                .size_in = size_in,
                .payload_in = transfer,
                .poll_interval_ms = 1000,
                .poll_count = 30,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0) {
                rc = FW_UPLOAD_ERR_RW_ERROR;
                goto out_free;
        }

        *written = cur_size;

        /* Activate FW if oneshot or if the last slice was written */
        if (mds->fw.oneshot || remaining == 0) {
                dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
                        mds->fw.next_slot);
                rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
                if (rc < 0) {
                        dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
                                rc);
                        rc = FW_UPLOAD_ERR_HW_ERROR;
                        goto out_free;
                }
        }

        rc = FW_UPLOAD_ERR_NONE;

out_free:
        kfree(transfer);
        return rc;
}
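/*
 * Slicing example with illustrative numbers: a 256-byte mailbox payload
 * minus the 128-byte transfer header leaves 128 bytes of image data per
 * ->write() slice, so a large image is sent as INITIATE, then CONTINUEs,
 * then END; an image that fits in a single payload goes out as one FULL
 * action instead (the "oneshot" case computed in cxl_fw_prepare()).
 */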
1035 */ 1036 if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state)) 1037 return cxl_fw_do_cancel(fwl); 1038 1039 return FW_UPLOAD_ERR_NONE; 1040 } 1041 1042 static void cxl_fw_cancel(struct fw_upload *fwl) 1043 { 1044 struct cxl_memdev_state *mds = fwl->dd_handle; 1045 1046 set_bit(CXL_FW_CANCEL, mds->fw.state); 1047 } 1048 1049 static const struct fw_upload_ops cxl_memdev_fw_ops = { 1050 .prepare = cxl_fw_prepare, 1051 .write = cxl_fw_write, 1052 .poll_complete = cxl_fw_poll_complete, 1053 .cancel = cxl_fw_cancel, 1054 .cleanup = cxl_fw_cleanup, 1055 }; 1056 1057 static void cxl_remove_fw_upload(void *fwl) 1058 { 1059 firmware_upload_unregister(fwl); 1060 } 1061 1062 int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds) 1063 { 1064 struct cxl_dev_state *cxlds = &mds->cxlds; 1065 struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox; 1066 struct device *dev = &cxlds->cxlmd->dev; 1067 struct fw_upload *fwl; 1068 1069 if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds)) 1070 return 0; 1071 1072 fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev), 1073 &cxl_memdev_fw_ops, mds); 1074 if (IS_ERR(fwl)) 1075 return PTR_ERR(fwl); 1076 return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl); 1077 } 1078 EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL"); 1079 1080 static const struct file_operations cxl_memdev_fops = { 1081 .owner = THIS_MODULE, 1082 .unlocked_ioctl = cxl_memdev_ioctl, 1083 .open = cxl_memdev_open, 1084 .release = cxl_memdev_release_file, 1085 .compat_ioctl = compat_ptr_ioctl, 1086 .llseek = noop_llseek, 1087 }; 1088 1089 /* 1090 * Activate ioctl operations, no cxl_memdev_rwsem manipulation needed as this is 1091 * ordered with cdev_add() publishing the device. 1092 */ 1093 static int cxlmd_add(struct cxl_memdev *cxlmd, struct cxl_dev_state *cxlds) 1094 { 1095 int rc; 1096 1097 cxlmd->cxlds = cxlds; 1098 cxlds->cxlmd = cxlmd; 1099 1100 rc = cdev_device_add(&cxlmd->cdev, &cxlmd->dev); 1101 if (rc) { 1102 /* 1103 * The cdev was briefly live, shutdown any ioctl operations that 1104 * saw that state. 1105 */ 1106 cxl_memdev_shutdown(&cxlmd->dev); 1107 return rc; 1108 } 1109 1110 return 0; 1111 } 1112 1113 DEFINE_FREE(put_cxlmd, struct cxl_memdev *, 1114 if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev)) 1115 1116 static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd) 1117 { 1118 /* 1119 * If @attach is provided fail if the driver is not attached upon 1120 * return. Note that failure here could be the result of a race to 1121 * teardown the CXL port topology. I.e. cxl_mem_probe() could have 1122 * succeeded and then cxl_mem unbound before the lock is acquired. 1123 */ 1124 guard(device)(&cxlmd->dev); 1125 return (cxlmd->attach && !cxlmd->dev.driver); 1126 } 1127 1128 static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd) 1129 { 1130 int rc; 1131 1132 if (cxl_memdev_attach_failed(cxlmd)) { 1133 cxl_memdev_unregister(cxlmd); 1134 return ERR_PTR(-ENXIO); 1135 } 1136 1137 rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister, 1138 cxlmd); 1139 if (rc) 1140 return ERR_PTR(rc); 1141 1142 return cxlmd; 1143 } 1144 1145 /* 1146 * Core helper for devm_cxl_add_memdev() that wants to both create a device and 1147 * assert to the caller that upon return cxl_mem::probe() has been invoked. 
1148 */ 1149 struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds, 1150 const struct cxl_memdev_attach *attach) 1151 { 1152 struct device *dev; 1153 int rc; 1154 1155 struct cxl_memdev *cxlmd __free(put_cxlmd) = 1156 cxl_memdev_alloc(cxlds, &cxl_memdev_fops, attach); 1157 if (IS_ERR(cxlmd)) 1158 return cxlmd; 1159 1160 dev = &cxlmd->dev; 1161 rc = dev_set_name(dev, "mem%d", cxlmd->id); 1162 if (rc) 1163 return ERR_PTR(rc); 1164 1165 rc = cxlmd_add(cxlmd, cxlds); 1166 if (rc) 1167 return ERR_PTR(rc); 1168 1169 return cxl_memdev_autoremove(no_free_ptr(cxlmd)); 1170 } 1171 EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_memdev, "cxl_mem"); 1172 1173 static void sanitize_teardown_notifier(void *data) 1174 { 1175 struct cxl_memdev_state *mds = data; 1176 struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; 1177 struct kernfs_node *state; 1178 1179 /* 1180 * Prevent new irq triggered invocations of the workqueue and 1181 * flush inflight invocations. 1182 */ 1183 mutex_lock(&cxl_mbox->mbox_mutex); 1184 state = mds->security.sanitize_node; 1185 mds->security.sanitize_node = NULL; 1186 mutex_unlock(&cxl_mbox->mbox_mutex); 1187 1188 cancel_delayed_work_sync(&mds->security.poll_dwork); 1189 sysfs_put(state); 1190 } 1191 1192 int devm_cxl_sanitize_setup_notifier(struct device *host, 1193 struct cxl_memdev *cxlmd) 1194 { 1195 struct cxl_dev_state *cxlds = cxlmd->cxlds; 1196 struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); 1197 struct kernfs_node *sec; 1198 1199 if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds)) 1200 return 0; 1201 1202 /* 1203 * Note, the expectation is that @cxlmd would have failed to be 1204 * created if these sysfs_get_dirent calls fail. 1205 */ 1206 sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security"); 1207 if (!sec) 1208 return -ENOENT; 1209 mds->security.sanitize_node = sysfs_get_dirent(sec, "state"); 1210 sysfs_put(sec); 1211 if (!mds->security.sanitize_node) 1212 return -ENOENT; 1213 1214 return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds); 1215 } 1216 EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL"); 1217 1218 __init int cxl_memdev_init(void) 1219 { 1220 dev_t devt; 1221 int rc; 1222 1223 rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl"); 1224 if (rc) 1225 return rc; 1226 1227 cxl_mem_major = MAJOR(devt); 1228 1229 return 0; 1230 } 1231 1232 void cxl_memdev_exit(void) 1233 { 1234 unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS); 1235 } 1236