// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */

#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <cxlmem.h>
#include "trace.h"
#include "core.h"

static DECLARE_RWSEM(cxl_memdev_rwsem);

/*
 * An entire PCI topology full of devices should be enough for any
 * config
 */
#define CXL_MEM_MAX_DEVS 65536

static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);

static void cxl_memdev_release(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        ida_free(&cxl_memdev_ida, cxlmd->id);
        kfree(cxlmd);
}

static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode,
                                kuid_t *uid, kgid_t *gid)
{
        return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
}

static ssize_t firmware_version_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%.16s\n", mds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t payload_max_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%zu\n", cxlds->cxl_mbox.payload_size);
}
static DEVICE_ATTR_RO(payload_max);

static ssize_t label_storage_size_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

        if (!mds)
                return sysfs_emit(buf, "\n");
        return sysfs_emit(buf, "%zu\n", mds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
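/*
 * Illustrative (not normative) view of the identity attributes above,
 * assuming a memdev that enumerated as mem0:
 *
 *   $ cat /sys/bus/cxl/devices/mem0/firmware_version
 *   $ cat /sys/bus/cxl/devices/mem0/payload_max
 *   $ cat /sys/bus/cxl/devices/mem0/label_storage_size
 *
 * A cxl_dev_state that is not backed by a cxl_memdev_state emits an
 * empty line for the mds-derived attributes.
 */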
static resource_size_t cxl_ram_size(struct cxl_dev_state *cxlds)
{
        /* Static RAM is only expected at partition 0. */
        if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
                return 0;
        return resource_size(&cxlds->part[0].res);
}

static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = cxl_ram_size(cxlds);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_ram_size =
        __ATTR(size, 0444, ram_size_show, NULL);

static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        unsigned long long len = cxl_pmem_size(cxlds);

        return sysfs_emit(buf, "%#llx\n", len);
}

static struct device_attribute dev_attr_pmem_size =
        __ATTR(size, 0444, pmem_size_show, NULL);

static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%#llx\n", cxlds->serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t security_state_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        unsigned long state = mds->security.state;
        int rc = 0;

        /* sync with latest submission state */
        mutex_lock(&cxl_mbox->mbox_mutex);
        if (mds->security.sanitize_active)
                rc = sysfs_emit(buf, "sanitize\n");
        mutex_unlock(&cxl_mbox->mbox_mutex);
        if (rc)
                return rc;

        if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
                return sysfs_emit(buf, "disabled\n");
        if (state & CXL_PMEM_SEC_STATE_FROZEN ||
            state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT ||
            state & CXL_PMEM_SEC_STATE_USER_PLIMIT)
                return sysfs_emit(buf, "frozen\n");
        if (state & CXL_PMEM_SEC_STATE_LOCKED)
                return sysfs_emit(buf, "locked\n");

        return sysfs_emit(buf, "unlocked\n");
}
static struct device_attribute dev_attr_security_state =
        __ATTR(state, 0444, security_state_show, NULL);

static ssize_t security_sanitize_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        bool sanitize;
        ssize_t rc;

        if (kstrtobool(buf, &sanitize) || !sanitize)
                return -EINVAL;

        rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
        if (rc)
                return rc;

        return len;
}
static struct device_attribute dev_attr_security_sanitize =
        __ATTR(sanitize, 0200, NULL, security_sanitize_store);

static ssize_t security_erase_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t len)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        ssize_t rc;
        bool erase;

        if (kstrtobool(buf, &erase) || !erase)
                return -EINVAL;

        rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
        if (rc)
                return rc;

        return len;
}
static struct device_attribute dev_attr_security_erase =
        __ATTR(erase, 0200, NULL, security_erase_store);
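/*
 * Illustrative usage of the security attributes above, assuming mem0
 * and assuming the device advertises the corresponding commands (the
 * group's is_visible callback below hides unsupported entries):
 *
 *   $ cat /sys/bus/cxl/devices/mem0/security/state
 *   unlocked
 *   $ echo 1 > /sys/bus/cxl/devices/mem0/security/sanitize
 *
 * While a sanitize is in flight, "state" reports "sanitize"; anything
 * other than a kstrtobool() true value is rejected with -EINVAL.
 */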
bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
                               enum poison_cmd_enabled_bits cmd)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

        if (!mds)
                return false;

        return test_bit(cmd, mds->poison.enabled_cmds);
}

static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        u64 offset, length;
        int rc = 0;

        /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */
        for (int i = 0; i < cxlds->nr_partitions; i++) {
                const struct resource *res = &cxlds->part[i].res;

                offset = res->start;
                length = resource_size(res);
                rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
                /*
                 * Invalid Physical Address is not an error for
                 * volatile addresses. Device support is optional.
                 */
                if (rc == -EFAULT && cxlds->part[i].mode == CXL_PARTMODE_RAM)
                        rc = 0;
        }
        return rc;
}

int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
{
        struct cxl_port *port;
        int rc;

        port = cxlmd->endpoint;
        if (!port || !is_cxl_endpoint(port))
                return -EINVAL;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        if (cxl_num_decoders_committed(port) == 0) {
                /* No regions mapped to this memdev */
                rc = cxl_get_poison_by_memdev(cxlmd);
        } else {
                /* Regions mapped, collect poison by endpoint */
                rc = cxl_get_poison_by_endpoint(port);
        }

        return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, "CXL");

static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        if (!resource_size(&cxlds->dpa_res)) {
                dev_dbg(cxlds->dev, "device has no dpa resource\n");
                return -EINVAL;
        }
        if (!cxl_resource_contains_addr(&cxlds->dpa_res, dpa)) {
                dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n",
                        dpa, &cxlds->dpa_res);
                return -EINVAL;
        }
        if (!IS_ALIGNED(dpa, 64)) {
                dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa);
                return -EINVAL;
        }

        return 0;
}
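/*
 * Illustrative poison flow, assuming CONFIG_DEBUG_FS=y and the debugfs
 * knobs that the endpoint driver is expected to wire up on top of the
 * helpers below (paths are an assumption, not an ABI statement):
 *
 *   # echo 0x40000000 > /sys/kernel/debug/cxl/mem0/inject_poison
 *   # echo 0x40000000 > /sys/kernel/debug/cxl/mem0/clear_poison
 *
 * The DPA must be 64-byte aligned and fall inside cxlds->dpa_res, per
 * cxl_validate_poison_dpa() above.
 */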
int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
        struct cxl_mbox_inject_poison inject;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_region *cxlr;
        int rc;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        lockdep_assert_held(&cxl_rwsem.dpa);
        lockdep_assert_held(&cxl_rwsem.region);

        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                return rc;

        inject.address = cpu_to_le64(dpa);
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_INJECT_POISON,
                .size_in = sizeof(inject),
                .payload_in = &inject,
        };
        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
                dev_warn_once(cxl_mbox->host,
                              "poison inject dpa:%#llx region: %s\n", dpa,
                              dev_name(&cxlr->dev));

        record = (struct cxl_poison_record) {
                .address = cpu_to_le64(dpa),
                .length = cpu_to_le32(1),
        };
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT);

        return 0;
}

int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
        int rc;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        return cxl_inject_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");

int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
        struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
        struct cxl_mbox_clear_poison clear;
        struct cxl_poison_record record;
        struct cxl_mbox_cmd mbox_cmd;
        struct cxl_region *cxlr;
        int rc;

        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return 0;

        lockdep_assert_held(&cxl_rwsem.dpa);
        lockdep_assert_held(&cxl_rwsem.region);

        rc = cxl_validate_poison_dpa(cxlmd, dpa);
        if (rc)
                return rc;

        /*
         * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command
         * is defined to accept 64 bytes of write-data, along with the
         * address to clear. This driver uses zeroes as write-data.
         */
        clear = (struct cxl_mbox_clear_poison) {
                .address = cpu_to_le64(dpa)
        };

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_CLEAR_POISON,
                .size_in = sizeof(clear),
                .payload_in = &clear,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc)
                return rc;

        cxlr = cxl_dpa_to_region(cxlmd, dpa);
        if (cxlr)
                dev_warn_once(cxl_mbox->host,
                              "poison clear dpa:%#llx region: %s\n", dpa,
                              dev_name(&cxlr->dev));

        record = (struct cxl_poison_record) {
                .address = cpu_to_le64(dpa),
                .length = cpu_to_le32(1),
        };
        trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR);

        return 0;
}

int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
{
        int rc;

        ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
                return rc;

        ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
        if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
                return rc;

        return cxl_clear_poison_locked(cxlmd, dpa);
}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");

static struct attribute *cxl_memdev_attributes[] = {
        &dev_attr_serial.attr,
        &dev_attr_firmware_version.attr,
        &dev_attr_payload_max.attr,
        &dev_attr_label_storage_size.attr,
        &dev_attr_numa_node.attr,
        NULL,
};

static struct cxl_dpa_perf *to_pmem_perf(struct cxl_dev_state *cxlds)
{
        for (int i = 0; i < cxlds->nr_partitions; i++)
                if (cxlds->part[i].mode == CXL_PARTMODE_PMEM)
                        return &cxlds->part[i].perf;
        return NULL;
}

static ssize_t pmem_qos_class_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%d\n", to_pmem_perf(cxlds)->qos_class);
}

static struct device_attribute dev_attr_pmem_qos_class =
        __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);

static struct attribute *cxl_memdev_pmem_attributes[] = {
        &dev_attr_pmem_size.attr,
        &dev_attr_pmem_qos_class.attr,
        NULL,
};
static struct cxl_dpa_perf *to_ram_perf(struct cxl_dev_state *cxlds)
{
        if (cxlds->part[0].mode != CXL_PARTMODE_RAM)
                return NULL;
        return &cxlds->part[0].perf;
}

static ssize_t ram_qos_class_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        return sysfs_emit(buf, "%d\n", to_ram_perf(cxlds)->qos_class);
}

static struct device_attribute dev_attr_ram_qos_class =
        __ATTR(qos_class, 0444, ram_qos_class_show, NULL);

static struct attribute *cxl_memdev_ram_attributes[] = {
        &dev_attr_ram_size.attr,
        &dev_attr_ram_qos_class.attr,
        NULL,
};

static struct attribute *cxl_memdev_security_attributes[] = {
        &dev_attr_security_state.attr,
        &dev_attr_security_sanitize.attr,
        &dev_attr_security_erase.attr,
        NULL,
};

static umode_t cxl_memdev_visible(struct kobject *kobj, struct attribute *a,
                                  int n)
{
        if (!IS_ENABLED(CONFIG_NUMA) && a == &dev_attr_numa_node.attr)
                return 0;
        return a->mode;
}

static struct attribute_group cxl_memdev_attribute_group = {
        .attrs = cxl_memdev_attributes,
        .is_visible = cxl_memdev_visible,
};

static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dpa_perf *perf = to_ram_perf(cxlmd->cxlds);

        if (a == &dev_attr_ram_qos_class.attr &&
            (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_ram_attribute_group = {
        .name = "ram",
        .attrs = cxl_memdev_ram_attributes,
        .is_visible = cxl_ram_visible,
};

static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dpa_perf *perf = to_pmem_perf(cxlmd->cxlds);

        if (a == &dev_attr_pmem_qos_class.attr &&
            (!perf || perf->qos_class == CXL_QOS_CLASS_INVALID))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_pmem_attribute_group = {
        .name = "pmem",
        .attrs = cxl_memdev_pmem_attributes,
        .is_visible = cxl_pmem_visible,
};

static umode_t cxl_memdev_security_visible(struct kobject *kobj,
                                           struct attribute *a, int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);

        if (a == &dev_attr_security_sanitize.attr &&
            !test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
                return 0;

        if (a == &dev_attr_security_erase.attr &&
            !test_bit(CXL_SEC_ENABLED_SECURE_ERASE, mds->security.enabled_cmds))
                return 0;

        return a->mode;
}

static struct attribute_group cxl_memdev_security_attribute_group = {
        .name = "security",
        .attrs = cxl_memdev_security_attributes,
        .is_visible = cxl_memdev_security_visible,
};

static const struct attribute_group *cxl_memdev_attribute_groups[] = {
        &cxl_memdev_attribute_group,
        &cxl_memdev_ram_attribute_group,
        &cxl_memdev_pmem_attribute_group,
        &cxl_memdev_security_attribute_group,
        NULL,
};
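/*
 * Resulting sysfs layout for a hypothetical mem0 with both partitions
 * and security commands enabled (entries hidden by the is_visible
 * callbacks above are elided):
 *
 *   /sys/bus/cxl/devices/mem0/
 *   |-- serial, firmware_version, payload_max, label_storage_size, numa_node
 *   |-- ram/      {size, qos_class}
 *   |-- pmem/     {size, qos_class}
 *   `-- security/ {state, sanitize, erase}
 *
 * ram/qos_class and pmem/qos_class only appear once QoS data is known,
 * hence the sysfs_update_group() calls in cxl_memdev_update_perf()
 * below.
 */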
void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
{
        sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
        sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
}
EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, "CXL");

static const struct device_type cxl_memdev_type = {
        .name = "cxl_memdev",
        .release = cxl_memdev_release,
        .devnode = cxl_memdev_devnode,
        .groups = cxl_memdev_attribute_groups,
};

bool is_cxl_memdev(const struct device *dev)
{
        return dev->type == &cxl_memdev_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_memdev, "CXL");

/**
 * set_exclusive_cxl_commands() - atomically disable user cxl commands
 * @mds: The device state to operate on
 * @cmds: bitmap of commands to mark exclusive
 *
 * Grab the cxl_memdev_rwsem in write mode to flush in-flight
 * invocations of the ioctl path and then disable future execution of
 * commands with the command ids set in @cmds.
 */
void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
                                unsigned long *cmds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        guard(rwsem_write)(&cxl_memdev_rwsem);
        bitmap_or(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
                  cmds, CXL_MEM_COMMAND_ID_MAX);
}
EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, "CXL");

/**
 * clear_exclusive_cxl_commands() - atomically enable user cxl commands
 * @mds: The device state to modify
 * @cmds: bitmap of commands to mark available for userspace
 */
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
                                  unsigned long *cmds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        guard(rwsem_write)(&cxl_memdev_rwsem);
        bitmap_andnot(cxl_mbox->exclusive_cmds, cxl_mbox->exclusive_cmds,
                      cmds, CXL_MEM_COMMAND_ID_MAX);
}
EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, "CXL");
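/*
 * Sketch of how a kernel-internal consumer is expected to use the
 * exclusive-command interface, modeled on the cxl_pmem label protocol
 * (illustrative only; the bitmap name is an assumption):
 *
 *      DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 *
 *      set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);
 *      set_exclusive_cxl_commands(mds, exclusive_cmds);
 *      ... kernel owns the label area, ioctl submissions fail ...
 *      clear_exclusive_cxl_commands(mds, exclusive_cmds);
 */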
static void cxl_memdev_shutdown(struct device *dev)
{
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);

        guard(rwsem_write)(&cxl_memdev_rwsem);
        cxlmd->cxlds = NULL;
}

static void cxl_memdev_unregister(void *_cxlmd)
{
        struct cxl_memdev *cxlmd = _cxlmd;
        struct device *dev = &cxlmd->dev;

        cdev_device_del(&cxlmd->cdev, dev);
        cxl_memdev_shutdown(dev);
        put_device(dev);
}

static void detach_memdev(struct work_struct *work)
{
        struct cxl_memdev *cxlmd;

        cxlmd = container_of(work, typeof(*cxlmd), detach_work);

        /*
         * When the creator of @cxlmd sets ->attach it indicates CXL operation
         * is required. In that case, @cxlmd detach escalates to parent device
         * detach.
         */
        if (cxlmd->attach)
                device_release_driver(cxlmd->dev.parent);
        else
                device_release_driver(&cxlmd->dev);
        put_device(&cxlmd->dev);
}

static struct lock_class_key cxl_memdev_key;

static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
                                           const struct file_operations *fops,
                                           const struct cxl_memdev_attach *attach)
{
        struct cxl_memdev *cxlmd;
        struct device *dev;
        struct cdev *cdev;
        int rc;

        cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
        if (!cxlmd)
                return ERR_PTR(-ENOMEM);

        rc = ida_alloc_max(&cxl_memdev_ida, CXL_MEM_MAX_DEVS - 1, GFP_KERNEL);
        if (rc < 0)
                goto err;
        cxlmd->id = rc;
        cxlmd->depth = -1;
        cxlmd->attach = attach;
        cxlmd->endpoint = ERR_PTR(-ENXIO);

        dev = &cxlmd->dev;
        device_initialize(dev);
        lockdep_set_class(&dev->mutex, &cxl_memdev_key);
        dev->parent = cxlds->dev;
        dev->bus = &cxl_bus_type;
        dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
        dev->type = &cxl_memdev_type;
        device_set_pm_not_required(dev);
        INIT_WORK(&cxlmd->detach_work, detach_memdev);

        cdev = &cxlmd->cdev;
        cdev_init(cdev, fops);
        return cxlmd;

err:
        kfree(cxlmd);
        return ERR_PTR(rc);
}

static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
                               unsigned long arg)
{
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        switch (cmd) {
        case CXL_MEM_QUERY_COMMANDS:
                return cxl_query_cmd(cxl_mbox, (void __user *)arg);
        case CXL_MEM_SEND_COMMAND:
                return cxl_send_cmd(cxl_mbox, (void __user *)arg);
        default:
                return -ENOTTY;
        }
}

static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
{
        struct cxl_memdev *cxlmd = file->private_data;
        struct cxl_dev_state *cxlds;

        guard(rwsem_read)(&cxl_memdev_rwsem);
        cxlds = cxlmd->cxlds;
        if (cxlds && cxlds->type == CXL_DEVTYPE_CLASSMEM)
                return __cxl_memdev_ioctl(cxlmd, cmd, arg);

        return -ENXIO;
}

static int cxl_memdev_open(struct inode *inode, struct file *file)
{
        struct cxl_memdev *cxlmd =
                container_of(inode->i_cdev, typeof(*cxlmd), cdev);

        get_device(&cxlmd->dev);
        file->private_data = cxlmd;

        return 0;
}

static int cxl_memdev_release_file(struct inode *inode, struct file *file)
{
        struct cxl_memdev *cxlmd =
                container_of(inode->i_cdev, typeof(*cxlmd), cdev);

        put_device(&cxlmd->dev);

        return 0;
}
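/*
 * Minimal userspace sketch of the query ioctl serviced above, assuming
 * the UAPI definitions from <linux/cxl_mem.h> and a device node created
 * via cxl_memdev_devnode() (error handling elided):
 *
 *      struct cxl_mem_query_commands *q;
 *      int fd = open("/dev/cxl/mem0", O_RDWR);
 *
 *      q = calloc(1, sizeof(*q));         // n_commands == 0: probe count
 *      ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);
 *      printf("%u commands\n", q->n_commands);
 */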
/**
 * cxl_mem_get_fw_info - Get Firmware info
 * @mds: The device data for the operation
 *
 * Retrieve firmware info for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.1 Get FW Info
 */
static int cxl_mem_get_fw_info(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_get_fw_info info;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_GET_FW_INFO,
                .size_out = sizeof(info),
                .payload_out = &info,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0)
                return rc;

        mds->fw.num_slots = info.num_slots;
        mds->fw.cur_slot = FIELD_GET(CXL_FW_INFO_SLOT_INFO_CUR_MASK,
                                     info.slot_info);

        return 0;
}

/**
 * cxl_mem_activate_fw - Activate Firmware
 * @mds: The device data for the operation
 * @slot: slot number to activate
 *
 * Activate firmware in a given slot for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.3 Activate FW
 */
static int cxl_mem_activate_fw(struct cxl_memdev_state *mds, int slot)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_activate_fw activate;
        struct cxl_mbox_cmd mbox_cmd;

        if (slot == 0 || slot > mds->fw.num_slots)
                return -EINVAL;

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_ACTIVATE_FW,
                .size_in = sizeof(activate),
                .payload_in = &activate,
        };

        /* Only offline activation supported for now */
        activate.action = CXL_FW_ACTIVATE_OFFLINE;
        activate.slot = slot;

        return cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
}
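/*
 * Worked example of the slot bounds check above: firmware slots are
 * 1-indexed, so a device reporting num_slots == 2 accepts slot 1 or 2,
 * while slot 0 (reserved) and slot 3+ are rejected with -EINVAL.
 */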
/**
 * cxl_mem_abort_fw_xfer - Abort an in-progress FW transfer
 * @mds: The device data for the operation
 *
 * Abort an in-progress firmware transfer for the device specified.
 *
 * Return: 0 if no error, or the result of the mailbox command.
 *
 * See CXL-3.0 8.2.9.3.2 Transfer FW
 */
static int cxl_mem_abort_fw_xfer(struct cxl_memdev_state *mds)
{
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct cxl_mbox_transfer_fw *transfer;
        struct cxl_mbox_cmd mbox_cmd;
        int rc;

        transfer = kzalloc(struct_size(transfer, data, 0), GFP_KERNEL);
        if (!transfer)
                return -ENOMEM;

        /* Set a 1s poll interval and a total wait time of 30s */
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_TRANSFER_FW,
                .size_in = sizeof(*transfer),
                .payload_in = transfer,
                .poll_interval_ms = 1000,
                .poll_count = 30,
        };

        transfer->action = CXL_FW_TRANSFER_ACTION_ABORT;

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        kfree(transfer);
        return rc;
}

static void cxl_fw_cleanup(struct fw_upload *fwl)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;

        mds->fw.next_slot = 0;
}

static int cxl_fw_do_cancel(struct fw_upload *fwl)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct cxl_memdev *cxlmd = cxlds->cxlmd;
        int rc;

        rc = cxl_mem_abort_fw_xfer(mds);
        if (rc < 0)
                dev_err(&cxlmd->dev, "Error aborting FW transfer: %d\n", rc);

        return FW_UPLOAD_ERR_CANCELED;
}

static enum fw_upload_err cxl_fw_prepare(struct fw_upload *fwl, const u8 *data,
                                         u32 size)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;
        struct cxl_mbox_transfer_fw *transfer;
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

        if (!size)
                return FW_UPLOAD_ERR_INVALID_SIZE;

        mds->fw.oneshot = struct_size(transfer, data, size) <
                          cxl_mbox->payload_size;

        if (cxl_mem_get_fw_info(mds))
                return FW_UPLOAD_ERR_HW_ERROR;

        /*
         * So far no state has been changed, hence no other cleanup is
         * necessary. Simply return the cancelled status.
         */
        if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
                return FW_UPLOAD_ERR_CANCELED;

        return FW_UPLOAD_ERR_NONE;
}
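/*
 * Worked example for the oneshot decision above, assuming a 1 MiB
 * mailbox payload (cxl_mbox->payload_size == SZ_1M) and the 128-byte
 * struct cxl_mbox_transfer_fw header:
 *
 *   size = 256 KiB: struct_size() == 256 KiB + 128 < 1 MiB, so a single
 *       FULL transfer carries the whole image (oneshot).
 *   size = 4 MiB: struct_size() >= 1 MiB, so the image is sliced into
 *       INITIATE / CONTINUE / END transfers by cxl_fw_write() below.
 */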
static enum fw_upload_err cxl_fw_write(struct fw_upload *fwl, const u8 *data,
                                       u32 offset, u32 size, u32 *written)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
        struct cxl_memdev *cxlmd = cxlds->cxlmd;
        struct cxl_mbox_transfer_fw *transfer;
        struct cxl_mbox_cmd mbox_cmd;
        u32 cur_size, remaining;
        size_t size_in;
        int rc;

        *written = 0;

        /* Offset has to be aligned to 128B (CXL-3.0 8.2.9.3.2 Table 8-57) */
        if (!IS_ALIGNED(offset, CXL_FW_TRANSFER_ALIGNMENT)) {
                dev_err(&cxlmd->dev,
                        "misaligned offset for FW transfer slice (%u)\n",
                        offset);
                return FW_UPLOAD_ERR_RW_ERROR;
        }

        /*
         * Pick transfer size based on cxl_mbox->payload_size. @size must be
         * 128-byte aligned, ->payload_size is a power of 2 starting at 256
         * bytes, and sizeof(*transfer) is 128. These constraints imply that
         * @cur_size will always be 128-byte aligned.
         */
        cur_size = min_t(size_t, size, cxl_mbox->payload_size - sizeof(*transfer));

        remaining = size - cur_size;
        size_in = struct_size(transfer, data, cur_size);

        if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
                return cxl_fw_do_cancel(fwl);

        /*
         * Slot numbers are 1-indexed:
         * cur_slot is the 0-indexed next_slot (i.e. 'cur_slot - 1 + 1').
         * Check for rollover using modulo, and 1-index it by adding 1.
         */
        mds->fw.next_slot = (mds->fw.cur_slot % mds->fw.num_slots) + 1;

        /* Do the transfer via mailbox cmd */
        transfer = kzalloc(size_in, GFP_KERNEL);
        if (!transfer)
                return FW_UPLOAD_ERR_RW_ERROR;

        transfer->offset = cpu_to_le32(offset / CXL_FW_TRANSFER_ALIGNMENT);
        memcpy(transfer->data, data + offset, cur_size);
        if (mds->fw.oneshot) {
                transfer->action = CXL_FW_TRANSFER_ACTION_FULL;
                transfer->slot = mds->fw.next_slot;
        } else {
                if (offset == 0) {
                        transfer->action = CXL_FW_TRANSFER_ACTION_INITIATE;
                } else if (remaining == 0) {
                        transfer->action = CXL_FW_TRANSFER_ACTION_END;
                        transfer->slot = mds->fw.next_slot;
                } else {
                        transfer->action = CXL_FW_TRANSFER_ACTION_CONTINUE;
                }
        }

        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_TRANSFER_FW,
                .size_in = size_in,
                .payload_in = transfer,
                .poll_interval_ms = 1000,
                .poll_count = 30,
        };

        rc = cxl_internal_send_cmd(cxl_mbox, &mbox_cmd);
        if (rc < 0) {
                rc = FW_UPLOAD_ERR_RW_ERROR;
                goto out_free;
        }

        *written = cur_size;

        /* Activate FW if oneshot or if the last slice was written */
        if (mds->fw.oneshot || remaining == 0) {
                dev_dbg(&cxlmd->dev, "Activating firmware slot: %d\n",
                        mds->fw.next_slot);
                rc = cxl_mem_activate_fw(mds, mds->fw.next_slot);
                if (rc < 0) {
                        dev_err(&cxlmd->dev, "Error activating firmware: %d\n",
                                rc);
                        rc = FW_UPLOAD_ERR_HW_ERROR;
                        goto out_free;
                }
        }

        rc = FW_UPLOAD_ERR_NONE;

out_free:
        kfree(transfer);
        return rc;
}
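/*
 * Example of the next_slot arithmetic in cxl_fw_write(), assuming
 * num_slots == 3:
 *
 *   cur_slot == 1: next_slot == (1 % 3) + 1 == 2
 *   cur_slot == 3: next_slot == (3 % 3) + 1 == 1   (rollover)
 */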
static enum fw_upload_err cxl_fw_poll_complete(struct fw_upload *fwl)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;

        /*
         * cxl_internal_send_cmd() handles background operations synchronously.
         * No need to wait for completions here - any errors would've been
         * reported and handled during the ->write() call(s).
         * Just check if a cancel request was received, and return success.
         */
        if (test_and_clear_bit(CXL_FW_CANCEL, mds->fw.state))
                return cxl_fw_do_cancel(fwl);

        return FW_UPLOAD_ERR_NONE;
}

static void cxl_fw_cancel(struct fw_upload *fwl)
{
        struct cxl_memdev_state *mds = fwl->dd_handle;

        set_bit(CXL_FW_CANCEL, mds->fw.state);
}

static const struct fw_upload_ops cxl_memdev_fw_ops = {
        .prepare = cxl_fw_prepare,
        .write = cxl_fw_write,
        .poll_complete = cxl_fw_poll_complete,
        .cancel = cxl_fw_cancel,
        .cleanup = cxl_fw_cleanup,
};

static void cxl_remove_fw_upload(void *fwl)
{
        firmware_upload_unregister(fwl);
}

int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
{
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct cxl_mailbox *cxl_mbox = &cxlds->cxl_mbox;
        struct device *dev = &cxlds->cxlmd->dev;
        struct fw_upload *fwl;

        if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, cxl_mbox->enabled_cmds))
                return 0;

        fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
                                       &cxl_memdev_fw_ops, mds);
        if (IS_ERR(fwl))
                return PTR_ERR(fwl);
        return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, "CXL");
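/*
 * Illustrative firmware update flow through the fw_upload class device
 * registered above, assuming mem0 (see
 * Documentation/driver-api/firmware/fw_upload.rst for the ABI):
 *
 *   # echo 1 > /sys/class/firmware/mem0/loading
 *   # cat new-fw.bin > /sys/class/firmware/mem0/data
 *   # echo 0 > /sys/class/firmware/mem0/loading
 *
 * Completion of the final slice (or of a oneshot transfer) triggers the
 * offline Activate FW in cxl_fw_write().
 */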
static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = cxl_memdev_ioctl,
        .open = cxl_memdev_open,
        .release = cxl_memdev_release_file,
        .compat_ioctl = compat_ptr_ioctl,
        .llseek = noop_llseek,
};

/*
 * Activate ioctl operations, no cxl_memdev_rwsem manipulation needed as this is
 * ordered with cdev_add() publishing the device.
 */
static int cxlmd_add(struct cxl_memdev *cxlmd, struct cxl_dev_state *cxlds)
{
        int rc;

        cxlmd->cxlds = cxlds;
        cxlds->cxlmd = cxlmd;

        rc = cdev_device_add(&cxlmd->cdev, &cxlmd->dev);
        if (rc) {
                /*
                 * The cdev was briefly live, shutdown any ioctl operations that
                 * saw that state.
                 */
                cxl_memdev_shutdown(&cxlmd->dev);
                return rc;
        }

        return 0;
}

DEFINE_FREE(put_cxlmd, struct cxl_memdev *,
            if (!IS_ERR_OR_NULL(_T)) put_device(&_T->dev))

static bool cxl_memdev_attach_failed(struct cxl_memdev *cxlmd)
{
        /*
         * If @attach is provided fail if the driver is not attached upon
         * return. Note that failure here could be the result of a race to
         * teardown the CXL port topology. I.e. cxl_mem_probe() could have
         * succeeded and then cxl_mem unbound before the lock is acquired.
         */
        guard(device)(&cxlmd->dev);
        return (cxlmd->attach && !cxlmd->dev.driver);
}

static struct cxl_memdev *cxl_memdev_autoremove(struct cxl_memdev *cxlmd)
{
        int rc;

        if (cxl_memdev_attach_failed(cxlmd)) {
                cxl_memdev_unregister(cxlmd);
                return ERR_PTR(-ENXIO);
        }

        rc = devm_add_action_or_reset(cxlmd->cxlds->dev, cxl_memdev_unregister,
                                      cxlmd);
        if (rc)
                return ERR_PTR(rc);

        return cxlmd;
}

/*
 * Core helper for devm_cxl_add_memdev() that wants to both create a device and
 * assert to the caller that upon return cxl_mem::probe() has been invoked.
 */
struct cxl_memdev *__devm_cxl_add_memdev(struct cxl_dev_state *cxlds,
                                         const struct cxl_memdev_attach *attach)
{
        struct device *dev;
        int rc;

        struct cxl_memdev *cxlmd __free(put_cxlmd) =
                cxl_memdev_alloc(cxlds, &cxl_memdev_fops, attach);
        if (IS_ERR(cxlmd))
                return cxlmd;

        dev = &cxlmd->dev;
        rc = dev_set_name(dev, "mem%d", cxlmd->id);
        if (rc)
                return ERR_PTR(rc);

        rc = cxlmd_add(cxlmd, cxlds);
        if (rc)
                return ERR_PTR(rc);

        return cxl_memdev_autoremove(no_free_ptr(cxlmd));
}
EXPORT_SYMBOL_FOR_MODULES(__devm_cxl_add_memdev, "cxl_mem");

static void sanitize_teardown_notifier(void *data)
{
        struct cxl_memdev_state *mds = data;
        struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
        struct kernfs_node *state;

        /*
         * Prevent new irq triggered invocations of the workqueue and
         * flush inflight invocations.
         */
        mutex_lock(&cxl_mbox->mbox_mutex);
        state = mds->security.sanitize_node;
        mds->security.sanitize_node = NULL;
        mutex_unlock(&cxl_mbox->mbox_mutex);

        cancel_delayed_work_sync(&mds->security.poll_dwork);
        sysfs_put(state);
}

int devm_cxl_sanitize_setup_notifier(struct device *host,
                                     struct cxl_memdev *cxlmd)
{
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
        struct kernfs_node *sec;

        if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
                return 0;

        /*
         * Note, the expectation is that @cxlmd would have failed to be
         * created if these sysfs_get_dirent calls fail.
         */
        sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
        if (!sec)
                return -ENOENT;
        mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
        sysfs_put(sec);
        if (!mds->security.sanitize_node)
                return -ENOENT;

        return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, "CXL");

__init int cxl_memdev_init(void)
{
        dev_t devt;
        int rc;

        rc = alloc_chrdev_region(&devt, 0, CXL_MEM_MAX_DEVS, "cxl");
        if (rc)
                return rc;

        cxl_mem_major = MAJOR(devt);

        return 0;
}

void cxl_memdev_exit(void)
{
        unregister_chrdev_region(MKDEV(cxl_mem_major, 0), CXL_MEM_MAX_DEVS);
}