// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Vendor Specific Extended Capabilities auxiliary bus driver
 *
 * Copyright (c) 2021, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: David E. Box <david.e.box@linux.intel.com>
 *
 * This driver discovers and creates auxiliary devices for Intel defined PCIe
 * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
 * VSEC and DVSEC respectively. The driver supports features on specific PCIe
 * endpoints that exist primarily to expose them.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/intel_vsec.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>

/* Allocation limits for IDs handed out from the shared auxdev_array xarray */
#define PMT_XA_START 0
#define PMT_XA_MAX INT_MAX
#define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX)

/* Separate auxdev ID namespaces: SDSI devices get their own IDA */
static DEFINE_IDA(intel_vsec_ida);
static DEFINE_IDA(intel_vsec_sdsi_ida);
/* All intel_vsec_device instances created by this module, keyed by ->id */
static DEFINE_XARRAY_ALLOC(auxdev_array);

/*
 * Per-capability registration state, tracked in vsec_priv::state.
 * STATE_NOT_FOUND is the zero-initialized default.
 */
enum vsec_device_state {
	STATE_NOT_FOUND,	/* capability not (yet) discovered/registered */
	STATE_REGISTERED,	/* auxiliary device successfully added */
	STATE_SKIP,		/* skipped: no driver built, add failed, or supplier missing */
};

/* Driver data attached to the PCI device probed by this driver */
struct vsec_priv {
	const struct intel_vsec_platform_info *info;	/* platform match data */
	struct device *suppliers[VSEC_FEATURE_COUNT];	/* auxdev per cap, for device links */
	struct oobmsm_plat_info plat_info;		/* mapping stored via intel_vsec_set_mapping() */
	enum vsec_device_state state[VSEC_FEATURE_COUNT];
	unsigned long found_caps;			/* bitmap of capability IDs seen so far */
};

/* Map a VSEC/DVSEC capability ID to the auxiliary device name, or NULL */
static const char *intel_vsec_name(enum intel_vsec_id id)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return "telemetry";

	case VSEC_ID_WATCHER:
		return "watcher";

	case VSEC_ID_CRASHLOG:
		return "crashlog";

	case VSEC_ID_SDSI:
		return "sdsi";

	case VSEC_ID_TPMI:
		return "tpmi";

	case VSEC_ID_DISCOVERY:
		return "discovery";

	default:
		return NULL;
	}
}

/* Return true if capability @id is enabled in the platform's @caps mask */
static bool intel_vsec_supported(u16 id, unsigned long caps)
{
	switch (id) {
	case VSEC_ID_TELEMETRY:
		return !!(caps & VSEC_CAP_TELEMETRY);
	case VSEC_ID_WATCHER:
		return !!(caps & VSEC_CAP_WATCHER);
	case VSEC_ID_CRASHLOG:
		return !!(caps & VSEC_CAP_CRASHLOG);
	case VSEC_ID_SDSI:
		return !!(caps & VSEC_CAP_SDSI);
	case VSEC_ID_TPMI:
		return !!(caps & VSEC_CAP_TPMI);
	case VSEC_ID_DISCOVERY:
		return !!(caps & VSEC_CAP_DISCOVERY);
	default:
		return false;
	}
}

/* devm action: tear down an auxiliary device (delete then uninit) */
static void intel_vsec_remove_aux(void *data)
{
	auxiliary_device_delete(data);
	auxiliary_device_uninit(data);
}

/*
 * Device release callback: drop the xarray slot and IDA id, then free
 * everything owned by the intel_vsec_device (acpi_disc copy, resource
 * array, and the device itself).
 */
static void intel_vsec_dev_release(struct device *dev)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);

	xa_erase(&auxdev_array, intel_vsec_dev->id);

	ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);

	kfree(intel_vsec_dev->acpi_disc);
	kfree(intel_vsec_dev->resource);
	kfree(intel_vsec_dev);
}

/*
 * Find the dependency entry whose consumer feature bit matches @cap_id.
 * Returns NULL if the platform declares no dependencies or none match.
 */
static const struct vsec_feature_dependency *
get_consumer_dependencies(struct vsec_priv *priv, int cap_id)
{
	const struct vsec_feature_dependency *deps = priv->info->deps;
	int consumer_id = priv->info->num_deps;

	if (!deps)
		return NULL;

	while (consumer_id--)
		if (deps[consumer_id].feature == BIT(cap_id))
			return &deps[consumer_id];

	return NULL;
}

/* Is the consumer driver for this capability built into the kernel config? */
static bool vsec_driver_present(int cap_id)
{
	unsigned long bit = BIT(cap_id);

	switch (bit) {
	case VSEC_CAP_TELEMETRY:
		return IS_ENABLED(CONFIG_INTEL_PMT_TELEMETRY);
	case VSEC_CAP_WATCHER:
		return IS_ENABLED(CONFIG_INTEL_PMT_WATCHER);
	case VSEC_CAP_CRASHLOG:
		return IS_ENABLED(CONFIG_INTEL_PMT_CRASHLOG);
	case VSEC_CAP_SDSI:
		return IS_ENABLED(CONFIG_INTEL_SDSI);
	case VSEC_CAP_TPMI:
		return IS_ENABLED(CONFIG_INTEL_TPMI);
	case VSEC_CAP_DISCOVERY:
		return IS_ENABLED(CONFIG_INTEL_PMT_DISCOVERY);
	default:
		return false;
	}
}

/*
 * Although pci_device_id table is available in the pdev, this prototype is
 * necessary because the code using it can be called by an exported API that
 * might pass a different pdev.
 */
static const struct pci_device_id intel_vsec_pci_ids[];

/*
 * Create device links from the consumer auxdev @dev to every registered
 * supplier it depends on, so consumer drivers probe after their suppliers.
 * Silently succeeds (returns 0) for non-PCI parents, parents not probed by
 * this driver, or consumers with no declared dependencies.
 */
static int intel_vsec_link_devices(struct device *parent, struct device *dev,
				   int consumer_id)
{
	const struct vsec_feature_dependency *deps;
	enum vsec_device_state *state;
	struct device **suppliers;
	struct vsec_priv *priv;
	struct pci_dev *pdev;
	int supplier_id;

	if (!consumer_id)
		return 0;

	if (!dev_is_pci(parent))
		return 0;

	pdev = to_pci_dev(parent);
	if (!pci_match_id(intel_vsec_pci_ids, pdev))
		return 0;

	priv = pci_get_drvdata(pdev);
	state = priv->state;
	suppliers = priv->suppliers;

	/* Record this device so later consumers can link against it */
	priv->suppliers[consumer_id] = dev;

	deps = get_consumer_dependencies(priv, consumer_id);
	if (!deps)
		return 0;

	for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) {
		struct device_link *link;

		/* Only link to suppliers that registered and have a driver */
		if (state[supplier_id] != STATE_REGISTERED ||
		    !vsec_driver_present(supplier_id))
			continue;

		if (!suppliers[supplier_id]) {
			dev_err(dev, "Bad supplier list\n");
			return -EINVAL;
		}

		link = device_link_add(dev, suppliers[supplier_id],
				       DL_FLAG_AUTOPROBE_CONSUMER);
		if (!link)
			return -EINVAL;
	}

	return 0;
}

/*
 * intel_vsec_add_aux - register an intel_vsec_device on the auxiliary bus
 * @parent: device the auxdev (and its devm cleanup action) hangs off
 * @intel_vsec_dev: fully initialized device; ownership transfers here and
 *                  it is freed on any failure path (via kfree or release)
 * @name: auxiliary device name (see intel_vsec_name())
 *
 * Returns 0 on success or a negative errno. On success, removal is tied to
 * @parent via devm_add_action_or_reset().
 */
int intel_vsec_add_aux(struct device *parent,
		       struct intel_vsec_device *intel_vsec_dev,
		       const char *name)
{
	struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
	int ret, id;

	if (!parent)
		return -EINVAL;

	ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev,
		       PMT_XA_LIMIT, GFP_KERNEL);
	if (ret < 0) {
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return ret;
	}

	id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
	if (id < 0) {
		xa_erase(&auxdev_array, intel_vsec_dev->id);
		kfree(intel_vsec_dev->resource);
		kfree(intel_vsec_dev);
		return id;
	}

	auxdev->id = id;
	auxdev->name = name;
	auxdev->dev.parent = parent;
	auxdev->dev.release = intel_vsec_dev_release;

	ret = auxiliary_device_init(auxdev);
	if (ret < 0) {
		/* init failed before refcounting took over; release manually */
		intel_vsec_dev_release(&auxdev->dev);
		return ret;
	}

	/*
	 * Assign a name now to ensure that the device link doesn't contain
	 * a null string for the consumer name. This is a problem when a supplier
	 * supplies more than one consumer and can lead to a duplicate name error
	 * when the link is created in sysfs.
	 */
	ret = dev_set_name(&auxdev->dev, "%s.%s.%d", KBUILD_MODNAME, auxdev->name,
			   auxdev->id);
	if (ret)
		goto cleanup_aux;

	ret = intel_vsec_link_devices(parent, &auxdev->dev, intel_vsec_dev->cap_id);
	if (ret)
		goto cleanup_aux;

	ret = auxiliary_device_add(auxdev);
	if (ret)
		goto cleanup_aux;

	return devm_add_action_or_reset(parent, intel_vsec_remove_aux,
					auxdev);

cleanup_aux:
	/* uninit drops the last reference; release callback frees the rest */
	auxiliary_device_uninit(auxdev);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, "INTEL_VSEC");

/*
 * Validate a discovered VSEC/DVSEC header, build the MMIO resource array
 * describing its discovery tables, and hand the resulting device off to
 * intel_vsec_add_aux(). @base_addr is the start of the BAR the table
 * offset is relative to. Returns 0 or a negative errno.
 */
static int intel_vsec_add_dev(struct device *dev, struct intel_vsec_header *header,
			      const struct intel_vsec_platform_info *info,
			      unsigned long cap_id, u64 base_addr)
{
	struct intel_vsec_device __free(kfree) *intel_vsec_dev = NULL;
	struct resource __free(kfree) *res = NULL;
	struct resource *tmp;
	struct device *parent;
	unsigned long quirks = info->quirks;
	int i;

	/* Platform data may redirect the auxdev to a different parent */
	if (info->parent)
		parent = info->parent;
	else
		parent = dev;

	if (!intel_vsec_supported(header->id, info->caps))
		return -EINVAL;

	if (!header->num_entries) {
		dev_dbg(dev, "Invalid 0 entry count for header id %d\n", header->id);
		return -EINVAL;
	}

	if (!header->entry_size) {
		dev_dbg(dev, "Invalid 0 entry size for header id %d\n", header->id);
		return -EINVAL;
	}

	intel_vsec_dev = kzalloc_obj(*intel_vsec_dev);
	if (!intel_vsec_dev)
		return -ENOMEM;

	res = kzalloc_objs(*res, header->num_entries);
	if (!res)
		return -ENOMEM;

	/* Some early HW reports the table offset pre-shifted */
	if (quirks & VSEC_QUIRK_TABLE_SHIFT)
		header->offset >>= TABLE_OFFSET_SHIFT;

	/*
	 * The DVSEC/VSEC contains the starting offset and count for a block of
	 * discovery tables. Create a resource array of these tables to the
	 * auxiliary device driver.
	 */
	for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
		/*
		 * Skip resource mapping check for ACPI-based discovery
		 * since those tables are read from _DSD, not MMIO.
		 */
		if (info->src == INTEL_VSEC_DISC_ACPI)
			break;

		/* entry_size is in dwords */
		tmp->start = base_addr + header->offset + i * (header->entry_size * sizeof(u32));
		tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
		tmp->flags = IORESOURCE_MEM;

		/* Check resource is not in use */
		if (!request_mem_region(tmp->start, resource_size(tmp), ""))
			return -EBUSY;

		release_mem_region(tmp->start, resource_size(tmp));
	}

	intel_vsec_dev->dev = dev;
	intel_vsec_dev->resource = no_free_ptr(res);
	intel_vsec_dev->num_resources = header->num_entries;
	intel_vsec_dev->quirks = info->quirks;
	intel_vsec_dev->base_addr = info->base_addr;
	intel_vsec_dev->priv_data = info->priv_data;
	intel_vsec_dev->cap_id = cap_id;
	intel_vsec_dev->src = info->src;

	if (info->src == INTEL_VSEC_DISC_ACPI) {
		size_t bytes;

		/* Guard the num_resources * entry size multiplication */
		if (check_mul_overflow(intel_vsec_dev->num_resources,
				       sizeof(*info->acpi_disc), &bytes))
			return -EOVERFLOW;

		intel_vsec_dev->acpi_disc = kmemdup(info->acpi_disc, bytes, GFP_KERNEL);
		if (!intel_vsec_dev->acpi_disc)
			return -ENOMEM;
	}

	/* SDSI devices number from their own IDA namespace */
	if (header->id == VSEC_ID_SDSI)
		intel_vsec_dev->ida = &intel_vsec_sdsi_ida;
	else
		intel_vsec_dev->ida = &intel_vsec_ida;

	/*
	 * Pass the ownership of intel_vsec_dev and resource within it to
	 * intel_vsec_add_aux()
	 */
	return intel_vsec_add_aux(parent, no_free_ptr(intel_vsec_dev),
				  intel_vsec_name(header->id));
}

/*
 * Check whether every supplier of @cap_id has been found (or is being
 * skipped). Returns true when the consumer may be registered now.
 */
static bool suppliers_ready(struct vsec_priv *priv,
			    const struct vsec_feature_dependency *consumer_deps,
			    int cap_id)
{
	enum vsec_device_state *state = priv->state;
	int supplier_id;

	/* Caller must pass the dependency entry matching cap_id */
	if (WARN_ON_ONCE(consumer_deps->feature != BIT(cap_id)))
		return false;

	/*
	 * Verify that all required suppliers have been found. Return false
	 * immediately if any are still missing.
	 */
	for_each_set_bit(supplier_id, &consumer_deps->supplier_bitmap, VSEC_FEATURE_COUNT) {
		if (state[supplier_id] == STATE_SKIP)
			continue;

		if (state[supplier_id] == STATE_NOT_FOUND)
			return false;
	}

	/*
	 * All suppliers have been found and the consumer is ready to be
	 * registered.
	 */
	return true;
}

/*
 * Translate a VSEC header ID into its capability bit index (cap_id).
 * Returns -EINVAL for unknown header IDs.
 */
static int get_cap_id(u32 header_id, unsigned long *cap_id)
{
	switch (header_id) {
	case VSEC_ID_TELEMETRY:
		*cap_id = ilog2(VSEC_CAP_TELEMETRY);
		break;
	case VSEC_ID_WATCHER:
		*cap_id = ilog2(VSEC_CAP_WATCHER);
		break;
	case VSEC_ID_CRASHLOG:
		*cap_id = ilog2(VSEC_CAP_CRASHLOG);
		break;
	case VSEC_ID_SDSI:
		*cap_id = ilog2(VSEC_CAP_SDSI);
		break;
	case VSEC_ID_TPMI:
		*cap_id = ilog2(VSEC_CAP_TPMI);
		break;
	case VSEC_ID_DISCOVERY:
		*cap_id = ilog2(VSEC_CAP_DISCOVERY);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Register one discovered capability, honoring dependency ordering for
 * devices probed by this PCI driver. Returns 0 on success, -EEXIST if
 * already handled, -ENODEV if the consumer driver isn't built, -EAGAIN
 * if suppliers are still outstanding (caller retries on a later pass).
 */
static int intel_vsec_register_device(struct device *dev,
				      struct intel_vsec_header *header,
				      const struct intel_vsec_platform_info *info,
				      u64 base_addr)
{
	const struct vsec_feature_dependency *consumer_deps;
	struct vsec_priv *priv;
	struct pci_dev *pdev;
	unsigned long cap_id;
	int ret;

	ret = get_cap_id(header->id, &cap_id);
	if (ret)
		return ret;

	/*
	 * Only track dependencies for devices probed by the VSEC driver.
	 * For others using the exported APIs, add the device directly.
	 */
	if (!dev_is_pci(dev))
		return intel_vsec_add_dev(dev, header, info, cap_id, base_addr);

	pdev = to_pci_dev(dev);
	if (!pci_match_id(intel_vsec_pci_ids, pdev))
		return intel_vsec_add_dev(dev, header, info, cap_id, base_addr);

	priv = pci_get_drvdata(pdev);
	if (priv->state[cap_id] == STATE_REGISTERED ||
	    priv->state[cap_id] == STATE_SKIP)
		return -EEXIST;

	priv->found_caps |= BIT(cap_id);

	if (!vsec_driver_present(cap_id)) {
		priv->state[cap_id] = STATE_SKIP;
		return -ENODEV;
	}

	consumer_deps = get_consumer_dependencies(priv, cap_id);
	if (!consumer_deps || suppliers_ready(priv, consumer_deps, cap_id)) {
		ret = intel_vsec_add_dev(dev, header, info, cap_id, base_addr);
		if (ret)
			priv->state[cap_id] = STATE_SKIP;
		else
			priv->state[cap_id] = STATE_REGISTERED;

		return ret;
	}

	/* Suppliers not all present yet; retry on a later discovery pass */
	return -EAGAIN;
}

/*
 * Register devices from a NULL-terminated static header list supplied via
 * platform info (used when the HW lacks discoverable VSEC/DVSEC).
 */
static int intel_vsec_walk_header(struct device *dev,
				  const struct intel_vsec_platform_info *info)
{
	struct intel_vsec_header **header = info->headers;
	int ret;

	for ( ; *header; header++) {
		ret = intel_vsec_register_device(dev, *header, info, info->base_addr);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Walk all Intel DVSEC extended capabilities in config space and register
 * a device for each. Returns true if at least one device was registered.
 */
static bool intel_vsec_walk_dvsec(struct pci_dev *pdev,
				  const struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		u16 vid;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
		vid = PCI_DVSEC_HEADER1_VID(hdr);
		if (vid != PCI_VENDOR_ID_INTEL)
			continue;

		/* Support only revision 1 */
		header.rev = PCI_DVSEC_HEADER1_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
			continue;
		}

		header.length = PCI_DVSEC_HEADER1_LEN(hdr);

		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
		header.id = PCI_DVSEC_HEADER2_ID(hdr);

		ret = intel_vsec_register_device(&pdev->dev, &header, info,
						 pci_resource_start(pdev, header.tbir));
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

/*
 * Walk all vendor-specific (VSEC) extended capabilities and register a
 * device for each. Returns true if at least one device was registered.
 */
static bool intel_vsec_walk_vsec(struct pci_dev *pdev,
				 const struct intel_vsec_platform_info *info)
{
	bool have_devices = false;
	int pos = 0;

	do {
		struct intel_vsec_header header;
		u32 table, hdr;
		int ret;

		pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
		if (!pos)
			break;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);

		/* Support only revision 1 */
		header.rev = PCI_VNDR_HEADER_REV(hdr);
		if (header.rev != 1) {
			dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
			continue;
		}

		header.id = PCI_VNDR_HEADER_ID(hdr);
		header.length = PCI_VNDR_HEADER_LEN(hdr);

		/* entry, size, and table offset are the same as DVSEC */
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
		pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
		pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);

		header.tbir = INTEL_DVSEC_TABLE_BAR(table);
		header.offset = INTEL_DVSEC_TABLE_OFFSET(table);

		ret = intel_vsec_register_device(&pdev->dev, &header, info,
						 pci_resource_start(pdev, header.tbir));
		if (ret)
			continue;

		have_devices = true;
	} while (true);

	return have_devices;
}

/*
 * intel_vsec_register - exported entry point for non-PCI-probed callers
 * that supply their own static header list via @info.
 */
int intel_vsec_register(struct device *dev,
			const struct intel_vsec_platform_info *info)
{
	if (!dev || !info || !info->headers)
		return -EINVAL;

	return intel_vsec_walk_header(dev, info);
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_register, "INTEL_VSEC");

/* Run all three discovery methods; true if any produced a device */
static bool intel_vsec_get_features(struct pci_dev *pdev,
				    const struct intel_vsec_platform_info *info)
{
	bool found = false;

	/*
	 * Both DVSEC and VSEC capabilities can exist on the same device,
	 * so both intel_vsec_walk_dvsec() and intel_vsec_walk_vsec() must be
	 * called independently. Additionally, intel_vsec_walk_header() is
	 * needed for devices that do not have VSEC/DVSEC but provide the
	 * information via device_data.
	 */
	if (intel_vsec_walk_dvsec(pdev, info))
		found = true;

	if (intel_vsec_walk_vsec(pdev, info))
		found = true;

	if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
	    intel_vsec_walk_header(&pdev->dev, info))
		found = true;

	return found;
}

/*
 * After the first discovery pass, mark any declared supplier that was not
 * found as STATE_SKIP so consumers blocked on it can proceed.
 */
static void intel_vsec_skip_missing_dependencies(struct pci_dev *pdev)
{
	struct vsec_priv *priv = pci_get_drvdata(pdev);
	const struct vsec_feature_dependency *deps = priv->info->deps;
	int consumer_id = priv->info->num_deps;

	while (consumer_id--) {
		int supplier_id;

		deps = &priv->info->deps[consumer_id];

		for_each_set_bit(supplier_id, &deps->supplier_bitmap, VSEC_FEATURE_COUNT) {
			if (!(BIT(supplier_id) & priv->found_caps))
				priv->state[supplier_id] = STATE_SKIP;
		}
	}
}

/*
 * PCI probe: discover capabilities repeatedly (up to one pass per declared
 * capability) so that -EAGAIN consumers get registered once their suppliers
 * appear; missing suppliers are skipped after the first pass.
 */
static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct intel_vsec_platform_info *info;
	struct vsec_priv *priv;
	int num_caps, ret;
	int run_once = 0;
	bool found_any = false;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Saved state is restored in the slot_reset error handler */
	pci_save_state(pdev);
	info = (const struct intel_vsec_platform_info *)id->driver_data;
	if (!info)
		return -EINVAL;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->info = info;
	pci_set_drvdata(pdev, priv);

	num_caps = hweight_long(info->caps);
	while (num_caps--) {
		found_any |= intel_vsec_get_features(pdev, info);

		if (priv->found_caps == info->caps)
			break;

		if (!run_once) {
			intel_vsec_skip_missing_dependencies(pdev);
			run_once = 1;
		}
	}

	if (!found_any)
		return -ENODEV;

	return 0;
}

/*
 * intel_vsec_set_mapping - store OOBMSM platform mapping info for the PCI
 * device that owns @vsec_dev. Called by consumer drivers; the mapping is
 * later retrieved via intel_vsec_get_mapping().
 */
int intel_vsec_set_mapping(struct oobmsm_plat_info *plat_info,
			   struct intel_vsec_device *vsec_dev)
{
	struct vsec_priv *priv;

	if (!dev_is_pci(vsec_dev->dev))
		return -ENODEV;

	priv = pci_get_drvdata(to_pci_dev(vsec_dev->dev));
	if (!priv)
		return -EINVAL;

	priv->plat_info = *plat_info;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_set_mapping, "INTEL_VSEC");

/*
 * intel_vsec_get_mapping - return the stored OOBMSM platform mapping for
 * @pdev, or an ERR_PTR if @pdev is not a device handled by this driver.
 */
struct oobmsm_plat_info *intel_vsec_get_mapping(struct pci_dev *pdev)
{
	struct vsec_priv *priv;

	if (!pci_match_id(intel_vsec_pci_ids, pdev))
		return ERR_PTR(-EINVAL);

	priv = pci_get_drvdata(pdev);
	if (!priv)
		return ERR_PTR(-EINVAL);

	return &priv->plat_info;
}
EXPORT_SYMBOL_NS_GPL(intel_vsec_get_mapping, "INTEL_VSEC");

/* DG1 info: no DVSEC/VSEC in config space, so a static header is used */
static struct intel_vsec_header dg1_header = {
	.length = 0x10,
	.id = 2,
	.num_entries = 1,
	.entry_size = 3,
	.tbir = 0,
	.offset = 0x466000,
};

static struct intel_vsec_header *dg1_headers[] = {
	&dg1_header,
	NULL
};

static const struct intel_vsec_platform_info dg1_info = {
	.caps = VSEC_CAP_TELEMETRY,
	.headers = dg1_headers,
	.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};

/* MTL info */
static const struct intel_vsec_platform_info mtl_info = {
	.caps = VSEC_CAP_TELEMETRY,
};
/* Telemetry on OOBMSM depends on the discovery and TPMI features */
static const struct vsec_feature_dependency oobmsm_deps[] = {
	{
		.feature = VSEC_CAP_TELEMETRY,
		.supplier_bitmap = VSEC_CAP_DISCOVERY | VSEC_CAP_TPMI,
	},
};

/* OOBMSM info */
static const struct intel_vsec_platform_info oobmsm_info = {
	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI |
		VSEC_CAP_DISCOVERY,
	.deps = oobmsm_deps,
	.num_deps = ARRAY_SIZE(oobmsm_deps),
};

/* DMR OOBMSM info */
static const struct intel_vsec_platform_info dmr_oobmsm_info = {
	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI | VSEC_CAP_DISCOVERY,
	.deps = oobmsm_deps,
	.num_deps = ARRAY_SIZE(oobmsm_deps),
};

/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
	.caps = VSEC_CAP_TELEMETRY,
	.quirks = VSEC_QUIRK_TABLE_SHIFT | VSEC_QUIRK_EARLY_HW,
};

/* LNL info */
static const struct intel_vsec_platform_info lnl_info = {
	.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_WATCHER,
};

#define PCI_DEVICE_ID_INTEL_VSEC_ADL		0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1		0x490e
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M		0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S		0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM		0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR	0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL		0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL		0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M		0x647d
#define PCI_DEVICE_ID_INTEL_VSEC_PTL		0xb07d
#define PCI_DEVICE_ID_INTEL_VSEC_WCL		0xfd7d
#define PCI_DEVICE_ID_INTEL_VSEC_NVL		0xd70d
static const struct pci_device_id intel_vsec_pci_ids[] = {
	{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) },
	{ PCI_DEVICE_DATA(INTEL, VSEC_NVL, &mtl_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);

/*
 * AER callback: request a reset for recoverable errors, disconnect on
 * permanent failure. The device is disabled pending slot_reset.
 */
static pci_ers_result_t intel_vsec_pci_error_detected(struct pci_dev *pdev,
						      pci_channel_state_t state)
{
	pci_ers_result_t status = PCI_ERS_RESULT_NEED_RESET;

	dev_info(&pdev->dev, "PCI error detected, state %d", state);

	if (state == pci_channel_io_perm_failure)
		status = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	return status;
}

/*
 * AER slot reset: re-enable the device, tear down every auxiliary device
 * previously created for this pdev (via the devm action), restore saved
 * config state, and re-run probe to rediscover capabilities.
 */
static pci_ers_result_t intel_vsec_pci_slot_reset(struct pci_dev *pdev)
{
	struct intel_vsec_device *intel_vsec_dev;
	pci_ers_result_t status = PCI_ERS_RESULT_DISCONNECT;
	const struct pci_device_id *pci_dev_id;
	unsigned long index;

	dev_info(&pdev->dev, "Resetting PCI slot\n");

	/* Allow the link/device time to settle after reset */
	msleep(2000);
	if (pci_enable_device(pdev)) {
		dev_info(&pdev->dev,
			 "Failed to re-enable PCI device after reset.\n");
		goto out;
	}

	status = PCI_ERS_RESULT_RECOVERED;

	xa_for_each(&auxdev_array, index, intel_vsec_dev) {
		/* check if pdev doesn't match */
		if (&pdev->dev != intel_vsec_dev->dev)
			continue;
		devm_release_action(&pdev->dev, intel_vsec_remove_aux,
				    &intel_vsec_dev->auxdev);
	}
	pci_disable_device(pdev);
	pci_restore_state(pdev);
	pci_dev_id = pci_match_id(intel_vsec_pci_ids, pdev);
	intel_vsec_pci_probe(pdev, pci_dev_id);

out:
	return status;
}

/* AER resume: nothing to do beyond logging; probe already re-ran */
static void intel_vsec_pci_resume(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "Done resuming PCI device\n");
}

static const struct pci_error_handlers intel_vsec_pci_err_handlers = {
	.error_detected = intel_vsec_pci_error_detected,
	.slot_reset = intel_vsec_pci_slot_reset,
	.resume = intel_vsec_pci_resume,
};

static struct pci_driver intel_vsec_pci_driver = {
	.name = "intel_vsec",
	.id_table = intel_vsec_pci_ids,
	.probe = intel_vsec_pci_probe,
	.err_handler = &intel_vsec_pci_err_handlers,
};
module_pci_driver(intel_vsec_pci_driver);

MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
MODULE_LICENSE("GPL v2");