// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel-tpmi : Driver to enumerate TPMI features and create devices
 *
 * Copyright (c) 2023, Intel Corporation.
 * All Rights Reserved.
 *
 * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
 * flexible, extendable and PCIe enumerable MMIO interface for PM features.
 *
 * For example, Intel RAPL (Running Average Power Limit) provides an MMIO
 * interface using TPMI. This has an advantage over the traditional MSR
 * (Model Specific Register) interface, where a thread needs to be scheduled
 * on the target CPU to read or write. Also, the RAPL features vary between
 * CPU models, which results in a lot of model specific code. TPMI instead
 * provides an architectural interface of hierarchical tables and fields,
 * which does not need any model specific implementation.
 *
 * The TPMI interface uses a PCI VSEC structure to expose the location of
 * the MMIO region.
 *
 * This VSEC structure is present in the PCI configuration space of the
 * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
 * driver. The Intel VSEC driver parses VSEC structures present in the PCI
 * configuration space of the given device and creates an auxiliary device
 * object for each of them. In particular, it creates an auxiliary device
 * object representing TPMI that can be bound by an auxiliary driver.
 *
 * This TPMI driver will bind to the TPMI auxiliary device object created
 * by the Intel VSEC driver.
 *
 * The TPMI specification defines a PFS (PM Feature Structure) table.
 * This table is present in the TPMI MMIO region. The starting address
 * of the PFS is derived from the tBIR (BAR Indicator Register) and "Address"
 * fields in the VSEC header.
 *
 * Each TPMI PM feature has one entry in the PFS with a unique TPMI
 * ID and its access details. The TPMI driver creates device nodes
 * for the supported PM features.
 *
 * The names of the devices created by the TPMI driver start with the
 * "intel_vsec.tpmi-" prefix, which is followed by the name of the
 * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
 *
 * The device nodes are created by using the interface intel_vsec_add_aux()
 * provided by the Intel VSEC driver.
 */

#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/security.h>
#include <linux/sizes.h>
#include <linux/string_helpers.h>

#include "vsec.h"
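
/*
 * Illustrative sketch of how a client feature driver consumes the devices
 * created below. The foo_* names are hypothetical and only meant as an
 * example; the device name follows the "intel_vsec.tpmi-" naming described
 * above, and the helpers are the INTEL_TPMI namespace exports from this
 * file, so the client module needs MODULE_IMPORT_NS(INTEL_TPMI):
 *
 *	static int foo_tpmi_probe(struct auxiliary_device *auxdev,
 *				  const struct auxiliary_device_id *id)
 *	{
 *		struct intel_tpmi_plat_info *info = tpmi_get_platform_data(auxdev);
 *		int i, count = tpmi_get_resource_count(auxdev);
 *
 *		for (i = 0; i < count; i++) {
 *			struct resource *res = tpmi_get_resource_at_index(auxdev, i);
 *
 *			if (res)
 *				foo_map_power_domain(info, res);
 *		}
 *
 *		return 0;
 *	}
 *
 *	static const struct auxiliary_device_id foo_tpmi_id_table[] = {
 *		{ .name = "intel_vsec.tpmi-rapl" },
 *		{}
 *	};
 */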

/**
 * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
 * @tpmi_id: TPMI feature identifier (what the feature is and its data format).
 * @num_entries: Number of feature interface instances present in the PFS.
 *		 This represents the maximum number of power domains in the SoC.
 * @entry_size: Interface instance entry size in 32-bit words.
 * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC
 *		register bank in KB.
 * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
 * @reserved: Bits for use in the future.
 *
 * Represents one TPMI feature entry's data in the PFS, retrieved as is
 * from the hardware.
 */
struct intel_tpmi_pfs_entry {
	u64 tpmi_id:8;
	u64 num_entries:8;
	u64 entry_size:16;
	u64 cap_offset:16;
	u64 attribute:2;
	u64 reserved:14;
} __packed;

/**
 * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
 * @pfs_header: PFS header retrieved from the hardware.
 * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
 *		 this offset = "Address" from the VSEC header + PFS Capability
 *		 offset for this feature entry.
 * @vsec_dev: Pointer to the intel_vsec_device structure for this TPMI device.
 *
 * Represents TPMI instance information for one TPMI ID.
 */
struct intel_tpmi_pm_feature {
	struct intel_tpmi_pfs_entry pfs_header;
	u64 vsec_offset;
	struct intel_vsec_device *vsec_dev;
};

/**
 * struct intel_tpmi_info - TPMI information for all IDs in an instance
 * @tpmi_features: Pointer to a list of TPMI feature instances
 * @vsec_dev: Pointer to the intel_vsec_device structure for this TPMI device
 * @feature_count: Number of TPMI instances pointed to by tpmi_features
 * @pfs_start: Start of the PFS offset for the TPMI instances in this device
 * @plat_info: Stores platform info which can be used by the client drivers
 * @tpmi_control_mem: Memory mapped IO for getting control information
 * @dbgfs_dir: debugfs entry pointer
 *
 * Stores the information for all TPMI devices enumerated from a single PCI device.
 */
struct intel_tpmi_info {
	struct intel_tpmi_pm_feature *tpmi_features;
	struct intel_vsec_device *vsec_dev;
	int feature_count;
	u64 pfs_start;
	struct intel_tpmi_plat_info plat_info;
	void __iomem *tpmi_control_mem;
	struct dentry *dbgfs_dir;
};

/**
 * struct tpmi_info_header - CPU package ID to PCI device mapping information
 * @fn: PCI function number
 * @dev: PCI device number
 * @bus: PCI bus number
 * @pkg: CPU package ID
 * @segment: PCI segment ID
 * @partition: Package partition ID
 * @cdie_mask: Bitmap of compute dies in the current partition
 * @reserved: Reserved for future use
 * @lock: When set to 1, the register is locked and becomes read-only
 *	  until the next reset. Not for use by the OS driver.
 *
 * The structure to read the hardware provided mapping information.
 */
struct tpmi_info_header {
	u64 fn:3;
	u64 dev:5;
	u64 bus:8;
	u64 pkg:8;
	u64 segment:8;
	u64 partition:2;
	u64 cdie_mask:16;
	u64 reserved:13;
	u64 lock:1;
} __packed;

/**
 * struct tpmi_feature_state - Structure to read the hardware state of a feature
 * @enabled: Enable state of a feature, 1: enabled, 0: disabled
 * @reserved_1: Reserved for future use
 * @write_blocked: Writes are blocked, meaning all write operations are ignored
 * @read_blocked: Reads are blocked, meaning reads will return 0xFFs
 * @pcs_select: Interface used by out of band software, not used in the OS
 * @reserved_2: Reserved for future use
 * @id: TPMI ID of the feature
 * @reserved_3: Reserved for future use
 * @locked: When set to 1, the OS can't change this register.
 *
 * The structure is used to read the hardware state of a TPMI feature. This
 * information is used for debug and for restricting operations on this feature.
 */
struct tpmi_feature_state {
	u32 enabled:1;
	u32 reserved_1:3;
	u32 write_blocked:1;
	u32 read_blocked:1;
	u32 pcs_select:1;
	u32 reserved_2:1;
	u32 id:8;
	u32 reserved_3:15;
	u32 locked:1;
} __packed;

/*
 * The size from the hardware is in u32 units. This size comes from trusted
 * hardware, but it is better to verify it for pre-silicon platforms. Set the
 * size to 0 when it is invalid.
 */
#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \
({ \
	pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \
})

/* Used during auxbus device creation */
static DEFINE_IDA(intel_vsec_tpmi_ida);

struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	return vsec_dev->priv_data;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);

int tpmi_get_resource_count(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev)
		return vsec_dev->num_resources;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);

struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);

	if (vsec_dev && index < vsec_dev->num_resources)
		return &vsec_dev->resource[index];

	return NULL;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);

/* TPMI Control Interface */

#define TPMI_CONTROL_STATUS_OFFSET	0x00
#define TPMI_COMMAND_OFFSET		0x08
#define TMPI_CONTROL_DATA_VAL_OFFSET	0x0c

/*
 * The spec calls for a maximum of 1 second to get ownership in the worst
 * case. Poll at 10 ms intervals and repeat for up to 1 second.
 */
#define TPMI_CONTROL_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_CONTROL_TIMEOUT_MAX_US	(1 * USEC_PER_SEC)

#define TPMI_RB_TIMEOUT_US		(10 * USEC_PER_MSEC)
#define TPMI_RB_TIMEOUT_MAX_US		USEC_PER_SEC

/* TPMI Control status register defines */

#define TPMI_CONTROL_STATUS_RB		BIT_ULL(0)

#define TPMI_CONTROL_STATUS_OWNER	GENMASK_ULL(5, 4)
#define TPMI_OWNER_NONE			0
#define TPMI_OWNER_IN_BAND		1

#define TPMI_CONTROL_STATUS_CPL		BIT_ULL(6)
#define TPMI_CONTROL_STATUS_RESULT	GENMASK_ULL(15, 8)
#define TPMI_CONTROL_STATUS_LEN		GENMASK_ULL(31, 16)

#define TPMI_CMD_PKT_LEN		2
#define TPMI_CMD_STATUS_SUCCESS		0x40

/* TPMI command data registers */
#define TMPI_CONTROL_DATA_CMD		GENMASK_ULL(7, 0)
#define TPMI_CONTROL_DATA_VAL_FEATURE	GENMASK_ULL(48, 40)

/* Command to send via control interface */
#define TPMI_CONTROL_GET_STATE_CMD	0x10

#define TPMI_CONTROL_CMD_MASK		GENMASK_ULL(48, 40)

#define TPMI_CMD_LEN_MASK		GENMASK_ULL(18, 16)

/* Mutex to complete get feature status without interruption */
static DEFINE_MUTEX(tpmi_dev_lock);

static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
{
	u64 control;

	return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				  control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
				  TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
}

static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
				    struct tpmi_feature_state *feature_state)
{
	u64 control, data;
	int ret;

	if (!tpmi_info->tpmi_control_mem)
		return -EFAULT;

	mutex_lock(&tpmi_dev_lock);

	/* Wait for owner bit set to 0 (none) */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
	if (ret)
		goto err_unlock;

	/* set command id to 0x10 for TPMI_GET_STATE */
	data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);

	/* 32 bits for DATA offset and +8 for feature_id field */
	data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);

	/* Write at command offset for qword access */
	writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);

	/* Wait for owner bit set to in-band */
	ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
	if (ret)
		goto err_unlock;

	/* Set Run Busy and packet length of 2 dwords */
	control = TPMI_CONTROL_STATUS_RB;
	control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);

	/* Write at status offset for qword access */
	writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

	/* Wait for Run Busy clear */
	ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
				 control, !(control & TPMI_CONTROL_STATUS_RB),
				 TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
	if (ret)
		goto done_proc;

	control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
	if (control != TPMI_CMD_STATUS_SUCCESS) {
		ret = -EBUSY;
		goto done_proc;
	}

	/* Response is ready */
	memcpy_fromio(feature_state, tpmi_info->tpmi_control_mem + TMPI_CONTROL_DATA_VAL_OFFSET,
		      sizeof(*feature_state));

	ret = 0;

done_proc:
	/* Set CPL "completion" bit */
	writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);

err_unlock:
	mutex_unlock(&tpmi_dev_lock);

	return ret;
}

int tpmi_get_feature_status(struct auxiliary_device *auxdev,
			    int feature_id, bool *read_blocked, bool *write_blocked)
{
	struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
	struct tpmi_feature_state feature_state;
	int ret;

	ret = tpmi_read_feature_status(tpmi_info, feature_id, &feature_state);
	if (ret)
		return ret;

	*read_blocked = feature_state.read_blocked;
	*write_blocked = feature_state.write_blocked;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);

static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
{
	struct intel_tpmi_info *tpmi_info = s->private;
	int locked, disabled, read_blocked, write_blocked;
	struct tpmi_feature_state feature_state;
	struct intel_tpmi_pm_feature *pfs;
	int ret, i;

	seq_printf(s, "tpmi PFS start offset 0x%llx\n", tpmi_info->pfs_start);
	seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\tread_blocked\twrite_blocked\n");
	for (i = 0; i < tpmi_info->feature_count; ++i) {
		pfs = &tpmi_info->tpmi_features[i];
		ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
		if (ret) {
			locked = 'U';
			disabled = 'U';
			read_blocked = 'U';
			write_blocked = 'U';
		} else {
			disabled = feature_state.enabled ? 'N' : 'Y';
			locked = feature_state.locked ? 'Y' : 'N';
			read_blocked = feature_state.read_blocked ? 'Y' : 'N';
			write_blocked = feature_state.write_blocked ? 'Y' : 'N';
		}
		seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
			   pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
			   pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
			   pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
			   read_blocked, write_blocked);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);

#define MEM_DUMP_COLUMN_COUNT	8

static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
{
	size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
	struct intel_tpmi_pm_feature *pfs = s->private;
	int count, ret = 0;
	void __iomem *mem;
	u32 size;
	u64 off;
	u8 *buffer;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	buffer = kmalloc(size, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	off = pfs->vsec_offset;

	mutex_lock(&tpmi_dev_lock);

	for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
		seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);

		mem = ioremap(off, size);
		if (!mem) {
			ret = -ENOMEM;
			break;
		}

		memcpy_fromio(buffer, mem, size);

		seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
			     false);

		iounmap(mem);

		off += size;
	}

	mutex_unlock(&tpmi_dev_lock);

	kfree(buffer);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);
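
/*
 * The "mem_write" debugfs node accepts three comma separated integers,
 * parsed by parse_int_array_user(): the instance (power domain index),
 * the register offset within that instance and the 32-bit value to write.
 * For example (values purely illustrative), writing "0,16,1" stores the
 * value 1 at offset 16 of instance 0.
 */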
static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	struct intel_tpmi_pm_feature *pfs = m->private;
	u32 addr, value, punit, size;
	u32 num_elems, *array;
	void __iomem *mem;
	int ret;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return -EIO;

	ret = parse_int_array_user(userbuf, len, (int **)&array);
	if (ret < 0)
		return ret;

	num_elems = *array;
	if (num_elems != 3) {
		ret = -EINVAL;
		goto exit_write;
	}

	punit = array[1];
	addr = array[2];
	value = array[3];

	if (punit >= pfs->pfs_header.num_entries) {
		ret = -EINVAL;
		goto exit_write;
	}

	if (addr >= size) {
		ret = -EINVAL;
		goto exit_write;
	}

	mutex_lock(&tpmi_dev_lock);

	mem = ioremap(pfs->vsec_offset + punit * size, size);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_mem_write;
	}

	writel(value, mem + addr);

	iounmap(mem);

	ret = len;

unlock_mem_write:
	mutex_unlock(&tpmi_dev_lock);

exit_write:
	kfree(array);

	return ret;
}

static int mem_write_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int mem_write_open(struct inode *inode, struct file *file)
{
	return single_open(file, mem_write_show, inode->i_private);
}

static const struct file_operations mem_write_ops = {
	.open = mem_write_open,
	.read = seq_read,
	.write = mem_write,
	.llseek = seq_lseek,
	.release = single_release,
};

#define tpmi_to_dev(info)	(&info->vsec_dev->pcidev->dev)

static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
{
	char name[64];
	int i;

	snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
	tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);

	debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);

	for (i = 0; i < tpmi_info->feature_count; ++i) {
		struct intel_tpmi_pm_feature *pfs;
		struct dentry *dir;

		pfs = &tpmi_info->tpmi_features[i];
		snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
		dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);

		debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
		debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
	}
}

static void tpmi_set_control_base(struct auxiliary_device *auxdev,
				  struct intel_tpmi_info *tpmi_info,
				  struct intel_tpmi_pm_feature *pfs)
{
	void __iomem *mem;
	u32 size;

	size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
	if (!size)
		return;

	mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
	if (!mem)
		return;

	/* mem is pointing to the TPMI CONTROL base */
	tpmi_info->tpmi_control_mem = mem;
}

static const char *intel_tpmi_name(enum intel_tpmi_id id)
{
	switch (id) {
	case TPMI_ID_RAPL:
		return "rapl";
	case TPMI_ID_PEM:
		return "pem";
	case TPMI_ID_UNCORE:
		return "uncore";
	case TPMI_ID_SST:
		return "sst";
	default:
		return NULL;
	}
}

/* String length for "tpmi-" + feature_name (up to 8 bytes) */
#define TPMI_FEATURE_NAME_LEN	14

static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
			      struct intel_tpmi_pm_feature *pfs,
			      u64 pfs_start)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	char feature_id_name[TPMI_FEATURE_NAME_LEN];
	struct intel_vsec_device *feature_vsec_dev;
	struct tpmi_feature_state feature_state;
	struct resource *res, *tmp;
	const char *name;
	int i, ret;

	ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &feature_state);
	if (ret)
		return ret;

	/*
	 * If the feature is not enabled, return -EOPNOTSUPP so that the caller
	 * continues to look at other features in the PFS. This will not cause
	 * the loading of this driver to fail.
	 */
	if (!feature_state.enabled)
		return -EOPNOTSUPP;

	name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
	if (!name)
		return -EOPNOTSUPP;

	res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
	if (!feature_vsec_dev) {
		kfree(res);
		return -ENOMEM;
	}

	snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);

	for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
		u64 entry_size_bytes = pfs->pfs_header.entry_size * sizeof(u32);

		tmp->start = pfs->vsec_offset + entry_size_bytes * i;
		tmp->end = tmp->start + entry_size_bytes - 1;
		tmp->flags = IORESOURCE_MEM;
	}

	feature_vsec_dev->pcidev = vsec_dev->pcidev;
	feature_vsec_dev->resource = res;
	feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
	feature_vsec_dev->priv_data = &tpmi_info->plat_info;
	feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
	feature_vsec_dev->ida = &intel_vsec_tpmi_ida;

	/*
	 * intel_vsec_add_aux() is resource managed, so no explicit
	 * delete is required on error or on module unload.
	 * The feature_vsec_dev and res memory are also freed as part of
	 * device deletion.
	 */
	return intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
				  feature_vsec_dev, feature_id_name);
}

static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
{
	struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
	int ret, i;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
					 tpmi_info->pfs_start);
		/*
		 * Fail the driver load if a supported feature fails to create
		 * its device; otherwise (-EOPNOTSUPP), continue with the next
		 * feature. Since intel_vsec_add_aux() is resource managed, no
		 * clean up is required for the successfully created devices.
		 */
		if (ret && ret != -EOPNOTSUPP)
			return ret;
	}

	return 0;
}

#define TPMI_INFO_BUS_INFO_OFFSET	0x08
#define TPMI_INFO_MAJOR_VERSION		0x00
#define TPMI_INFO_MINOR_VERSION		0x02

static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
			     struct intel_tpmi_pm_feature *pfs)
{
	struct tpmi_info_header header;
	void __iomem *info_mem;
	u64 feature_header;
	int ret = 0;

	info_mem = ioremap(pfs->vsec_offset, pfs->pfs_header.entry_size * sizeof(u32));
	if (!info_mem)
		return -ENOMEM;

	feature_header = readq(info_mem);
	if (TPMI_MAJOR_VERSION(feature_header) != TPMI_INFO_MAJOR_VERSION) {
		ret = -ENODEV;
		goto error_info_header;
	}

	memcpy_fromio(&header, info_mem + TPMI_INFO_BUS_INFO_OFFSET, sizeof(header));

	tpmi_info->plat_info.package_id = header.pkg;
	tpmi_info->plat_info.bus_number = header.bus;
	tpmi_info->plat_info.device_number = header.dev;
	tpmi_info->plat_info.function_number = header.fn;

	if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) {
		tpmi_info->plat_info.cdie_mask = header.cdie_mask;
		tpmi_info->plat_info.partition = header.partition;
		tpmi_info->plat_info.segment = header.segment;
	}

error_info_header:
	iounmap(info_mem);

	return ret;
}

static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
{
	void __iomem *pfs_mem;

	pfs_mem = ioremap(start, size);
	if (!pfs_mem)
		return -ENOMEM;

	memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));

	iounmap(pfs_mem);

	return 0;
}

#define TPMI_CAP_OFFSET_UNIT	1024

static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
{
	struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
	struct pci_dev *pci_dev = vsec_dev->pcidev;
	struct intel_tpmi_info *tpmi_info;
	u64 pfs_start = 0;
	int ret, i;

	tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
	if (!tpmi_info)
		return -ENOMEM;

	tpmi_info->vsec_dev = vsec_dev;
	tpmi_info->feature_count = vsec_dev->num_resources;
	tpmi_info->plat_info.bus_number = pci_dev->bus->number;

	tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
						sizeof(*tpmi_info->tpmi_features),
						GFP_KERNEL);
	if (!tpmi_info->tpmi_features)
		return -ENOMEM;

	for (i = 0; i < vsec_dev->num_resources; i++) {
		struct intel_tpmi_pm_feature *pfs;
		struct resource *res;
		u64 res_start;
		int size, ret;

		pfs = &tpmi_info->tpmi_features[i];
		pfs->vsec_dev = vsec_dev;

		res = &vsec_dev->resource[i];
		if (!res)
			continue;

		res_start = res->start;
		size = resource_size(res);
		if (size < 0)
			continue;

		ret = tpmi_fetch_pfs_header(pfs, res_start, size);
		if (ret)
			continue;

		if (!pfs_start)
			pfs_start = res_start;

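		/*
		 * Example with made-up numbers: a PFS entry reporting
		 * cap_offset = 0x40 places its register bank at
		 * pfs_start + 0x40 * 1024 bytes (64 KB past the PFS start),
		 * and each of its num_entries instances spans
		 * entry_size * 4 bytes from that address onwards.
		 */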
		pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset * TPMI_CAP_OFFSET_UNIT;

		/*
		 * Process TPMI_INFO to get the PCI device to CPU package ID
		 * mapping. Device nodes for TPMI features are not created in
		 * this for loop, so the mapping information will be available
		 * when the actual device nodes are created outside this loop
		 * via tpmi_create_devices().
		 */
		if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
			ret = tpmi_process_info(tpmi_info, pfs);
			if (ret)
				return ret;
		}

		if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
			tpmi_set_control_base(auxdev, tpmi_info, pfs);
	}

	tpmi_info->pfs_start = pfs_start;

	auxiliary_set_drvdata(auxdev, tpmi_info);

	ret = tpmi_create_devices(tpmi_info);
	if (ret)
		return ret;

	/*
	 * Allow debugfs when the security policy allows it. Everything this
	 * debugfs interface provides can also be done via /dev/mem access. If
	 * the /dev/mem interface is locked down, don't allow debugfs to
	 * present any information. Also require CAP_SYS_RAWIO, as the
	 * /dev/mem interface does.
	 */
	if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
		tpmi_dbgfs_register(tpmi_info);

	return 0;
}

static int tpmi_probe(struct auxiliary_device *auxdev,
		      const struct auxiliary_device_id *id)
{
	return intel_vsec_tpmi_init(auxdev);
}

static void tpmi_remove(struct auxiliary_device *auxdev)
{
	struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);

	debugfs_remove_recursive(tpmi_info->dbgfs_dir);
}

static const struct auxiliary_device_id tpmi_id_table[] = {
	{ .name = "intel_vsec.tpmi" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);

static struct auxiliary_driver tpmi_aux_driver = {
	.id_table = tpmi_id_table,
	.probe = tpmi_probe,
	.remove = tpmi_remove,
};

module_auxiliary_driver(tpmi_aux_driver);

MODULE_IMPORT_NS(INTEL_VSEC);
MODULE_DESCRIPTION("Intel TPMI enumeration module");
MODULE_LICENSE("GPL");