// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Processor device driver
 *
 * Copyright (C) 2013,2019 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/dma-mapping.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "psp-dev.h"

#define MSIX_VECTORS			2

struct sp_pci {
	int msix_count;
	struct msix_entry msix_entry[MSIX_VECTORS];
};
static struct sp_device *sp_dev_master;

#define attribute_show(name, def)						\
static ssize_t name##_show(struct device *d, struct device_attribute *attr,	\
			   char *buf)						\
{										\
	struct sp_device *sp = dev_get_drvdata(d);				\
	struct psp_device *psp = sp->psp_data;					\
	int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET;	\
	return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0);		\
}

attribute_show(fused_part, FUSED_PART)
static DEVICE_ATTR_RO(fused_part);
attribute_show(debug_lock_on, DEBUG_LOCK_ON)
static DEVICE_ATTR_RO(debug_lock_on);
attribute_show(tsme_status, TSME_STATUS)
static DEVICE_ATTR_RO(tsme_status);
attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS)
static DEVICE_ATTR_RO(anti_rollback_status);
attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED)
static DEVICE_ATTR_RO(rpmc_production_enabled);
attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE)
static DEVICE_ATTR_RO(rpmc_spirom_available);
attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE)
static DEVICE_ATTR_RO(hsp_tpm_available);
attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED)
static DEVICE_ATTR_RO(rom_armor_enforced);

static struct attribute *psp_attrs[] = {
	&dev_attr_fused_part.attr,
	&dev_attr_debug_lock_on.attr,
	&dev_attr_tsme_status.attr,
	&dev_attr_anti_rollback_status.attr,
	&dev_attr_rpmc_production_enabled.attr,
	&dev_attr_rpmc_spirom_available.attr,
	&dev_attr_hsp_tpm_available.attr,
	&dev_attr_rom_armor_enforced.attr,
	NULL
};

static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct sp_device *sp = dev_get_drvdata(dev);
	struct psp_device *psp = sp->psp_data;

	if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING))
		return 0444;

	return 0;
}

static struct attribute_group psp_attr_group = {
	.attrs = psp_attrs,
	.is_visible = psp_security_is_visible,
};

static const struct attribute_group *psp_groups[] = {
	&psp_attr_group,
	NULL,
};

static int sp_get_msix_irqs(struct sp_device *sp)
{
	struct sp_pci *sp_pci = sp->dev_specific;
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++)
		sp_pci->msix_entry[v].entry = v;

	ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v);
	if (ret < 0)
		return ret;

	sp_pci->msix_count = ret;
	sp->use_tasklet = true;

	sp->psp_irq = sp_pci->msix_entry[0].vector;
	sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector
					       : sp_pci->msix_entry[0].vector;
	return 0;
}

static int sp_get_msi_irq(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	sp->ccp_irq = pdev->irq;
	sp->psp_irq = pdev->irq;

	return 0;
}

static int sp_get_irqs(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	int ret;

	ret = sp_get_msix_irqs(sp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = sp_get_msi_irq(sp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}

static void sp_free_irqs(struct sp_device *sp)
{
	struct sp_pci *sp_pci = sp->dev_specific;
	struct device *dev = sp->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (sp_pci->msix_count)
		pci_disable_msix(pdev);
	else if (sp->psp_irq)
		pci_disable_msi(pdev);

	sp->ccp_irq = 0;
	sp->psp_irq = 0;
}

static bool sp_pci_is_master(struct sp_device *sp)
{
	struct device *dev_cur, *dev_new;
	struct pci_dev *pdev_cur, *pdev_new;

	dev_new = sp->dev;
	dev_cur = sp_dev_master->dev;

	pdev_new = to_pci_dev(dev_new);
	pdev_cur = to_pci_dev(dev_cur);

	if (pdev_new->bus->number < pdev_cur->bus->number)
		return true;

	if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
		return true;

	if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
		return true;

	return false;
}

static void psp_set_master(struct sp_device *sp)
{
	if (!sp_dev_master) {
		sp_dev_master = sp;
		return;
	}

	if (sp_pci_is_master(sp))
		sp_dev_master = sp;
}

static struct sp_device *psp_get_master(void)
{
	return sp_dev_master;
}

static void psp_clear_master(struct sp_device *sp)
{
	if (sp == sp_dev_master) {
		sp_dev_master = NULL;
		dev_dbg(sp->dev, "Cleared sp_dev_master\n");
	}
}

static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sp_device *sp;
	struct sp_pci *sp_pci;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	int bar_mask;
	int ret;

	ret = -ENOMEM;
	sp = sp_alloc_struct(dev);
	if (!sp)
		goto e_err;

	sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL);
	if (!sp_pci)
		goto e_err;

	sp->dev_specific = sp_pci;
	sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data;
	if (!sp->dev_vdata) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
		goto e_err;
	}

	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
		goto e_err;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	sp->io_map = iomap_table[sp->dev_vdata->bar];
	if (!sp->io_map) {
		dev_err(dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto e_err;
	}

	ret = sp_get_irqs(sp);
	if (ret)
		goto e_err;

	pci_set_master(pdev);
	sp->set_psp_master_device = psp_set_master;
	sp->get_psp_master_device = psp_get_master;
	sp->clear_psp_master_device = psp_clear_master;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
				ret);
			goto free_irqs;
		}
	}

	dev_set_drvdata(dev, sp);

	ret = sp_init(sp);
	if (ret)
		goto free_irqs;

	return 0;

free_irqs:
	sp_free_irqs(sp);
e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}

static void sp_pci_shutdown(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sp_device *sp = dev_get_drvdata(dev);

	if (!sp)
		return;

	sp_destroy(sp);
}

static void sp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct sp_device *sp = dev_get_drvdata(dev);

	if (!sp)
		return;

	sp_destroy(sp);

	sp_free_irqs(sp);
}

static int __maybe_unused sp_pci_suspend(struct device *dev)
{
	struct sp_device *sp = dev_get_drvdata(dev);

	return sp_suspend(sp);
}

static int __maybe_unused sp_pci_resume(struct device *dev)
{
	struct sp_device *sp = dev_get_drvdata(dev);

	return sp_resume(sp);
}

#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
	.cmdresp_reg		= 0x10580,	/* C2PMSG_32 */
	.cmdbuff_addr_lo_reg	= 0x105e0,	/* C2PMSG_56 */
	.cmdbuff_addr_hi_reg	= 0x105e4,	/* C2PMSG_57 */
};

static const struct sev_vdata sevv2 = {
	.cmdresp_reg		= 0x10980,	/* C2PMSG_32 */
	.cmdbuff_addr_lo_reg	= 0x109e0,	/* C2PMSG_56 */
	.cmdbuff_addr_hi_reg	= 0x109e4,	/* C2PMSG_57 */
};

static const struct tee_vdata teev1 = {
	.cmdresp_reg		= 0x10544,	/* C2PMSG_17 */
	.cmdbuff_addr_lo_reg	= 0x10548,	/* C2PMSG_18 */
	.cmdbuff_addr_hi_reg	= 0x1054c,	/* C2PMSG_19 */
	.ring_wptr_reg		= 0x10550,	/* C2PMSG_20 */
	.ring_rptr_reg		= 0x10554,	/* C2PMSG_21 */
};

static const struct tee_vdata teev2 = {
	.cmdresp_reg		= 0x10944,	/* C2PMSG_17 */
	.cmdbuff_addr_lo_reg	= 0x10948,	/* C2PMSG_18 */
	.cmdbuff_addr_hi_reg	= 0x1094c,	/* C2PMSG_19 */
	.ring_wptr_reg		= 0x10950,	/* C2PMSG_20 */
	.ring_rptr_reg		= 0x10954,	/* C2PMSG_21 */
};

static const struct platform_access_vdata pa_v1 = {
	.cmdresp_reg		= 0x10570,	/* C2PMSG_28 */
	.cmdbuff_addr_lo_reg	= 0x10574,	/* C2PMSG_29 */
	.cmdbuff_addr_hi_reg	= 0x10578,	/* C2PMSG_30 */
	.doorbell_button_reg	= 0x10a24,	/* C2PMSG_73 */
	.doorbell_cmd_reg	= 0x10a40,	/* C2PMSG_80 */
};

static const struct platform_access_vdata pa_v2 = {
	.doorbell_button_reg	= 0x10a24,	/* C2PMSG_73 */
	.doorbell_cmd_reg	= 0x10a40,	/* C2PMSG_80 */
};

static const struct psp_vdata pspv1 = {
	.sev			= &sevv1,
	.feature_reg		= 0x105fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10610,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10614,	/* P2CMSG_INTSTS */
};

static const struct psp_vdata pspv2 = {
	.sev			= &sevv2,
	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10690,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10694,	/* P2CMSG_INTSTS */
};

static const struct psp_vdata pspv3 = {
	.tee			= &teev1,
	.platform_access	= &pa_v1,
	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10690,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10694,	/* P2CMSG_INTSTS */
};

static const struct psp_vdata pspv4 = {
	.sev			= &sevv2,
	.tee			= &teev1,
	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10690,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10694,	/* P2CMSG_INTSTS */
};

static const struct psp_vdata pspv5 = {
	.tee			= &teev2,
	.platform_access	= &pa_v2,
	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10510,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10514,	/* P2CMSG_INTSTS */
};

static const struct psp_vdata pspv6 = {
	.sev			= &sevv2,
	.tee			= &teev2,
	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
	.inten_reg		= 0x10510,	/* P2CMSG_INTEN */
	.intsts_reg		= 0x10514,	/* P2CMSG_INTSTS */
};

#endif

static const struct sp_dev_vdata dev_vdata[] = {
	{	/* 0 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv3,
#endif
	},
	{	/* 1 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv1,
#endif
	},
	{	/* 2 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5b,
#endif
	},
	{	/* 3 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv2,
#endif
	},
	{	/* 4 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_CCP
		.ccp_vdata = &ccpv5a,
#endif
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv3,
#endif
	},
	{	/* 5 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv4,
#endif
	},
	{	/* 6 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv3,
#endif
	},
	{	/* 7 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv5,
#endif
	},
	{	/* 8 */
		.bar = 2,
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
		.psp_vdata = &pspv6,
#endif
	},
};

static const struct pci_device_id sp_pci_table[] = {
	{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
	{ PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] },
	{ PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] },
	{ PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] },
	{ PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] },
	{ PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] },
	{ PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] },
	{ PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] },
	{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
	{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sp_pci_table);

static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume);

static struct pci_driver sp_pci_driver = {
	.name = "ccp",
	.id_table = sp_pci_table,
	.probe = sp_pci_probe,
	.remove = sp_pci_remove,
	.shutdown = sp_pci_shutdown,
	.driver.pm = &sp_pci_pm_ops,
	.dev_groups = psp_groups,
};

int sp_pci_init(void)
{
	return pci_register_driver(&sp_pci_driver);
}

void sp_pci_exit(void)
{
	pci_unregister_driver(&sp_pci_driver);
}