// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
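
/*
 * Allocate the device's MSI-X vectors and request the misc interrupt
 * (vector 0) plus one completion interrupt per remaining vector. When the
 * device supports the REQUEST_INT_HANDLE command, an interrupt handle is
 * also requested for each completion vector.
 */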
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
					 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_irq_entries;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
		spin_lock_init(&idxd->irq_entries[i].list_lock);
	}

	idxd_msix_perm_setup(idxd);

	irq_entry = &idxd->irq_entries[0];
	rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
				  0, "idxd-misc", irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = request_threaded_irq(irq_entry->vector, NULL,
					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
			goto err_wq_irqs;
		}

		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
			/*
			 * The MSIX vector enumeration starts at 1 with vector 0 being the
			 * misc interrupt that handles non I/O completion events. The
			 * interrupt handles are for IMS enumeration on guest. The misc
			 * interrupt vector does not require a handle and therefore we start
			 * the int_handles at index 0. Since 'i' starts at 1, the first
			 * int_handles index will be 0.
			 */
			rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
							    IDXD_IRQ_MSIX);
			if (rc < 0) {
				free_irq(irq_entry->vector, irq_entry);
				goto err_wq_irqs;
			}
			dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
		}
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_wq_irqs:
	while (--i >= 0) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
		if (i != 0)
			idxd_device_release_int_handle(idxd,
						       idxd->int_handles[i], IDXD_IRQ_MSIX);
	}
 err_misc_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	idxd_msix_perm_clear(idxd);
 err_irq_entries:
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}
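
/*
 * Teardown counterpart of idxd_setup_interrupts(): release the interrupt
 * handles (when the device implements the RELEASE_INT_HANDLE command),
 * free all requested IRQs and return the MSI-X vectors to the PCI core.
 */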
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	irq_entry = &idxd->irq_entries[0];
	free_irq(irq_entry->vector, irq_entry);

	for (i = 1; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
			idxd_device_release_int_handle(idxd, idxd->int_handles[i],
						       IDXD_IRQ_MSIX);
		free_irq(irq_entry->vector, irq_entry);
	}

	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
		wq->max_batch_size = idxd->max_batch_size;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}
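
/*
 * Allocate one engine context per hardware engine and initialize its
 * configuration device as a child of the idxd device on the dsa bus.
 */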
static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		group->tc_a = -1;
		group->tc_b = -1;
	}

	return 0;

 err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
		idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
						 dev_to_node(dev));
		if (!idxd->int_handles)
			return -ENOMEM;
	}

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	kfree(idxd->int_handles);
	return rc;
}
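
/*
 * Decode the table offset register into byte offsets (each field is in
 * multiples of IDXD_TABLE_MULT) for the group config, WQ config, MSI-X
 * permission and perfmon register tables.
 */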
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}
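
/*
 * Allocate the per-device context: assign a device ID from idxd_ida and
 * initialize its configuration device for later registration on the dsa bus.
 */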
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}
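
/*
 * Main device probe path: reset the device, optionally enable SVA and a
 * system PASID, read capabilities and table offsets, set up internal
 * structures and interrupts, then record the cdev major and initialize
 * the perfmon PMU (perfmon failure is non-fatal).
 */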
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		rc = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
		if (rc == 0) {
			rc = idxd_enable_system_pasid(idxd);
			if (rc < 0) {
				iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
				dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
			} else {
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
			}
		} else {
			dev_warn(dev, "Unable to turn on SVA feature.\n");
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}
static void idxd_release_int_handles(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->num_wq_irqs; i++) {
		if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
			rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
							    IDXD_IRQ_MSIX);
			if (rc < 0)
				dev_warn(dev, "irq handle %d release failed\n",
					 idxd->int_handles[i]);
			else
				dev_dbg(dev, "int handle released: %u\n", idxd->int_handles[i]);
		}
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(irq_entry->vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	int i;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_unregister_devices(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		free_irq(irq_entry->vector, irq_entry);
	}
	idxd_msix_perm_clear(idxd);
	idxd_release_int_handles(idxd);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	device_unregister(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};
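
/*
 * Module init: require MOVDIR64B, record ENQCMD(S) availability, then
 * register the bus type, the idxd and dsa device drivers, the cdev support
 * and finally the PCI driver, unwinding in reverse order on failure.
 */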
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&dsa_drv);
	if (err < 0)
		goto err_dsa_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&dsa_drv);
err_dsa_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_drv);
	idxd_driver_unregister(&dsa_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
	perfmon_exit();
}
module_exit(idxd_exit_module);

int __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *owner,
			   const char *mod_name)
{
	struct device_driver *drv = &idxd_drv->drv;

	if (!idxd_drv->type) {
		pr_debug("driver type not set (%ps)\n", __builtin_return_address(0));
		return -EINVAL;
	}

	drv->name = idxd_drv->name;
	drv->bus = &dsa_bus_type;
	drv->owner = owner;
	drv->mod_name = mod_name;

	return driver_register(drv);
}

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv)
{
	driver_unregister(&idxd_drv->drv);
}