// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS("IDXD");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
		.load_device_defaults = idxd_load_iaa_device_defaults,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on GNR-D platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA PTL platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

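	/*
	 * Vector 0 carries device-level (misc/error) events; the remaining
	 * vectors are reserved for the per-WQ completion interrupts set up
	 * in the loop below.
	 */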
	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = IOMMU_PASID_INVALID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

static void idxd_clean_wqs(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		rc = -ENOMEM;
		goto err_free_wqs;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err_unwind;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(wq);
			goto err_unwind;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			kfree(wq);
			rc = -ENOMEM;
			goto err_unwind;
		}

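		/*
		 * When per-WQ operation configuration is supported, each WQ
		 * starts with a copy of the device-level operation
		 * capabilities (OPCAP).
		 */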
		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				kfree(wq->wqcfg);
				put_device(conf_dev);
				kfree(wq);
				rc = -ENOMEM;
				goto err_unwind;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

	return 0;

err_unwind:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);

err_free_wqs:
	kfree(idxd->wqs);

	return rc;
}

static void idxd_clean_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(engine);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);

	return rc;
}

static void idxd_clean_groups(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(group);
			goto err;
		}

		idxd->groups[i] = group;
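		/*
		 * Devices at or below version 2 default both traffic classes
		 * to 1 unless tc_override is set; otherwise they are left
		 * at -1.
		 */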
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);

	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	idxd_clean_groups(idxd);
	idxd_clean_engines(idxd);
	idxd_clean_wqs(idxd);
	destroy_workqueue(idxd->wq);
}

static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned int evl_cache_size;
	struct idxd_evl *evl;
	const char *idxd_name;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	mutex_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd_name = dev_name(idxd_confdev(idxd));
	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
	/*
	 * Since completion record in evl_cache will be copied to user
	 * when handling completion record page fault, need to create
	 * the cache suitable for user copy.
	 */
	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
						     0, 0, 0, evl_cache_size,
						     NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

err_evl:
	destroy_workqueue(idxd->wq);
err_wkq_create:
	idxd_clean_groups(idxd);
err_group:
	idxd_clean_engines(idxd);
err_engine:
	idxd_clean_wqs(idxd);
err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

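/*
 * multi_u64_to_bmap() - expand an array of u64 register words into a bitmap.
 * @bmap: destination bitmap, sized for at least @count * 64 bits
 * @val: source array of u64 words read from capability registers
 * @count: number of u64 words in @val
 *
 * Used below to turn the four OPCAP registers into idxd->opcap_bmap.
 */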
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}

static void idxd_free(struct idxd_device *idxd)
{
	if (!idxd)
		return;

	put_device(idxd_confdev(idxd));
	bitmap_free(idxd->opcap_bmap);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

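/*
 * Allocate the per-device idxd context: a device ID from idxd_ida, the
 * operation capability bitmap, and the conf_dev initialized on the dsa bus.
 * idxd_free() releases all of it.
 */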
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		goto err_ida;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap)
		goto err_opcap;

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0)
		goto err_name;

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;

err_name:
	put_device(conf_dev);
	bitmap_free(idxd->opcap_bmap);
err_opcap:
	ida_free(&idxd_ida, idxd->id);
err_ida:
	kfree(idxd);

	return NULL;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/*
	 * Attach a global PASID to the DMA domain so that we can use ENQCMDS
	 * to submit work on buffers mapped by DMA API.
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EPERM;

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/*
	 * DMA domain is owned by the driver, it should support all valid
	 * types such as DMA-FQ, identity, etc.
	 */
	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret) {
		dev_err(dev, "failed to attach device pasid %d, domain type %d",
			pasid, domain->type);
		iommu_free_global_pasid(pasid);
		return ret;
	}

	/* Since we set user privilege for kernel DMA, enable completion IRQ */
	idxd_set_user_intr(idxd, 1);
	idxd->pasid = pasid;

	return ret;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_free_global_pasid(idxd->pasid);

	idxd_set_user_intr(idxd, 0);
	idxd->sva = NULL;
	idxd->pasid = IOMMU_PASID_INVALID;
}

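/*
 * Common device bring-up: reset the device, optionally enable a system PASID
 * for in-kernel ENQCMDS submission, read capabilities and table offsets, set
 * up software state, load the read-only configs if the device is not
 * configurable, then set up interrupts and perfmon.
 */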
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

		rc = idxd_enable_system_pasid(idxd);
		if (rc)
			dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
}

/*
 * Attach IDXD device to IDXD driver.
 */
static int idxd_bind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;
	int err = -ENODEV;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev)
		err = device_driver_attach(drv, dev);

	put_device(dev);

	return err;
}

/*
 * Detach IDXD device from driver.
 */
static void idxd_unbind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev && dev->driver == drv)
		device_release_driver(dev);

	put_device(dev);
}

#define idxd_free_saved_configs(saved_configs, count)	\
	do {						\
		int i;					\
							\
		for (i = 0; i < (count); i++)		\
			kfree(saved_configs[i]);	\
	} while (0)

static void idxd_free_saved(struct idxd_group **saved_groups,
			    struct idxd_engine **saved_engines,
			    struct idxd_wq **saved_wqs,
			    struct idxd_device *idxd)
{
	if (saved_groups)
		idxd_free_saved_configs(saved_groups, idxd->max_groups);
	if (saved_engines)
		idxd_free_saved_configs(saved_engines, idxd->max_engines);
	if (saved_wqs)
		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
}

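/*
 * idxd_device_config_save() below relies on scope-based cleanup
 * (__free()/no_free_ptr()) for most of its temporary allocations: partially
 * built snapshots are released automatically on error paths, and ownership
 * is handed over to idxd_saved only at the end.
 */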
/*
 * Save IDXD device configurations including engines, groups, wqs etc.
 * The saved configurations can be restored when needed.
 */
static int idxd_device_config_save(struct idxd_device *idxd,
				   struct idxd_saved_states *idxd_saved)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));

	if (idxd->evl) {
		memcpy(&idxd_saved->saved_evl, idxd->evl,
		       sizeof(struct idxd_evl));
	}

	struct idxd_group **saved_groups __free(kfree) =
		kcalloc_node(idxd->max_groups,
			     sizeof(struct idxd_group *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group __free(kfree) =
			kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
				     dev_to_node(dev));

		if (!saved_group) {
			/* Free saved groups */
			idxd_free_saved(saved_groups, NULL, NULL, idxd);

			return -ENOMEM;
		}

		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
		saved_groups[i] = no_free_ptr(saved_group);
	}

	struct idxd_engine **saved_engines =
		kcalloc_node(idxd->max_engines,
			     sizeof(struct idxd_engine *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_engines) {
		/* Free saved groups */
		idxd_free_saved(saved_groups, NULL, NULL, idxd);

		return -ENOMEM;
	}
	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine __free(kfree) =
			kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
				     dev_to_node(dev));
		if (!saved_engine) {
			/* Free saved groups and engines */
			idxd_free_saved(saved_groups, saved_engines, NULL,
					idxd);

			return -ENOMEM;
		}

		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
		saved_engines[i] = no_free_ptr(saved_engine);
	}

	unsigned long *saved_wq_enable_map __free(bitmap) =
		bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
				   dev_to_node(dev));
	if (!saved_wq_enable_map) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);

	struct idxd_wq **saved_wqs __free(kfree) =
		kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_wqs) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq __free(kfree) =
			kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
				     dev_to_node(dev));
		struct idxd_wq *wq;

		if (!saved_wq) {
			/* Free saved groups, engines, and wqs */
			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
					idxd);

			return -ENOMEM;
		}

		if (!test_bit(i, saved_wq_enable_map))
			continue;

		wq = idxd->wqs[i];
		mutex_lock(&wq->wq_lock);
		memcpy(saved_wq, wq, sizeof(*saved_wq));
		saved_wqs[i] = no_free_ptr(saved_wq);
		mutex_unlock(&wq->wq_lock);
	}

	/* Save configurations */
	idxd_saved->saved_groups = no_free_ptr(saved_groups);
	idxd_saved->saved_engines = no_free_ptr(saved_engines);
	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);

	return 0;
}

/*
 * Restore IDXD device configurations including engines, groups, wqs etc
 * that were saved before.
 */
static void idxd_device_config_restore(struct idxd_device *idxd,
				       struct idxd_saved_states *idxd_saved)
{
	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
	int i;

	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;

	idxd->evl->size = saved_evl->size;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group, *group;

		saved_group = idxd_saved->saved_groups[i];
		group = idxd->groups[i];

		group->rdbufs_allowed = saved_group->rdbufs_allowed;
		group->rdbufs_reserved = saved_group->rdbufs_reserved;
		group->tc_a = saved_group->tc_a;
		group->tc_b = saved_group->tc_b;
		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;

		kfree(saved_group);
	}
	kfree(idxd_saved->saved_groups);

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine, *engine;

		saved_engine = idxd_saved->saved_engines[i];
		engine = idxd->engines[i];

		engine->group = saved_engine->group;

		kfree(saved_engine);
	}
	kfree(idxd_saved->saved_engines);

	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
		    idxd->max_wqs);
	bitmap_free(idxd_saved->saved_wq_enable_map);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq, *wq;
		size_t len;

		if (!test_bit(i, idxd->wq_enable_map))
			continue;

		saved_wq = idxd_saved->saved_wqs[i];
		wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);

		wq->group = saved_wq->group;
		wq->flags = saved_wq->flags;
		wq->threshold = saved_wq->threshold;
		wq->size = saved_wq->size;
		wq->priority = saved_wq->priority;
		wq->type = saved_wq->type;
		len = strlen(saved_wq->name) + 1;
		strscpy(wq->name, saved_wq->name, len);
		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
		wq->max_batch_size = saved_wq->max_batch_size;
		wq->enqcmds_retries = saved_wq->enqcmds_retries;
		wq->descs = saved_wq->descs;
		wq->idxd_chan = saved_wq->idxd_chan;
		len = strlen(saved_wq->driver_name) + 1;
		strscpy(wq->driver_name, saved_wq->driver_name, len);

		mutex_unlock(&wq->wq_lock);

		kfree(saved_wq);
	}

	kfree(idxd_saved->saved_wqs);
}

static void idxd_reset_prepare(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct device *dev = &idxd->pdev->dev;
	const char *idxd_name;
	int rc;

	idxd_name = dev_name(idxd_confdev(idxd));

	struct idxd_saved_states *idxd_saved __free(kfree) =
		kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!idxd_saved) {
		dev_err(dev, "HALT: no memory\n");

		return;
	}

	/* Save IDXD configurations. */
	rc = idxd_device_config_save(idxd, idxd_saved);
	if (rc < 0) {
		dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);

		return;
	}

	idxd->idxd_saved = no_free_ptr(idxd_saved);

	/* Save PCI device state. */
	pci_save_state(idxd->pdev);
}

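/*
 * FLR recovery path: restore PCI state, re-probe the device without
 * reallocating software state, restore the saved configuration, then re-bind
 * the device and any enabled user-type WQs.
 */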
static void idxd_reset_done(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	const char *idxd_name;
	struct device *dev;
	int rc, i;

	if (!idxd->idxd_saved)
		return;

	dev = &idxd->pdev->dev;
	idxd_name = dev_name(idxd_confdev(idxd));

	/* Restore PCI device state. */
	pci_restore_state(idxd->pdev);

	/* Unbind idxd device from driver. */
	idxd_unbind(&idxd_drv.drv, idxd_name);

	/*
	 * Probe PCI device without allocating or changing
	 * idxd software data, which stays the same as before FLR.
	 */
	idxd_pci_probe_alloc(idxd, NULL, NULL);

	/* Restore IDXD configurations. */
	idxd_device_config_restore(idxd, idxd->idxd_saved);

	/* Re-configure IDXD device if allowed. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			dev_err(dev, "HALT: %s config fails\n", idxd_name);
			goto out;
		}
	}

	/* Bind IDXD device to driver. */
	rc = idxd_bind(&idxd_drv.drv, idxd_name);
	if (rc < 0) {
		dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
		goto out;
	}

	/* Bind enabled wq in the IDXD device to driver. */
	for (i = 0; i < idxd->max_wqs; i++) {
		if (test_bit(i, idxd->wq_enable_map)) {
			struct idxd_wq *wq = idxd->wqs[i];
			char wq_name[32];

			wq->state = IDXD_WQ_DISABLED;
			sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
			/*
			 * Bind to user driver depending on wq type.
			 *
			 * Currently only support user type WQ. Will support
			 * kernel type WQ in the future.
			 */
			if (wq->type == IDXD_WQT_USER)
				rc = idxd_bind(&idxd_user_drv.drv, wq_name);
			else
				rc = -EINVAL;
			if (rc < 0) {
				clear_bit(i, idxd->wq_enable_map);
				dev_err(dev,
					"HALT: unable to re-enable wq %s\n",
					dev_name(wq_confdev(wq)));
			}
		}
	}
out:
	kfree(idxd->idxd_saved);
}

static const struct pci_error_handlers idxd_error_handler = {
	.reset_prepare = idxd_reset_prepare,
	.reset_done = idxd_reset_done,
};

/*
 * Probe idxd PCI device.
 * If idxd is not given, need to allocate idxd and set up its data.
 *
 * If idxd is given, idxd was allocated and set up already. Just need to
 * configure device without re-allocating and re-configuring idxd data.
 * This is useful for recovering from FLR.
 */
int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	bool alloc_idxd = idxd ? false : true;
	struct idxd_driver_data *data;
	struct device *dev;
	int rc;

	pdev = idxd ? idxd->pdev : pdev;
	dev = &pdev->dev;
	data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	if (alloc_idxd) {
		dev_dbg(dev, "Alloc IDXD context\n");
		idxd = idxd_alloc(pdev, data);
		if (!idxd) {
			rc = -ENOMEM;
			goto err_idxd_alloc;
		}

		dev_dbg(dev, "Mapping BARs\n");
		idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
		if (!idxd->reg_base) {
			rc = -ENOMEM;
			goto err_iomap;
		}

		dev_dbg(dev, "Set DMA masks\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err;
	}

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	if (alloc_idxd) {
		idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
		rc = idxd_probe(idxd);
		if (rc) {
			dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
			goto err;
		}

		if (data->load_device_defaults) {
			rc = data->load_device_defaults(idxd);
			if (rc)
				dev_warn(dev, "IDXD loading device defaults failed\n");
		}

		rc = idxd_register_devices(idxd);
		if (rc) {
			dev_err(dev, "IDXD sysfs setup failed\n");
			goto err_dev_register;
		}

		rc = idxd_device_init_debugfs(idxd);
		if (rc)
			dev_warn(dev, "IDXD debugfs failed to setup\n");
	}

	if (!alloc_idxd) {
		/* Release interrupts in the IDXD device. */
		idxd_cleanup_interrupts(idxd);

		/* Re-enable interrupts in the IDXD device. */
		rc = idxd_setup_interrupts(idxd);
		if (rc)
			dev_warn(dev, "IDXD interrupts failed to setup\n");
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	if (data)
		idxd->user_submission_safe = data->user_submission_safe;

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	idxd_free(idxd);
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return idxd_pci_probe_alloc(NULL, pdev, id);
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	idxd_device_remove_debugfs(idxd);
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	pci_iounmap(pdev, idxd->reg_base);
	put_device(idxd_confdev(idxd));
	pci_disable_device(pdev);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
	.err_handler	= &idxd_error_handler,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);