// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = INVALID_IOASID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		idxd->wqs[i] = wq;
	}

	return 0;

err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap) {
		ida_free(&idxd_ida, idxd->id);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	return -EOPNOTSUPP;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			if (idxd_enable_system_pasid(idxd))
				dev_warn(dev, "No in-kernel DMA with PASID.\n");
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	put_device(idxd_confdev(idxd));
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name = DRV_NAME,
	.id_table = idxd_pci_tbl,
	.probe = idxd_pci_probe,
	.remove = idxd_remove,
	.shutdown = idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
}
module_exit(idxd_exit_module);