// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/intel-svm.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

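/*
 * Set up device interrupts: allocate all MSI-X vectors, request the
 * misc/error interrupt on vector 0, and initialize the remaining
 * per-vector idxd_irq_entry structures (one per work queue).
 */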
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = INVALID_IOASID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

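	/* Bitmap tracking per-WQ enable state, sized to max_wqs. */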
	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
	}

	return 0;

 err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	return 0;

 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

static void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		return NULL;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap) {
		ida_free(&idxd_ida, idxd->id);
		return NULL;
	}

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0) {
		put_device(conf_dev);
		return NULL;
	}

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	int flags;
	unsigned int pasid;
	struct iommu_sva *sva;

	flags = SVM_FLAG_SUPERVISOR_MODE;

	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
	if (IS_ERR(sva)) {
		dev_warn(&idxd->pdev->dev,
			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
		return PTR_ERR(sva);
	}

	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	idxd->sva = sva;
	idxd->pasid = pasid;
	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
	return 0;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{

	iommu_sva_unbind_device(idxd->sva);
	idxd->sva = NULL;
}

static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			if (idxd_enable_system_pasid(idxd))
				dev_warn(dev, "No in-kernel DMA with PASID.\n");
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;

	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
	if (!idxd->reg_base) {
		rc = -ENOMEM;
		goto err_iomap;
	}

	dev_dbg(dev, "Set DMA masks\n");
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		goto err;

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		goto err;
	}

	rc = idxd_register_devices(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		goto err_dev_register;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
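
/* Error unwind: undo only the setup steps that completed before the failure. */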
 err_dev_register:
	idxd_cleanup(idxd);
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name = DRV_NAME,
	.id_table = idxd_pci_tbl,
	.probe = idxd_pci_probe,
	.remove = idxd_remove,
	.shutdown = idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	perfmon_init();

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

 err_pci_register:
	idxd_cdev_remove();
 err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
 err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
 err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
 err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	perfmon_exit();
}
module_exit(idxd_exit_module);