// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS(IDXD);

/* SVA (Shared Virtual Addressing) is attempted by default; 'sva=0' forces it off. */
static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

/* When set, group traffic-class defaults are left at -1 instead of 1 (see idxd_setup_groups()). */
bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");

#define DRV_NAME "idxd"

bool support_enqcmd;
/* Allocator for per-device numeric ids used in sysfs names ("dsa0", "iax1", ...). */
DEFINE_IDA(idxd_ida);

/*
 * Per device-type constants (DSA vs IAX): sysfs name prefix, completion
 * record geometry, event-log entry offsets, and optional default-config
 * loader. Indexed by enum idxd_type and attached to matching PCI ids below.
 */
static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
		.load_device_defaults = idxd_load_iaa_device_defaults,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

/*
 * Allocate MSI-X vectors and install the misc (error/admin) interrupt on
 * vector 0; vectors 1..max_wqs are only initialized here (handlers are
 * requested later when a wq is enabled). Returns 0 or negative errno.
 */
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev =
&pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	/* Require the full vector count: one misc vector plus one per wq. */
	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/* Vector 0 is the device misc/error interrupt, serviced in a thread. */
	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	/*
	 * Pre-initialize the per-wq irq entries (vectors 1..max_wqs); the
	 * actual handlers are requested when the corresponding wq comes up.
	 */
	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = IOMMU_PASID_INVALID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

 err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

/* Undo idxd_setup_interrupts(): mask errors, free the misc irq and vectors. */
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

/*
 * Allocate and initialize one struct idxd_wq per hardware work queue,
 * each with its own sysfs conf_dev under the idxd parent. On error all
 * previously created wqs are released via put_device(). Returns 0 or
 * negative errno.
 */
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		kfree(idxd->wqs);
		return -ENOMEM;
	}

	for (i = 0; i <
idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		/* After device_initialize(), put_device() owns freeing the wq. */
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			put_device(conf_dev);
			rc = -ENOMEM;
			goto err;
		}

		/* Per-wq operation capability mask, seeded from the device-wide mask. */
		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				put_device(conf_dev);
				rc = -ENOMEM;
				goto err;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

	return 0;

 err:
	/*
	 * Release only fully-constructed wqs (index < i); the failing wq was
	 * already dropped above. NOTE(review): idxd->wqs/wq_enable_map appear
	 * to be left allocated on this path — presumably freed by the owner's
	 * release path; verify against the device release callback.
	 */
	while (--i >= 0) {
		wq = idxd->wqs[i];
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
	}
	return rc;
}

/*
 * Allocate and initialize one struct idxd_engine per hardware engine,
 * each a child conf_dev of the idxd device. Returns 0 or negative errno;
 * on error, previously created engines are released via put_device().
 */
static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
	conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

 err:
	/* Drop refs on fully-constructed engines only (index < i). */
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
	}
	return rc;
}

/*
 * Allocate and initialize one struct idxd_group per hardware group, each
 * a child conf_dev of the idxd device, with traffic-class and read-buffer
 * defaults. Returns 0 or negative errno; on error previously created
 * groups are released via put_device().
 */
static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			goto err;
		}

		idxd->groups[i] = group;
		/*
		 * Traffic class defaults: 1/1 on older hardware unless the
		 * tc_override module parameter requests the -1 sentinel.
		 */
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

 err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
	}
	return rc;
}

/*
 * Full teardown of everything idxd_setup_internals() created: drop all
 * group/engine/wq conf_dev references and destroy the driver workqueue.
 */
static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
	for (i = 0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
	destroy_workqueue(idxd->wq);
}
/*
 * Set up the event log (EVL) bookkeeping when the hardware advertises
 * evl_support in GENCAP: allocate the evl descriptor and a usercopy-safe
 * kmem cache for fault entries. A device without EVL support returns 0
 * with nothing allocated. Returns 0 or -ENOMEM.
 */
static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned int evl_cache_size;
	struct idxd_evl *evl;
	const char *idxd_name;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	mutex_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd_name = dev_name(idxd_confdev(idxd));
	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
	/*
	 * Since completion record in evl_cache will be copied to user
	 * when handling completion record page fault, need to create
	 * the cache suitable for user copy.
	 */
	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
						     0, 0, 0, evl_cache_size,
						     NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

/*
 * Build all software state for the device: wqs, engines, groups, the
 * driver workqueue, and the event log. Each step's error path unwinds
 * everything done before it. Returns 0 or negative errno.
 */
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc, i;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

	/* Unwind in reverse order of construction. */
 err_evl:
	destroy_workqueue(idxd->wq);
 err_wkq_create:
	for (i = 0; i < idxd->max_groups; i++)
		put_device(group_confdev(idxd->groups[i]));
 err_group:
	for (i =
0; i < idxd->max_engines; i++)
		put_device(engine_confdev(idxd->engines[i]));
 err_engine:
	for (i = 0; i < idxd->max_wqs; i++)
		put_device(wq_confdev(idxd->wqs[i]));
 err_wqs:
	return rc;
}

/*
 * Read the IDXD_TABLE_OFFSET register pair and derive the MMIO offsets of
 * the group config, wq config, MSIX permission, and perfmon tables.
 */
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

/*
 * Expand an array of 'count' u64 words into a bitmap: bit j of val[i]
 * becomes bit (i * 64 + j) of bmap. Caller supplies a bitmap of at least
 * count * BITS_PER_LONG_LONG bits.
 */
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

/*
 * Snapshot all capability registers (GENCAP, CMDCAP, GRPCAP, ENGCAP,
 * WQCAP, OPCAP, and IAACAP where applicable) into idxd->hw and derive
 * the driver limits (max wqs, engines, groups, transfer/batch sizes).
 */
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs =
idxd->hw.group_cap.total_rdbufs; 4877ed6f1b8SDave Jiang dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); 4887ed6f1b8SDave Jiang idxd->nr_rdbufs = idxd->max_rdbufs; 489bfe1d560SDave Jiang 490bfe1d560SDave Jiang /* read engine capabilities */ 491bfe1d560SDave Jiang idxd->hw.engine_cap.bits = 492bfe1d560SDave Jiang ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); 493bfe1d560SDave Jiang dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); 494bfe1d560SDave Jiang idxd->max_engines = idxd->hw.engine_cap.num_engines; 495bfe1d560SDave Jiang dev_dbg(dev, "max engines: %u\n", idxd->max_engines); 496bfe1d560SDave Jiang 497bfe1d560SDave Jiang /* read workqueue capabilities */ 498bfe1d560SDave Jiang idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); 499bfe1d560SDave Jiang dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); 500bfe1d560SDave Jiang idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; 501bfe1d560SDave Jiang dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); 502bfe1d560SDave Jiang idxd->max_wqs = idxd->hw.wq_cap.num_wqs; 503bfe1d560SDave Jiang dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); 504d98793b5SDave Jiang idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN); 505d98793b5SDave Jiang dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size); 506bfe1d560SDave Jiang 507bfe1d560SDave Jiang /* reading operation capabilities */ 508bfe1d560SDave Jiang for (i = 0; i < 4; i++) { 509bfe1d560SDave Jiang idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + 510bfe1d560SDave Jiang IDXD_OPCAP_OFFSET + i * sizeof(u64)); 511bfe1d560SDave Jiang dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); 512bfe1d560SDave Jiang } 513a8563a33SDave Jiang multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); 5149f0d99b3SDave Jiang 5159f0d99b3SDave Jiang /* read iaa cap */ 5169f0d99b3SDave Jiang if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) 5179f0d99b3SDave Jiang 
idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); 518bfe1d560SDave Jiang } 519bfe1d560SDave Jiang 520435b512dSDave Jiang static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) 521bfe1d560SDave Jiang { 522bfe1d560SDave Jiang struct device *dev = &pdev->dev; 523700af3a0SDave Jiang struct device *conf_dev; 524bfe1d560SDave Jiang struct idxd_device *idxd; 52547c16ac2SDave Jiang int rc; 526bfe1d560SDave Jiang 52747c16ac2SDave Jiang idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); 528bfe1d560SDave Jiang if (!idxd) 529bfe1d560SDave Jiang return NULL; 530bfe1d560SDave Jiang 531700af3a0SDave Jiang conf_dev = idxd_confdev(idxd); 532bfe1d560SDave Jiang idxd->pdev = pdev; 533435b512dSDave Jiang idxd->data = data; 534700af3a0SDave Jiang idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); 5354b73e4ebSDave Jiang idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); 53647c16ac2SDave Jiang if (idxd->id < 0) 53747c16ac2SDave Jiang return NULL; 53847c16ac2SDave Jiang 539a8563a33SDave Jiang idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); 540a8563a33SDave Jiang if (!idxd->opcap_bmap) { 541a8563a33SDave Jiang ida_free(&idxd_ida, idxd->id); 542a8563a33SDave Jiang return NULL; 543a8563a33SDave Jiang } 544a8563a33SDave Jiang 545700af3a0SDave Jiang device_initialize(conf_dev); 546700af3a0SDave Jiang conf_dev->parent = dev; 547700af3a0SDave Jiang conf_dev->bus = &dsa_bus_type; 548700af3a0SDave Jiang conf_dev->type = idxd->data->dev_type; 549700af3a0SDave Jiang rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); 55047c16ac2SDave Jiang if (rc < 0) { 551700af3a0SDave Jiang put_device(conf_dev); 55247c16ac2SDave Jiang return NULL; 55347c16ac2SDave Jiang } 55447c16ac2SDave Jiang 555bfe1d560SDave Jiang spin_lock_init(&idxd->dev_lock); 55653b2ee7fSDave Jiang spin_lock_init(&idxd->cmd_lock); 557bfe1d560SDave Jiang 558bfe1d560SDave Jiang return idxd; 559bfe1d560SDave Jiang 
} 560bfe1d560SDave Jiang 5618e50d392SDave Jiang static int idxd_enable_system_pasid(struct idxd_device *idxd) 5628e50d392SDave Jiang { 563f5ccf55eSJacob Pan struct pci_dev *pdev = idxd->pdev; 564f5ccf55eSJacob Pan struct device *dev = &pdev->dev; 565f5ccf55eSJacob Pan struct iommu_domain *domain; 566f5ccf55eSJacob Pan ioasid_t pasid; 567f5ccf55eSJacob Pan int ret; 568f5ccf55eSJacob Pan 569f5ccf55eSJacob Pan /* 570f5ccf55eSJacob Pan * Attach a global PASID to the DMA domain so that we can use ENQCMDS 571f5ccf55eSJacob Pan * to submit work on buffers mapped by DMA API. 572f5ccf55eSJacob Pan */ 573f5ccf55eSJacob Pan domain = iommu_get_domain_for_dev(dev); 574f5ccf55eSJacob Pan if (!domain) 575f5ccf55eSJacob Pan return -EPERM; 576f5ccf55eSJacob Pan 577f5ccf55eSJacob Pan pasid = iommu_alloc_global_pasid(dev); 578f5ccf55eSJacob Pan if (pasid == IOMMU_PASID_INVALID) 579f5ccf55eSJacob Pan return -ENOSPC; 580f5ccf55eSJacob Pan 581f5ccf55eSJacob Pan /* 582f5ccf55eSJacob Pan * DMA domain is owned by the driver, it should support all valid 583f5ccf55eSJacob Pan * types such as DMA-FQ, identity, etc. 
	 */
	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret) {
		dev_err(dev, "failed to attach device pasid %d, domain type %d",
			pasid, domain->type);
		iommu_free_global_pasid(pasid);
		return ret;
	}

	/* Since we set user privilege for kernel DMA, enable completion IRQ */
	idxd_set_user_intr(idxd, 1);
	idxd->pasid = pasid;

	return ret;
}

/*
 * Undo idxd_enable_system_pasid(): detach and free the global PASID and
 * turn the user-interrupt enable back off.
 */
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_free_global_pasid(idxd->pasid);

	idxd_set_user_intr(idxd, 0);
	idxd->sva = NULL;
	idxd->pasid = IOMMU_PASID_INVALID;
}

/* Enable the IOMMU IOPF then SVA features; IOPF is rolled back if SVA fails. */
static int idxd_enable_sva(struct pci_dev *pdev)
{
	int ret;

	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}

/* Disable SVA then IOPF — reverse of the enable order above. */
static void idxd_disable_sva(struct pci_dev *pdev)
{
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
}

/*
 * Core device bring-up after BAR mapping: reset the device, optionally
 * enable SVA/PASID (failures only degrade features, never fail probe),
 * read capabilities and table offsets, build internal state, load
 * read-only configs if applicable, and set up interrupts and perfmon.
 * Returns 0 or negative errno with everything unwound.
 */
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		if (idxd_enable_sva(pdev)) {
			dev_warn(dev, "Unable to turn on user SVA feature.\n");
		} else {
			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

			/* Kernel PASID is best-effort; user SVA still works without it. */
			rc = idxd_enable_system_pasid(idxd);
			if (rc)
				dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
			else
				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
		}
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	/* Perfmon failure is non-fatal: the device works without a PMU. */
	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_config:
	idxd_cleanup_internals(idxd);
 err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(pdev);
	return rc;
}

/* Full teardown mirroring idxd_probe(), in reverse order of setup. */
static void idxd_cleanup(struct idxd_device *idxd)
{
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(idxd->pdev);
}

/*
 * PCI probe entry point: enable the PCI function, allocate the idxd
 * context for the matched device type, map BAR 0, set DMA masks, and
 * run idxd_probe(). Registration and debugfs follow on success.
 */
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, data);
	if (!idxd) {
		rc = -ENOMEM;
		goto err_idxd_alloc;
	}

	dev_dbg(dev, "Mapping BARs\n");
736a39c7cd0SDave Jiang idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0); 737a39c7cd0SDave Jiang if (!idxd->reg_base) { 738a39c7cd0SDave Jiang rc = -ENOMEM; 739a39c7cd0SDave Jiang goto err_iomap; 740a39c7cd0SDave Jiang } 741bfe1d560SDave Jiang 742bfe1d560SDave Jiang dev_dbg(dev, "Set DMA masks\n"); 74353b50458SChristophe JAILLET rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 744bfe1d560SDave Jiang if (rc) 745a39c7cd0SDave Jiang goto err; 746bfe1d560SDave Jiang 747bfe1d560SDave Jiang dev_dbg(dev, "Set PCI master\n"); 748bfe1d560SDave Jiang pci_set_master(pdev); 749bfe1d560SDave Jiang pci_set_drvdata(pdev, idxd); 750bfe1d560SDave Jiang 751bfe1d560SDave Jiang idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); 752bfe1d560SDave Jiang rc = idxd_probe(idxd); 753bfe1d560SDave Jiang if (rc) { 754bfe1d560SDave Jiang dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); 755a39c7cd0SDave Jiang goto err; 756bfe1d560SDave Jiang } 757bfe1d560SDave Jiang 758979f6dedSTom Zanussi if (data->load_device_defaults) { 759979f6dedSTom Zanussi rc = data->load_device_defaults(idxd); 760979f6dedSTom Zanussi if (rc) 761979f6dedSTom Zanussi dev_warn(dev, "IDXD loading device defaults failed\n"); 762979f6dedSTom Zanussi } 763979f6dedSTom Zanussi 76447c16ac2SDave Jiang rc = idxd_register_devices(idxd); 765c52ca478SDave Jiang if (rc) { 766c52ca478SDave Jiang dev_err(dev, "IDXD sysfs setup failed\n"); 767ddf742d4SDave Jiang goto err_dev_register; 768c52ca478SDave Jiang } 769c52ca478SDave Jiang 7705fbe6503SDave Jiang rc = idxd_device_init_debugfs(idxd); 7715fbe6503SDave Jiang if (rc) 7725fbe6503SDave Jiang dev_warn(dev, "IDXD debugfs failed to setup\n"); 7735fbe6503SDave Jiang 774bfe1d560SDave Jiang dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", 775bfe1d560SDave Jiang idxd->hw.version); 776bfe1d560SDave Jiang 777bfe1d560SDave Jiang return 0; 778a39c7cd0SDave Jiang 779ddf742d4SDave Jiang err_dev_register: 780ddf742d4SDave Jiang idxd_cleanup(idxd); 
 err:
	pci_iounmap(pdev, idxd->reg_base);
 err_iomap:
	/* Dropping the last conf_dev ref frees the idxd context. */
	put_device(idxd_confdev(idxd));
 err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

/*
 * Quiesce every enabled kernel-type workqueue so no new in-kernel
 * descriptors are issued while the device is being suspended/torn down.
 */
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

/*
 * PCI shutdown hook: disable the device, quiesce its misc interrupt,
 * mask error interrupts, and drain the driver workqueue.
 */
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	/* Wait for any in-flight handler on the vector before masking. */
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

/*
 * PCI remove hook. Ordering is deliberate: unregister sub-devices and
 * the conf_dev first (holding an extra ref so the context survives),
 * then shut the hardware down, free IRQ resources, and finally drop the
 * last reference which frees the idxd context.
 */
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	idxd_device_remove_debugfs(idxd);

	irq_entry = idxd_get_ie(idxd, 0);
	free_irq(irq_entry->vector, irq_entry);
	pci_free_irq_vectors(pdev);
	pci_iounmap(pdev, idxd->reg_base);
	if (device_user_pasid_enabled(idxd))
		idxd_disable_sva(pdev);
	pci_disable_device(pdev);
	destroy_workqueue(idxd->wq);
	perfmon_pmu_remove(idxd);
	/* Drops the ref taken above; may free the idxd context. */
	put_device(idxd_confdev(idxd));
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

/*
 * Module init: verify required CPU features, then register the idxd
 * sub-drivers, char dev region, debugfs, and finally the PCI driver.
 */
static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device.
We can not utilize it. 865bfe1d560SDave Jiang */ 86674b2fc88SBorislav Petkov if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) { 867bfe1d560SDave Jiang pr_warn("idxd driver failed to load without MOVDIR64B.\n"); 868bfe1d560SDave Jiang return -ENODEV; 869bfe1d560SDave Jiang } 870bfe1d560SDave Jiang 87174b2fc88SBorislav Petkov if (!cpu_feature_enabled(X86_FEATURE_ENQCMD)) 8728e50d392SDave Jiang pr_warn("Platform does not have ENQCMD(S) support.\n"); 8738e50d392SDave Jiang else 8748e50d392SDave Jiang support_enqcmd = true; 875bfe1d560SDave Jiang 8760bde4444STom Zanussi perfmon_init(); 8770bde4444STom Zanussi 878034b3290SDave Jiang err = idxd_driver_register(&idxd_drv); 879034b3290SDave Jiang if (err < 0) 880034b3290SDave Jiang goto err_idxd_driver_register; 881034b3290SDave Jiang 8820cda4f69SDave Jiang err = idxd_driver_register(&idxd_dmaengine_drv); 8830cda4f69SDave Jiang if (err < 0) 8840cda4f69SDave Jiang goto err_idxd_dmaengine_driver_register; 8850cda4f69SDave Jiang 886448c3de8SDave Jiang err = idxd_driver_register(&idxd_user_drv); 887448c3de8SDave Jiang if (err < 0) 888448c3de8SDave Jiang goto err_idxd_user_driver_register; 889448c3de8SDave Jiang 89042d279f9SDave Jiang err = idxd_cdev_register(); 89142d279f9SDave Jiang if (err) 89242d279f9SDave Jiang goto err_cdev_register; 89342d279f9SDave Jiang 8945fbe6503SDave Jiang err = idxd_init_debugfs(); 8955fbe6503SDave Jiang if (err) 8965fbe6503SDave Jiang goto err_debugfs; 8975fbe6503SDave Jiang 898c52ca478SDave Jiang err = pci_register_driver(&idxd_pci_driver); 899c52ca478SDave Jiang if (err) 900c52ca478SDave Jiang goto err_pci_register; 901c52ca478SDave Jiang 902bfe1d560SDave Jiang return 0; 903c52ca478SDave Jiang 904c52ca478SDave Jiang err_pci_register: 9055fbe6503SDave Jiang idxd_remove_debugfs(); 9065fbe6503SDave Jiang err_debugfs: 90742d279f9SDave Jiang idxd_cdev_remove(); 90842d279f9SDave Jiang err_cdev_register: 909448c3de8SDave Jiang idxd_driver_unregister(&idxd_user_drv); 910448c3de8SDave Jiang 
err_idxd_user_driver_register: 9110cda4f69SDave Jiang idxd_driver_unregister(&idxd_dmaengine_drv); 9120cda4f69SDave Jiang err_idxd_dmaengine_driver_register: 913034b3290SDave Jiang idxd_driver_unregister(&idxd_drv); 914034b3290SDave Jiang err_idxd_driver_register: 915c52ca478SDave Jiang return err; 916bfe1d560SDave Jiang } 917bfe1d560SDave Jiang module_init(idxd_init_module); 918bfe1d560SDave Jiang 919bfe1d560SDave Jiang static void __exit idxd_exit_module(void) 920bfe1d560SDave Jiang { 921448c3de8SDave Jiang idxd_driver_unregister(&idxd_user_drv); 9220cda4f69SDave Jiang idxd_driver_unregister(&idxd_dmaengine_drv); 923034b3290SDave Jiang idxd_driver_unregister(&idxd_drv); 924bfe1d560SDave Jiang pci_unregister_driver(&idxd_pci_driver); 92542d279f9SDave Jiang idxd_cdev_remove(); 9260bde4444STom Zanussi perfmon_exit(); 9275fbe6503SDave Jiang idxd_remove_debugfs(); 928bfe1d560SDave Jiang } 929bfe1d560SDave Jiang module_exit(idxd_exit_module); 930