1bfe1d560SDave Jiang // SPDX-License-Identifier: GPL-2.0 2bfe1d560SDave Jiang /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ 3bfe1d560SDave Jiang #include <linux/init.h> 4bfe1d560SDave Jiang #include <linux/kernel.h> 5bfe1d560SDave Jiang #include <linux/module.h> 6bfe1d560SDave Jiang #include <linux/slab.h> 7bfe1d560SDave Jiang #include <linux/pci.h> 8bfe1d560SDave Jiang #include <linux/interrupt.h> 9bfe1d560SDave Jiang #include <linux/delay.h> 10bfe1d560SDave Jiang #include <linux/dma-mapping.h> 11bfe1d560SDave Jiang #include <linux/workqueue.h> 12bfe1d560SDave Jiang #include <linux/fs.h> 13bfe1d560SDave Jiang #include <linux/io-64-nonatomic-lo-hi.h> 14bfe1d560SDave Jiang #include <linux/device.h> 15bfe1d560SDave Jiang #include <linux/idr.h> 168e50d392SDave Jiang #include <linux/iommu.h> 17bfe1d560SDave Jiang #include <uapi/linux/idxd.h> 188f47d1a5SDave Jiang #include <linux/dmaengine.h> 198f47d1a5SDave Jiang #include "../dmaengine.h" 20bfe1d560SDave Jiang #include "registers.h" 21bfe1d560SDave Jiang #include "idxd.h" 220bde4444STom Zanussi #include "perfmon.h" 23bfe1d560SDave Jiang 24bfe1d560SDave Jiang MODULE_VERSION(IDXD_DRIVER_VERSION); 25*6e2fb806SJeff Johnson MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver"); 26bfe1d560SDave Jiang MODULE_LICENSE("GPL v2"); 27bfe1d560SDave Jiang MODULE_AUTHOR("Intel Corporation"); 28d9e5481fSDave Jiang MODULE_IMPORT_NS(IDXD); 29bfe1d560SDave Jiang 3003d939c7SDave Jiang static bool sva = true; 3103d939c7SDave Jiang module_param(sva, bool, 0644); 3203d939c7SDave Jiang MODULE_PARM_DESC(sva, "Toggle SVA support on/off"); 3303d939c7SDave Jiang 34ade8a86bSDave Jiang bool tc_override; 35ade8a86bSDave Jiang module_param(tc_override, bool, 0644); 36ade8a86bSDave Jiang MODULE_PARM_DESC(tc_override, "Override traffic class defaults"); 37ade8a86bSDave Jiang 38bfe1d560SDave Jiang #define DRV_NAME "idxd" 39bfe1d560SDave Jiang 408e50d392SDave Jiang bool support_enqcmd; 
414b73e4ebSDave Jiang DEFINE_IDA(idxd_ida); 42bfe1d560SDave Jiang 43435b512dSDave Jiang static struct idxd_driver_data idxd_driver_data[] = { 44435b512dSDave Jiang [IDXD_TYPE_DSA] = { 45435b512dSDave Jiang .name_prefix = "dsa", 46435b512dSDave Jiang .type = IDXD_TYPE_DSA, 47435b512dSDave Jiang .compl_size = sizeof(struct dsa_completion_record), 48435b512dSDave Jiang .align = 32, 49435b512dSDave Jiang .dev_type = &dsa_device_type, 50c40bd7d9SDave Jiang .evl_cr_off = offsetof(struct dsa_evl_entry, cr), 51e11452ebSArjan van de Ven .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ 522442b747SDave Jiang .cr_status_off = offsetof(struct dsa_completion_record, status), 532442b747SDave Jiang .cr_result_off = offsetof(struct dsa_completion_record, result), 54435b512dSDave Jiang }, 55435b512dSDave Jiang [IDXD_TYPE_IAX] = { 56435b512dSDave Jiang .name_prefix = "iax", 57435b512dSDave Jiang .type = IDXD_TYPE_IAX, 58435b512dSDave Jiang .compl_size = sizeof(struct iax_completion_record), 59435b512dSDave Jiang .align = 64, 60435b512dSDave Jiang .dev_type = &iax_device_type, 61c40bd7d9SDave Jiang .evl_cr_off = offsetof(struct iax_evl_entry, cr), 62e11452ebSArjan van de Ven .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ 632442b747SDave Jiang .cr_status_off = offsetof(struct iax_completion_record, status), 642442b747SDave Jiang .cr_result_off = offsetof(struct iax_completion_record, error_code), 65979f6dedSTom Zanussi .load_device_defaults = idxd_load_iaa_device_defaults, 66435b512dSDave Jiang }, 67435b512dSDave Jiang }; 68435b512dSDave Jiang 69bfe1d560SDave Jiang static struct pci_device_id idxd_pci_tbl[] = { 70bfe1d560SDave Jiang /* DSA ver 1.0 platforms */ 71435b512dSDave Jiang { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) }, 72f25b4638SDave Jiang 73f25b4638SDave Jiang /* IAX ver 1.0 platforms */ 74435b512dSDave Jiang { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) }, 75bfe1d560SDave 
Jiang { 0, } 76bfe1d560SDave Jiang }; 77bfe1d560SDave Jiang MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); 78bfe1d560SDave Jiang 79bfe1d560SDave Jiang static int idxd_setup_interrupts(struct idxd_device *idxd) 80bfe1d560SDave Jiang { 81bfe1d560SDave Jiang struct pci_dev *pdev = idxd->pdev; 82bfe1d560SDave Jiang struct device *dev = &pdev->dev; 83ec0d6423SDave Jiang struct idxd_irq_entry *ie; 84bfe1d560SDave Jiang int i, msixcnt; 85bfe1d560SDave Jiang int rc = 0; 86bfe1d560SDave Jiang 87bfe1d560SDave Jiang msixcnt = pci_msix_vec_count(pdev); 88bfe1d560SDave Jiang if (msixcnt < 0) { 89bfe1d560SDave Jiang dev_err(dev, "Not MSI-X interrupt capable.\n"); 905fc8e85fSDave Jiang return -ENOSPC; 91bfe1d560SDave Jiang } 928b67426eSDave Jiang idxd->irq_cnt = msixcnt; 93bfe1d560SDave Jiang 945fc8e85fSDave Jiang rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX); 955fc8e85fSDave Jiang if (rc != msixcnt) { 965fc8e85fSDave Jiang dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc); 975fc8e85fSDave Jiang return -ENOSPC; 98bfe1d560SDave Jiang } 99bfe1d560SDave Jiang dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); 100bfe1d560SDave Jiang 101d5c10e0fSDave Jiang 102ec0d6423SDave Jiang ie = idxd_get_ie(idxd, 0); 103ec0d6423SDave Jiang ie->vector = pci_irq_vector(pdev, 0); 104ec0d6423SDave Jiang rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie); 105bfe1d560SDave Jiang if (rc < 0) { 106bfe1d560SDave Jiang dev_err(dev, "Failed to allocate misc interrupt.\n"); 1075fc8e85fSDave Jiang goto err_misc_irq; 108bfe1d560SDave Jiang } 109403a2e23SDave Jiang dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector); 110bfe1d560SDave Jiang 111ec0d6423SDave Jiang for (i = 0; i < idxd->max_wqs; i++) { 112ec0d6423SDave Jiang int msix_idx = i + 1; 113bfe1d560SDave Jiang 114ec0d6423SDave Jiang ie = idxd_get_ie(idxd, msix_idx); 115ec0d6423SDave Jiang ie->id = msix_idx; 116ec0d6423SDave Jiang ie->int_handle = INVALID_INT_HANDLE; 
117fffaed1eSJacob Pan ie->pasid = IOMMU_PASID_INVALID; 118403a2e23SDave Jiang 119ec0d6423SDave Jiang spin_lock_init(&ie->list_lock); 120ec0d6423SDave Jiang init_llist_head(&ie->pending_llist); 121ec0d6423SDave Jiang INIT_LIST_HEAD(&ie->work_list); 122bfe1d560SDave Jiang } 123bfe1d560SDave Jiang 124bfe1d560SDave Jiang idxd_unmask_error_interrupts(idxd); 125bfe1d560SDave Jiang return 0; 126bfe1d560SDave Jiang 1275fc8e85fSDave Jiang err_misc_irq: 128bfe1d560SDave Jiang idxd_mask_error_interrupts(idxd); 1295fc8e85fSDave Jiang pci_free_irq_vectors(pdev); 130bfe1d560SDave Jiang dev_err(dev, "No usable interrupts\n"); 131bfe1d560SDave Jiang return rc; 132bfe1d560SDave Jiang } 133bfe1d560SDave Jiang 134ddf742d4SDave Jiang static void idxd_cleanup_interrupts(struct idxd_device *idxd) 135ddf742d4SDave Jiang { 136ddf742d4SDave Jiang struct pci_dev *pdev = idxd->pdev; 137ec0d6423SDave Jiang struct idxd_irq_entry *ie; 138403a2e23SDave Jiang int msixcnt; 139ddf742d4SDave Jiang 140403a2e23SDave Jiang msixcnt = pci_msix_vec_count(pdev); 141403a2e23SDave Jiang if (msixcnt <= 0) 142403a2e23SDave Jiang return; 143ddf742d4SDave Jiang 144403a2e23SDave Jiang ie = idxd_get_ie(idxd, 0); 145ddf742d4SDave Jiang idxd_mask_error_interrupts(idxd); 146403a2e23SDave Jiang free_irq(ie->vector, ie); 147ddf742d4SDave Jiang pci_free_irq_vectors(pdev); 148ddf742d4SDave Jiang } 149ddf742d4SDave Jiang 1507c5dd23eSDave Jiang static int idxd_setup_wqs(struct idxd_device *idxd) 1517c5dd23eSDave Jiang { 1527c5dd23eSDave Jiang struct device *dev = &idxd->pdev->dev; 1537c5dd23eSDave Jiang struct idxd_wq *wq; 154700af3a0SDave Jiang struct device *conf_dev; 1557c5dd23eSDave Jiang int i, rc; 1567c5dd23eSDave Jiang 1577c5dd23eSDave Jiang idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *), 1587c5dd23eSDave Jiang GFP_KERNEL, dev_to_node(dev)); 1597c5dd23eSDave Jiang if (!idxd->wqs) 1607c5dd23eSDave Jiang return -ENOMEM; 1617c5dd23eSDave Jiang 162de5819b9SJerry Snitselaar idxd->wq_enable_map = 
bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); 163de5819b9SJerry Snitselaar if (!idxd->wq_enable_map) { 164de5819b9SJerry Snitselaar kfree(idxd->wqs); 165de5819b9SJerry Snitselaar return -ENOMEM; 166de5819b9SJerry Snitselaar } 167de5819b9SJerry Snitselaar 1687c5dd23eSDave Jiang for (i = 0; i < idxd->max_wqs; i++) { 1697c5dd23eSDave Jiang wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev)); 1707c5dd23eSDave Jiang if (!wq) { 1717c5dd23eSDave Jiang rc = -ENOMEM; 1727c5dd23eSDave Jiang goto err; 1737c5dd23eSDave Jiang } 1747c5dd23eSDave Jiang 175700af3a0SDave Jiang idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ); 176700af3a0SDave Jiang conf_dev = wq_confdev(wq); 1777c5dd23eSDave Jiang wq->id = i; 1787c5dd23eSDave Jiang wq->idxd = idxd; 179700af3a0SDave Jiang device_initialize(wq_confdev(wq)); 180700af3a0SDave Jiang conf_dev->parent = idxd_confdev(idxd); 181700af3a0SDave Jiang conf_dev->bus = &dsa_bus_type; 182700af3a0SDave Jiang conf_dev->type = &idxd_wq_device_type; 183700af3a0SDave Jiang rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); 1847c5dd23eSDave Jiang if (rc < 0) { 185700af3a0SDave Jiang put_device(conf_dev); 1867c5dd23eSDave Jiang goto err; 1877c5dd23eSDave Jiang } 1887c5dd23eSDave Jiang 1897c5dd23eSDave Jiang mutex_init(&wq->wq_lock); 19004922b74SDave Jiang init_waitqueue_head(&wq->err_queue); 19193a40a6dSDave Jiang init_completion(&wq->wq_dead); 19256fc39f5SDave Jiang init_completion(&wq->wq_resurrect); 19392452a72SDave Jiang wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; 194e8dbd644SXiaochen Shen idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); 1957930d855SDave Jiang wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; 1967c5dd23eSDave Jiang wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); 1977c5dd23eSDave Jiang if (!wq->wqcfg) { 198700af3a0SDave Jiang put_device(conf_dev); 1997c5dd23eSDave Jiang rc = -ENOMEM; 2007c5dd23eSDave Jiang goto err; 2017c5dd23eSDave Jiang } 202b0325aefSDave Jiang 
203b0325aefSDave Jiang if (idxd->hw.wq_cap.op_config) { 204b0325aefSDave Jiang wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); 205b0325aefSDave Jiang if (!wq->opcap_bmap) { 206b0325aefSDave Jiang put_device(conf_dev); 207b0325aefSDave Jiang rc = -ENOMEM; 208b0325aefSDave Jiang goto err; 209b0325aefSDave Jiang } 210b0325aefSDave Jiang bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); 211b0325aefSDave Jiang } 212b022f597SFenghua Yu mutex_init(&wq->uc_lock); 213b022f597SFenghua Yu xa_init(&wq->upasid_xa); 2147c5dd23eSDave Jiang idxd->wqs[i] = wq; 2157c5dd23eSDave Jiang } 2167c5dd23eSDave Jiang 2177c5dd23eSDave Jiang return 0; 2187c5dd23eSDave Jiang 2197c5dd23eSDave Jiang err: 220700af3a0SDave Jiang while (--i >= 0) { 221700af3a0SDave Jiang wq = idxd->wqs[i]; 222700af3a0SDave Jiang conf_dev = wq_confdev(wq); 223700af3a0SDave Jiang put_device(conf_dev); 224700af3a0SDave Jiang } 2257c5dd23eSDave Jiang return rc; 2267c5dd23eSDave Jiang } 2277c5dd23eSDave Jiang 22875b91130SDave Jiang static int idxd_setup_engines(struct idxd_device *idxd) 22975b91130SDave Jiang { 23075b91130SDave Jiang struct idxd_engine *engine; 23175b91130SDave Jiang struct device *dev = &idxd->pdev->dev; 232700af3a0SDave Jiang struct device *conf_dev; 23375b91130SDave Jiang int i, rc; 23475b91130SDave Jiang 23575b91130SDave Jiang idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *), 23675b91130SDave Jiang GFP_KERNEL, dev_to_node(dev)); 23775b91130SDave Jiang if (!idxd->engines) 23875b91130SDave Jiang return -ENOMEM; 23975b91130SDave Jiang 24075b91130SDave Jiang for (i = 0; i < idxd->max_engines; i++) { 24175b91130SDave Jiang engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev)); 24275b91130SDave Jiang if (!engine) { 24375b91130SDave Jiang rc = -ENOMEM; 24475b91130SDave Jiang goto err; 24575b91130SDave Jiang } 24675b91130SDave Jiang 247700af3a0SDave Jiang idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE); 248700af3a0SDave Jiang 
conf_dev = engine_confdev(engine); 24975b91130SDave Jiang engine->id = i; 25075b91130SDave Jiang engine->idxd = idxd; 251700af3a0SDave Jiang device_initialize(conf_dev); 252700af3a0SDave Jiang conf_dev->parent = idxd_confdev(idxd); 253700af3a0SDave Jiang conf_dev->bus = &dsa_bus_type; 254700af3a0SDave Jiang conf_dev->type = &idxd_engine_device_type; 255700af3a0SDave Jiang rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); 25675b91130SDave Jiang if (rc < 0) { 257700af3a0SDave Jiang put_device(conf_dev); 25875b91130SDave Jiang goto err; 25975b91130SDave Jiang } 26075b91130SDave Jiang 26175b91130SDave Jiang idxd->engines[i] = engine; 26275b91130SDave Jiang } 26375b91130SDave Jiang 26475b91130SDave Jiang return 0; 26575b91130SDave Jiang 26675b91130SDave Jiang err: 267700af3a0SDave Jiang while (--i >= 0) { 268700af3a0SDave Jiang engine = idxd->engines[i]; 269700af3a0SDave Jiang conf_dev = engine_confdev(engine); 270700af3a0SDave Jiang put_device(conf_dev); 271700af3a0SDave Jiang } 27275b91130SDave Jiang return rc; 27375b91130SDave Jiang } 27475b91130SDave Jiang 275defe49f9SDave Jiang static int idxd_setup_groups(struct idxd_device *idxd) 276defe49f9SDave Jiang { 277defe49f9SDave Jiang struct device *dev = &idxd->pdev->dev; 278700af3a0SDave Jiang struct device *conf_dev; 279defe49f9SDave Jiang struct idxd_group *group; 280defe49f9SDave Jiang int i, rc; 281defe49f9SDave Jiang 282defe49f9SDave Jiang idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *), 283defe49f9SDave Jiang GFP_KERNEL, dev_to_node(dev)); 284defe49f9SDave Jiang if (!idxd->groups) 285defe49f9SDave Jiang return -ENOMEM; 286defe49f9SDave Jiang 287defe49f9SDave Jiang for (i = 0; i < idxd->max_groups; i++) { 288defe49f9SDave Jiang group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev)); 289defe49f9SDave Jiang if (!group) { 290defe49f9SDave Jiang rc = -ENOMEM; 291defe49f9SDave Jiang goto err; 292defe49f9SDave Jiang } 293defe49f9SDave Jiang 294700af3a0SDave Jiang 
idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP); 295700af3a0SDave Jiang conf_dev = group_confdev(group); 296defe49f9SDave Jiang group->id = i; 297defe49f9SDave Jiang group->idxd = idxd; 298700af3a0SDave Jiang device_initialize(conf_dev); 299700af3a0SDave Jiang conf_dev->parent = idxd_confdev(idxd); 300700af3a0SDave Jiang conf_dev->bus = &dsa_bus_type; 301700af3a0SDave Jiang conf_dev->type = &idxd_group_device_type; 302700af3a0SDave Jiang rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); 303defe49f9SDave Jiang if (rc < 0) { 304700af3a0SDave Jiang put_device(conf_dev); 305defe49f9SDave Jiang goto err; 306defe49f9SDave Jiang } 307defe49f9SDave Jiang 308defe49f9SDave Jiang idxd->groups[i] = group; 3099735bde3SFenghua Yu if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) { 310ade8a86bSDave Jiang group->tc_a = 1; 311ade8a86bSDave Jiang group->tc_b = 1; 312ade8a86bSDave Jiang } else { 313defe49f9SDave Jiang group->tc_a = -1; 314defe49f9SDave Jiang group->tc_b = -1; 315defe49f9SDave Jiang } 316601bdadaSFenghua Yu /* 317601bdadaSFenghua Yu * The default value is the same as the value of 318601bdadaSFenghua Yu * total read buffers in GRPCAP. 
319601bdadaSFenghua Yu */ 320601bdadaSFenghua Yu group->rdbufs_allowed = idxd->max_rdbufs; 321ade8a86bSDave Jiang } 322defe49f9SDave Jiang 323defe49f9SDave Jiang return 0; 324defe49f9SDave Jiang 325defe49f9SDave Jiang err: 326700af3a0SDave Jiang while (--i >= 0) { 327700af3a0SDave Jiang group = idxd->groups[i]; 328700af3a0SDave Jiang put_device(group_confdev(group)); 329700af3a0SDave Jiang } 330defe49f9SDave Jiang return rc; 331defe49f9SDave Jiang } 332defe49f9SDave Jiang 333ddf742d4SDave Jiang static void idxd_cleanup_internals(struct idxd_device *idxd) 334ddf742d4SDave Jiang { 335ddf742d4SDave Jiang int i; 336ddf742d4SDave Jiang 337ddf742d4SDave Jiang for (i = 0; i < idxd->max_groups; i++) 338700af3a0SDave Jiang put_device(group_confdev(idxd->groups[i])); 339ddf742d4SDave Jiang for (i = 0; i < idxd->max_engines; i++) 340700af3a0SDave Jiang put_device(engine_confdev(idxd->engines[i])); 341ddf742d4SDave Jiang for (i = 0; i < idxd->max_wqs; i++) 342700af3a0SDave Jiang put_device(wq_confdev(idxd->wqs[i])); 343ddf742d4SDave Jiang destroy_workqueue(idxd->wq); 344ddf742d4SDave Jiang } 345ddf742d4SDave Jiang 3461649091fSDave Jiang static int idxd_init_evl(struct idxd_device *idxd) 3471649091fSDave Jiang { 3481649091fSDave Jiang struct device *dev = &idxd->pdev->dev; 349d3ea125dSFenghua Yu unsigned int evl_cache_size; 3501649091fSDave Jiang struct idxd_evl *evl; 351d3ea125dSFenghua Yu const char *idxd_name; 3521649091fSDave Jiang 3531649091fSDave Jiang if (idxd->hw.gen_cap.evl_support == 0) 3541649091fSDave Jiang return 0; 3551649091fSDave Jiang 3561649091fSDave Jiang evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev)); 3571649091fSDave Jiang if (!evl) 3581649091fSDave Jiang return -ENOMEM; 3591649091fSDave Jiang 360d5638de8SRex Zhang mutex_init(&evl->lock); 3611649091fSDave Jiang evl->size = IDXD_EVL_SIZE_MIN; 362c2f156bfSDave Jiang 363d3ea125dSFenghua Yu idxd_name = dev_name(idxd_confdev(idxd)); 364d3ea125dSFenghua Yu evl_cache_size = sizeof(struct 
idxd_evl_fault) + evl_ent_size(idxd); 365d3ea125dSFenghua Yu /* 366d3ea125dSFenghua Yu * Since completion record in evl_cache will be copied to user 367d3ea125dSFenghua Yu * when handling completion record page fault, need to create 368d3ea125dSFenghua Yu * the cache suitable for user copy. 369d3ea125dSFenghua Yu */ 370d3ea125dSFenghua Yu idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size, 371d3ea125dSFenghua Yu 0, 0, 0, evl_cache_size, 372d3ea125dSFenghua Yu NULL); 373c2f156bfSDave Jiang if (!idxd->evl_cache) { 374c2f156bfSDave Jiang kfree(evl); 375c2f156bfSDave Jiang return -ENOMEM; 376c2f156bfSDave Jiang } 377c2f156bfSDave Jiang 3781649091fSDave Jiang idxd->evl = evl; 3791649091fSDave Jiang return 0; 3801649091fSDave Jiang } 3811649091fSDave Jiang 382bfe1d560SDave Jiang static int idxd_setup_internals(struct idxd_device *idxd) 383bfe1d560SDave Jiang { 384bfe1d560SDave Jiang struct device *dev = &idxd->pdev->dev; 385defe49f9SDave Jiang int rc, i; 386bfe1d560SDave Jiang 3870d5c10b4SDave Jiang init_waitqueue_head(&idxd->cmd_waitq); 3887c5dd23eSDave Jiang 3897c5dd23eSDave Jiang rc = idxd_setup_wqs(idxd); 3907c5dd23eSDave Jiang if (rc < 0) 391eb15e715SDave Jiang goto err_wqs; 3927c5dd23eSDave Jiang 39375b91130SDave Jiang rc = idxd_setup_engines(idxd); 39475b91130SDave Jiang if (rc < 0) 39575b91130SDave Jiang goto err_engine; 39675b91130SDave Jiang 397defe49f9SDave Jiang rc = idxd_setup_groups(idxd); 398defe49f9SDave Jiang if (rc < 0) 399defe49f9SDave Jiang goto err_group; 400bfe1d560SDave Jiang 4010d5c10b4SDave Jiang idxd->wq = create_workqueue(dev_name(dev)); 4027c5dd23eSDave Jiang if (!idxd->wq) { 4037c5dd23eSDave Jiang rc = -ENOMEM; 404defe49f9SDave Jiang goto err_wkq_create; 4057c5dd23eSDave Jiang } 4060d5c10b4SDave Jiang 4071649091fSDave Jiang rc = idxd_init_evl(idxd); 4081649091fSDave Jiang if (rc < 0) 4091649091fSDave Jiang goto err_evl; 4101649091fSDave Jiang 411bfe1d560SDave Jiang return 0; 4127c5dd23eSDave Jiang 4131649091fSDave Jiang 
err_evl: 4141649091fSDave Jiang destroy_workqueue(idxd->wq); 415defe49f9SDave Jiang err_wkq_create: 416defe49f9SDave Jiang for (i = 0; i < idxd->max_groups; i++) 417700af3a0SDave Jiang put_device(group_confdev(idxd->groups[i])); 418defe49f9SDave Jiang err_group: 41975b91130SDave Jiang for (i = 0; i < idxd->max_engines; i++) 420700af3a0SDave Jiang put_device(engine_confdev(idxd->engines[i])); 42175b91130SDave Jiang err_engine: 4227c5dd23eSDave Jiang for (i = 0; i < idxd->max_wqs; i++) 423700af3a0SDave Jiang put_device(wq_confdev(idxd->wqs[i])); 424eb15e715SDave Jiang err_wqs: 4257c5dd23eSDave Jiang return rc; 426bfe1d560SDave Jiang } 427bfe1d560SDave Jiang 428bfe1d560SDave Jiang static void idxd_read_table_offsets(struct idxd_device *idxd) 429bfe1d560SDave Jiang { 430bfe1d560SDave Jiang union offsets_reg offsets; 431bfe1d560SDave Jiang struct device *dev = &idxd->pdev->dev; 432bfe1d560SDave Jiang 433bfe1d560SDave Jiang offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); 4342f8417a9SDave Jiang offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64)); 4352f8417a9SDave Jiang idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT; 436bfe1d560SDave Jiang dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); 4372f8417a9SDave Jiang idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT; 4382f8417a9SDave Jiang dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset); 4392f8417a9SDave Jiang idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT; 4402f8417a9SDave Jiang dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset); 4412f8417a9SDave Jiang idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT; 442bfe1d560SDave Jiang dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); 443bfe1d560SDave Jiang } 444bfe1d560SDave Jiang 44534ca0066SDave Jiang void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count) 446a8563a33SDave Jiang { 447a8563a33SDave Jiang int i, j, nr; 
448a8563a33SDave Jiang 449a8563a33SDave Jiang for (i = 0, nr = 0; i < count; i++) { 450a8563a33SDave Jiang for (j = 0; j < BITS_PER_LONG_LONG; j++) { 451a8563a33SDave Jiang if (val[i] & BIT(j)) 452a8563a33SDave Jiang set_bit(nr, bmap); 453a8563a33SDave Jiang nr++; 454a8563a33SDave Jiang } 455a8563a33SDave Jiang } 456a8563a33SDave Jiang } 457a8563a33SDave Jiang 458bfe1d560SDave Jiang static void idxd_read_caps(struct idxd_device *idxd) 459bfe1d560SDave Jiang { 460bfe1d560SDave Jiang struct device *dev = &idxd->pdev->dev; 461bfe1d560SDave Jiang int i; 462bfe1d560SDave Jiang 463bfe1d560SDave Jiang /* reading generic capabilities */ 464bfe1d560SDave Jiang idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); 465bfe1d560SDave Jiang dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); 466eb15e715SDave Jiang 467eb15e715SDave Jiang if (idxd->hw.gen_cap.cmd_cap) { 468eb15e715SDave Jiang idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET); 469eb15e715SDave Jiang dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap); 470eb15e715SDave Jiang } 471eb15e715SDave Jiang 4728b67426eSDave Jiang /* reading command capabilities */ 4738b67426eSDave Jiang if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) 4748b67426eSDave Jiang idxd->request_int_handles = true; 4758b67426eSDave Jiang 476bfe1d560SDave Jiang idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; 477bfe1d560SDave Jiang dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); 478e8dbd644SXiaochen Shen idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); 479bfe1d560SDave Jiang dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); 480bfe1d560SDave Jiang if (idxd->hw.gen_cap.config_en) 481bfe1d560SDave Jiang set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); 482bfe1d560SDave Jiang 483bfe1d560SDave Jiang /* reading group capabilities */ 484bfe1d560SDave Jiang idxd->hw.group_cap.bits = 485bfe1d560SDave Jiang ioread64(idxd->reg_base + 
IDXD_GRPCAP_OFFSET); 486bfe1d560SDave Jiang dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); 487bfe1d560SDave Jiang idxd->max_groups = idxd->hw.group_cap.num_groups; 488bfe1d560SDave Jiang dev_dbg(dev, "max groups: %u\n", idxd->max_groups); 4897ed6f1b8SDave Jiang idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; 4907ed6f1b8SDave Jiang dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); 4917ed6f1b8SDave Jiang idxd->nr_rdbufs = idxd->max_rdbufs; 492bfe1d560SDave Jiang 493bfe1d560SDave Jiang /* read engine capabilities */ 494bfe1d560SDave Jiang idxd->hw.engine_cap.bits = 495bfe1d560SDave Jiang ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); 496bfe1d560SDave Jiang dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); 497bfe1d560SDave Jiang idxd->max_engines = idxd->hw.engine_cap.num_engines; 498bfe1d560SDave Jiang dev_dbg(dev, "max engines: %u\n", idxd->max_engines); 499bfe1d560SDave Jiang 500bfe1d560SDave Jiang /* read workqueue capabilities */ 501bfe1d560SDave Jiang idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); 502bfe1d560SDave Jiang dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); 503bfe1d560SDave Jiang idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; 504bfe1d560SDave Jiang dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); 505bfe1d560SDave Jiang idxd->max_wqs = idxd->hw.wq_cap.num_wqs; 506bfe1d560SDave Jiang dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); 507d98793b5SDave Jiang idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN); 508d98793b5SDave Jiang dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size); 509bfe1d560SDave Jiang 510bfe1d560SDave Jiang /* reading operation capabilities */ 511bfe1d560SDave Jiang for (i = 0; i < 4; i++) { 512bfe1d560SDave Jiang idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + 513bfe1d560SDave Jiang IDXD_OPCAP_OFFSET + i * sizeof(u64)); 514bfe1d560SDave Jiang dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); 515bfe1d560SDave Jiang 
} 516a8563a33SDave Jiang multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); 5179f0d99b3SDave Jiang 5189f0d99b3SDave Jiang /* read iaa cap */ 5199f0d99b3SDave Jiang if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2) 5209f0d99b3SDave Jiang idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET); 521bfe1d560SDave Jiang } 522bfe1d560SDave Jiang 523435b512dSDave Jiang static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) 524bfe1d560SDave Jiang { 525bfe1d560SDave Jiang struct device *dev = &pdev->dev; 526700af3a0SDave Jiang struct device *conf_dev; 527bfe1d560SDave Jiang struct idxd_device *idxd; 52847c16ac2SDave Jiang int rc; 529bfe1d560SDave Jiang 53047c16ac2SDave Jiang idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev)); 531bfe1d560SDave Jiang if (!idxd) 532bfe1d560SDave Jiang return NULL; 533bfe1d560SDave Jiang 534700af3a0SDave Jiang conf_dev = idxd_confdev(idxd); 535bfe1d560SDave Jiang idxd->pdev = pdev; 536435b512dSDave Jiang idxd->data = data; 537700af3a0SDave Jiang idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); 5384b73e4ebSDave Jiang idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); 53947c16ac2SDave Jiang if (idxd->id < 0) 54047c16ac2SDave Jiang return NULL; 54147c16ac2SDave Jiang 542a8563a33SDave Jiang idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); 543a8563a33SDave Jiang if (!idxd->opcap_bmap) { 544a8563a33SDave Jiang ida_free(&idxd_ida, idxd->id); 545a8563a33SDave Jiang return NULL; 546a8563a33SDave Jiang } 547a8563a33SDave Jiang 548700af3a0SDave Jiang device_initialize(conf_dev); 549700af3a0SDave Jiang conf_dev->parent = dev; 550700af3a0SDave Jiang conf_dev->bus = &dsa_bus_type; 551700af3a0SDave Jiang conf_dev->type = idxd->data->dev_type; 552700af3a0SDave Jiang rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); 55347c16ac2SDave Jiang if (rc < 0) { 554700af3a0SDave Jiang 
put_device(conf_dev); 55547c16ac2SDave Jiang return NULL; 55647c16ac2SDave Jiang } 55747c16ac2SDave Jiang 558bfe1d560SDave Jiang spin_lock_init(&idxd->dev_lock); 55953b2ee7fSDave Jiang spin_lock_init(&idxd->cmd_lock); 560bfe1d560SDave Jiang 561bfe1d560SDave Jiang return idxd; 562bfe1d560SDave Jiang } 563bfe1d560SDave Jiang 5648e50d392SDave Jiang static int idxd_enable_system_pasid(struct idxd_device *idxd) 5658e50d392SDave Jiang { 566f5ccf55eSJacob Pan struct pci_dev *pdev = idxd->pdev; 567f5ccf55eSJacob Pan struct device *dev = &pdev->dev; 568f5ccf55eSJacob Pan struct iommu_domain *domain; 569f5ccf55eSJacob Pan ioasid_t pasid; 570f5ccf55eSJacob Pan int ret; 571f5ccf55eSJacob Pan 572f5ccf55eSJacob Pan /* 573f5ccf55eSJacob Pan * Attach a global PASID to the DMA domain so that we can use ENQCMDS 574f5ccf55eSJacob Pan * to submit work on buffers mapped by DMA API. 575f5ccf55eSJacob Pan */ 576f5ccf55eSJacob Pan domain = iommu_get_domain_for_dev(dev); 577f5ccf55eSJacob Pan if (!domain) 578f5ccf55eSJacob Pan return -EPERM; 579f5ccf55eSJacob Pan 580f5ccf55eSJacob Pan pasid = iommu_alloc_global_pasid(dev); 581f5ccf55eSJacob Pan if (pasid == IOMMU_PASID_INVALID) 582f5ccf55eSJacob Pan return -ENOSPC; 583f5ccf55eSJacob Pan 584f5ccf55eSJacob Pan /* 585f5ccf55eSJacob Pan * DMA domain is owned by the driver, it should support all valid 586f5ccf55eSJacob Pan * types such as DMA-FQ, identity, etc. 
587f5ccf55eSJacob Pan */ 588f5ccf55eSJacob Pan ret = iommu_attach_device_pasid(domain, dev, pasid); 589f5ccf55eSJacob Pan if (ret) { 590f5ccf55eSJacob Pan dev_err(dev, "failed to attach device pasid %d, domain type %d", 591f5ccf55eSJacob Pan pasid, domain->type); 592f5ccf55eSJacob Pan iommu_free_global_pasid(pasid); 593f5ccf55eSJacob Pan return ret; 594f5ccf55eSJacob Pan } 595f5ccf55eSJacob Pan 596f5ccf55eSJacob Pan /* Since we set user privilege for kernel DMA, enable completion IRQ */ 597f5ccf55eSJacob Pan idxd_set_user_intr(idxd, 1); 598f5ccf55eSJacob Pan idxd->pasid = pasid; 599f5ccf55eSJacob Pan 600f5ccf55eSJacob Pan return ret; 6018e50d392SDave Jiang } 6028e50d392SDave Jiang 6038e50d392SDave Jiang static void idxd_disable_system_pasid(struct idxd_device *idxd) 6048e50d392SDave Jiang { 605f5ccf55eSJacob Pan struct pci_dev *pdev = idxd->pdev; 606f5ccf55eSJacob Pan struct device *dev = &pdev->dev; 607f5ccf55eSJacob Pan struct iommu_domain *domain; 6088e50d392SDave Jiang 609f5ccf55eSJacob Pan domain = iommu_get_domain_for_dev(dev); 610f5ccf55eSJacob Pan if (!domain) 611f5ccf55eSJacob Pan return; 612f5ccf55eSJacob Pan 613f5ccf55eSJacob Pan iommu_detach_device_pasid(domain, dev, idxd->pasid); 614f5ccf55eSJacob Pan iommu_free_global_pasid(idxd->pasid); 615f5ccf55eSJacob Pan 616f5ccf55eSJacob Pan idxd_set_user_intr(idxd, 0); 6178e50d392SDave Jiang idxd->sva = NULL; 618f5ccf55eSJacob Pan idxd->pasid = IOMMU_PASID_INVALID; 6198e50d392SDave Jiang } 6208e50d392SDave Jiang 62184c9ef72SLu Baolu static int idxd_enable_sva(struct pci_dev *pdev) 62284c9ef72SLu Baolu { 62384c9ef72SLu Baolu int ret; 62484c9ef72SLu Baolu 62584c9ef72SLu Baolu ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF); 62684c9ef72SLu Baolu if (ret) 62784c9ef72SLu Baolu return ret; 62884c9ef72SLu Baolu 62984c9ef72SLu Baolu ret = iommu_dev_enable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA); 63084c9ef72SLu Baolu if (ret) 63184c9ef72SLu Baolu iommu_dev_disable_feature(&pdev->dev, 
IOMMU_DEV_FEAT_IOPF);
63284c9ef72SLu Baolu 
63384c9ef72SLu Baolu 	return ret;
63484c9ef72SLu Baolu }
63584c9ef72SLu Baolu 
/* Undo idxd_enable_sva(): drop the SVA and I/O page fault IOMMU features. */
63684c9ef72SLu Baolu static void idxd_disable_sva(struct pci_dev *pdev)
63784c9ef72SLu Baolu {
63884c9ef72SLu Baolu 	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
63984c9ef72SLu Baolu 	iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_IOPF);
64084c9ef72SLu Baolu }
64184c9ef72SLu Baolu 
/*
 * idxd_probe - bring a reset device up to a fully initialized state
 * @idxd: device context previously set up by idxd_alloc()
 *
 * Resets the device, optionally enables SVA/PASID (gated on
 * CONFIG_INTEL_IDXD_SVM and the "sva" module parameter), reads the
 * capability and table-offset registers, and sets up internal state and
 * interrupts.  perfmon initialization failure is only a warning, not fatal.
 *
 * Return: 0 on success, negative errno on failure.  On failure, internals
 * and any enabled PASID/SVA state are unwound via the labels at the bottom.
 */
642bfe1d560SDave Jiang static int idxd_probe(struct idxd_device *idxd)
643bfe1d560SDave Jiang {
644bfe1d560SDave Jiang 	struct pci_dev *pdev = idxd->pdev;
645bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
646bfe1d560SDave Jiang 	int rc;
647bfe1d560SDave Jiang 
648bfe1d560SDave Jiang 	dev_dbg(dev, "%s entered and resetting device\n", __func__);
64989e3becdSDave Jiang 	rc = idxd_device_init_reset(idxd);
65089e3becdSDave Jiang 	if (rc < 0)
65189e3becdSDave Jiang 		return rc;
65289e3becdSDave Jiang 
653bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD reset complete\n");
654bfe1d560SDave Jiang 
	/* SVA/PASID support is best-effort: probe continues without it. */
65503d939c7SDave Jiang 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
65684c9ef72SLu Baolu 		if (idxd_enable_sva(pdev)) {
65742a1b738SDave Jiang 			dev_warn(dev, "Unable to turn on user SVA feature.\n");
6588ffccd11SJerry Snitselaar 		} else {
65942a1b738SDave Jiang 			set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
66042a1b738SDave Jiang 
			/* Kernel (system) PASID is separate from user PASID support. */
661f5ccf55eSJacob Pan 			rc = idxd_enable_system_pasid(idxd);
662f5ccf55eSJacob Pan 			if (rc)
663f5ccf55eSJacob Pan 				dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
66442a1b738SDave Jiang 			else
6658e50d392SDave Jiang 				set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
6668ffccd11SJerry Snitselaar 		}
66703d939c7SDave Jiang 	} else if (!sva) {
66803d939c7SDave Jiang 		dev_warn(dev, "User forced SVA off via module param.\n");
6698e50d392SDave Jiang 	}
6708e50d392SDave Jiang 
671bfe1d560SDave Jiang 	idxd_read_caps(idxd);
672bfe1d560SDave Jiang 	idxd_read_table_offsets(idxd);
673bfe1d560SDave Jiang 
674bfe1d560SDave Jiang 	rc = idxd_setup_internals(idxd);
675bfe1d560SDave Jiang 	if (rc)
6767c5dd23eSDave Jiang 		goto err;
677bfe1d560SDave Jiang 
6788c66bbdcSDave Jiang 	/* If the configs are readonly, then load them from device */
6798c66bbdcSDave Jiang 	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
6808c66bbdcSDave Jiang 		dev_dbg(dev, "Loading RO device config\n");
6818c66bbdcSDave Jiang 		rc = idxd_device_load_config(idxd);
6828c66bbdcSDave Jiang 		if (rc < 0)
683ddf742d4SDave Jiang 			goto err_config;
6848c66bbdcSDave Jiang 	}
6858c66bbdcSDave Jiang 
686bfe1d560SDave Jiang 	rc = idxd_setup_interrupts(idxd);
687bfe1d560SDave Jiang 	if (rc)
688ddf742d4SDave Jiang 		goto err_config;
689bfe1d560SDave Jiang 
69042d279f9SDave Jiang 	idxd->major = idxd_cdev_get_major(idxd);
69142d279f9SDave Jiang 
6920bde4444STom Zanussi 	rc = perfmon_pmu_init(idxd);
6930bde4444STom Zanussi 	if (rc < 0)
6940bde4444STom Zanussi 		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
6950bde4444STom Zanussi 
696bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
697bfe1d560SDave Jiang 	return 0;
698bfe1d560SDave Jiang 
	/* Unwind in reverse order of setup. */
699ddf742d4SDave Jiang  err_config:
700ddf742d4SDave Jiang 	idxd_cleanup_internals(idxd);
7017c5dd23eSDave Jiang  err:
7028e50d392SDave Jiang 	if (device_pasid_enabled(idxd))
7038e50d392SDave Jiang 		idxd_disable_system_pasid(idxd);
70442a1b738SDave Jiang 	if (device_user_pasid_enabled(idxd))
70584c9ef72SLu Baolu 		idxd_disable_sva(pdev);
706bfe1d560SDave Jiang 	return rc;
707bfe1d560SDave Jiang }
708bfe1d560SDave Jiang 
/*
 * Full unwind of a successful idxd_probe(): perfmon, interrupts, internals,
 * then system PASID and SVA, in reverse order of initialization.
 */
709ddf742d4SDave Jiang static void idxd_cleanup(struct idxd_device *idxd)
710ddf742d4SDave Jiang {
711ddf742d4SDave Jiang 	perfmon_pmu_remove(idxd);
712ddf742d4SDave Jiang 	idxd_cleanup_interrupts(idxd);
713ddf742d4SDave Jiang 	idxd_cleanup_internals(idxd);
714ddf742d4SDave Jiang 	if (device_pasid_enabled(idxd))
715ddf742d4SDave Jiang 		idxd_disable_system_pasid(idxd);
71642a1b738SDave Jiang 	if (device_user_pasid_enabled(idxd))
71784c9ef72SLu Baolu 		idxd_disable_sva(idxd->pdev);
718ddf742d4SDave Jiang }
719ddf742d4SDave Jiang 
/*
 * idxd_pci_probe - PCI .probe callback
 * @pdev: the PCI device being bound
 * @id: matched entry from idxd_pci_tbl; ->driver_data carries the
 *      per-device-type idxd_driver_data
 *
 * Enables the PCI device, allocates the idxd context, maps the MMIO BAR,
 * sets a 64-bit DMA mask, runs idxd_probe(), optionally loads device
 * defaults (failure is only a warning), then registers the sysfs devices
 * and debugfs entries.  The error labels unwind in reverse order of
 * acquisition.
 *
 * Return: 0 on success, negative errno on failure.
 */
720bfe1d560SDave Jiang static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
721bfe1d560SDave Jiang {
722bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
723bfe1d560SDave Jiang 	struct idxd_device *idxd;
724435b512dSDave Jiang 	struct idxd_driver_data *data = (struct idxd_driver_data *)id->driver_data;
725bfe1d560SDave Jiang 	int rc;
726bfe1d560SDave Jiang 
727a39c7cd0SDave Jiang 	rc = pci_enable_device(pdev);
728bfe1d560SDave Jiang 	if (rc)
729bfe1d560SDave Jiang 		return rc;
730bfe1d560SDave Jiang 
7318e50d392SDave Jiang 	dev_dbg(dev, "Alloc IDXD context\n");
732435b512dSDave Jiang 	idxd = idxd_alloc(pdev, data);
733a39c7cd0SDave Jiang 	if (!idxd) {
734a39c7cd0SDave Jiang 		rc = -ENOMEM;
735a39c7cd0SDave Jiang 		goto err_idxd_alloc;
736a39c7cd0SDave Jiang 	}
737bfe1d560SDave Jiang 
7388e50d392SDave Jiang 	dev_dbg(dev, "Mapping BARs\n");
739a39c7cd0SDave Jiang 	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
740a39c7cd0SDave Jiang 	if (!idxd->reg_base) {
741a39c7cd0SDave Jiang 		rc = -ENOMEM;
742a39c7cd0SDave Jiang 		goto err_iomap;
743a39c7cd0SDave Jiang 	}
744bfe1d560SDave Jiang 
745bfe1d560SDave Jiang 	dev_dbg(dev, "Set DMA masks\n");
74653b50458SChristophe JAILLET 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
747bfe1d560SDave Jiang 	if (rc)
748a39c7cd0SDave Jiang 		goto err;
749bfe1d560SDave Jiang 
750bfe1d560SDave Jiang 	dev_dbg(dev, "Set PCI master\n");
751bfe1d560SDave Jiang 	pci_set_master(pdev);
752bfe1d560SDave Jiang 	pci_set_drvdata(pdev, idxd);
753bfe1d560SDave Jiang 
754bfe1d560SDave Jiang 	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
755bfe1d560SDave Jiang 	rc = idxd_probe(idxd);
756bfe1d560SDave Jiang 	if (rc) {
757bfe1d560SDave Jiang 		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
758a39c7cd0SDave Jiang 		goto err;
759bfe1d560SDave Jiang 	}
760bfe1d560SDave Jiang 
	/* Device-type-specific defaults (e.g. IAA); failure is non-fatal. */
761979f6dedSTom Zanussi 	if (data->load_device_defaults) {
762979f6dedSTom Zanussi 		rc = data->load_device_defaults(idxd);
763979f6dedSTom Zanussi 		if (rc)
764979f6dedSTom Zanussi 			dev_warn(dev, "IDXD loading device defaults failed\n");
765979f6dedSTom Zanussi 	}
766979f6dedSTom Zanussi 
76747c16ac2SDave Jiang 	rc = idxd_register_devices(idxd);
768c52ca478SDave Jiang 	if (rc) {
769c52ca478SDave Jiang 		dev_err(dev, "IDXD sysfs setup failed\n");
770ddf742d4SDave Jiang 		goto err_dev_register;
771c52ca478SDave Jiang 	}
772c52ca478SDave Jiang 
7735fbe6503SDave Jiang 	rc = idxd_device_init_debugfs(idxd);
7745fbe6503SDave Jiang 	if (rc)
7755fbe6503SDave Jiang 		dev_warn(dev, "IDXD debugfs failed to setup\n");
7765fbe6503SDave Jiang 
777bfe1d560SDave Jiang 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
778bfe1d560SDave Jiang 		 idxd->hw.version);
779bfe1d560SDave Jiang 
780e11452ebSArjan van de Ven 	idxd->user_submission_safe = data->user_submission_safe;
781e11452ebSArjan van de Ven 
782bfe1d560SDave Jiang 	return 0;
783a39c7cd0SDave Jiang 
784ddf742d4SDave Jiang  err_dev_register:
785ddf742d4SDave Jiang 	idxd_cleanup(idxd);
786a39c7cd0SDave Jiang  err:
787a39c7cd0SDave Jiang 	pci_iounmap(pdev, idxd->reg_base);
788a39c7cd0SDave Jiang  err_iomap:
	/* Dropping the conf_dev reference releases the idxd context memory. */
789700af3a0SDave Jiang 	put_device(idxd_confdev(idxd));
790a39c7cd0SDave Jiang  err_idxd_alloc:
791a39c7cd0SDave Jiang 	pci_disable_device(pdev);
792a39c7cd0SDave Jiang 	return rc;
793bfe1d560SDave Jiang }
794bfe1d560SDave Jiang 
/* Quiesce every enabled kernel-type work queue on the device. */
7955b0c68c4SDave Jiang void idxd_wqs_quiesce(struct idxd_device *idxd)
7965b0c68c4SDave Jiang {
7975b0c68c4SDave Jiang 	struct idxd_wq *wq;
7985b0c68c4SDave Jiang 	int i;
7995b0c68c4SDave Jiang 
8005b0c68c4SDave Jiang 	for (i = 0; i < idxd->max_wqs; i++) {
8015b0c68c4SDave Jiang 		wq = idxd->wqs[i];
8025b0c68c4SDave Jiang 		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
8035b0c68c4SDave Jiang 			idxd_wq_quiesce(wq);
8045b0c68c4SDave Jiang 	}
8055b0c68c4SDave Jiang }
8065b0c68c4SDave Jiang 
/*
 * idxd_shutdown - PCI .shutdown callback
 *
 * Disables the device, waits for any in-flight handler on the misc
 * interrupt (idxd->ie) to finish, masks error interrupts, and flushes the
 * driver workqueue so no deferred work runs after shutdown.
 */
807bfe1d560SDave Jiang static void idxd_shutdown(struct pci_dev *pdev)
808bfe1d560SDave Jiang {
809bfe1d560SDave Jiang 	struct idxd_device *idxd = pci_get_drvdata(pdev);
810bfe1d560SDave Jiang 	struct idxd_irq_entry *irq_entry;
811403a2e23SDave Jiang 	int rc;
812bfe1d560SDave Jiang 
813bfe1d560SDave Jiang 	rc = idxd_device_disable(idxd);
814bfe1d560SDave Jiang 	if (rc)
815bfe1d560SDave Jiang 		dev_err(&pdev->dev, "Disabling device failed\n");
816bfe1d560SDave Jiang 
817403a2e23SDave Jiang 	irq_entry = &idxd->ie;
8185fc8e85fSDave Jiang 	synchronize_irq(irq_entry->vector);
819403a2e23SDave Jiang 	idxd_mask_error_interrupts(idxd);
82049c4959fSDave Jiang 	flush_workqueue(idxd->wq);
821bfe1d560SDave Jiang }
822bfe1d560SDave Jiang 
/*
 * idxd_remove - PCI .remove callback
 *
 * Tears everything down in roughly reverse probe order.  The extra
 * get_device()/put_device() pair around device_unregister() keeps the
 * idxd context alive for the duration of the teardown (see comment below).
 */
823bfe1d560SDave Jiang static void idxd_remove(struct pci_dev *pdev)
824bfe1d560SDave Jiang {
825bfe1d560SDave Jiang 	struct idxd_device *idxd = pci_get_drvdata(pdev);
82649c4959fSDave Jiang 	struct idxd_irq_entry *irq_entry;
827bfe1d560SDave Jiang 
82898da0106SDave Jiang 	idxd_unregister_devices(idxd);
82998da0106SDave Jiang 	/*
83098da0106SDave Jiang 	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
83198da0106SDave Jiang 	 * to the idxd context. The driver still needs those bits in order to do the rest of
83298da0106SDave Jiang 	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
83398da0106SDave Jiang 	 * on the device here to hold off the freeing while allowing the idxd sub-driver
83498da0106SDave Jiang 	 * to unbind.
83598da0106SDave Jiang 	 */
83698da0106SDave Jiang 	get_device(idxd_confdev(idxd));
83798da0106SDave Jiang 	device_unregister(idxd_confdev(idxd));
838bfe1d560SDave Jiang 	idxd_shutdown(pdev);
8398e50d392SDave Jiang 	if (device_pasid_enabled(idxd))
8408e50d392SDave Jiang 		idxd_disable_system_pasid(idxd);
8415fbe6503SDave Jiang 	idxd_device_remove_debugfs(idxd);
84249c4959fSDave Jiang 
843403a2e23SDave Jiang 	irq_entry = idxd_get_ie(idxd, 0);
84449c4959fSDave Jiang 	free_irq(irq_entry->vector, irq_entry);
84549c4959fSDave Jiang 	pci_free_irq_vectors(pdev);
84649c4959fSDave Jiang 	pci_iounmap(pdev, idxd->reg_base);
84742a1b738SDave Jiang 	if (device_user_pasid_enabled(idxd))
84884c9ef72SLu Baolu 		idxd_disable_sva(pdev);
84949c4959fSDave Jiang 	pci_disable_device(pdev);
85049c4959fSDave Jiang 	destroy_workqueue(idxd->wq);
85149c4959fSDave Jiang 	perfmon_pmu_remove(idxd);
	/* Final put: matches the get_device() above and frees the context. */
85298da0106SDave Jiang 	put_device(idxd_confdev(idxd));
853bfe1d560SDave Jiang }
854bfe1d560SDave Jiang 
855bfe1d560SDave Jiang static struct pci_driver idxd_pci_driver = {
856bfe1d560SDave Jiang 	.name		= DRV_NAME,
857bfe1d560SDave Jiang 	.id_table	= idxd_pci_tbl,
858bfe1d560SDave Jiang 	.probe		= idxd_pci_probe,
859bfe1d560SDave Jiang 	.remove		= idxd_remove,
860bfe1d560SDave Jiang 	.shutdown	= idxd_shutdown,
861bfe1d560SDave Jiang };
862bfe1d560SDave Jiang 
/*
 * Module init: requires MOVDIR64B (hard requirement); ENQCMD is optional
 * and only sets support_enqcmd.  Registers the three idxd sub-drivers,
 * the cdev region, debugfs, and finally the PCI driver, unwinding in
 * reverse order on failure.
 *
 * NOTE(review): the error paths below never call perfmon_exit() even
 * though perfmon_init() has already run and idxd_exit_module() does call
 * it — confirm whether perfmon_init() needs unwinding on failure.
 */
863bfe1d560SDave Jiang static int __init idxd_init_module(void)
864bfe1d560SDave Jiang {
8654b73e4ebSDave Jiang 	int err;
866bfe1d560SDave Jiang 
867bfe1d560SDave Jiang 	/*
8688e50d392SDave Jiang 	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
869bfe1d560SDave Jiang 	 * enumerating the device. We can not utilize it.
870bfe1d560SDave Jiang 	 */
87174b2fc88SBorislav Petkov 	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
872bfe1d560SDave Jiang 		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
873bfe1d560SDave Jiang 		return -ENODEV;
874bfe1d560SDave Jiang 	}
875bfe1d560SDave Jiang 
87674b2fc88SBorislav Petkov 	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
8778e50d392SDave Jiang 		pr_warn("Platform does not have ENQCMD(S) support.\n");
8788e50d392SDave Jiang 	else
8798e50d392SDave Jiang 		support_enqcmd = true;
880bfe1d560SDave Jiang 
8810bde4444STom Zanussi 	perfmon_init();
8820bde4444STom Zanussi 
883034b3290SDave Jiang 	err = idxd_driver_register(&idxd_drv);
884034b3290SDave Jiang 	if (err < 0)
885034b3290SDave Jiang 		goto err_idxd_driver_register;
886034b3290SDave Jiang 
8870cda4f69SDave Jiang 	err = idxd_driver_register(&idxd_dmaengine_drv);
8880cda4f69SDave Jiang 	if (err < 0)
8890cda4f69SDave Jiang 		goto err_idxd_dmaengine_driver_register;
8900cda4f69SDave Jiang 
891448c3de8SDave Jiang 	err = idxd_driver_register(&idxd_user_drv);
892448c3de8SDave Jiang 	if (err < 0)
893448c3de8SDave Jiang 		goto err_idxd_user_driver_register;
894448c3de8SDave Jiang 
89542d279f9SDave Jiang 	err = idxd_cdev_register();
89642d279f9SDave Jiang 	if (err)
89742d279f9SDave Jiang 		goto err_cdev_register;
89842d279f9SDave Jiang 
8995fbe6503SDave Jiang 	err = idxd_init_debugfs();
9005fbe6503SDave Jiang 	if (err)
9015fbe6503SDave Jiang 		goto err_debugfs;
9025fbe6503SDave Jiang 
903c52ca478SDave Jiang 	err = pci_register_driver(&idxd_pci_driver);
904c52ca478SDave Jiang 	if (err)
905c52ca478SDave Jiang 		goto err_pci_register;
906c52ca478SDave Jiang 
907bfe1d560SDave Jiang 	return 0;
908c52ca478SDave Jiang 
909c52ca478SDave Jiang err_pci_register:
9105fbe6503SDave Jiang 	idxd_remove_debugfs();
9115fbe6503SDave Jiang err_debugfs:
91242d279f9SDave Jiang 	idxd_cdev_remove();
91342d279f9SDave Jiang err_cdev_register:
914448c3de8SDave Jiang 	idxd_driver_unregister(&idxd_user_drv);
915448c3de8SDave Jiang err_idxd_user_driver_register:
9160cda4f69SDave Jiang 	idxd_driver_unregister(&idxd_dmaengine_drv);
9170cda4f69SDave Jiang err_idxd_dmaengine_driver_register:
918034b3290SDave Jiang 	idxd_driver_unregister(&idxd_drv);
919034b3290SDave Jiang err_idxd_driver_register:
920c52ca478SDave Jiang 	return err;
921bfe1d560SDave Jiang }
922bfe1d560SDave Jiang module_init(idxd_init_module);
923bfe1d560SDave Jiang 
/* Module exit: mirror of idxd_init_module(), in reverse order. */
924bfe1d560SDave Jiang static void __exit idxd_exit_module(void)
925bfe1d560SDave Jiang {
926448c3de8SDave Jiang 	idxd_driver_unregister(&idxd_user_drv);
9270cda4f69SDave Jiang 	idxd_driver_unregister(&idxd_dmaengine_drv);
928034b3290SDave Jiang 	idxd_driver_unregister(&idxd_drv);
929bfe1d560SDave Jiang 	pci_unregister_driver(&idxd_pci_driver);
93042d279f9SDave Jiang 	idxd_cdev_remove();
9310bde4444STom Zanussi 	perfmon_exit();
9325fbe6503SDave Jiang 	idxd_remove_debugfs();
933bfe1d560SDave Jiang }
934bfe1d560SDave Jiang module_exit(idxd_exit_module);
935