// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

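/*
 * Allocate and request the device's MSI-X vectors: vector 0 is reserved
 * for the misc/error handler and each remaining vector services one
 * work queue completion list.
 */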
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) *
			msixcnt, GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

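/*
 * Allocate the per-device software state (groups, work queues, engines)
 * sized from the limits cached by idxd_read_caps(), and create the
 * driver's workqueue.
 */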
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	init_waitqueue_head(&idxd->cmd_waitq);
	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		wq->idxd_cdev.minor = -1;
		wq->max_xfer_bytes = idxd->max_xfer_bytes;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq)
		return -ENOMEM;

	return 0;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

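/*
 * Read the GENCAP, GRPCAP, ENGCAP, WQCAP and OPCAP registers and cache
 * the limits (max transfer size, batch size, groups, tokens, engines,
 * work queues) that the rest of the driver sizes itself against.
 */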
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	idxd->nr_tokens = idxd->max_tokens;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

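/*
 * Device-level probe: reset the device, read its capabilities and table
 * offsets, set up software state and interrupts, then assign a device id
 * and char dev major.
 */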
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	idxd_device_init_reset(idxd);
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	idxd->major = idxd_cdev_get_major(idxd);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

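/*
 * PCI probe: map the MMIO BAR, set the DMA masks, allocate the idxd
 * context and hand off to idxd_probe() before setting up sysfs and
 * marking the device ready for configuration.
 */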
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	rc = idxd_setup_sysfs(idxd);
	if (rc) {
		dev_err(dev, "IDXD sysfs setup failed\n");
		return -ENODEV;
	}

	idxd->state = IDXD_DEV_CONF_READY;

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *itr;
	struct llist_node *head;

	head = llist_del_all(&ie->pending_llist);
	if (!head)
		return;

	llist_for_each_entry_safe(desc, itr, head, llnode) {
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_flush_work_list(struct idxd_irq_entry *ie)
{
	struct idxd_desc *desc, *iter;

	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
		list_del(&desc->list);
		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
		idxd_free_desc(desc->wq, desc);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}

	destroy_workqueue(idxd->wq);
}

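/*
 * Undo probe: remove the sysfs entries, quiesce the device and its
 * interrupts via idxd_shutdown(), then release the device id.
 */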
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_cleanup_sysfs(idxd);
	idxd_shutdown(pdev);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support write512, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = idxd_register_bus_type();
	if (err < 0)
		return err;

	err = idxd_register_driver();
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_cdev_remove();
err_cdev_register:
	idxd_unregister_driver();
err_idxd_driver_register:
	idxd_unregister_bus_type();
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_unregister_bus_type();
}
module_exit(idxd_exit_module);