1*c0f28ce6SDave Jiang /* 2*c0f28ce6SDave Jiang * Intel I/OAT DMA Linux driver 3*c0f28ce6SDave Jiang * Copyright(c) 2004 - 2015 Intel Corporation. 4*c0f28ce6SDave Jiang * 5*c0f28ce6SDave Jiang * This program is free software; you can redistribute it and/or modify it 6*c0f28ce6SDave Jiang * under the terms and conditions of the GNU General Public License, 7*c0f28ce6SDave Jiang * version 2, as published by the Free Software Foundation. 8*c0f28ce6SDave Jiang * 9*c0f28ce6SDave Jiang * This program is distributed in the hope that it will be useful, but WITHOUT 10*c0f28ce6SDave Jiang * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11*c0f28ce6SDave Jiang * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12*c0f28ce6SDave Jiang * more details. 13*c0f28ce6SDave Jiang * 14*c0f28ce6SDave Jiang * The full GNU General Public License is included in this distribution in 15*c0f28ce6SDave Jiang * the file called "COPYING". 16*c0f28ce6SDave Jiang * 17*c0f28ce6SDave Jiang */ 18*c0f28ce6SDave Jiang 19*c0f28ce6SDave Jiang #include <linux/init.h> 20*c0f28ce6SDave Jiang #include <linux/module.h> 21*c0f28ce6SDave Jiang #include <linux/slab.h> 22*c0f28ce6SDave Jiang #include <linux/pci.h> 23*c0f28ce6SDave Jiang #include <linux/interrupt.h> 24*c0f28ce6SDave Jiang #include <linux/dmaengine.h> 25*c0f28ce6SDave Jiang #include <linux/delay.h> 26*c0f28ce6SDave Jiang #include <linux/dma-mapping.h> 27*c0f28ce6SDave Jiang #include <linux/workqueue.h> 28*c0f28ce6SDave Jiang #include <linux/prefetch.h> 29*c0f28ce6SDave Jiang #include <linux/dca.h> 30*c0f28ce6SDave Jiang #include "dma.h" 31*c0f28ce6SDave Jiang #include "registers.h" 32*c0f28ce6SDave Jiang #include "hw.h" 33*c0f28ce6SDave Jiang 34*c0f28ce6SDave Jiang #include "../dmaengine.h" 35*c0f28ce6SDave Jiang 36*c0f28ce6SDave Jiang MODULE_VERSION(IOAT_DMA_VERSION); 37*c0f28ce6SDave Jiang MODULE_LICENSE("Dual BSD/GPL"); 38*c0f28ce6SDave Jiang MODULE_AUTHOR("Intel Corporation"); 39*c0f28ce6SDave Jiang 40*c0f28ce6SDave Jiang static struct pci_device_id ioat_pci_tbl[] = { 41*c0f28ce6SDave Jiang /* I/OAT v3 platforms */ 42*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, 43*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, 44*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, 45*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, 46*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, 47*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, 48*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, 49*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, 50*c0f28ce6SDave Jiang 51*c0f28ce6SDave Jiang /* I/OAT v3.2 platforms */ 52*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, 53*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, 54*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, 55*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, 56*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, 57*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, 58*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, 59*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, 60*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, 61*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, 62*c0f28ce6SDave Jiang 63*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) }, 64*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) }, 65*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) }, 66*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) }, 67*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) }, 68*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) }, 69*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) }, 70*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, 71*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, 72*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, 73*c0f28ce6SDave Jiang 74*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, 75*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, 76*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, 77*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, 78*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, 79*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, 80*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, 81*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, 82*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, 83*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, 84*c0f28ce6SDave Jiang 85*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, 86*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, 87*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, 88*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, 89*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, 90*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, 91*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, 92*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, 93*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, 94*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, 95*c0f28ce6SDave Jiang 96*c0f28ce6SDave Jiang /* I/OAT v3.3 platforms */ 97*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, 98*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, 99*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, 100*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, 101*c0f28ce6SDave Jiang 102*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, 103*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, 104*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, 105*c0f28ce6SDave Jiang { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, 106*c0f28ce6SDave Jiang 107*c0f28ce6SDave Jiang { 0, } 108*c0f28ce6SDave Jiang }; 109*c0f28ce6SDave Jiang MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); 110*c0f28ce6SDave Jiang 111*c0f28ce6SDave Jiang static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); 112*c0f28ce6SDave Jiang static void ioat_remove(struct pci_dev *pdev); 113*c0f28ce6SDave Jiang 114*c0f28ce6SDave Jiang static int ioat_dca_enabled = 1; 115*c0f28ce6SDave Jiang module_param(ioat_dca_enabled, int, 0644); 116*c0f28ce6SDave Jiang MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); 117*c0f28ce6SDave Jiang int ioat_pending_level = 4; 118*c0f28ce6SDave Jiang module_param(ioat_pending_level, int, 0644); 119*c0f28ce6SDave Jiang MODULE_PARM_DESC(ioat_pending_level, 120*c0f28ce6SDave Jiang "high-water mark for pushing ioat descriptors (default: 4)"); 121*c0f28ce6SDave Jiang int ioat_ring_alloc_order = 8; 122*c0f28ce6SDave Jiang module_param(ioat_ring_alloc_order, int, 0644); 123*c0f28ce6SDave Jiang MODULE_PARM_DESC(ioat_ring_alloc_order, 124*c0f28ce6SDave Jiang "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)"); 125*c0f28ce6SDave Jiang int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; 126*c0f28ce6SDave Jiang module_param(ioat_ring_max_alloc_order, int, 0644); 127*c0f28ce6SDave Jiang MODULE_PARM_DESC(ioat_ring_max_alloc_order, 128*c0f28ce6SDave Jiang "ioat+: upper limit for ring size (default: 16)"); 129*c0f28ce6SDave Jiang static char ioat_interrupt_style[32] = "msix"; 130*c0f28ce6SDave Jiang module_param_string(ioat_interrupt_style, ioat_interrupt_style, 131*c0f28ce6SDave Jiang sizeof(ioat_interrupt_style), 0644); 132*c0f28ce6SDave Jiang MODULE_PARM_DESC(ioat_interrupt_style, 133*c0f28ce6SDave Jiang "set ioat interrupt style: msix (default), msi, intx"); 134*c0f28ce6SDave Jiang 135*c0f28ce6SDave Jiang struct kmem_cache *ioat_cache; 136*c0f28ce6SDave Jiang struct kmem_cache *ioat_sed_cache; 137*c0f28ce6SDave Jiang 138*c0f28ce6SDave Jiang static bool is_jf_ioat(struct pci_dev *pdev) 139*c0f28ce6SDave Jiang { 140*c0f28ce6SDave Jiang switch (pdev->device) { 141*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF0: 142*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF1: 143*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF2: 144*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF3: 145*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF4: 146*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF5: 147*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF6: 148*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF7: 149*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF8: 150*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_JSF9: 151*c0f28ce6SDave Jiang return true; 152*c0f28ce6SDave Jiang default: 153*c0f28ce6SDave Jiang return false; 154*c0f28ce6SDave Jiang } 155*c0f28ce6SDave Jiang } 156*c0f28ce6SDave Jiang 157*c0f28ce6SDave Jiang static bool is_snb_ioat(struct pci_dev *pdev) 158*c0f28ce6SDave Jiang { 159*c0f28ce6SDave Jiang switch (pdev->device) { 160*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB0: 161*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB1: 162*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB2: 163*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB3: 164*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB4: 165*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB5: 166*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB6: 167*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB7: 168*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB8: 169*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_SNB9: 170*c0f28ce6SDave Jiang return true; 171*c0f28ce6SDave Jiang default: 172*c0f28ce6SDave Jiang return false; 173*c0f28ce6SDave Jiang } 174*c0f28ce6SDave Jiang } 175*c0f28ce6SDave Jiang 176*c0f28ce6SDave Jiang static bool is_ivb_ioat(struct pci_dev *pdev) 177*c0f28ce6SDave Jiang { 178*c0f28ce6SDave Jiang switch (pdev->device) { 179*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB0: 180*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB1: 181*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB2: 182*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB3: 183*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB4: 184*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB5: 185*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB6: 186*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB7: 187*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB8: 188*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_IVB9: 189*c0f28ce6SDave Jiang return true; 190*c0f28ce6SDave Jiang default: 191*c0f28ce6SDave Jiang return false; 192*c0f28ce6SDave Jiang } 193*c0f28ce6SDave Jiang 194*c0f28ce6SDave Jiang } 195*c0f28ce6SDave Jiang 196*c0f28ce6SDave Jiang static bool is_hsw_ioat(struct pci_dev *pdev) 197*c0f28ce6SDave Jiang { 198*c0f28ce6SDave Jiang switch (pdev->device) { 199*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW0: 200*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW1: 201*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW2: 202*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW3: 203*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW4: 204*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW5: 205*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW6: 206*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW7: 207*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW8: 208*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_HSW9: 209*c0f28ce6SDave Jiang return true; 210*c0f28ce6SDave Jiang default: 211*c0f28ce6SDave Jiang return false; 212*c0f28ce6SDave Jiang } 213*c0f28ce6SDave Jiang 214*c0f28ce6SDave Jiang } 215*c0f28ce6SDave Jiang 216*c0f28ce6SDave Jiang static bool is_xeon_cb32(struct pci_dev *pdev) 217*c0f28ce6SDave Jiang { 218*c0f28ce6SDave Jiang return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || 219*c0f28ce6SDave Jiang is_hsw_ioat(pdev); 220*c0f28ce6SDave Jiang } 221*c0f28ce6SDave Jiang 222*c0f28ce6SDave Jiang bool is_bwd_ioat(struct pci_dev *pdev) 223*c0f28ce6SDave Jiang { 224*c0f28ce6SDave Jiang switch (pdev->device) { 225*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD0: 226*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD1: 227*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 228*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 229*c0f28ce6SDave Jiang /* even though not Atom, BDX-DE has same DMA silicon */ 230*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: 231*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: 232*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: 233*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: 234*c0f28ce6SDave Jiang return true; 235*c0f28ce6SDave Jiang default: 236*c0f28ce6SDave Jiang return false; 237*c0f28ce6SDave Jiang } 238*c0f28ce6SDave Jiang } 239*c0f28ce6SDave Jiang 240*c0f28ce6SDave Jiang static bool is_bwd_noraid(struct pci_dev *pdev) 241*c0f28ce6SDave Jiang { 242*c0f28ce6SDave Jiang switch (pdev->device) { 243*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 244*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 245*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: 246*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: 247*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: 248*c0f28ce6SDave Jiang case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: 249*c0f28ce6SDave Jiang return true; 250*c0f28ce6SDave Jiang default: 251*c0f28ce6SDave Jiang return false; 252*c0f28ce6SDave Jiang } 253*c0f28ce6SDave Jiang 254*c0f28ce6SDave Jiang } 255*c0f28ce6SDave Jiang 256*c0f28ce6SDave Jiang /* 257*c0f28ce6SDave Jiang * Perform a IOAT transaction to verify the HW works. 258*c0f28ce6SDave Jiang */ 259*c0f28ce6SDave Jiang #define IOAT_TEST_SIZE 2000 260*c0f28ce6SDave Jiang 261*c0f28ce6SDave Jiang static void ioat_dma_test_callback(void *dma_async_param) 262*c0f28ce6SDave Jiang { 263*c0f28ce6SDave Jiang struct completion *cmp = dma_async_param; 264*c0f28ce6SDave Jiang 265*c0f28ce6SDave Jiang complete(cmp); 266*c0f28ce6SDave Jiang } 267*c0f28ce6SDave Jiang 268*c0f28ce6SDave Jiang /** 269*c0f28ce6SDave Jiang * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 270*c0f28ce6SDave Jiang * @ioat_dma: dma device to be tested 271*c0f28ce6SDave Jiang */ 272*c0f28ce6SDave Jiang int ioat_dma_self_test(struct ioatdma_device *ioat_dma) 273*c0f28ce6SDave Jiang { 274*c0f28ce6SDave Jiang int i; 275*c0f28ce6SDave Jiang u8 *src; 276*c0f28ce6SDave Jiang u8 *dest; 277*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 278*c0f28ce6SDave Jiang struct device *dev = &ioat_dma->pdev->dev; 279*c0f28ce6SDave Jiang struct dma_chan *dma_chan; 280*c0f28ce6SDave Jiang struct dma_async_tx_descriptor *tx; 281*c0f28ce6SDave Jiang dma_addr_t dma_dest, dma_src; 282*c0f28ce6SDave Jiang dma_cookie_t cookie; 283*c0f28ce6SDave Jiang int err = 0; 284*c0f28ce6SDave Jiang struct completion cmp; 285*c0f28ce6SDave Jiang unsigned long tmo; 286*c0f28ce6SDave Jiang unsigned long flags; 287*c0f28ce6SDave Jiang 288*c0f28ce6SDave Jiang src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 289*c0f28ce6SDave Jiang if (!src) 290*c0f28ce6SDave Jiang return -ENOMEM; 291*c0f28ce6SDave Jiang dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL); 292*c0f28ce6SDave Jiang if (!dest) { 293*c0f28ce6SDave Jiang kfree(src); 294*c0f28ce6SDave Jiang return -ENOMEM; 295*c0f28ce6SDave Jiang } 296*c0f28ce6SDave Jiang 297*c0f28ce6SDave Jiang /* Fill in src buffer */ 298*c0f28ce6SDave Jiang for (i = 0; i < IOAT_TEST_SIZE; i++) 299*c0f28ce6SDave Jiang src[i] = (u8)i; 300*c0f28ce6SDave Jiang 301*c0f28ce6SDave Jiang /* Start copy, using first DMA channel */ 302*c0f28ce6SDave Jiang dma_chan = container_of(dma->channels.next, struct dma_chan, 303*c0f28ce6SDave Jiang device_node); 304*c0f28ce6SDave Jiang if (dma->device_alloc_chan_resources(dma_chan) < 1) { 305*c0f28ce6SDave Jiang dev_err(dev, "selftest cannot allocate chan resource\n"); 306*c0f28ce6SDave Jiang err = -ENODEV; 307*c0f28ce6SDave Jiang goto out; 308*c0f28ce6SDave Jiang } 309*c0f28ce6SDave Jiang 310*c0f28ce6SDave Jiang dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 311*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dma_src)) { 312*c0f28ce6SDave Jiang dev_err(dev, "mapping src buffer failed\n"); 313*c0f28ce6SDave Jiang goto free_resources; 314*c0f28ce6SDave Jiang } 315*c0f28ce6SDave Jiang dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 316*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dma_dest)) { 317*c0f28ce6SDave Jiang dev_err(dev, "mapping dest buffer failed\n"); 318*c0f28ce6SDave Jiang goto unmap_src; 319*c0f28ce6SDave Jiang } 320*c0f28ce6SDave Jiang flags = DMA_PREP_INTERRUPT; 321*c0f28ce6SDave Jiang tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest, 322*c0f28ce6SDave Jiang dma_src, IOAT_TEST_SIZE, 323*c0f28ce6SDave Jiang flags); 324*c0f28ce6SDave Jiang if (!tx) { 325*c0f28ce6SDave Jiang dev_err(dev, "Self-test prep failed, disabling\n"); 326*c0f28ce6SDave Jiang err = -ENODEV; 327*c0f28ce6SDave Jiang goto unmap_dma; 328*c0f28ce6SDave Jiang } 329*c0f28ce6SDave Jiang 330*c0f28ce6SDave Jiang async_tx_ack(tx); 331*c0f28ce6SDave Jiang init_completion(&cmp); 332*c0f28ce6SDave Jiang tx->callback = ioat_dma_test_callback; 333*c0f28ce6SDave Jiang tx->callback_param = &cmp; 334*c0f28ce6SDave Jiang cookie = tx->tx_submit(tx); 335*c0f28ce6SDave Jiang if (cookie < 0) { 336*c0f28ce6SDave Jiang dev_err(dev, "Self-test setup failed, disabling\n"); 337*c0f28ce6SDave Jiang err = -ENODEV; 338*c0f28ce6SDave Jiang goto unmap_dma; 339*c0f28ce6SDave Jiang } 340*c0f28ce6SDave Jiang dma->device_issue_pending(dma_chan); 341*c0f28ce6SDave Jiang 342*c0f28ce6SDave Jiang tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 343*c0f28ce6SDave Jiang 344*c0f28ce6SDave Jiang if (tmo == 0 || 345*c0f28ce6SDave Jiang dma->device_tx_status(dma_chan, cookie, NULL) 346*c0f28ce6SDave Jiang != DMA_COMPLETE) { 347*c0f28ce6SDave Jiang dev_err(dev, "Self-test copy timed out, disabling\n"); 348*c0f28ce6SDave Jiang err = -ENODEV; 349*c0f28ce6SDave Jiang goto unmap_dma; 350*c0f28ce6SDave Jiang } 351*c0f28ce6SDave Jiang if (memcmp(src, dest, IOAT_TEST_SIZE)) { 352*c0f28ce6SDave Jiang dev_err(dev, "Self-test copy failed compare, disabling\n"); 353*c0f28ce6SDave Jiang err = -ENODEV; 354*c0f28ce6SDave Jiang goto free_resources; 355*c0f28ce6SDave Jiang } 356*c0f28ce6SDave Jiang 357*c0f28ce6SDave Jiang unmap_dma: 358*c0f28ce6SDave Jiang dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); 359*c0f28ce6SDave Jiang unmap_src: 360*c0f28ce6SDave Jiang dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE); 361*c0f28ce6SDave Jiang free_resources: 362*c0f28ce6SDave Jiang dma->device_free_chan_resources(dma_chan); 363*c0f28ce6SDave Jiang out: 364*c0f28ce6SDave Jiang kfree(src); 365*c0f28ce6SDave Jiang kfree(dest); 366*c0f28ce6SDave Jiang return err; 367*c0f28ce6SDave Jiang } 368*c0f28ce6SDave Jiang 369*c0f28ce6SDave Jiang /** 370*c0f28ce6SDave Jiang * ioat_dma_setup_interrupts - setup interrupt handler 371*c0f28ce6SDave Jiang * @ioat_dma: ioat dma device 372*c0f28ce6SDave Jiang */ 373*c0f28ce6SDave Jiang int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma) 374*c0f28ce6SDave Jiang { 375*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan; 376*c0f28ce6SDave Jiang struct pci_dev *pdev = ioat_dma->pdev; 377*c0f28ce6SDave Jiang struct device *dev = &pdev->dev; 378*c0f28ce6SDave Jiang struct msix_entry *msix; 379*c0f28ce6SDave Jiang int i, j, msixcnt; 380*c0f28ce6SDave Jiang int err = -EINVAL; 381*c0f28ce6SDave Jiang u8 intrctrl = 0; 382*c0f28ce6SDave Jiang 383*c0f28ce6SDave Jiang if (!strcmp(ioat_interrupt_style, "msix")) 384*c0f28ce6SDave Jiang goto msix; 385*c0f28ce6SDave Jiang if (!strcmp(ioat_interrupt_style, "msi")) 386*c0f28ce6SDave Jiang goto msi; 387*c0f28ce6SDave Jiang if (!strcmp(ioat_interrupt_style, "intx")) 388*c0f28ce6SDave Jiang goto intx; 389*c0f28ce6SDave Jiang dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); 390*c0f28ce6SDave Jiang goto err_no_irq; 391*c0f28ce6SDave Jiang 392*c0f28ce6SDave Jiang msix: 393*c0f28ce6SDave Jiang /* The number of MSI-X vectors should equal the number of channels */ 394*c0f28ce6SDave Jiang msixcnt = ioat_dma->dma_dev.chancnt; 395*c0f28ce6SDave Jiang for (i = 0; i < msixcnt; i++) 396*c0f28ce6SDave Jiang ioat_dma->msix_entries[i].entry = i; 397*c0f28ce6SDave Jiang 398*c0f28ce6SDave Jiang err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt); 399*c0f28ce6SDave Jiang if (err) 400*c0f28ce6SDave Jiang goto msi; 401*c0f28ce6SDave Jiang 402*c0f28ce6SDave Jiang for (i = 0; i < msixcnt; i++) { 403*c0f28ce6SDave Jiang msix = &ioat_dma->msix_entries[i]; 404*c0f28ce6SDave Jiang ioat_chan = ioat_chan_by_index(ioat_dma, i); 405*c0f28ce6SDave Jiang err = devm_request_irq(dev, msix->vector, 406*c0f28ce6SDave Jiang ioat_dma_do_interrupt_msix, 0, 407*c0f28ce6SDave Jiang "ioat-msix", ioat_chan); 408*c0f28ce6SDave Jiang if (err) { 409*c0f28ce6SDave Jiang for (j = 0; j < i; j++) { 410*c0f28ce6SDave Jiang msix = &ioat_dma->msix_entries[j]; 411*c0f28ce6SDave Jiang ioat_chan = ioat_chan_by_index(ioat_dma, j); 412*c0f28ce6SDave Jiang devm_free_irq(dev, msix->vector, ioat_chan); 413*c0f28ce6SDave Jiang } 414*c0f28ce6SDave Jiang goto msi; 415*c0f28ce6SDave Jiang } 416*c0f28ce6SDave Jiang } 417*c0f28ce6SDave Jiang intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; 418*c0f28ce6SDave Jiang ioat_dma->irq_mode = IOAT_MSIX; 419*c0f28ce6SDave Jiang goto done; 420*c0f28ce6SDave Jiang 421*c0f28ce6SDave Jiang msi: 422*c0f28ce6SDave Jiang err = pci_enable_msi(pdev); 423*c0f28ce6SDave Jiang if (err) 424*c0f28ce6SDave Jiang goto intx; 425*c0f28ce6SDave Jiang 426*c0f28ce6SDave Jiang err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, 427*c0f28ce6SDave Jiang "ioat-msi", ioat_dma); 428*c0f28ce6SDave Jiang if (err) { 429*c0f28ce6SDave Jiang pci_disable_msi(pdev); 430*c0f28ce6SDave Jiang goto intx; 431*c0f28ce6SDave Jiang } 432*c0f28ce6SDave Jiang ioat_dma->irq_mode = IOAT_MSI; 433*c0f28ce6SDave Jiang goto done; 434*c0f28ce6SDave Jiang 435*c0f28ce6SDave Jiang intx: 436*c0f28ce6SDave Jiang err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 437*c0f28ce6SDave Jiang IRQF_SHARED, "ioat-intx", ioat_dma); 438*c0f28ce6SDave Jiang if (err) 439*c0f28ce6SDave Jiang goto err_no_irq; 440*c0f28ce6SDave Jiang 441*c0f28ce6SDave Jiang ioat_dma->irq_mode = IOAT_INTX; 442*c0f28ce6SDave Jiang done: 443*c0f28ce6SDave Jiang if (ioat_dma->intr_quirk) 444*c0f28ce6SDave Jiang ioat_dma->intr_quirk(ioat_dma); 445*c0f28ce6SDave Jiang intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; 446*c0f28ce6SDave Jiang writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); 447*c0f28ce6SDave Jiang return 0; 448*c0f28ce6SDave Jiang 449*c0f28ce6SDave Jiang err_no_irq: 450*c0f28ce6SDave Jiang /* Disable all interrupt generation */ 451*c0f28ce6SDave Jiang writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); 452*c0f28ce6SDave Jiang ioat_dma->irq_mode = IOAT_NOIRQ; 453*c0f28ce6SDave Jiang dev_err(dev, "no usable interrupts\n"); 454*c0f28ce6SDave Jiang return err; 455*c0f28ce6SDave Jiang } 456*c0f28ce6SDave Jiang EXPORT_SYMBOL(ioat_dma_setup_interrupts); 457*c0f28ce6SDave Jiang 458*c0f28ce6SDave Jiang static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma) 459*c0f28ce6SDave Jiang { 460*c0f28ce6SDave Jiang /* Disable all interrupt generation */ 461*c0f28ce6SDave Jiang writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET); 462*c0f28ce6SDave Jiang } 463*c0f28ce6SDave Jiang 464*c0f28ce6SDave Jiang int ioat_probe(struct ioatdma_device *ioat_dma) 465*c0f28ce6SDave Jiang { 466*c0f28ce6SDave Jiang int err = -ENODEV; 467*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 468*c0f28ce6SDave Jiang struct pci_dev *pdev = ioat_dma->pdev; 469*c0f28ce6SDave Jiang struct device *dev = &pdev->dev; 470*c0f28ce6SDave Jiang 471*c0f28ce6SDave Jiang /* DMA coherent memory pool for DMA descriptor allocations */ 472*c0f28ce6SDave Jiang ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev, 473*c0f28ce6SDave Jiang sizeof(struct ioat_dma_descriptor), 474*c0f28ce6SDave Jiang 64, 0); 475*c0f28ce6SDave Jiang if (!ioat_dma->dma_pool) { 476*c0f28ce6SDave Jiang err = -ENOMEM; 477*c0f28ce6SDave Jiang goto err_dma_pool; 478*c0f28ce6SDave Jiang } 479*c0f28ce6SDave Jiang 480*c0f28ce6SDave Jiang ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev, 481*c0f28ce6SDave Jiang sizeof(u64), 482*c0f28ce6SDave Jiang SMP_CACHE_BYTES, 483*c0f28ce6SDave Jiang SMP_CACHE_BYTES); 484*c0f28ce6SDave Jiang 485*c0f28ce6SDave Jiang if (!ioat_dma->completion_pool) { 486*c0f28ce6SDave Jiang err = -ENOMEM; 487*c0f28ce6SDave Jiang goto err_completion_pool; 488*c0f28ce6SDave Jiang } 489*c0f28ce6SDave Jiang 490*c0f28ce6SDave Jiang ioat_dma->enumerate_channels(ioat_dma); 491*c0f28ce6SDave Jiang 492*c0f28ce6SDave Jiang dma_cap_set(DMA_MEMCPY, dma->cap_mask); 493*c0f28ce6SDave Jiang dma->dev = &pdev->dev; 494*c0f28ce6SDave Jiang 495*c0f28ce6SDave Jiang if (!dma->chancnt) { 496*c0f28ce6SDave Jiang dev_err(dev, "channel enumeration error\n"); 497*c0f28ce6SDave Jiang goto err_setup_interrupts; 498*c0f28ce6SDave Jiang } 499*c0f28ce6SDave Jiang 500*c0f28ce6SDave Jiang err = ioat_dma_setup_interrupts(ioat_dma); 501*c0f28ce6SDave Jiang if (err) 502*c0f28ce6SDave Jiang goto err_setup_interrupts; 503*c0f28ce6SDave Jiang 504*c0f28ce6SDave Jiang err = ioat_dma->self_test(ioat_dma); 505*c0f28ce6SDave Jiang if (err) 506*c0f28ce6SDave Jiang goto err_self_test; 507*c0f28ce6SDave Jiang 508*c0f28ce6SDave Jiang return 0; 509*c0f28ce6SDave Jiang 510*c0f28ce6SDave Jiang err_self_test: 511*c0f28ce6SDave Jiang ioat_disable_interrupts(ioat_dma); 512*c0f28ce6SDave Jiang err_setup_interrupts: 513*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->completion_pool); 514*c0f28ce6SDave Jiang err_completion_pool: 515*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->dma_pool); 516*c0f28ce6SDave Jiang err_dma_pool: 517*c0f28ce6SDave Jiang return err; 518*c0f28ce6SDave Jiang } 519*c0f28ce6SDave Jiang 520*c0f28ce6SDave Jiang int ioat_register(struct ioatdma_device *ioat_dma) 521*c0f28ce6SDave Jiang { 522*c0f28ce6SDave Jiang int err = dma_async_device_register(&ioat_dma->dma_dev); 523*c0f28ce6SDave Jiang 524*c0f28ce6SDave Jiang if (err) { 525*c0f28ce6SDave Jiang ioat_disable_interrupts(ioat_dma); 526*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->completion_pool); 527*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->dma_pool); 528*c0f28ce6SDave Jiang } 529*c0f28ce6SDave Jiang 530*c0f28ce6SDave Jiang return err; 531*c0f28ce6SDave Jiang } 532*c0f28ce6SDave Jiang 533*c0f28ce6SDave Jiang void ioat_dma_remove(struct ioatdma_device *ioat_dma) 534*c0f28ce6SDave Jiang { 535*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 536*c0f28ce6SDave Jiang 537*c0f28ce6SDave Jiang ioat_disable_interrupts(ioat_dma); 538*c0f28ce6SDave Jiang 539*c0f28ce6SDave Jiang ioat_kobject_del(ioat_dma); 540*c0f28ce6SDave Jiang 541*c0f28ce6SDave Jiang dma_async_device_unregister(dma); 542*c0f28ce6SDave Jiang 543*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->dma_pool); 544*c0f28ce6SDave Jiang pci_pool_destroy(ioat_dma->completion_pool); 545*c0f28ce6SDave Jiang 546*c0f28ce6SDave Jiang INIT_LIST_HEAD(&dma->channels); 547*c0f28ce6SDave Jiang } 548*c0f28ce6SDave Jiang 549*c0f28ce6SDave Jiang /** 550*c0f28ce6SDave Jiang * ioat_enumerate_channels - find and initialize the device's channels 551*c0f28ce6SDave Jiang * @ioat_dma: the ioat dma device to be enumerated 552*c0f28ce6SDave Jiang */ 553*c0f28ce6SDave Jiang int ioat_enumerate_channels(struct ioatdma_device *ioat_dma) 554*c0f28ce6SDave Jiang { 555*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan; 556*c0f28ce6SDave Jiang struct device *dev = &ioat_dma->pdev->dev; 557*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 558*c0f28ce6SDave Jiang u8 xfercap_log; 559*c0f28ce6SDave Jiang int i; 560*c0f28ce6SDave Jiang 561*c0f28ce6SDave Jiang INIT_LIST_HEAD(&dma->channels); 562*c0f28ce6SDave Jiang dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET); 563*c0f28ce6SDave Jiang dma->chancnt &= 0x1f; /* bits [4:0] valid */ 564*c0f28ce6SDave Jiang if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) { 565*c0f28ce6SDave Jiang dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", 566*c0f28ce6SDave Jiang dma->chancnt, ARRAY_SIZE(ioat_dma->idx)); 567*c0f28ce6SDave Jiang dma->chancnt = ARRAY_SIZE(ioat_dma->idx); 568*c0f28ce6SDave Jiang } 569*c0f28ce6SDave Jiang xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET); 570*c0f28ce6SDave Jiang xfercap_log &= 0x1f; /* bits [4:0] valid */ 571*c0f28ce6SDave Jiang if (xfercap_log == 0) 572*c0f28ce6SDave Jiang return 0; 573*c0f28ce6SDave Jiang dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); 574*c0f28ce6SDave Jiang 575*c0f28ce6SDave Jiang for (i = 0; i < dma->chancnt; i++) { 576*c0f28ce6SDave Jiang ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); 577*c0f28ce6SDave Jiang if (!ioat_chan) 578*c0f28ce6SDave Jiang break; 579*c0f28ce6SDave Jiang 580*c0f28ce6SDave Jiang ioat_init_channel(ioat_dma, ioat_chan, i); 581*c0f28ce6SDave Jiang ioat_chan->xfercap_log = xfercap_log; 582*c0f28ce6SDave Jiang spin_lock_init(&ioat_chan->prep_lock); 583*c0f28ce6SDave Jiang if (ioat_dma->reset_hw(ioat_chan)) { 584*c0f28ce6SDave Jiang i = 0; 585*c0f28ce6SDave Jiang break; 586*c0f28ce6SDave Jiang } 587*c0f28ce6SDave Jiang } 588*c0f28ce6SDave Jiang dma->chancnt = i; 589*c0f28ce6SDave Jiang return i; 590*c0f28ce6SDave Jiang } 591*c0f28ce6SDave Jiang 592*c0f28ce6SDave Jiang /** 593*c0f28ce6SDave Jiang * ioat_free_chan_resources - release all the descriptors 594*c0f28ce6SDave Jiang * @chan: the channel to be cleaned 595*c0f28ce6SDave Jiang */ 596*c0f28ce6SDave Jiang void ioat_free_chan_resources(struct dma_chan *c) 597*c0f28ce6SDave Jiang { 598*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan = to_ioat_chan(c); 599*c0f28ce6SDave Jiang struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; 600*c0f28ce6SDave Jiang struct ioat_ring_ent *desc; 601*c0f28ce6SDave Jiang const int total_descs = 1 << ioat_chan->alloc_order; 602*c0f28ce6SDave Jiang int descs; 603*c0f28ce6SDave Jiang int i; 604*c0f28ce6SDave Jiang 605*c0f28ce6SDave Jiang /* Before freeing channel resources first check 606*c0f28ce6SDave Jiang * if they have been previously allocated for this channel. 607*c0f28ce6SDave Jiang */ 608*c0f28ce6SDave Jiang if (!ioat_chan->ring) 609*c0f28ce6SDave Jiang return; 610*c0f28ce6SDave Jiang 611*c0f28ce6SDave Jiang ioat_stop(ioat_chan); 612*c0f28ce6SDave Jiang ioat_dma->reset_hw(ioat_chan); 613*c0f28ce6SDave Jiang 614*c0f28ce6SDave Jiang spin_lock_bh(&ioat_chan->cleanup_lock); 615*c0f28ce6SDave Jiang spin_lock_bh(&ioat_chan->prep_lock); 616*c0f28ce6SDave Jiang descs = ioat_ring_space(ioat_chan); 617*c0f28ce6SDave Jiang dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs); 618*c0f28ce6SDave Jiang for (i = 0; i < descs; i++) { 619*c0f28ce6SDave Jiang desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i); 620*c0f28ce6SDave Jiang ioat_free_ring_ent(desc, c); 621*c0f28ce6SDave Jiang } 622*c0f28ce6SDave Jiang 623*c0f28ce6SDave Jiang if (descs < total_descs) 624*c0f28ce6SDave Jiang dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", 625*c0f28ce6SDave Jiang total_descs - descs); 626*c0f28ce6SDave Jiang 627*c0f28ce6SDave Jiang for (i = 0; i < total_descs - descs; i++) { 628*c0f28ce6SDave Jiang desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i); 629*c0f28ce6SDave Jiang dump_desc_dbg(ioat_chan, desc); 630*c0f28ce6SDave Jiang ioat_free_ring_ent(desc, c); 631*c0f28ce6SDave Jiang } 632*c0f28ce6SDave Jiang 633*c0f28ce6SDave Jiang kfree(ioat_chan->ring); 634*c0f28ce6SDave Jiang ioat_chan->ring = NULL; 635*c0f28ce6SDave Jiang ioat_chan->alloc_order = 0; 636*c0f28ce6SDave Jiang pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion, 637*c0f28ce6SDave Jiang ioat_chan->completion_dma); 638*c0f28ce6SDave Jiang spin_unlock_bh(&ioat_chan->prep_lock); 639*c0f28ce6SDave Jiang spin_unlock_bh(&ioat_chan->cleanup_lock); 640*c0f28ce6SDave Jiang 641*c0f28ce6SDave Jiang ioat_chan->last_completion = 0; 642*c0f28ce6SDave Jiang ioat_chan->completion_dma = 0; 643*c0f28ce6SDave Jiang ioat_chan->dmacount = 0; 644*c0f28ce6SDave Jiang } 645*c0f28ce6SDave Jiang 646*c0f28ce6SDave Jiang /* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring 647*c0f28ce6SDave Jiang * @chan: channel to be initialized 648*c0f28ce6SDave Jiang */ 649*c0f28ce6SDave Jiang int ioat_alloc_chan_resources(struct dma_chan *c) 650*c0f28ce6SDave Jiang { 651*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan = to_ioat_chan(c); 652*c0f28ce6SDave Jiang struct ioat_ring_ent **ring; 653*c0f28ce6SDave Jiang u64 status; 654*c0f28ce6SDave Jiang int order; 655*c0f28ce6SDave Jiang int i = 0; 656*c0f28ce6SDave Jiang u32 chanerr; 657*c0f28ce6SDave Jiang 658*c0f28ce6SDave Jiang /* have we already been set up? */ 659*c0f28ce6SDave Jiang if (ioat_chan->ring) 660*c0f28ce6SDave Jiang return 1 << ioat_chan->alloc_order; 661*c0f28ce6SDave Jiang 662*c0f28ce6SDave Jiang /* Setup register to interrupt and write completion status on error */ 663*c0f28ce6SDave Jiang writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); 664*c0f28ce6SDave Jiang 665*c0f28ce6SDave Jiang /* allocate a completion writeback area */ 666*c0f28ce6SDave Jiang /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ 667*c0f28ce6SDave Jiang ioat_chan->completion = 668*c0f28ce6SDave Jiang pci_pool_alloc(ioat_chan->ioat_dma->completion_pool, 669*c0f28ce6SDave Jiang GFP_KERNEL, &ioat_chan->completion_dma); 670*c0f28ce6SDave Jiang if (!ioat_chan->completion) 671*c0f28ce6SDave Jiang return -ENOMEM; 672*c0f28ce6SDave Jiang 673*c0f28ce6SDave Jiang memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion)); 674*c0f28ce6SDave Jiang writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF, 675*c0f28ce6SDave Jiang ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); 676*c0f28ce6SDave Jiang writel(((u64)ioat_chan->completion_dma) >> 32, 677*c0f28ce6SDave Jiang ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 678*c0f28ce6SDave Jiang 679*c0f28ce6SDave Jiang order = ioat_get_alloc_order(); 680*c0f28ce6SDave Jiang ring = ioat_alloc_ring(c, order, GFP_KERNEL); 681*c0f28ce6SDave Jiang if (!ring) 682*c0f28ce6SDave Jiang return -ENOMEM; 683*c0f28ce6SDave Jiang 684*c0f28ce6SDave Jiang spin_lock_bh(&ioat_chan->cleanup_lock); 685*c0f28ce6SDave Jiang spin_lock_bh(&ioat_chan->prep_lock); 686*c0f28ce6SDave Jiang ioat_chan->ring = ring; 687*c0f28ce6SDave Jiang ioat_chan->head = 0; 688*c0f28ce6SDave Jiang ioat_chan->issued = 0; 689*c0f28ce6SDave Jiang ioat_chan->tail = 0; 690*c0f28ce6SDave Jiang ioat_chan->alloc_order = order; 691*c0f28ce6SDave Jiang set_bit(IOAT_RUN, &ioat_chan->state); 692*c0f28ce6SDave Jiang spin_unlock_bh(&ioat_chan->prep_lock); 693*c0f28ce6SDave Jiang spin_unlock_bh(&ioat_chan->cleanup_lock); 694*c0f28ce6SDave Jiang 695*c0f28ce6SDave Jiang ioat_start_null_desc(ioat_chan); 696*c0f28ce6SDave Jiang 697*c0f28ce6SDave Jiang /* check that we got off the ground */ 698*c0f28ce6SDave Jiang do { 699*c0f28ce6SDave Jiang udelay(1); 700*c0f28ce6SDave Jiang status = ioat_chansts(ioat_chan); 701*c0f28ce6SDave Jiang } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status)); 702*c0f28ce6SDave Jiang 703*c0f28ce6SDave Jiang if (is_ioat_active(status) || is_ioat_idle(status)) 704*c0f28ce6SDave Jiang return 1 << ioat_chan->alloc_order; 705*c0f28ce6SDave Jiang 706*c0f28ce6SDave Jiang chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); 707*c0f28ce6SDave Jiang 708*c0f28ce6SDave Jiang dev_WARN(to_dev(ioat_chan), 709*c0f28ce6SDave Jiang "failed to start channel chanerr: %#x\n", chanerr); 710*c0f28ce6SDave Jiang ioat_free_chan_resources(c); 711*c0f28ce6SDave Jiang return -EFAULT; 712*c0f28ce6SDave Jiang } 713*c0f28ce6SDave Jiang 714*c0f28ce6SDave Jiang /* common channel initialization */ 715*c0f28ce6SDave Jiang void 716*c0f28ce6SDave Jiang ioat_init_channel(struct ioatdma_device *ioat_dma, 717*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan, int idx) 718*c0f28ce6SDave Jiang { 719*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 720*c0f28ce6SDave Jiang struct dma_chan *c = &ioat_chan->dma_chan; 721*c0f28ce6SDave Jiang unsigned long data = (unsigned long) c; 722*c0f28ce6SDave Jiang 723*c0f28ce6SDave Jiang ioat_chan->ioat_dma = ioat_dma; 724*c0f28ce6SDave Jiang ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1)); 725*c0f28ce6SDave Jiang spin_lock_init(&ioat_chan->cleanup_lock); 726*c0f28ce6SDave Jiang ioat_chan->dma_chan.device = dma; 727*c0f28ce6SDave Jiang dma_cookie_init(&ioat_chan->dma_chan); 728*c0f28ce6SDave Jiang list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels); 729*c0f28ce6SDave Jiang ioat_dma->idx[idx] = ioat_chan; 730*c0f28ce6SDave Jiang init_timer(&ioat_chan->timer); 731*c0f28ce6SDave Jiang ioat_chan->timer.function = ioat_dma->timer_fn; 732*c0f28ce6SDave Jiang ioat_chan->timer.data = data; 733*c0f28ce6SDave Jiang tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data); 734*c0f28ce6SDave Jiang } 735*c0f28ce6SDave Jiang 736*c0f28ce6SDave Jiang static void ioat3_dma_test_callback(void *dma_async_param) 737*c0f28ce6SDave Jiang { 738*c0f28ce6SDave Jiang struct completion *cmp = dma_async_param; 739*c0f28ce6SDave Jiang 740*c0f28ce6SDave Jiang complete(cmp); 741*c0f28ce6SDave Jiang } 742*c0f28ce6SDave Jiang 743*c0f28ce6SDave Jiang #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ 744*c0f28ce6SDave Jiang static int ioat_xor_val_self_test(struct ioatdma_device *ioat_dma) 745*c0f28ce6SDave Jiang { 746*c0f28ce6SDave Jiang int i, src_idx; 747*c0f28ce6SDave Jiang struct page *dest; 748*c0f28ce6SDave Jiang struct page *xor_srcs[IOAT_NUM_SRC_TEST]; 749*c0f28ce6SDave Jiang struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; 750*c0f28ce6SDave Jiang dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; 751*c0f28ce6SDave Jiang dma_addr_t dest_dma; 752*c0f28ce6SDave Jiang struct dma_async_tx_descriptor *tx; 753*c0f28ce6SDave Jiang struct dma_chan *dma_chan; 754*c0f28ce6SDave Jiang dma_cookie_t cookie; 755*c0f28ce6SDave Jiang u8 cmp_byte = 0; 756*c0f28ce6SDave Jiang u32 cmp_word; 757*c0f28ce6SDave Jiang u32 xor_val_result; 758*c0f28ce6SDave Jiang int err = 0; 759*c0f28ce6SDave Jiang struct completion cmp; 760*c0f28ce6SDave Jiang unsigned long tmo; 761*c0f28ce6SDave Jiang struct device *dev = &ioat_dma->pdev->dev; 762*c0f28ce6SDave Jiang struct dma_device *dma = &ioat_dma->dma_dev; 763*c0f28ce6SDave Jiang u8 op = 0; 764*c0f28ce6SDave Jiang 765*c0f28ce6SDave Jiang dev_dbg(dev, "%s\n", __func__); 766*c0f28ce6SDave Jiang 767*c0f28ce6SDave Jiang if (!dma_has_cap(DMA_XOR, dma->cap_mask)) 768*c0f28ce6SDave Jiang return 0; 769*c0f28ce6SDave Jiang 770*c0f28ce6SDave Jiang for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { 771*c0f28ce6SDave Jiang xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 772*c0f28ce6SDave Jiang if (!xor_srcs[src_idx]) { 773*c0f28ce6SDave Jiang while (src_idx--) 774*c0f28ce6SDave Jiang __free_page(xor_srcs[src_idx]); 775*c0f28ce6SDave Jiang return -ENOMEM; 776*c0f28ce6SDave Jiang } 777*c0f28ce6SDave Jiang } 778*c0f28ce6SDave Jiang 779*c0f28ce6SDave Jiang dest = alloc_page(GFP_KERNEL); 780*c0f28ce6SDave Jiang if (!dest) { 781*c0f28ce6SDave Jiang while (src_idx--) 782*c0f28ce6SDave Jiang __free_page(xor_srcs[src_idx]); 783*c0f28ce6SDave Jiang return -ENOMEM; 784*c0f28ce6SDave Jiang } 785*c0f28ce6SDave Jiang 786*c0f28ce6SDave Jiang /* Fill in src buffers */ 787*c0f28ce6SDave Jiang for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { 788*c0f28ce6SDave Jiang u8 *ptr = page_address(xor_srcs[src_idx]); 789*c0f28ce6SDave Jiang 790*c0f28ce6SDave Jiang for (i = 0; i < PAGE_SIZE; i++) 791*c0f28ce6SDave Jiang ptr[i] = (1 << src_idx); 792*c0f28ce6SDave Jiang } 793*c0f28ce6SDave Jiang 794*c0f28ce6SDave Jiang for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) 795*c0f28ce6SDave Jiang cmp_byte ^= (u8) (1 << src_idx); 796*c0f28ce6SDave Jiang 797*c0f28ce6SDave Jiang cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 798*c0f28ce6SDave Jiang (cmp_byte << 8) | cmp_byte; 799*c0f28ce6SDave Jiang 800*c0f28ce6SDave Jiang memset(page_address(dest), 0, PAGE_SIZE); 801*c0f28ce6SDave Jiang 802*c0f28ce6SDave Jiang dma_chan = container_of(dma->channels.next, struct dma_chan, 803*c0f28ce6SDave Jiang device_node); 804*c0f28ce6SDave Jiang if (dma->device_alloc_chan_resources(dma_chan) < 1) { 805*c0f28ce6SDave Jiang err = -ENODEV; 806*c0f28ce6SDave Jiang goto out; 807*c0f28ce6SDave Jiang } 808*c0f28ce6SDave Jiang 809*c0f28ce6SDave Jiang /* test xor */ 810*c0f28ce6SDave Jiang op = IOAT_OP_XOR; 811*c0f28ce6SDave Jiang 812*c0f28ce6SDave Jiang dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); 813*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dest_dma)) 814*c0f28ce6SDave Jiang goto dma_unmap; 815*c0f28ce6SDave Jiang 816*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 817*c0f28ce6SDave Jiang dma_srcs[i] = DMA_ERROR_CODE; 818*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST; i++) { 819*c0f28ce6SDave Jiang dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, 820*c0f28ce6SDave Jiang DMA_TO_DEVICE); 821*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dma_srcs[i])) 822*c0f28ce6SDave Jiang goto dma_unmap; 823*c0f28ce6SDave Jiang } 824*c0f28ce6SDave Jiang tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 825*c0f28ce6SDave Jiang IOAT_NUM_SRC_TEST, PAGE_SIZE, 826*c0f28ce6SDave Jiang DMA_PREP_INTERRUPT); 827*c0f28ce6SDave Jiang 828*c0f28ce6SDave Jiang if (!tx) { 829*c0f28ce6SDave Jiang dev_err(dev, "Self-test xor prep failed\n"); 830*c0f28ce6SDave Jiang err = -ENODEV; 831*c0f28ce6SDave Jiang goto dma_unmap; 832*c0f28ce6SDave Jiang } 833*c0f28ce6SDave Jiang 834*c0f28ce6SDave Jiang async_tx_ack(tx); 835*c0f28ce6SDave Jiang init_completion(&cmp); 836*c0f28ce6SDave Jiang tx->callback = ioat3_dma_test_callback; 837*c0f28ce6SDave Jiang tx->callback_param = &cmp; 838*c0f28ce6SDave Jiang cookie = tx->tx_submit(tx); 839*c0f28ce6SDave Jiang if (cookie < 0) { 840*c0f28ce6SDave Jiang dev_err(dev, "Self-test xor setup failed\n"); 841*c0f28ce6SDave Jiang err = -ENODEV; 842*c0f28ce6SDave Jiang goto dma_unmap; 843*c0f28ce6SDave Jiang } 844*c0f28ce6SDave Jiang dma->device_issue_pending(dma_chan); 845*c0f28ce6SDave Jiang 846*c0f28ce6SDave Jiang tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 847*c0f28ce6SDave Jiang 848*c0f28ce6SDave Jiang if (tmo == 0 || 849*c0f28ce6SDave Jiang dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 850*c0f28ce6SDave Jiang dev_err(dev, "Self-test xor timed out\n"); 851*c0f28ce6SDave Jiang err = -ENODEV; 852*c0f28ce6SDave Jiang goto dma_unmap; 853*c0f28ce6SDave Jiang } 854*c0f28ce6SDave Jiang 855*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 856*c0f28ce6SDave Jiang dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 857*c0f28ce6SDave Jiang 858*c0f28ce6SDave Jiang dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 859*c0f28ce6SDave Jiang for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 860*c0f28ce6SDave Jiang u32 *ptr = page_address(dest); 861*c0f28ce6SDave Jiang 862*c0f28ce6SDave Jiang if (ptr[i] != cmp_word) { 863*c0f28ce6SDave Jiang dev_err(dev, "Self-test xor failed compare\n"); 864*c0f28ce6SDave Jiang err = -ENODEV; 865*c0f28ce6SDave Jiang goto free_resources; 866*c0f28ce6SDave Jiang } 867*c0f28ce6SDave Jiang } 868*c0f28ce6SDave Jiang dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 869*c0f28ce6SDave Jiang 870*c0f28ce6SDave Jiang dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); 871*c0f28ce6SDave Jiang 872*c0f28ce6SDave Jiang /* skip validate if the capability is not present */ 873*c0f28ce6SDave Jiang if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) 874*c0f28ce6SDave Jiang goto free_resources; 875*c0f28ce6SDave Jiang 876*c0f28ce6SDave Jiang op = IOAT_OP_XOR_VAL; 877*c0f28ce6SDave Jiang 878*c0f28ce6SDave Jiang /* validate the sources with the destintation page */ 879*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 880*c0f28ce6SDave Jiang xor_val_srcs[i] = xor_srcs[i]; 881*c0f28ce6SDave Jiang xor_val_srcs[i] = dest; 882*c0f28ce6SDave Jiang 883*c0f28ce6SDave Jiang xor_val_result = 1; 884*c0f28ce6SDave Jiang 885*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 886*c0f28ce6SDave Jiang dma_srcs[i] = DMA_ERROR_CODE; 887*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { 888*c0f28ce6SDave Jiang dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 889*c0f28ce6SDave Jiang DMA_TO_DEVICE); 890*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dma_srcs[i])) 891*c0f28ce6SDave Jiang goto dma_unmap; 892*c0f28ce6SDave Jiang } 893*c0f28ce6SDave Jiang tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 894*c0f28ce6SDave Jiang IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 895*c0f28ce6SDave Jiang &xor_val_result, DMA_PREP_INTERRUPT); 896*c0f28ce6SDave Jiang if (!tx) { 897*c0f28ce6SDave Jiang dev_err(dev, "Self-test zero prep failed\n"); 898*c0f28ce6SDave Jiang err = -ENODEV; 899*c0f28ce6SDave Jiang goto dma_unmap; 900*c0f28ce6SDave Jiang } 901*c0f28ce6SDave Jiang 902*c0f28ce6SDave Jiang async_tx_ack(tx); 903*c0f28ce6SDave Jiang init_completion(&cmp); 904*c0f28ce6SDave Jiang tx->callback = ioat3_dma_test_callback; 905*c0f28ce6SDave Jiang tx->callback_param = &cmp; 906*c0f28ce6SDave Jiang cookie = tx->tx_submit(tx); 907*c0f28ce6SDave Jiang if (cookie < 0) { 908*c0f28ce6SDave Jiang dev_err(dev, "Self-test zero setup failed\n"); 909*c0f28ce6SDave Jiang err = -ENODEV; 910*c0f28ce6SDave Jiang goto dma_unmap; 911*c0f28ce6SDave Jiang } 912*c0f28ce6SDave Jiang dma->device_issue_pending(dma_chan); 913*c0f28ce6SDave Jiang 914*c0f28ce6SDave Jiang tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 915*c0f28ce6SDave Jiang 916*c0f28ce6SDave Jiang if (tmo == 0 || 917*c0f28ce6SDave Jiang dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 918*c0f28ce6SDave Jiang dev_err(dev, "Self-test validate timed out\n"); 919*c0f28ce6SDave Jiang err = -ENODEV; 920*c0f28ce6SDave Jiang goto dma_unmap; 921*c0f28ce6SDave Jiang } 922*c0f28ce6SDave Jiang 923*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 924*c0f28ce6SDave Jiang dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 925*c0f28ce6SDave Jiang 926*c0f28ce6SDave Jiang if (xor_val_result != 0) { 927*c0f28ce6SDave Jiang dev_err(dev, "Self-test validate failed compare\n"); 928*c0f28ce6SDave Jiang err = -ENODEV; 929*c0f28ce6SDave Jiang goto free_resources; 930*c0f28ce6SDave Jiang } 931*c0f28ce6SDave Jiang 932*c0f28ce6SDave Jiang memset(page_address(dest), 0, PAGE_SIZE); 933*c0f28ce6SDave Jiang 934*c0f28ce6SDave Jiang /* test for non-zero parity sum */ 935*c0f28ce6SDave Jiang op = IOAT_OP_XOR_VAL; 936*c0f28ce6SDave Jiang 937*c0f28ce6SDave Jiang xor_val_result = 0; 938*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 939*c0f28ce6SDave Jiang dma_srcs[i] = DMA_ERROR_CODE; 940*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) { 941*c0f28ce6SDave Jiang dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, 942*c0f28ce6SDave Jiang DMA_TO_DEVICE); 943*c0f28ce6SDave Jiang if (dma_mapping_error(dev, dma_srcs[i])) 944*c0f28ce6SDave Jiang goto dma_unmap; 945*c0f28ce6SDave Jiang } 946*c0f28ce6SDave Jiang tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, 947*c0f28ce6SDave Jiang IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, 948*c0f28ce6SDave Jiang &xor_val_result, DMA_PREP_INTERRUPT); 949*c0f28ce6SDave Jiang if (!tx) { 950*c0f28ce6SDave Jiang dev_err(dev, "Self-test 2nd zero prep failed\n"); 951*c0f28ce6SDave Jiang err = -ENODEV; 952*c0f28ce6SDave Jiang goto dma_unmap; 953*c0f28ce6SDave Jiang } 954*c0f28ce6SDave Jiang 955*c0f28ce6SDave Jiang async_tx_ack(tx); 956*c0f28ce6SDave Jiang init_completion(&cmp); 957*c0f28ce6SDave Jiang tx->callback = ioat3_dma_test_callback; 958*c0f28ce6SDave Jiang tx->callback_param = &cmp; 959*c0f28ce6SDave Jiang cookie = tx->tx_submit(tx); 960*c0f28ce6SDave Jiang if (cookie < 0) { 961*c0f28ce6SDave Jiang dev_err(dev, "Self-test 2nd zero setup failed\n"); 962*c0f28ce6SDave Jiang err = -ENODEV; 963*c0f28ce6SDave Jiang goto dma_unmap; 964*c0f28ce6SDave Jiang } 965*c0f28ce6SDave Jiang dma->device_issue_pending(dma_chan); 966*c0f28ce6SDave Jiang 967*c0f28ce6SDave Jiang tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 968*c0f28ce6SDave Jiang 969*c0f28ce6SDave Jiang if (tmo == 0 || 970*c0f28ce6SDave Jiang dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { 971*c0f28ce6SDave Jiang dev_err(dev, "Self-test 2nd validate timed out\n"); 972*c0f28ce6SDave Jiang err = -ENODEV; 973*c0f28ce6SDave Jiang goto dma_unmap; 974*c0f28ce6SDave Jiang } 975*c0f28ce6SDave Jiang 976*c0f28ce6SDave Jiang if (xor_val_result != SUM_CHECK_P_RESULT) { 977*c0f28ce6SDave Jiang dev_err(dev, "Self-test validate failed compare\n"); 978*c0f28ce6SDave Jiang err = -ENODEV; 979*c0f28ce6SDave Jiang goto dma_unmap; 980*c0f28ce6SDave Jiang } 981*c0f28ce6SDave Jiang 982*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 983*c0f28ce6SDave Jiang dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE); 984*c0f28ce6SDave Jiang 985*c0f28ce6SDave Jiang goto free_resources; 986*c0f28ce6SDave Jiang dma_unmap: 987*c0f28ce6SDave Jiang if (op == IOAT_OP_XOR) { 988*c0f28ce6SDave Jiang if (dest_dma != DMA_ERROR_CODE) 989*c0f28ce6SDave Jiang dma_unmap_page(dev, dest_dma, PAGE_SIZE, 990*c0f28ce6SDave Jiang DMA_FROM_DEVICE); 991*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST; i++) 992*c0f28ce6SDave Jiang if (dma_srcs[i] != DMA_ERROR_CODE) 993*c0f28ce6SDave Jiang dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 994*c0f28ce6SDave Jiang DMA_TO_DEVICE); 995*c0f28ce6SDave Jiang } else if (op == IOAT_OP_XOR_VAL) { 996*c0f28ce6SDave Jiang for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) 997*c0f28ce6SDave Jiang if (dma_srcs[i] != DMA_ERROR_CODE) 998*c0f28ce6SDave Jiang dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, 999*c0f28ce6SDave Jiang DMA_TO_DEVICE); 1000*c0f28ce6SDave Jiang } 1001*c0f28ce6SDave Jiang free_resources: 1002*c0f28ce6SDave Jiang dma->device_free_chan_resources(dma_chan); 1003*c0f28ce6SDave Jiang out: 1004*c0f28ce6SDave Jiang src_idx = IOAT_NUM_SRC_TEST; 1005*c0f28ce6SDave Jiang while (src_idx--) 1006*c0f28ce6SDave Jiang __free_page(xor_srcs[src_idx]); 1007*c0f28ce6SDave Jiang __free_page(dest); 1008*c0f28ce6SDave Jiang return err; 1009*c0f28ce6SDave Jiang } 1010*c0f28ce6SDave Jiang 1011*c0f28ce6SDave Jiang static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma) 1012*c0f28ce6SDave Jiang { 1013*c0f28ce6SDave Jiang int rc = ioat_dma_self_test(ioat_dma); 1014*c0f28ce6SDave Jiang 1015*c0f28ce6SDave Jiang if (rc) 1016*c0f28ce6SDave Jiang return rc; 1017*c0f28ce6SDave Jiang 1018*c0f28ce6SDave Jiang rc = ioat_xor_val_self_test(ioat_dma); 1019*c0f28ce6SDave Jiang if (rc) 1020*c0f28ce6SDave Jiang return rc; 1021*c0f28ce6SDave Jiang 1022*c0f28ce6SDave Jiang return 0; 1023*c0f28ce6SDave Jiang } 1024*c0f28ce6SDave Jiang 1025*c0f28ce6SDave Jiang static void ioat3_intr_quirk(struct ioatdma_device *ioat_dma) 1026*c0f28ce6SDave Jiang { 1027*c0f28ce6SDave Jiang struct dma_device *dma; 1028*c0f28ce6SDave Jiang struct dma_chan *c; 1029*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan; 1030*c0f28ce6SDave Jiang u32 errmask; 1031*c0f28ce6SDave Jiang 1032*c0f28ce6SDave Jiang dma = &ioat_dma->dma_dev; 1033*c0f28ce6SDave Jiang 1034*c0f28ce6SDave Jiang /* 1035*c0f28ce6SDave Jiang * if we have descriptor write back error status, we mask the 1036*c0f28ce6SDave Jiang * error interrupts 1037*c0f28ce6SDave Jiang */ 1038*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_DWBES) { 1039*c0f28ce6SDave Jiang list_for_each_entry(c, &dma->channels, device_node) { 1040*c0f28ce6SDave Jiang ioat_chan = to_ioat_chan(c); 1041*c0f28ce6SDave Jiang errmask = readl(ioat_chan->reg_base + 1042*c0f28ce6SDave Jiang IOAT_CHANERR_MASK_OFFSET); 1043*c0f28ce6SDave Jiang errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | 1044*c0f28ce6SDave Jiang IOAT_CHANERR_XOR_Q_ERR; 1045*c0f28ce6SDave Jiang writel(errmask, ioat_chan->reg_base + 1046*c0f28ce6SDave Jiang IOAT_CHANERR_MASK_OFFSET); 1047*c0f28ce6SDave Jiang } 1048*c0f28ce6SDave Jiang } 1049*c0f28ce6SDave Jiang } 1050*c0f28ce6SDave Jiang 1051*c0f28ce6SDave Jiang int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) 1052*c0f28ce6SDave Jiang { 1053*c0f28ce6SDave Jiang struct pci_dev *pdev = ioat_dma->pdev; 1054*c0f28ce6SDave Jiang int dca_en = system_has_dca_enabled(pdev); 1055*c0f28ce6SDave Jiang struct dma_device *dma; 1056*c0f28ce6SDave Jiang struct dma_chan *c; 1057*c0f28ce6SDave Jiang struct ioatdma_chan *ioat_chan; 1058*c0f28ce6SDave Jiang bool is_raid_device = false; 1059*c0f28ce6SDave Jiang int err; 1060*c0f28ce6SDave Jiang 1061*c0f28ce6SDave Jiang ioat_dma->enumerate_channels = ioat_enumerate_channels; 1062*c0f28ce6SDave Jiang ioat_dma->reset_hw = ioat_reset_hw; 1063*c0f28ce6SDave Jiang ioat_dma->self_test = ioat3_dma_self_test; 1064*c0f28ce6SDave Jiang ioat_dma->intr_quirk = ioat3_intr_quirk; 1065*c0f28ce6SDave Jiang dma = &ioat_dma->dma_dev; 1066*c0f28ce6SDave Jiang dma->device_prep_dma_memcpy = ioat_dma_prep_memcpy_lock; 1067*c0f28ce6SDave Jiang dma->device_issue_pending = ioat_issue_pending; 1068*c0f28ce6SDave Jiang dma->device_alloc_chan_resources = ioat_alloc_chan_resources; 1069*c0f28ce6SDave Jiang dma->device_free_chan_resources = ioat_free_chan_resources; 1070*c0f28ce6SDave Jiang 1071*c0f28ce6SDave Jiang dma_cap_set(DMA_INTERRUPT, dma->cap_mask); 1072*c0f28ce6SDave Jiang dma->device_prep_dma_interrupt = ioat_prep_interrupt_lock; 1073*c0f28ce6SDave Jiang 1074*c0f28ce6SDave Jiang ioat_dma->cap = readl(ioat_dma->reg_base + IOAT_DMA_CAP_OFFSET); 1075*c0f28ce6SDave Jiang 1076*c0f28ce6SDave Jiang if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev)) 1077*c0f28ce6SDave Jiang ioat_dma->cap &= 1078*c0f28ce6SDave Jiang ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); 1079*c0f28ce6SDave Jiang 1080*c0f28ce6SDave Jiang /* dca is incompatible with raid operations */ 1081*c0f28ce6SDave Jiang if (dca_en && (ioat_dma->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) 1082*c0f28ce6SDave Jiang ioat_dma->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); 1083*c0f28ce6SDave Jiang 1084*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_XOR) { 1085*c0f28ce6SDave Jiang is_raid_device = true; 1086*c0f28ce6SDave Jiang dma->max_xor = 8; 1087*c0f28ce6SDave Jiang 1088*c0f28ce6SDave Jiang dma_cap_set(DMA_XOR, dma->cap_mask); 1089*c0f28ce6SDave Jiang dma->device_prep_dma_xor = ioat_prep_xor; 1090*c0f28ce6SDave Jiang 1091*c0f28ce6SDave Jiang dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1092*c0f28ce6SDave Jiang dma->device_prep_dma_xor_val = ioat_prep_xor_val; 1093*c0f28ce6SDave Jiang } 1094*c0f28ce6SDave Jiang 1095*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_PQ) { 1096*c0f28ce6SDave Jiang is_raid_device = true; 1097*c0f28ce6SDave Jiang 1098*c0f28ce6SDave Jiang dma->device_prep_dma_pq = ioat_prep_pq; 1099*c0f28ce6SDave Jiang dma->device_prep_dma_pq_val = ioat_prep_pq_val; 1100*c0f28ce6SDave Jiang dma_cap_set(DMA_PQ, dma->cap_mask); 1101*c0f28ce6SDave Jiang dma_cap_set(DMA_PQ_VAL, dma->cap_mask); 1102*c0f28ce6SDave Jiang 1103*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_RAID16SS) 1104*c0f28ce6SDave Jiang dma_set_maxpq(dma, 16, 0); 1105*c0f28ce6SDave Jiang else 1106*c0f28ce6SDave Jiang dma_set_maxpq(dma, 8, 0); 1107*c0f28ce6SDave Jiang 1108*c0f28ce6SDave Jiang if (!(ioat_dma->cap & IOAT_CAP_XOR)) { 1109*c0f28ce6SDave Jiang dma->device_prep_dma_xor = ioat_prep_pqxor; 1110*c0f28ce6SDave Jiang dma->device_prep_dma_xor_val = ioat_prep_pqxor_val; 1111*c0f28ce6SDave Jiang dma_cap_set(DMA_XOR, dma->cap_mask); 1112*c0f28ce6SDave Jiang dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1113*c0f28ce6SDave Jiang 1114*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_RAID16SS) 1115*c0f28ce6SDave Jiang dma->max_xor = 16; 1116*c0f28ce6SDave Jiang else 1117*c0f28ce6SDave Jiang dma->max_xor = 8; 1118*c0f28ce6SDave Jiang } 1119*c0f28ce6SDave Jiang } 1120*c0f28ce6SDave Jiang 1121*c0f28ce6SDave Jiang dma->device_tx_status = ioat_tx_status; 1122*c0f28ce6SDave Jiang ioat_dma->cleanup_fn = ioat_cleanup_event; 1123*c0f28ce6SDave Jiang ioat_dma->timer_fn = ioat_timer_event; 1124*c0f28ce6SDave Jiang 1125*c0f28ce6SDave Jiang /* starting with CB3.3 super extended descriptors are supported */ 1126*c0f28ce6SDave Jiang if (ioat_dma->cap & IOAT_CAP_RAID16SS) { 1127*c0f28ce6SDave Jiang char pool_name[14]; 1128*c0f28ce6SDave Jiang int i; 1129*c0f28ce6SDave Jiang 1130*c0f28ce6SDave Jiang for (i = 0; i < MAX_SED_POOLS; i++) { 1131*c0f28ce6SDave Jiang snprintf(pool_name, 14, "ioat_hw%d_sed", i); 1132*c0f28ce6SDave Jiang 1133*c0f28ce6SDave Jiang /* allocate SED DMA pool */ 1134*c0f28ce6SDave Jiang ioat_dma->sed_hw_pool[i] = dmam_pool_create(pool_name, 1135*c0f28ce6SDave Jiang &pdev->dev, 1136*c0f28ce6SDave Jiang SED_SIZE * (i + 1), 64, 0); 1137*c0f28ce6SDave Jiang if (!ioat_dma->sed_hw_pool[i]) 1138*c0f28ce6SDave Jiang return -ENOMEM; 1139*c0f28ce6SDave Jiang 1140*c0f28ce6SDave Jiang } 1141*c0f28ce6SDave Jiang } 1142*c0f28ce6SDave Jiang 1143*c0f28ce6SDave Jiang if (!(ioat_dma->cap & (IOAT_CAP_XOR | IOAT_CAP_PQ))) 1144*c0f28ce6SDave Jiang dma_cap_set(DMA_PRIVATE, dma->cap_mask); 1145*c0f28ce6SDave Jiang 1146*c0f28ce6SDave Jiang err = ioat_probe(ioat_dma); 1147*c0f28ce6SDave Jiang if (err) 1148*c0f28ce6SDave Jiang return err; 1149*c0f28ce6SDave Jiang 1150*c0f28ce6SDave Jiang list_for_each_entry(c, &dma->channels, device_node) { 1151*c0f28ce6SDave Jiang ioat_chan = to_ioat_chan(c); 1152*c0f28ce6SDave Jiang writel(IOAT_DMA_DCA_ANY_CPU, 1153*c0f28ce6SDave Jiang ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); 1154*c0f28ce6SDave Jiang } 1155*c0f28ce6SDave Jiang 1156*c0f28ce6SDave Jiang err = ioat_register(ioat_dma); 1157*c0f28ce6SDave Jiang if (err) 1158*c0f28ce6SDave Jiang return err; 1159*c0f28ce6SDave Jiang 1160*c0f28ce6SDave Jiang ioat_kobject_add(ioat_dma, &ioat_ktype); 1161*c0f28ce6SDave Jiang 1162*c0f28ce6SDave Jiang if (dca) 1163*c0f28ce6SDave Jiang ioat_dma->dca = ioat3_dca_init(pdev, ioat_dma->reg_base); 1164*c0f28ce6SDave Jiang 1165*c0f28ce6SDave Jiang return 0; 1166*c0f28ce6SDave Jiang } 1167*c0f28ce6SDave Jiang 1168*c0f28ce6SDave Jiang #define DRV_NAME "ioatdma" 1169*c0f28ce6SDave Jiang 1170*c0f28ce6SDave Jiang static struct pci_driver ioat_pci_driver = { 1171*c0f28ce6SDave Jiang .name = DRV_NAME, 1172*c0f28ce6SDave Jiang .id_table = ioat_pci_tbl, 1173*c0f28ce6SDave Jiang .probe = ioat_pci_probe, 1174*c0f28ce6SDave Jiang .remove = ioat_remove, 1175*c0f28ce6SDave Jiang }; 1176*c0f28ce6SDave Jiang 1177*c0f28ce6SDave Jiang static struct ioatdma_device * 1178*c0f28ce6SDave Jiang alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) 1179*c0f28ce6SDave Jiang { 1180*c0f28ce6SDave Jiang struct device *dev = &pdev->dev; 1181*c0f28ce6SDave Jiang struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); 1182*c0f28ce6SDave Jiang 1183*c0f28ce6SDave Jiang if (!d) 1184*c0f28ce6SDave Jiang return NULL; 1185*c0f28ce6SDave Jiang d->pdev = pdev; 1186*c0f28ce6SDave Jiang d->reg_base = iobase; 1187*c0f28ce6SDave Jiang return d; 1188*c0f28ce6SDave Jiang } 1189*c0f28ce6SDave Jiang 1190*c0f28ce6SDave Jiang static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1191*c0f28ce6SDave Jiang { 1192*c0f28ce6SDave Jiang void __iomem * const *iomap; 1193*c0f28ce6SDave Jiang struct device *dev = &pdev->dev; 1194*c0f28ce6SDave Jiang struct ioatdma_device *device; 1195*c0f28ce6SDave Jiang int err; 1196*c0f28ce6SDave Jiang 1197*c0f28ce6SDave Jiang err = pcim_enable_device(pdev); 1198*c0f28ce6SDave Jiang if (err) 1199*c0f28ce6SDave Jiang return err; 1200*c0f28ce6SDave Jiang 1201*c0f28ce6SDave Jiang err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); 1202*c0f28ce6SDave Jiang if (err) 1203*c0f28ce6SDave Jiang return err; 1204*c0f28ce6SDave Jiang iomap = pcim_iomap_table(pdev); 1205*c0f28ce6SDave Jiang if (!iomap) 1206*c0f28ce6SDave Jiang return -ENOMEM; 1207*c0f28ce6SDave Jiang 1208*c0f28ce6SDave Jiang err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1209*c0f28ce6SDave Jiang if (err) 1210*c0f28ce6SDave Jiang err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1211*c0f28ce6SDave Jiang if (err) 1212*c0f28ce6SDave Jiang return err; 1213*c0f28ce6SDave Jiang 1214*c0f28ce6SDave Jiang err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1215*c0f28ce6SDave Jiang if (err) 1216*c0f28ce6SDave Jiang err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1217*c0f28ce6SDave Jiang if (err) 1218*c0f28ce6SDave Jiang return err; 1219*c0f28ce6SDave Jiang 1220*c0f28ce6SDave Jiang device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); 1221*c0f28ce6SDave Jiang if (!device) 1222*c0f28ce6SDave Jiang return -ENOMEM; 1223*c0f28ce6SDave Jiang pci_set_master(pdev); 1224*c0f28ce6SDave Jiang pci_set_drvdata(pdev, device); 1225*c0f28ce6SDave Jiang 1226*c0f28ce6SDave Jiang device->version = readb(device->reg_base + IOAT_VER_OFFSET); 1227*c0f28ce6SDave Jiang if (device->version >= IOAT_VER_3_0) 1228*c0f28ce6SDave Jiang err = ioat3_dma_probe(device, ioat_dca_enabled); 1229*c0f28ce6SDave Jiang else 1230*c0f28ce6SDave Jiang return -ENODEV; 1231*c0f28ce6SDave Jiang 1232*c0f28ce6SDave Jiang if (err) { 1233*c0f28ce6SDave Jiang dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); 1234*c0f28ce6SDave Jiang return -ENODEV; 1235*c0f28ce6SDave Jiang } 1236*c0f28ce6SDave Jiang 1237*c0f28ce6SDave Jiang return 0; 1238*c0f28ce6SDave Jiang } 1239*c0f28ce6SDave Jiang 1240*c0f28ce6SDave Jiang static void ioat_remove(struct pci_dev *pdev) 1241*c0f28ce6SDave Jiang { 1242*c0f28ce6SDave Jiang struct ioatdma_device *device = pci_get_drvdata(pdev); 1243*c0f28ce6SDave Jiang 1244*c0f28ce6SDave Jiang if (!device) 1245*c0f28ce6SDave Jiang return; 1246*c0f28ce6SDave Jiang 1247*c0f28ce6SDave Jiang dev_err(&pdev->dev, "Removing dma and dca services\n"); 1248*c0f28ce6SDave Jiang if (device->dca) { 1249*c0f28ce6SDave Jiang unregister_dca_provider(device->dca, &pdev->dev); 1250*c0f28ce6SDave Jiang free_dca_provider(device->dca); 1251*c0f28ce6SDave Jiang device->dca = NULL; 1252*c0f28ce6SDave Jiang } 1253*c0f28ce6SDave Jiang ioat_dma_remove(device); 1254*c0f28ce6SDave Jiang } 1255*c0f28ce6SDave Jiang 1256*c0f28ce6SDave Jiang static int __init ioat_init_module(void) 1257*c0f28ce6SDave Jiang { 1258*c0f28ce6SDave Jiang int err = -ENOMEM; 1259*c0f28ce6SDave Jiang 1260*c0f28ce6SDave Jiang pr_info("%s: Intel(R) QuickData Technology Driver %s\n", 1261*c0f28ce6SDave Jiang DRV_NAME, IOAT_DMA_VERSION); 1262*c0f28ce6SDave Jiang 1263*c0f28ce6SDave Jiang ioat_cache = kmem_cache_create("ioat", sizeof(struct ioat_ring_ent), 1264*c0f28ce6SDave Jiang 0, SLAB_HWCACHE_ALIGN, NULL); 1265*c0f28ce6SDave Jiang if (!ioat_cache) 1266*c0f28ce6SDave Jiang return -ENOMEM; 1267*c0f28ce6SDave Jiang 1268*c0f28ce6SDave Jiang ioat_sed_cache = KMEM_CACHE(ioat_sed_ent, 0); 1269*c0f28ce6SDave Jiang if (!ioat_sed_cache) 1270*c0f28ce6SDave Jiang goto err_ioat_cache; 1271*c0f28ce6SDave Jiang 1272*c0f28ce6SDave Jiang err = pci_register_driver(&ioat_pci_driver); 1273*c0f28ce6SDave Jiang if (err) 1274*c0f28ce6SDave Jiang goto err_ioat3_cache; 1275*c0f28ce6SDave Jiang 1276*c0f28ce6SDave Jiang return 0; 1277*c0f28ce6SDave Jiang 1278*c0f28ce6SDave Jiang err_ioat3_cache: 1279*c0f28ce6SDave Jiang kmem_cache_destroy(ioat_sed_cache); 1280*c0f28ce6SDave Jiang 1281*c0f28ce6SDave Jiang err_ioat_cache: 1282*c0f28ce6SDave Jiang kmem_cache_destroy(ioat_cache); 1283*c0f28ce6SDave Jiang 1284*c0f28ce6SDave Jiang return err; 1285*c0f28ce6SDave Jiang } 1286*c0f28ce6SDave Jiang module_init(ioat_init_module); 1287*c0f28ce6SDave Jiang 1288*c0f28ce6SDave Jiang static void __exit ioat_exit_module(void) 1289*c0f28ce6SDave Jiang { 1290*c0f28ce6SDave Jiang pci_unregister_driver(&ioat_pci_driver); 1291*c0f28ce6SDave Jiang kmem_cache_destroy(ioat_cache); 1292*c0f28ce6SDave Jiang } 1293*c0f28ce6SDave Jiang module_exit(ioat_exit_module); 1294