// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/aer.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");

#define DRV_NAME "idxd"

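/*
 * One device ID allocator per accelerator type (currently only DSA);
 * idxd_idr_lock serializes ID allocation in probe and removal in remove.
 */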
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

static char *idxd_name[] = {
	"dsa",
};

const char *idxd_get_dev_name(struct idxd_device *idxd)
{
	return idxd_name[idxd->type];
}

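/*
 * Allocate and wire up the device's MSI-X vectors: vector 0 is reserved for
 * error/misc events, and each remaining vector services completions for one
 * work queue.
 */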
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	struct idxd_irq_entry *irq_entry;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		/* Propagate the errno; rc would otherwise still be 0 here. */
		rc = msixcnt;
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		goto err_no_irq;
	}

	idxd->msix_entries = devm_kcalloc(dev, msixcnt,
					  sizeof(struct msix_entry),
					  GFP_KERNEL);
	if (!idxd->msix_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++)
		idxd->msix_entries[i].entry = i;

	rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt);
	if (rc) {
		dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt);
		goto err_no_irq;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	/*
	 * We implement 1 completion list per MSI-X entry except for
	 * entry 0, which is for errors and others.
	 */
	idxd->irq_entries = devm_kcalloc(dev, msixcnt,
					 sizeof(struct idxd_irq_entry),
					 GFP_KERNEL);
	if (!idxd->irq_entries) {
		rc = -ENOMEM;
		goto err_no_irq;
	}

	for (i = 0; i < msixcnt; i++) {
		idxd->irq_entries[i].id = i;
		idxd->irq_entries[i].idxd = idxd;
	}

	msix = &idxd->msix_entries[0];
	irq_entry = &idxd->irq_entries[0];
	rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler,
				       idxd_misc_thread, 0, "idxd-misc",
				       irq_entry);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_no_irq;
	}

	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n",
		msix->vector);

	/* first MSI-X entry is not for wq interrupts */
	idxd->num_wq_irqs = msixcnt - 1;

	for (i = 1; i < msixcnt; i++) {
		msix = &idxd->msix_entries[i];
		irq_entry = &idxd->irq_entries[i];

		init_llist_head(&idxd->irq_entries[i].pending_llist);
		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
		rc = devm_request_threaded_irq(dev, msix->vector,
					       idxd_irq_handler,
					       idxd_wq_thread, 0,
					       "idxd-portal", irq_entry);
		if (rc < 0) {
			dev_err(dev, "Failed to allocate irq %d.\n",
				msix->vector);
			goto err_no_irq;
		}
		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
			i, msix->vector);
	}

	idxd_unmask_error_interrupts(idxd);

	return 0;

 err_no_irq:
	/* Disable error interrupt generation */
	idxd_mask_error_interrupts(idxd);
	pci_disable_msix(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

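/*
 * Tear down the per-wq percpu rwsems allocated in idxd_setup_internals().
 */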
static void idxd_wqs_free_lock(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		percpu_free_rwsem(&wq->submit_lock);
	}
}

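/*
 * Allocate the software state for groups, work queues, and engines, sized
 * from the capability registers read earlier, and set per-object defaults
 * (traffic classes start at -1, i.e. unassigned).
 */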
static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
				    sizeof(struct idxd_group), GFP_KERNEL);
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		idxd->groups[i].idxd = idxd;
		idxd->groups[i].id = i;
		idxd->groups[i].tc_a = -1;
		idxd->groups[i].tc_b = -1;
	}

	idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq),
				 GFP_KERNEL);
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
				     sizeof(struct idxd_engine), GFP_KERNEL);
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];
		int rc;

		wq->id = i;
		wq->idxd = idxd;
		mutex_init(&wq->wq_lock);
		atomic_set(&wq->dq_count, 0);
		init_waitqueue_head(&wq->submit_waitq);
		rc = percpu_init_rwsem(&wq->submit_lock);
		if (rc < 0) {
			idxd_wqs_free_lock(idxd);
			return rc;
		}
	}

	for (i = 0; i < idxd->max_engines; i++) {
		idxd->engines[i].idxd = idxd;
		idxd->engines[i].id = i;
	}

	return 0;
}

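/*
 * The IDXD_TABLE_OFFSET register pair encodes where the group config, WQ
 * config, MSI-X permission, and perfmon tables live in MMIO space, in units
 * of 0x100 bytes; convert each to a byte offset and cache it.
 */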
static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
			+ sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
		idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
		idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * 0x100;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

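/*
 * Cache the general, group, engine, work queue, and operation capability
 * registers and derive the driver's limits from them (max transfer size,
 * batch size, groups, tokens, engines, and work queues).
 */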
static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
}

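/*
 * Allocate the per-device context from devm-managed memory so it is
 * released automatically when the PCI device is unbound.
 */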
static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
				      void __iomem * const *iomap)
{
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;

	idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL);
	if (!idxd)
		return NULL;

	idxd->pdev = pdev;
	idxd->reg_base = iomap[IDXD_MMIO_BAR];
	spin_lock_init(&idxd->dev_lock);

	return idxd;
}

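/*
 * Device-level probe: reset the device to a known state, read capabilities
 * and table offsets, build the software state, request interrupts, and
 * finally assign the device an ID from its per-type allocator.
 */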
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_reset(idxd);
	if (rc < 0)
		return rc;
	dev_dbg(dev, "IDXD reset complete\n");

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err_setup;

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_setup;

	dev_dbg(dev, "IDXD interrupt setup complete.\n");

	mutex_lock(&idxd_idr_lock);
	idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL);
	mutex_unlock(&idxd_idr_lock);
	if (idxd->id < 0) {
		rc = -ENOMEM;
		goto err_idr_fail;
	}

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

 err_idr_fail:
	idxd_mask_error_interrupts(idxd);
	idxd_mask_msix_vectors(idxd);
 err_setup:
	return rc;
}

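/*
 * PCI-level probe: enable the device, map the MMIO BAR, set 64-bit DMA
 * masks (with 32-bit fallback), then hand off to idxd_probe() for
 * device-level initialization.
 */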
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	struct device *dev = &pdev->dev;
	struct idxd_device *idxd;
	int rc;
	unsigned int mask;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	dev_dbg(dev, "Mapping BARs\n");
	mask = (1 << IDXD_MMIO_BAR);
	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	dev_dbg(dev, "Set DMA masks\n");
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	dev_dbg(dev, "Alloc IDXD context\n");
	idxd = idxd_alloc(pdev, iomap);
	if (!idxd)
		return -ENOMEM;

	idxd_set_type(idxd);

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
	rc = idxd_probe(idxd);
	if (rc) {
		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
		return -ENODEV;
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	return 0;
}

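/*
 * Quiesce the device: disable it, mask interrupt sources, then wait for
 * in-flight interrupt handlers to finish and drain any completions still
 * queued on the per-vector lists.
 */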
static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	int rc, i;
	struct idxd_irq_entry *irq_entry;
	int msixcnt = pci_msix_vec_count(pdev);
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_device_disable(idxd);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_mask_msix_vectors(idxd);
	idxd_mask_error_interrupts(idxd);

	for (i = 0; i < msixcnt; i++) {
		irq_entry = &idxd->irq_entries[i];
		synchronize_irq(idxd->msix_entries[i].vector);
		if (i == 0)
			continue;
		/* Vectors 1..n carry wq completions; flush what is pending. */
		idxd_flush_pending_llist(irq_entry);
		idxd_flush_work_list(irq_entry);
	}
}

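/*
 * Mirror of probe: shut the device down, release the wq locks, and return
 * the device ID to the per-type allocator.
 */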
static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	idxd_shutdown(pdev);
	idxd_wqs_free_lock(idxd);
	mutex_lock(&idxd_idr_lock);
	idr_remove(&idxd_idrs[idxd->type], idxd->id);
	mutex_unlock(&idxd_idr_lock);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
};

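/*
 * Work submission relies on the MOVDIR64B instruction for atomic 64-byte
 * writes to the device portals, so bail out early on CPUs without it.
 */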
static int __init idxd_init_module(void)
{
	int err, i;

	/*
	 * If the CPU does not support write512, there's no point in
	 * enumerating the device. We cannot utilize it.
	 */
	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
		DRV_NAME, IDXD_DRIVER_VERSION);

	mutex_init(&idxd_idr_lock);
	for (i = 0; i < IDXD_TYPE_MAX; i++)
		idr_init(&idxd_idrs[i]);

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		return err;

	return 0;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	pci_unregister_driver(&idxd_pci_driver);
}
module_exit(idxd_exit_module);