xref: /linux/drivers/dma/idxd/init.c (revision 7c5dd23e57c14cf7177b8a5e0fd08916e0c60005)
1bfe1d560SDave Jiang // SPDX-License-Identifier: GPL-2.0
2bfe1d560SDave Jiang /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3bfe1d560SDave Jiang #include <linux/init.h>
4bfe1d560SDave Jiang #include <linux/kernel.h>
5bfe1d560SDave Jiang #include <linux/module.h>
6bfe1d560SDave Jiang #include <linux/slab.h>
7bfe1d560SDave Jiang #include <linux/pci.h>
8bfe1d560SDave Jiang #include <linux/interrupt.h>
9bfe1d560SDave Jiang #include <linux/delay.h>
10bfe1d560SDave Jiang #include <linux/dma-mapping.h>
11bfe1d560SDave Jiang #include <linux/workqueue.h>
12bfe1d560SDave Jiang #include <linux/aer.h>
13bfe1d560SDave Jiang #include <linux/fs.h>
14bfe1d560SDave Jiang #include <linux/io-64-nonatomic-lo-hi.h>
15bfe1d560SDave Jiang #include <linux/device.h>
16bfe1d560SDave Jiang #include <linux/idr.h>
178e50d392SDave Jiang #include <linux/intel-svm.h>
188e50d392SDave Jiang #include <linux/iommu.h>
19bfe1d560SDave Jiang #include <uapi/linux/idxd.h>
208f47d1a5SDave Jiang #include <linux/dmaengine.h>
218f47d1a5SDave Jiang #include "../dmaengine.h"
22bfe1d560SDave Jiang #include "registers.h"
23bfe1d560SDave Jiang #include "idxd.h"
24bfe1d560SDave Jiang 
25bfe1d560SDave Jiang MODULE_VERSION(IDXD_DRIVER_VERSION);
26bfe1d560SDave Jiang MODULE_LICENSE("GPL v2");
27bfe1d560SDave Jiang MODULE_AUTHOR("Intel Corporation");
28bfe1d560SDave Jiang 
/*
 * Module parameter: lets the administrator force Shared Virtual Addressing
 * (SVA/PASID) off even when CONFIG_INTEL_IDXD_SVM and the platform allow it.
 */
2903d939c7SDave Jiang static bool sva = true;
3003d939c7SDave Jiang module_param(sva, bool, 0644);
3103d939c7SDave Jiang MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
3203d939c7SDave Jiang 
33bfe1d560SDave Jiang #define DRV_NAME "idxd"
34bfe1d560SDave Jiang 
/* Set at module init when the CPU has ENQCMD; consumed by other idxd files. */
358e50d392SDave Jiang bool support_enqcmd;
368e50d392SDave Jiang 
/* One device-id (ida) allocator per device type; indexed by enum idxd_type. */
37f7f77398SDave Jiang static struct ida idxd_idas[IDXD_TYPE_MAX];
38bfe1d560SDave Jiang 
/* PCI IDs this driver binds to: Sapphire Rapids DSA and IAX 1.0 devices. */
39bfe1d560SDave Jiang static struct pci_device_id idxd_pci_tbl[] = {
40bfe1d560SDave Jiang 	/* DSA ver 1.0 platforms */
41bfe1d560SDave Jiang 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
42f25b4638SDave Jiang 
43f25b4638SDave Jiang 	/* IAX ver 1.0 platforms */
44f25b4638SDave Jiang 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
45bfe1d560SDave Jiang 	{ 0, }
46bfe1d560SDave Jiang };
47bfe1d560SDave Jiang MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
48bfe1d560SDave Jiang 
/* Device-name stems, indexed by enum idxd_type (order must match idxd_idas). */
49bfe1d560SDave Jiang static char *idxd_name[] = {
50bfe1d560SDave Jiang 	"dsa",
51f25b4638SDave Jiang 	"iax"
52bfe1d560SDave Jiang };
53bfe1d560SDave Jiang 
5447c16ac2SDave Jiang struct ida *idxd_ida(struct idxd_device *idxd)
5547c16ac2SDave Jiang {
5647c16ac2SDave Jiang 	return &idxd_idas[idxd->type];
5747c16ac2SDave Jiang }
5847c16ac2SDave Jiang 
59bfe1d560SDave Jiang const char *idxd_get_dev_name(struct idxd_device *idxd)
60bfe1d560SDave Jiang {
61bfe1d560SDave Jiang 	return idxd_name[idxd->type];
62bfe1d560SDave Jiang }
63bfe1d560SDave Jiang 
/*
 * idxd_setup_interrupts() - allocate and wire up all MSI-X vectors.
 * @idxd: device being probed.
 *
 * Vector 0 is the "misc" interrupt (errors, command completions); vectors
 * 1..N-1 each drive one work-queue completion list.  On any failure the
 * function unwinds in reverse order via the goto labels and returns a
 * negative errno.
 *
 * NOTE(review): idxd->irq_entries (kcalloc_node below) is not kfree'd on
 * the error paths or in idxd_shutdown() as far as this file shows —
 * confirm it is released elsewhere.
 */
64bfe1d560SDave Jiang static int idxd_setup_interrupts(struct idxd_device *idxd)
65bfe1d560SDave Jiang {
66bfe1d560SDave Jiang 	struct pci_dev *pdev = idxd->pdev;
67bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
68bfe1d560SDave Jiang 	struct idxd_irq_entry *irq_entry;
69bfe1d560SDave Jiang 	int i, msixcnt;
70bfe1d560SDave Jiang 	int rc = 0;
71bfe1d560SDave Jiang 
72bfe1d560SDave Jiang 	msixcnt = pci_msix_vec_count(pdev);
73bfe1d560SDave Jiang 	if (msixcnt < 0) {
74bfe1d560SDave Jiang 		dev_err(dev, "Not MSI-X interrupt capable.\n");
755fc8e85fSDave Jiang 		return -ENOSPC;
76bfe1d560SDave Jiang 	}
77bfe1d560SDave Jiang 
	/* All-or-nothing: request exactly msixcnt vectors (min == max). */
785fc8e85fSDave Jiang 	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
795fc8e85fSDave Jiang 	if (rc != msixcnt) {
805fc8e85fSDave Jiang 		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
815fc8e85fSDave Jiang 		return -ENOSPC;
82bfe1d560SDave Jiang 	}
83bfe1d560SDave Jiang 	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
84bfe1d560SDave Jiang 
85bfe1d560SDave Jiang 	/*
86bfe1d560SDave Jiang 	 * We implement 1 completion list per MSI-X entry except for
87bfe1d560SDave Jiang 	 * entry 0, which is for errors and others.
88bfe1d560SDave Jiang 	 */
8947c16ac2SDave Jiang 	idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
9047c16ac2SDave Jiang 					 GFP_KERNEL, dev_to_node(dev));
91bfe1d560SDave Jiang 	if (!idxd->irq_entries) {
92bfe1d560SDave Jiang 		rc = -ENOMEM;
935fc8e85fSDave Jiang 		goto err_irq_entries;
94bfe1d560SDave Jiang 	}
95bfe1d560SDave Jiang 
96bfe1d560SDave Jiang 	for (i = 0; i < msixcnt; i++) {
97bfe1d560SDave Jiang 		idxd->irq_entries[i].id = i;
98bfe1d560SDave Jiang 		idxd->irq_entries[i].idxd = idxd;
995fc8e85fSDave Jiang 		idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
100e4f4d8cdSDave Jiang 		spin_lock_init(&idxd->irq_entries[i].list_lock);
101bfe1d560SDave Jiang 	}
102bfe1d560SDave Jiang 
	/* Vector 0: misc/error interrupt, handled in a threaded handler. */
103bfe1d560SDave Jiang 	irq_entry = &idxd->irq_entries[0];
1045fc8e85fSDave Jiang 	rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler, idxd_misc_thread,
1055fc8e85fSDave Jiang 				  0, "idxd-misc", irq_entry);
106bfe1d560SDave Jiang 	if (rc < 0) {
107bfe1d560SDave Jiang 		dev_err(dev, "Failed to allocate misc interrupt.\n");
1085fc8e85fSDave Jiang 		goto err_misc_irq;
109bfe1d560SDave Jiang 	}
110bfe1d560SDave Jiang 
1115fc8e85fSDave Jiang 	dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
112bfe1d560SDave Jiang 
113bfe1d560SDave Jiang 	/* first MSI-X entry is not for wq interrupts */
114bfe1d560SDave Jiang 	idxd->num_wq_irqs = msixcnt - 1;
115bfe1d560SDave Jiang 
116bfe1d560SDave Jiang 	for (i = 1; i < msixcnt; i++) {
117bfe1d560SDave Jiang 		irq_entry = &idxd->irq_entries[i];
118bfe1d560SDave Jiang 
119bfe1d560SDave Jiang 		init_llist_head(&idxd->irq_entries[i].pending_llist);
120bfe1d560SDave Jiang 		INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
1215fc8e85fSDave Jiang 		rc = request_threaded_irq(irq_entry->vector, idxd_irq_handler,
1225fc8e85fSDave Jiang 					  idxd_wq_thread, 0, "idxd-portal", irq_entry);
123bfe1d560SDave Jiang 		if (rc < 0) {
1245fc8e85fSDave Jiang 			dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
1255fc8e85fSDave Jiang 			goto err_wq_irqs;
126bfe1d560SDave Jiang 		}
1275fc8e85fSDave Jiang 		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
128bfe1d560SDave Jiang 	}
129bfe1d560SDave Jiang 
130bfe1d560SDave Jiang 	idxd_unmask_error_interrupts(idxd);
1316df0e6c5SDave Jiang 	idxd_msix_perm_setup(idxd);
132bfe1d560SDave Jiang 	return 0;
133bfe1d560SDave Jiang 
	/* Free every irq requested so far, including the misc irq at index 0. */
1345fc8e85fSDave Jiang  err_wq_irqs:
1355fc8e85fSDave Jiang 	while (--i >= 0) {
1365fc8e85fSDave Jiang 		irq_entry = &idxd->irq_entries[i];
1375fc8e85fSDave Jiang 		free_irq(irq_entry->vector, irq_entry);
1385fc8e85fSDave Jiang 	}
1395fc8e85fSDave Jiang  err_misc_irq:
140bfe1d560SDave Jiang 	/* Disable error interrupt generation */
141bfe1d560SDave Jiang 	idxd_mask_error_interrupts(idxd);
1425fc8e85fSDave Jiang  err_irq_entries:
1435fc8e85fSDave Jiang 	pci_free_irq_vectors(pdev);
144bfe1d560SDave Jiang 	dev_err(dev, "No usable interrupts\n");
145bfe1d560SDave Jiang 	return rc;
146bfe1d560SDave Jiang }
147bfe1d560SDave Jiang 
/*
 * idxd_setup_wqs() - allocate and initialize the per-device wq contexts.
 * @idxd: device being probed.
 *
 * Each wq is NUMA-local kzalloc'd and given its own conf_dev for the sysfs
 * interface.  Once device_initialize() has run, the wq's lifetime is owned
 * by the conf_dev refcount, so the error path releases already-built wqs
 * with put_device() rather than kfree().
 *
 * NOTE(review): the idxd->wqs pointer array itself is not freed on the
 * error path — confirm it is released elsewhere.
 *
 * Returns 0 on success or a negative errno.
 */
148*7c5dd23eSDave Jiang static int idxd_setup_wqs(struct idxd_device *idxd)
149*7c5dd23eSDave Jiang {
150*7c5dd23eSDave Jiang 	struct device *dev = &idxd->pdev->dev;
151*7c5dd23eSDave Jiang 	struct idxd_wq *wq;
152*7c5dd23eSDave Jiang 	int i, rc;
153*7c5dd23eSDave Jiang 
154*7c5dd23eSDave Jiang 	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
155*7c5dd23eSDave Jiang 				 GFP_KERNEL, dev_to_node(dev));
156*7c5dd23eSDave Jiang 	if (!idxd->wqs)
157*7c5dd23eSDave Jiang 		return -ENOMEM;
158*7c5dd23eSDave Jiang 
159*7c5dd23eSDave Jiang 	for (i = 0; i < idxd->max_wqs; i++) {
160*7c5dd23eSDave Jiang 		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
161*7c5dd23eSDave Jiang 		if (!wq) {
162*7c5dd23eSDave Jiang 			rc = -ENOMEM;
163*7c5dd23eSDave Jiang 			goto err;
164*7c5dd23eSDave Jiang 		}
165*7c5dd23eSDave Jiang 
166*7c5dd23eSDave Jiang 		wq->id = i;
167*7c5dd23eSDave Jiang 		wq->idxd = idxd;
		/* From here on the conf_dev refcount owns this wq's memory. */
168*7c5dd23eSDave Jiang 		device_initialize(&wq->conf_dev);
169*7c5dd23eSDave Jiang 		wq->conf_dev.parent = &idxd->conf_dev;
170*7c5dd23eSDave Jiang 		wq->conf_dev.bus = idxd_get_bus_type(idxd);
171*7c5dd23eSDave Jiang 		wq->conf_dev.type = &idxd_wq_device_type;
172*7c5dd23eSDave Jiang 		rc = dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
173*7c5dd23eSDave Jiang 		if (rc < 0) {
174*7c5dd23eSDave Jiang 			put_device(&wq->conf_dev);
175*7c5dd23eSDave Jiang 			goto err;
176*7c5dd23eSDave Jiang 		}
177*7c5dd23eSDave Jiang 
178*7c5dd23eSDave Jiang 		mutex_init(&wq->wq_lock);
		/* -1 == no char-dev minor assigned yet. */
179*7c5dd23eSDave Jiang 		wq->idxd_cdev.minor = -1;
180*7c5dd23eSDave Jiang 		wq->max_xfer_bytes = idxd->max_xfer_bytes;
181*7c5dd23eSDave Jiang 		wq->max_batch_size = idxd->max_batch_size;
182*7c5dd23eSDave Jiang 		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
183*7c5dd23eSDave Jiang 		if (!wq->wqcfg) {
184*7c5dd23eSDave Jiang 			put_device(&wq->conf_dev);
185*7c5dd23eSDave Jiang 			rc = -ENOMEM;
186*7c5dd23eSDave Jiang 			goto err;
187*7c5dd23eSDave Jiang 		}
188*7c5dd23eSDave Jiang 		idxd->wqs[i] = wq;
189*7c5dd23eSDave Jiang 	}
190*7c5dd23eSDave Jiang 
191*7c5dd23eSDave Jiang 	return 0;
192*7c5dd23eSDave Jiang 
	/* Release every wq fully built so far (index i failed and was handled). */
193*7c5dd23eSDave Jiang  err:
194*7c5dd23eSDave Jiang 	while (--i >= 0)
195*7c5dd23eSDave Jiang 		put_device(&idxd->wqs[i]->conf_dev);
196*7c5dd23eSDave Jiang 	return rc;
197*7c5dd23eSDave Jiang }
198*7c5dd23eSDave Jiang 
/*
 * idxd_setup_internals() - build the software state for wqs, groups,
 * engines, and the command workqueue.
 * @idxd: device being probed.
 *
 * groups and engines are devm-managed, so they unwind automatically with
 * the PCI device; only the wq conf_devs need explicit put_device() on the
 * error path.  Returns 0 on success or a negative errno.
 */
199bfe1d560SDave Jiang static int idxd_setup_internals(struct idxd_device *idxd)
200bfe1d560SDave Jiang {
201bfe1d560SDave Jiang 	struct device *dev = &idxd->pdev->dev;
202*7c5dd23eSDave Jiang 	int i, rc;
203bfe1d560SDave Jiang 
204bfe1d560SDave Jiang 	init_waitqueue_head(&idxd->cmd_waitq);
205*7c5dd23eSDave Jiang 
206*7c5dd23eSDave Jiang 	rc = idxd_setup_wqs(idxd);
207*7c5dd23eSDave Jiang 	if (rc < 0)
208*7c5dd23eSDave Jiang 		return rc;
209*7c5dd23eSDave Jiang 
210bfe1d560SDave Jiang 	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
211bfe1d560SDave Jiang 				    sizeof(struct idxd_group), GFP_KERNEL);
212*7c5dd23eSDave Jiang 	if (!idxd->groups) {
213*7c5dd23eSDave Jiang 		rc = -ENOMEM;
214*7c5dd23eSDave Jiang 		goto err;
215*7c5dd23eSDave Jiang 	}
216bfe1d560SDave Jiang 
217bfe1d560SDave Jiang 	for (i = 0; i < idxd->max_groups; i++) {
218bfe1d560SDave Jiang 		idxd->groups[i].idxd = idxd;
219bfe1d560SDave Jiang 		idxd->groups[i].id = i;
		/* -1 == traffic class not configured yet. */
220bfe1d560SDave Jiang 		idxd->groups[i].tc_a = -1;
221bfe1d560SDave Jiang 		idxd->groups[i].tc_b = -1;
222bfe1d560SDave Jiang 	}
223bfe1d560SDave Jiang 
224bfe1d560SDave Jiang 	idxd->engines = devm_kcalloc(dev, idxd->max_engines,
225bfe1d560SDave Jiang 				     sizeof(struct idxd_engine), GFP_KERNEL);
226*7c5dd23eSDave Jiang 	if (!idxd->engines) {
227*7c5dd23eSDave Jiang 		rc = -ENOMEM;
228*7c5dd23eSDave Jiang 		goto err;
229bfe1d560SDave Jiang 	}
230bfe1d560SDave Jiang 
231*7c5dd23eSDave Jiang 
232bfe1d560SDave Jiang 	for (i = 0; i < idxd->max_engines; i++) {
233bfe1d560SDave Jiang 		idxd->engines[i].idxd = idxd;
234bfe1d560SDave Jiang 		idxd->engines[i].id = i;
235bfe1d560SDave Jiang 	}
236bfe1d560SDave Jiang 
	/* Single-threaded workqueue used for device command submission. */
2370d5c10b4SDave Jiang 	idxd->wq = create_workqueue(dev_name(dev));
238*7c5dd23eSDave Jiang 	if (!idxd->wq) {
239*7c5dd23eSDave Jiang 		rc = -ENOMEM;
240*7c5dd23eSDave Jiang 		goto err;
241*7c5dd23eSDave Jiang 	}
2420d5c10b4SDave Jiang 
243bfe1d560SDave Jiang 	return 0;
244*7c5dd23eSDave Jiang 
	/* Drop the wq conf_devs created by idxd_setup_wqs(). */
245*7c5dd23eSDave Jiang  err:
246*7c5dd23eSDave Jiang 	for (i = 0; i < idxd->max_wqs; i++)
247*7c5dd23eSDave Jiang 		put_device(&idxd->wqs[i]->conf_dev);
248*7c5dd23eSDave Jiang 	return rc;
249bfe1d560SDave Jiang }
250bfe1d560SDave Jiang 
/*
 * idxd_read_table_offsets() - read the config-table offset register and
 * cache the byte offsets of the grpcfg/wqcfg/msix-perm/perfmon tables.
 * @idxd: device being probed.
 *
 * The hardware reports offsets in multiples of IDXD_TABLE_MULT.
 */
251bfe1d560SDave Jiang static void idxd_read_table_offsets(struct idxd_device *idxd)
252bfe1d560SDave Jiang {
253bfe1d560SDave Jiang 	union offsets_reg offsets;
254bfe1d560SDave Jiang 	struct device *dev = &idxd->pdev->dev;
255bfe1d560SDave Jiang 
256bfe1d560SDave Jiang 	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
2572f8417a9SDave Jiang 	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
2582f8417a9SDave Jiang 	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
259bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
2602f8417a9SDave Jiang 	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
2612f8417a9SDave Jiang 	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
2622f8417a9SDave Jiang 	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
2632f8417a9SDave Jiang 	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
2642f8417a9SDave Jiang 	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
265bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
266bfe1d560SDave Jiang }
267bfe1d560SDave Jiang 
/*
 * idxd_read_caps() - read the device capability registers and cache the
 * limits (max transfer/batch sizes, group/engine/wq counts, tokens, opcap).
 * @idxd: device being probed.
 */
268bfe1d560SDave Jiang static void idxd_read_caps(struct idxd_device *idxd)
269bfe1d560SDave Jiang {
270bfe1d560SDave Jiang 	struct device *dev = &idxd->pdev->dev;
271bfe1d560SDave Jiang 	int i;
272bfe1d560SDave Jiang 
273bfe1d560SDave Jiang 	/* reading generic capabilities */
274bfe1d560SDave Jiang 	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
275bfe1d560SDave Jiang 	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
	/* Sizes are reported as power-of-two shifts. */
276bfe1d560SDave Jiang 	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
277bfe1d560SDave Jiang 	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
278bfe1d560SDave Jiang 	idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
279bfe1d560SDave Jiang 	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
280bfe1d560SDave Jiang 	if (idxd->hw.gen_cap.config_en)
281bfe1d560SDave Jiang 		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
282bfe1d560SDave Jiang 
283bfe1d560SDave Jiang 	/* reading group capabilities */
284bfe1d560SDave Jiang 	idxd->hw.group_cap.bits =
285bfe1d560SDave Jiang 		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
286bfe1d560SDave Jiang 	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
287bfe1d560SDave Jiang 	idxd->max_groups = idxd->hw.group_cap.num_groups;
288bfe1d560SDave Jiang 	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
289bfe1d560SDave Jiang 	idxd->max_tokens = idxd->hw.group_cap.total_tokens;
290bfe1d560SDave Jiang 	dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
	/* All tokens are unassigned until groups are configured. */
291c52ca478SDave Jiang 	idxd->nr_tokens = idxd->max_tokens;
292bfe1d560SDave Jiang 
293bfe1d560SDave Jiang 	/* read engine capabilities */
294bfe1d560SDave Jiang 	idxd->hw.engine_cap.bits =
295bfe1d560SDave Jiang 		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
296bfe1d560SDave Jiang 	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
297bfe1d560SDave Jiang 	idxd->max_engines = idxd->hw.engine_cap.num_engines;
298bfe1d560SDave Jiang 	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
299bfe1d560SDave Jiang 
300bfe1d560SDave Jiang 	/* read workqueue capabilities */
301bfe1d560SDave Jiang 	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
302bfe1d560SDave Jiang 	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
303bfe1d560SDave Jiang 	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
304bfe1d560SDave Jiang 	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
305bfe1d560SDave Jiang 	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
306bfe1d560SDave Jiang 	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
307d98793b5SDave Jiang 	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
308d98793b5SDave Jiang 	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
309bfe1d560SDave Jiang 
310bfe1d560SDave Jiang 	/* reading operation capabilities */
311bfe1d560SDave Jiang 	for (i = 0; i < 4; i++) {
312bfe1d560SDave Jiang 		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
313bfe1d560SDave Jiang 				IDXD_OPCAP_OFFSET + i * sizeof(u64));
314bfe1d560SDave Jiang 		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
315bfe1d560SDave Jiang 	}
316bfe1d560SDave Jiang }
317bfe1d560SDave Jiang 
31847c16ac2SDave Jiang static inline void idxd_set_type(struct idxd_device *idxd)
31947c16ac2SDave Jiang {
32047c16ac2SDave Jiang 	struct pci_dev *pdev = idxd->pdev;
32147c16ac2SDave Jiang 
32247c16ac2SDave Jiang 	if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
32347c16ac2SDave Jiang 		idxd->type = IDXD_TYPE_DSA;
32447c16ac2SDave Jiang 	else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
32547c16ac2SDave Jiang 		idxd->type = IDXD_TYPE_IAX;
32647c16ac2SDave Jiang 	else
32747c16ac2SDave Jiang 		idxd->type = IDXD_TYPE_UNKNOWN;
32847c16ac2SDave Jiang }
32947c16ac2SDave Jiang 
3308e50d392SDave Jiang static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
331bfe1d560SDave Jiang {
332bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
333bfe1d560SDave Jiang 	struct idxd_device *idxd;
33447c16ac2SDave Jiang 	int rc;
335bfe1d560SDave Jiang 
33647c16ac2SDave Jiang 	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
337bfe1d560SDave Jiang 	if (!idxd)
338bfe1d560SDave Jiang 		return NULL;
339bfe1d560SDave Jiang 
340bfe1d560SDave Jiang 	idxd->pdev = pdev;
34147c16ac2SDave Jiang 	idxd_set_type(idxd);
34247c16ac2SDave Jiang 	idxd->id = ida_alloc(idxd_ida(idxd), GFP_KERNEL);
34347c16ac2SDave Jiang 	if (idxd->id < 0)
34447c16ac2SDave Jiang 		return NULL;
34547c16ac2SDave Jiang 
34647c16ac2SDave Jiang 	device_initialize(&idxd->conf_dev);
34747c16ac2SDave Jiang 	idxd->conf_dev.parent = dev;
34847c16ac2SDave Jiang 	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
34947c16ac2SDave Jiang 	idxd->conf_dev.type = idxd_get_device_type(idxd);
35047c16ac2SDave Jiang 	rc = dev_set_name(&idxd->conf_dev, "%s%d", idxd_get_dev_name(idxd), idxd->id);
35147c16ac2SDave Jiang 	if (rc < 0) {
35247c16ac2SDave Jiang 		put_device(&idxd->conf_dev);
35347c16ac2SDave Jiang 		return NULL;
35447c16ac2SDave Jiang 	}
35547c16ac2SDave Jiang 
356bfe1d560SDave Jiang 	spin_lock_init(&idxd->dev_lock);
357bfe1d560SDave Jiang 
358bfe1d560SDave Jiang 	return idxd;
359bfe1d560SDave Jiang }
360bfe1d560SDave Jiang 
/*
 * idxd_enable_system_pasid() - bind a supervisor (in-kernel) PASID for the
 * device via the IOMMU SVA interface and record it in @idxd.
 *
 * Returns 0 on success or a negative errno.
 */
3618e50d392SDave Jiang static int idxd_enable_system_pasid(struct idxd_device *idxd)
3628e50d392SDave Jiang {
3638e50d392SDave Jiang 	int flags;
3648e50d392SDave Jiang 	unsigned int pasid;
3658e50d392SDave Jiang 	struct iommu_sva *sva;
3668e50d392SDave Jiang 
	/* Supervisor mode: the PASID maps kernel, not user, address space. */
3678e50d392SDave Jiang 	flags = SVM_FLAG_SUPERVISOR_MODE;
3688e50d392SDave Jiang 
3698e50d392SDave Jiang 	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
3708e50d392SDave Jiang 	if (IS_ERR(sva)) {
3718e50d392SDave Jiang 		dev_warn(&idxd->pdev->dev,
3728e50d392SDave Jiang 			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
3738e50d392SDave Jiang 		return PTR_ERR(sva);
3748e50d392SDave Jiang 	}
3758e50d392SDave Jiang 
3768e50d392SDave Jiang 	pasid = iommu_sva_get_pasid(sva);
3778e50d392SDave Jiang 	if (pasid == IOMMU_PASID_INVALID) {
3788e50d392SDave Jiang 		iommu_sva_unbind_device(sva);
3798e50d392SDave Jiang 		return -ENODEV;
3808e50d392SDave Jiang 	}
3818e50d392SDave Jiang 
3828e50d392SDave Jiang 	idxd->sva = sva;
3838e50d392SDave Jiang 	idxd->pasid = pasid;
3848e50d392SDave Jiang 	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
3858e50d392SDave Jiang 	return 0;
3868e50d392SDave Jiang }
3878e50d392SDave Jiang 
3888e50d392SDave Jiang static void idxd_disable_system_pasid(struct idxd_device *idxd)
3898e50d392SDave Jiang {
3908e50d392SDave Jiang 
3918e50d392SDave Jiang 	iommu_sva_unbind_device(idxd->sva);
3928e50d392SDave Jiang 	idxd->sva = NULL;
3938e50d392SDave Jiang }
3948e50d392SDave Jiang 
/*
 * idxd_probe() - device-level probe: reset hardware, optionally enable
 * system PASID, read capabilities, and build software state + interrupts.
 * @idxd: device context from idxd_alloc().
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the err path only disables PASID; a failure in
 * idxd_setup_interrupts() does not appear to unwind the wqs/workqueue
 * created by idxd_setup_internals() — confirm whether that is released
 * elsewhere (e.g. via conf_dev teardown).
 */
395bfe1d560SDave Jiang static int idxd_probe(struct idxd_device *idxd)
396bfe1d560SDave Jiang {
397bfe1d560SDave Jiang 	struct pci_dev *pdev = idxd->pdev;
398bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
399bfe1d560SDave Jiang 	int rc;
400bfe1d560SDave Jiang 
401bfe1d560SDave Jiang 	dev_dbg(dev, "%s entered and resetting device\n", __func__);
40289e3becdSDave Jiang 	rc = idxd_device_init_reset(idxd);
40389e3becdSDave Jiang 	if (rc < 0)
40489e3becdSDave Jiang 		return rc;
40589e3becdSDave Jiang 
406bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD reset complete\n");
407bfe1d560SDave Jiang 
	/* PASID failure is non-fatal: fall back to non-SVA operation. */
40803d939c7SDave Jiang 	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
4098e50d392SDave Jiang 		rc = idxd_enable_system_pasid(idxd);
4108e50d392SDave Jiang 		if (rc < 0)
4118e50d392SDave Jiang 			dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
4128e50d392SDave Jiang 		else
4138e50d392SDave Jiang 			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
41403d939c7SDave Jiang 	} else if (!sva) {
41503d939c7SDave Jiang 		dev_warn(dev, "User forced SVA off via module param.\n");
4168e50d392SDave Jiang 	}
4178e50d392SDave Jiang 
418bfe1d560SDave Jiang 	idxd_read_caps(idxd);
419bfe1d560SDave Jiang 	idxd_read_table_offsets(idxd);
420bfe1d560SDave Jiang 
421bfe1d560SDave Jiang 	rc = idxd_setup_internals(idxd);
422bfe1d560SDave Jiang 	if (rc)
423*7c5dd23eSDave Jiang 		goto err;
424bfe1d560SDave Jiang 
425bfe1d560SDave Jiang 	rc = idxd_setup_interrupts(idxd);
426bfe1d560SDave Jiang 	if (rc)
427*7c5dd23eSDave Jiang 		goto err;
428bfe1d560SDave Jiang 
429bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD interrupt setup complete.\n");
430bfe1d560SDave Jiang 
43142d279f9SDave Jiang 	idxd->major = idxd_cdev_get_major(idxd);
43242d279f9SDave Jiang 
433bfe1d560SDave Jiang 	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
434bfe1d560SDave Jiang 	return 0;
435bfe1d560SDave Jiang 
436*7c5dd23eSDave Jiang  err:
4378e50d392SDave Jiang 	if (device_pasid_enabled(idxd))
4388e50d392SDave Jiang 		idxd_disable_system_pasid(idxd);
439bfe1d560SDave Jiang 	return rc;
440bfe1d560SDave Jiang }
441bfe1d560SDave Jiang 
442f25b4638SDave Jiang static void idxd_type_init(struct idxd_device *idxd)
443f25b4638SDave Jiang {
444f25b4638SDave Jiang 	if (idxd->type == IDXD_TYPE_DSA)
445f25b4638SDave Jiang 		idxd->compl_size = sizeof(struct dsa_completion_record);
446f25b4638SDave Jiang 	else if (idxd->type == IDXD_TYPE_IAX)
447f25b4638SDave Jiang 		idxd->compl_size = sizeof(struct iax_completion_record);
448f25b4638SDave Jiang }
449f25b4638SDave Jiang 
/*
 * idxd_pci_probe() - PCI probe entry point.
 * @pdev: the PCI device.
 * @id: matched entry from idxd_pci_tbl (unused).
 *
 * Enables the PCI device, allocates the idxd context, maps the MMIO BAR,
 * sets DMA masks, probes the hardware, and registers the sysfs devices.
 * Unwinds via the goto labels in reverse order of acquisition on failure.
 *
 * Returns 0 on success or a negative errno.
 */
450bfe1d560SDave Jiang static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
451bfe1d560SDave Jiang {
452bfe1d560SDave Jiang 	struct device *dev = &pdev->dev;
453bfe1d560SDave Jiang 	struct idxd_device *idxd;
454bfe1d560SDave Jiang 	int rc;
455bfe1d560SDave Jiang 
456a39c7cd0SDave Jiang 	rc = pci_enable_device(pdev);
457bfe1d560SDave Jiang 	if (rc)
458bfe1d560SDave Jiang 		return rc;
459bfe1d560SDave Jiang 
4608e50d392SDave Jiang 	dev_dbg(dev, "Alloc IDXD context\n");
4618e50d392SDave Jiang 	idxd = idxd_alloc(pdev);
462a39c7cd0SDave Jiang 	if (!idxd) {
463a39c7cd0SDave Jiang 		rc = -ENOMEM;
464a39c7cd0SDave Jiang 		goto err_idxd_alloc;
465a39c7cd0SDave Jiang 	}
466bfe1d560SDave Jiang 
4678e50d392SDave Jiang 	dev_dbg(dev, "Mapping BARs\n");
468a39c7cd0SDave Jiang 	idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
469a39c7cd0SDave Jiang 	if (!idxd->reg_base) {
470a39c7cd0SDave Jiang 		rc = -ENOMEM;
471a39c7cd0SDave Jiang 		goto err_iomap;
472a39c7cd0SDave Jiang 	}
473bfe1d560SDave Jiang 
	/* Prefer 64-bit DMA, fall back to 32-bit if unsupported. */
474bfe1d560SDave Jiang 	dev_dbg(dev, "Set DMA masks\n");
475bfe1d560SDave Jiang 	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
476bfe1d560SDave Jiang 	if (rc)
477bfe1d560SDave Jiang 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
478bfe1d560SDave Jiang 	if (rc)
479a39c7cd0SDave Jiang 		goto err;
480bfe1d560SDave Jiang 
481bfe1d560SDave Jiang 	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
482bfe1d560SDave Jiang 	if (rc)
483bfe1d560SDave Jiang 		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
484bfe1d560SDave Jiang 	if (rc)
485a39c7cd0SDave Jiang 		goto err;
486bfe1d560SDave Jiang 
487bfe1d560SDave Jiang 
488f25b4638SDave Jiang 	idxd_type_init(idxd);
489f25b4638SDave Jiang 
490bfe1d560SDave Jiang 	dev_dbg(dev, "Set PCI master\n");
491bfe1d560SDave Jiang 	pci_set_master(pdev);
492bfe1d560SDave Jiang 	pci_set_drvdata(pdev, idxd);
493bfe1d560SDave Jiang 
494bfe1d560SDave Jiang 	idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
495bfe1d560SDave Jiang 	rc = idxd_probe(idxd);
496bfe1d560SDave Jiang 	if (rc) {
497bfe1d560SDave Jiang 		dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
498a39c7cd0SDave Jiang 		goto err;
499bfe1d560SDave Jiang 	}
500bfe1d560SDave Jiang 
50147c16ac2SDave Jiang 	rc = idxd_register_devices(idxd);
502c52ca478SDave Jiang 	if (rc) {
503c52ca478SDave Jiang 		dev_err(dev, "IDXD sysfs setup failed\n");
504a39c7cd0SDave Jiang 		goto err;
505c52ca478SDave Jiang 	}
506c52ca478SDave Jiang 
	/* Device is now visible to userspace and ready for configuration. */
507c52ca478SDave Jiang 	idxd->state = IDXD_DEV_CONF_READY;
508c52ca478SDave Jiang 
509bfe1d560SDave Jiang 	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
510bfe1d560SDave Jiang 		 idxd->hw.version);
511bfe1d560SDave Jiang 
512bfe1d560SDave Jiang 	return 0;
513a39c7cd0SDave Jiang 
514a39c7cd0SDave Jiang  err:
515a39c7cd0SDave Jiang 	pci_iounmap(pdev, idxd->reg_base);
516a39c7cd0SDave Jiang  err_iomap:
	/* Dropping the conf_dev reference releases the idxd context. */
51747c16ac2SDave Jiang 	put_device(&idxd->conf_dev);
518a39c7cd0SDave Jiang  err_idxd_alloc:
519a39c7cd0SDave Jiang 	pci_disable_device(pdev);
520a39c7cd0SDave Jiang 	return rc;
521bfe1d560SDave Jiang }
522bfe1d560SDave Jiang 
/*
 * idxd_flush_pending_llist() - abort every descriptor still sitting on an
 * irq entry's lock-free pending list and return it to the wq's free pool.
 * @ie: the per-vector irq entry being torn down.
 *
 * Used during shutdown after the irq has been freed, so no new entries
 * can race in.
 */
5238f47d1a5SDave Jiang static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
5248f47d1a5SDave Jiang {
5258f47d1a5SDave Jiang 	struct idxd_desc *desc, *itr;
5268f47d1a5SDave Jiang 	struct llist_node *head;
5278f47d1a5SDave Jiang 
5288f47d1a5SDave Jiang 	head = llist_del_all(&ie->pending_llist);
5298f47d1a5SDave Jiang 	if (!head)
5308f47d1a5SDave Jiang 		return;
5318f47d1a5SDave Jiang 
5328f47d1a5SDave Jiang 	llist_for_each_entry_safe(desc, itr, head, llnode) {
5338f47d1a5SDave Jiang 		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
5348f47d1a5SDave Jiang 		idxd_free_desc(desc->wq, desc);
5358f47d1a5SDave Jiang 	}
5368f47d1a5SDave Jiang }
5378f47d1a5SDave Jiang 
/*
 * idxd_flush_work_list() - abort every descriptor on an irq entry's
 * work list and return it to the wq's free pool.
 * @ie: the per-vector irq entry being torn down.
 */
5388f47d1a5SDave Jiang static void idxd_flush_work_list(struct idxd_irq_entry *ie)
5398f47d1a5SDave Jiang {
5408f47d1a5SDave Jiang 	struct idxd_desc *desc, *iter;
5418f47d1a5SDave Jiang 
5428f47d1a5SDave Jiang 	list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
5438f47d1a5SDave Jiang 		list_del(&desc->list);
5448f47d1a5SDave Jiang 		idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
5458f47d1a5SDave Jiang 		idxd_free_desc(desc->wq, desc);
5468f47d1a5SDave Jiang 	}
5478f47d1a5SDave Jiang }
5488f47d1a5SDave Jiang 
/*
 * idxd_shutdown() - quiesce the device: disable hardware, mask and free
 * all interrupts, abort in-flight descriptors, and release PCI resources.
 * @pdev: the PCI device.
 *
 * Also used by idxd_remove(); registered as the PCI .shutdown callback.
 */
549bfe1d560SDave Jiang static void idxd_shutdown(struct pci_dev *pdev)
550bfe1d560SDave Jiang {
551bfe1d560SDave Jiang 	struct idxd_device *idxd = pci_get_drvdata(pdev);
552bfe1d560SDave Jiang 	int rc, i;
553bfe1d560SDave Jiang 	struct idxd_irq_entry *irq_entry;
554bfe1d560SDave Jiang 	int msixcnt = pci_msix_vec_count(pdev);
555bfe1d560SDave Jiang 
556bfe1d560SDave Jiang 	rc = idxd_device_disable(idxd);
557bfe1d560SDave Jiang 	if (rc)
558bfe1d560SDave Jiang 		dev_err(&pdev->dev, "Disabling device failed\n");
559bfe1d560SDave Jiang 
560bfe1d560SDave Jiang 	dev_dbg(&pdev->dev, "%s called\n", __func__);
561bfe1d560SDave Jiang 	idxd_mask_msix_vectors(idxd);
562bfe1d560SDave Jiang 	idxd_mask_error_interrupts(idxd);
563bfe1d560SDave Jiang 
564bfe1d560SDave Jiang 	for (i = 0; i < msixcnt; i++) {
565bfe1d560SDave Jiang 		irq_entry = &idxd->irq_entries[i];
566bfe1d560SDave Jiang 		synchronize_irq(irq_entry->vector);
567bfe1d560SDave Jiang 		free_irq(irq_entry->vector, irq_entry);
		/* Vector 0 is misc-only: it carries no descriptor lists. */
568bfe1d560SDave Jiang 		if (i == 0)
569bfe1d560SDave Jiang 			continue;
5708f47d1a5SDave Jiang 		idxd_flush_pending_llist(irq_entry);
5718f47d1a5SDave Jiang 		idxd_flush_work_list(irq_entry);
572bfe1d560SDave Jiang 	}
5730d5c10b4SDave Jiang 
5746df0e6c5SDave Jiang 	idxd_msix_perm_clear(idxd);
5755fc8e85fSDave Jiang 	pci_free_irq_vectors(pdev);
576a39c7cd0SDave Jiang 	pci_iounmap(pdev, idxd->reg_base);
577a39c7cd0SDave Jiang 	pci_disable_device(pdev);
5780d5c10b4SDave Jiang 	destroy_workqueue(idxd->wq);
579bfe1d560SDave Jiang }
580bfe1d560SDave Jiang 
/*
 * idxd_remove() - PCI remove callback: shut the device down, release the
 * system PASID if one was enabled, and tear down the sysfs devices.
 * @pdev: the PCI device.
 */
581bfe1d560SDave Jiang static void idxd_remove(struct pci_dev *pdev)
582bfe1d560SDave Jiang {
583bfe1d560SDave Jiang 	struct idxd_device *idxd = pci_get_drvdata(pdev);
584bfe1d560SDave Jiang 
585bfe1d560SDave Jiang 	dev_dbg(&pdev->dev, "%s called\n", __func__);
586bfe1d560SDave Jiang 	idxd_shutdown(pdev);
5878e50d392SDave Jiang 	if (device_pasid_enabled(idxd))
5888e50d392SDave Jiang 		idxd_disable_system_pasid(idxd);
58947c16ac2SDave Jiang 	idxd_unregister_devices(idxd);
590bfe1d560SDave Jiang }
591bfe1d560SDave Jiang 
/* PCI driver glue binding the idxd entry points to matching devices. */
592bfe1d560SDave Jiang static struct pci_driver idxd_pci_driver = {
593bfe1d560SDave Jiang 	.name		= DRV_NAME,
594bfe1d560SDave Jiang 	.id_table	= idxd_pci_tbl,
595bfe1d560SDave Jiang 	.probe		= idxd_pci_probe,
596bfe1d560SDave Jiang 	.remove		= idxd_remove,
597bfe1d560SDave Jiang 	.shutdown	= idxd_shutdown,
598bfe1d560SDave Jiang };
599bfe1d560SDave Jiang 
/*
 * idxd_init_module() - module load: verify required CPU features, then
 * register (in order) the idxd bus type, the idxd device driver, the char
 * device region, and the PCI driver.  Each failure unwinds everything
 * registered before it.
 */
600bfe1d560SDave Jiang static int __init idxd_init_module(void)
601bfe1d560SDave Jiang {
602bfe1d560SDave Jiang 	int err, i;
603bfe1d560SDave Jiang 
604bfe1d560SDave Jiang 	/*
6058e50d392SDave Jiang 	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
606bfe1d560SDave Jiang 	 * enumerating the device. We can not utilize it.
607bfe1d560SDave Jiang 	 */
608bfe1d560SDave Jiang 	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
609bfe1d560SDave Jiang 		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
610bfe1d560SDave Jiang 		return -ENODEV;
611bfe1d560SDave Jiang 	}
612bfe1d560SDave Jiang 
	/* ENQCMD is optional; its absence only disables shared wq submission. */
6138e50d392SDave Jiang 	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
6148e50d392SDave Jiang 		pr_warn("Platform does not have ENQCMD(S) support.\n");
6158e50d392SDave Jiang 	else
6168e50d392SDave Jiang 		support_enqcmd = true;
617bfe1d560SDave Jiang 
618bfe1d560SDave Jiang 	for (i = 0; i < IDXD_TYPE_MAX; i++)
619f7f77398SDave Jiang 		ida_init(&idxd_idas[i]);
620bfe1d560SDave Jiang 
621c52ca478SDave Jiang 	err = idxd_register_bus_type();
622c52ca478SDave Jiang 	if (err < 0)
623bfe1d560SDave Jiang 		return err;
624bfe1d560SDave Jiang 
625c52ca478SDave Jiang 	err = idxd_register_driver();
626c52ca478SDave Jiang 	if (err < 0)
627c52ca478SDave Jiang 		goto err_idxd_driver_register;
628c52ca478SDave Jiang 
62942d279f9SDave Jiang 	err = idxd_cdev_register();
63042d279f9SDave Jiang 	if (err)
63142d279f9SDave Jiang 		goto err_cdev_register;
63242d279f9SDave Jiang 
633c52ca478SDave Jiang 	err = pci_register_driver(&idxd_pci_driver);
634c52ca478SDave Jiang 	if (err)
635c52ca478SDave Jiang 		goto err_pci_register;
636c52ca478SDave Jiang 
637bfe1d560SDave Jiang 	return 0;
638c52ca478SDave Jiang 
639c52ca478SDave Jiang err_pci_register:
64042d279f9SDave Jiang 	idxd_cdev_remove();
64142d279f9SDave Jiang err_cdev_register:
642c52ca478SDave Jiang 	idxd_unregister_driver();
643c52ca478SDave Jiang err_idxd_driver_register:
644c52ca478SDave Jiang 	idxd_unregister_bus_type();
645c52ca478SDave Jiang 	return err;
646bfe1d560SDave Jiang }
647bfe1d560SDave Jiang module_init(idxd_init_module);
648bfe1d560SDave Jiang 
649bfe1d560SDave Jiang static void __exit idxd_exit_module(void)
650bfe1d560SDave Jiang {
651bfe1d560SDave Jiang 	pci_unregister_driver(&idxd_pci_driver);
65242d279f9SDave Jiang 	idxd_cdev_remove();
653c52ca478SDave Jiang 	idxd_unregister_bus_type();
654bfe1d560SDave Jiang }
655bfe1d560SDave Jiang module_exit(idxd_exit_module);
656