1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
3 #include <linux/init.h>
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/pci.h>
8 #include <linux/interrupt.h>
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/workqueue.h>
12 #include <linux/fs.h>
13 #include <linux/io-64-nonatomic-lo-hi.h>
14 #include <linux/device.h>
15 #include <linux/idr.h>
16 #include <linux/iommu.h>
17 #include <uapi/linux/idxd.h>
18 #include <linux/dmaengine.h>
19 #include "../dmaengine.h"
20 #include "registers.h"
21 #include "idxd.h"
22 #include "perfmon.h"
23
24 MODULE_VERSION(IDXD_DRIVER_VERSION);
25 MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
26 MODULE_LICENSE("GPL v2");
27 MODULE_AUTHOR("Intel Corporation");
28 MODULE_IMPORT_NS("IDXD");
29
30 static bool sva = true;
31 module_param(sva, bool, 0644);
32 MODULE_PARM_DESC(sva, "Toggle SVA support on/off");
33
34 bool tc_override;
35 module_param(tc_override, bool, 0644);
36 MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
37
38 #define DRV_NAME "idxd"
39
40 bool support_enqcmd;
41 DEFINE_IDA(idxd_ida);
42
43 static struct idxd_driver_data idxd_driver_data[] = {
44 [IDXD_TYPE_DSA] = {
45 .name_prefix = "dsa",
46 .type = IDXD_TYPE_DSA,
47 .compl_size = sizeof(struct dsa_completion_record),
48 .align = 32,
49 .dev_type = &dsa_device_type,
50 .evl_cr_off = offsetof(struct dsa_evl_entry, cr),
51 .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
52 .cr_status_off = offsetof(struct dsa_completion_record, status),
53 .cr_result_off = offsetof(struct dsa_completion_record, result),
54 },
55 [IDXD_TYPE_IAX] = {
56 .name_prefix = "iax",
57 .type = IDXD_TYPE_IAX,
58 .compl_size = sizeof(struct iax_completion_record),
59 .align = 64,
60 .dev_type = &iax_device_type,
61 .evl_cr_off = offsetof(struct iax_evl_entry, cr),
62 .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
63 .cr_status_off = offsetof(struct iax_completion_record, status),
64 .cr_result_off = offsetof(struct iax_completion_record, error_code),
65 .load_device_defaults = idxd_load_iaa_device_defaults,
66 },
67 };
68
69 static struct pci_device_id idxd_pci_tbl[] = {
70 /* DSA ver 1.0 platforms */
71 { PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
72 /* DSA on GNR-D platforms */
73 { PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
74 /* DSA on DMR platforms */
75 { PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },
76
77 /* IAX ver 1.0 platforms */
78 { PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
79 /* IAA on DMR platforms */
80 { PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
81 /* IAA PTL platforms */
82 { PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
83 /* IAA WCL platforms */
84 { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
85 { 0, }
86 };
87 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
88
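/*
 * Interrupt setup sketch: MSI-X vector 0 is requested here for device-level
 * (misc/error) events, while vectors 1..max_wqs are only initialized as
 * idxd_irq_entry structures; their IRQs are expected to be requested later,
 * when the corresponding work queues are enabled.
 */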
static int idxd_setup_interrupts(struct idxd_device *idxd)
90 {
91 struct pci_dev *pdev = idxd->pdev;
92 struct device *dev = &pdev->dev;
93 struct idxd_irq_entry *ie;
94 int i, msixcnt;
95 int rc = 0;
96
97 msixcnt = pci_msix_vec_count(pdev);
98 if (msixcnt < 0) {
99 dev_err(dev, "Not MSI-X interrupt capable.\n");
100 return -ENOSPC;
101 }
102 idxd->irq_cnt = msixcnt;
103
104 rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
105 if (rc != msixcnt) {
106 dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
107 return -ENOSPC;
108 }
109 dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
110
111
112 ie = idxd_get_ie(idxd, 0);
113 ie->vector = pci_irq_vector(pdev, 0);
114 rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
115 if (rc < 0) {
116 dev_err(dev, "Failed to allocate misc interrupt.\n");
117 goto err_misc_irq;
118 }
119 dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
120
121 for (i = 0; i < idxd->max_wqs; i++) {
122 int msix_idx = i + 1;
123
124 ie = idxd_get_ie(idxd, msix_idx);
125 ie->id = msix_idx;
126 ie->int_handle = INVALID_INT_HANDLE;
127 ie->pasid = IOMMU_PASID_INVALID;
128
129 spin_lock_init(&ie->list_lock);
130 init_llist_head(&ie->pending_llist);
131 INIT_LIST_HEAD(&ie->work_list);
132 }
133
134 idxd_unmask_error_interrupts(idxd);
135 return 0;
136
137 err_misc_irq:
138 idxd_mask_error_interrupts(idxd);
139 pci_free_irq_vectors(pdev);
140 dev_err(dev, "No usable interrupts\n");
141 return rc;
142 }
143
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
145 {
146 struct pci_dev *pdev = idxd->pdev;
147 struct idxd_irq_entry *ie;
148 int msixcnt;
149
150 msixcnt = pci_msix_vec_count(pdev);
151 if (msixcnt <= 0)
152 return;
153
154 ie = idxd_get_ie(idxd, 0);
155 idxd_mask_error_interrupts(idxd);
156 free_irq(ie->vector, ie);
157 pci_free_irq_vectors(pdev);
158 }
159
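/*
 * The idxd_clean_*() helpers below free the per-object allocations made by
 * the corresponding idxd_setup_*() functions (wqs, engines, groups).
 */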
static void idxd_clean_wqs(struct idxd_device *idxd)
161 {
162 struct idxd_wq *wq;
163 struct device *conf_dev;
164 int i;
165
166 for (i = 0; i < idxd->max_wqs; i++) {
167 wq = idxd->wqs[i];
168 if (idxd->hw.wq_cap.op_config)
169 bitmap_free(wq->opcap_bmap);
170 kfree(wq->wqcfg);
171 conf_dev = wq_confdev(wq);
172 put_device(conf_dev);
173 kfree(wq);
174 }
175 bitmap_free(idxd->wq_enable_map);
176 kfree(idxd->wqs);
177 }
178
static int idxd_setup_wqs(struct idxd_device *idxd)
180 {
181 struct device *dev = &idxd->pdev->dev;
182 struct idxd_wq *wq;
183 struct device *conf_dev;
184 int i, rc;
185
186 idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
187 GFP_KERNEL, dev_to_node(dev));
188 if (!idxd->wqs)
189 return -ENOMEM;
190
191 idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
192 if (!idxd->wq_enable_map) {
193 rc = -ENOMEM;
194 goto err_free_wqs;
195 }
196
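/*
 * Each WQ gets its own configuration device named "wq<device>.<wq>" on the
 * dsa bus; any allocation failure unwinds all previously initialized WQs via
 * err_unwind.
 */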
197 for (i = 0; i < idxd->max_wqs; i++) {
198 wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
199 if (!wq) {
200 rc = -ENOMEM;
201 goto err_unwind;
202 }
203
204 idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
205 conf_dev = wq_confdev(wq);
206 wq->id = i;
207 wq->idxd = idxd;
208 device_initialize(conf_dev);
209 conf_dev->parent = idxd_confdev(idxd);
210 conf_dev->bus = &dsa_bus_type;
211 conf_dev->type = &idxd_wq_device_type;
212 rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
213 if (rc < 0) {
214 put_device(conf_dev);
215 kfree(wq);
216 goto err_unwind;
217 }
218
219 mutex_init(&wq->wq_lock);
220 init_waitqueue_head(&wq->err_queue);
221 init_completion(&wq->wq_dead);
222 init_completion(&wq->wq_resurrect);
223 wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
224 idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
225 wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
226 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
227 if (!wq->wqcfg) {
228 put_device(conf_dev);
229 kfree(wq);
230 rc = -ENOMEM;
231 goto err_unwind;
232 }
233
234 if (idxd->hw.wq_cap.op_config) {
235 wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
236 if (!wq->opcap_bmap) {
237 kfree(wq->wqcfg);
238 put_device(conf_dev);
239 kfree(wq);
240 rc = -ENOMEM;
241 goto err_unwind;
242 }
243 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
244 }
245 mutex_init(&wq->uc_lock);
246 xa_init(&wq->upasid_xa);
247 idxd->wqs[i] = wq;
248 }
249
250 return 0;
251
252 err_unwind:
253 while (--i >= 0) {
254 wq = idxd->wqs[i];
255 if (idxd->hw.wq_cap.op_config)
256 bitmap_free(wq->opcap_bmap);
257 kfree(wq->wqcfg);
258 conf_dev = wq_confdev(wq);
259 put_device(conf_dev);
260 kfree(wq);
261 }
262 bitmap_free(idxd->wq_enable_map);
263
264 err_free_wqs:
265 kfree(idxd->wqs);
266
267 return rc;
268 }
269
static void idxd_clean_engines(struct idxd_device *idxd)
271 {
272 struct idxd_engine *engine;
273 struct device *conf_dev;
274 int i;
275
276 for (i = 0; i < idxd->max_engines; i++) {
277 engine = idxd->engines[i];
278 conf_dev = engine_confdev(engine);
279 put_device(conf_dev);
280 kfree(engine);
281 }
282 kfree(idxd->engines);
283 }
284
static int idxd_setup_engines(struct idxd_device *idxd)
286 {
287 struct idxd_engine *engine;
288 struct device *dev = &idxd->pdev->dev;
289 struct device *conf_dev;
290 int i, rc;
291
292 idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
293 GFP_KERNEL, dev_to_node(dev));
294 if (!idxd->engines)
295 return -ENOMEM;
296
297 for (i = 0; i < idxd->max_engines; i++) {
298 engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
299 if (!engine) {
300 rc = -ENOMEM;
301 goto err;
302 }
303
304 idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
305 conf_dev = engine_confdev(engine);
306 engine->id = i;
307 engine->idxd = idxd;
308 device_initialize(conf_dev);
309 conf_dev->parent = idxd_confdev(idxd);
310 conf_dev->bus = &dsa_bus_type;
311 conf_dev->type = &idxd_engine_device_type;
312 rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
313 if (rc < 0) {
314 put_device(conf_dev);
315 kfree(engine);
316 goto err;
317 }
318
319 idxd->engines[i] = engine;
320 }
321
322 return 0;
323
324 err:
325 while (--i >= 0) {
326 engine = idxd->engines[i];
327 conf_dev = engine_confdev(engine);
328 put_device(conf_dev);
329 kfree(engine);
330 }
331 kfree(idxd->engines);
332
333 return rc;
334 }
335
static void idxd_clean_groups(struct idxd_device *idxd)
337 {
338 struct idxd_group *group;
339 int i;
340
341 for (i = 0; i < idxd->max_groups; i++) {
342 group = idxd->groups[i];
343 put_device(group_confdev(group));
344 kfree(group);
345 }
346 kfree(idxd->groups);
347 }
348
static int idxd_setup_groups(struct idxd_device *idxd)
350 {
351 struct device *dev = &idxd->pdev->dev;
352 struct device *conf_dev;
353 struct idxd_group *group;
354 int i, rc;
355
356 idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
357 GFP_KERNEL, dev_to_node(dev));
358 if (!idxd->groups)
359 return -ENOMEM;
360
361 for (i = 0; i < idxd->max_groups; i++) {
362 group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
363 if (!group) {
364 rc = -ENOMEM;
365 goto err;
366 }
367
368 idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
369 conf_dev = group_confdev(group);
370 group->id = i;
371 group->idxd = idxd;
372 device_initialize(conf_dev);
373 conf_dev->parent = idxd_confdev(idxd);
374 conf_dev->bus = &dsa_bus_type;
375 conf_dev->type = &idxd_group_device_type;
376 rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
377 if (rc < 0) {
378 put_device(conf_dev);
379 kfree(group);
380 goto err;
381 }
382
383 idxd->groups[i] = group;
384 if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
385 group->tc_a = 1;
386 group->tc_b = 1;
387 } else {
388 group->tc_a = -1;
389 group->tc_b = -1;
390 }
391 /*
392 * The default value is the same as the value of
393 * total read buffers in GRPCAP.
394 */
395 group->rdbufs_allowed = idxd->max_rdbufs;
396 }
397
398 return 0;
399
400 err:
401 while (--i >= 0) {
402 group = idxd->groups[i];
403 put_device(group_confdev(group));
404 kfree(group);
405 }
406 kfree(idxd->groups);
407
408 return rc;
409 }
410
static void idxd_cleanup_internals(struct idxd_device *idxd)
412 {
413 idxd_clean_groups(idxd);
414 idxd_clean_engines(idxd);
415 idxd_clean_wqs(idxd);
416 destroy_workqueue(idxd->wq);
417 }
418
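/*
 * Set up event log (EVL) bookkeeping only when the hardware advertises EVL
 * support. Each cache object holds an idxd_evl_fault header followed by one
 * raw EVL entry, which is why evl_cache_size adds evl_ent_size(idxd).
 */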
static int idxd_init_evl(struct idxd_device *idxd)
420 {
421 struct device *dev = &idxd->pdev->dev;
422 unsigned int evl_cache_size;
423 struct idxd_evl *evl;
424 const char *idxd_name;
425
426 if (idxd->hw.gen_cap.evl_support == 0)
427 return 0;
428
429 evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
430 if (!evl)
431 return -ENOMEM;
432
433 mutex_init(&evl->lock);
434 evl->size = IDXD_EVL_SIZE_MIN;
435
436 idxd_name = dev_name(idxd_confdev(idxd));
437 evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
438 /*
439 * Since completion record in evl_cache will be copied to user
440 * when handling completion record page fault, need to create
441 * the cache suitable for user copy.
442 */
443 idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
444 0, 0, 0, evl_cache_size,
445 NULL);
446 if (!idxd->evl_cache) {
447 kfree(evl);
448 return -ENOMEM;
449 }
450
451 idxd->evl = evl;
452 return 0;
453 }
454
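/*
 * Bring up the software state in dependency order (wqs, engines, groups,
 * workqueue, EVL); the error labels below unwind in the reverse order.
 */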
static int idxd_setup_internals(struct idxd_device *idxd)
456 {
457 struct device *dev = &idxd->pdev->dev;
458 int rc;
459
460 init_waitqueue_head(&idxd->cmd_waitq);
461
462 rc = idxd_setup_wqs(idxd);
463 if (rc < 0)
464 goto err_wqs;
465
466 rc = idxd_setup_engines(idxd);
467 if (rc < 0)
468 goto err_engine;
469
470 rc = idxd_setup_groups(idxd);
471 if (rc < 0)
472 goto err_group;
473
474 idxd->wq = create_workqueue(dev_name(dev));
475 if (!idxd->wq) {
476 rc = -ENOMEM;
477 goto err_wkq_create;
478 }
479
480 rc = idxd_init_evl(idxd);
481 if (rc < 0)
482 goto err_evl;
483
484 return 0;
485
486 err_evl:
487 destroy_workqueue(idxd->wq);
488 err_wkq_create:
489 idxd_clean_groups(idxd);
490 err_group:
491 idxd_clean_engines(idxd);
492 err_engine:
493 idxd_clean_wqs(idxd);
494 err_wqs:
495 return rc;
496 }
497
static void idxd_read_table_offsets(struct idxd_device *idxd)
499 {
500 union offsets_reg offsets;
501 struct device *dev = &idxd->pdev->dev;
502
503 offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
504 offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
505 idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
506 dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
507 idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
508 dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
509 idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
510 dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
511 idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
512 dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
513 }
514
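/*
 * Expand an array of u64 register words into a bitmap, preserving bit order;
 * used by idxd_read_caps() to turn the four OPCAP words into opcap_bmap.
 */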
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
516 {
517 int i, j, nr;
518
519 for (i = 0, nr = 0; i < count; i++) {
520 for (j = 0; j < BITS_PER_LONG_LONG; j++) {
521 if (val[i] & BIT(j))
522 set_bit(nr, bmap);
523 nr++;
524 }
525 }
526 }
527
static void idxd_read_caps(struct idxd_device *idxd)
529 {
530 struct device *dev = &idxd->pdev->dev;
531 int i;
532
533 /* reading generic capabilities */
534 idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
535 dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);
536
537 if (idxd->hw.gen_cap.cmd_cap) {
538 idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
539 dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
540 }
541
542 /* reading command capabilities */
543 if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
544 idxd->request_int_handles = true;
545
546 idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
547 dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
548 idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
549 dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
550 if (idxd->hw.gen_cap.config_en)
551 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
552
553 /* reading group capabilities */
554 idxd->hw.group_cap.bits =
555 ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
556 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
557 idxd->max_groups = idxd->hw.group_cap.num_groups;
558 dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
559 idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
560 dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
561 idxd->nr_rdbufs = idxd->max_rdbufs;
562
563 /* read engine capabilities */
564 idxd->hw.engine_cap.bits =
565 ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
566 dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
567 idxd->max_engines = idxd->hw.engine_cap.num_engines;
568 dev_dbg(dev, "max engines: %u\n", idxd->max_engines);
569
570 /* read workqueue capabilities */
571 idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
572 dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
573 idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
574 dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
575 idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
576 dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
577 idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
578 dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);
579
580 /* reading operation capabilities */
581 for (i = 0; i < 4; i++) {
582 idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
583 IDXD_OPCAP_OFFSET + i * sizeof(u64));
584 dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
585 }
586 multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);
587
588 /* read iaa cap */
589 if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
590 idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
591 }
592
static void idxd_free(struct idxd_device *idxd)
594 {
595 if (!idxd)
596 return;
597
598 put_device(idxd_confdev(idxd));
599 bitmap_free(idxd->opcap_bmap);
600 ida_free(&idxd_ida, idxd->id);
601 kfree(idxd);
602 }
603
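/*
 * Allocate the idxd context, its ID, and the opcap bitmap, and initialize the
 * embedded configuration device; idxd_free() above releases the same
 * resources, mirroring the error unwinding at the end of this function.
 */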
static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
605 {
606 struct device *dev = &pdev->dev;
607 struct device *conf_dev;
608 struct idxd_device *idxd;
609 int rc;
610
611 idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
612 if (!idxd)
613 return NULL;
614
615 conf_dev = idxd_confdev(idxd);
616 idxd->pdev = pdev;
617 idxd->data = data;
618 idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
619 idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
620 if (idxd->id < 0)
621 goto err_ida;
622
623 idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
624 if (!idxd->opcap_bmap)
625 goto err_opcap;
626
627 device_initialize(conf_dev);
628 conf_dev->parent = dev;
629 conf_dev->bus = &dsa_bus_type;
630 conf_dev->type = idxd->data->dev_type;
631 rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
632 if (rc < 0)
633 goto err_name;
634
635 spin_lock_init(&idxd->dev_lock);
636 spin_lock_init(&idxd->cmd_lock);
637
638 return idxd;
639
640 err_name:
641 put_device(conf_dev);
642 bitmap_free(idxd->opcap_bmap);
643 err_opcap:
644 ida_free(&idxd_ida, idxd->id);
645 err_ida:
646 kfree(idxd);
647
648 return NULL;
649 }
650
static int idxd_enable_system_pasid(struct idxd_device *idxd)
652 {
653 struct pci_dev *pdev = idxd->pdev;
654 struct device *dev = &pdev->dev;
655 struct iommu_domain *domain;
656 ioasid_t pasid;
657 int ret;
658
659 /*
660 * Attach a global PASID to the DMA domain so that we can use ENQCMDS
661 * to submit work on buffers mapped by DMA API.
662 */
663 domain = iommu_get_domain_for_dev(dev);
664 if (!domain)
665 return -EPERM;
666
667 pasid = iommu_alloc_global_pasid(dev);
668 if (pasid == IOMMU_PASID_INVALID)
669 return -ENOSPC;
670
671 /*
672 * DMA domain is owned by the driver, it should support all valid
673 * types such as DMA-FQ, identity, etc.
674 */
675 ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
676 if (ret) {
677 dev_err(dev, "failed to attach device pasid %d, domain type %d",
678 pasid, domain->type);
679 iommu_free_global_pasid(pasid);
680 return ret;
681 }
682
683 /* Since we set user privilege for kernel DMA, enable completion IRQ */
684 idxd_set_user_intr(idxd, 1);
685 idxd->pasid = pasid;
686
687 return ret;
688 }
689
static void idxd_disable_system_pasid(struct idxd_device *idxd)
691 {
692 struct pci_dev *pdev = idxd->pdev;
693 struct device *dev = &pdev->dev;
694 struct iommu_domain *domain;
695
696 domain = iommu_get_domain_for_dev(dev);
697 if (!domain)
698 return;
699
700 iommu_detach_device_pasid(domain, dev, idxd->pasid);
701 iommu_free_global_pasid(idxd->pasid);
702
703 idxd_set_user_intr(idxd, 0);
704 idxd->sva = NULL;
705 idxd->pasid = IOMMU_PASID_INVALID;
706 }
707
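/*
 * Device-side probe: reset the device, opportunistically enable a system
 * PASID for in-kernel DMA, read capabilities and table offsets, then set up
 * the software state and interrupts. A PASID failure is not fatal; the
 * device simply runs kernel DMA without PASID.
 */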
static int idxd_probe(struct idxd_device *idxd)
709 {
710 struct pci_dev *pdev = idxd->pdev;
711 struct device *dev = &pdev->dev;
712 int rc;
713
714 dev_dbg(dev, "%s entered and resetting device\n", __func__);
715 rc = idxd_device_init_reset(idxd);
716 if (rc < 0)
717 return rc;
718
719 dev_dbg(dev, "IDXD reset complete\n");
720
721 if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
722 set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
723
724 rc = idxd_enable_system_pasid(idxd);
725 if (rc)
726 dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
727 else
728 set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
729 } else if (!sva) {
730 dev_warn(dev, "User forced SVA off via module param.\n");
731 }
732
733 idxd_read_caps(idxd);
734 idxd_read_table_offsets(idxd);
735
736 rc = idxd_setup_internals(idxd);
737 if (rc)
738 goto err;
739
/* If the configs are read-only, load them from the device */
741 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
742 dev_dbg(dev, "Loading RO device config\n");
743 rc = idxd_device_load_config(idxd);
744 if (rc < 0)
745 goto err_config;
746 }
747
748 rc = idxd_setup_interrupts(idxd);
749 if (rc)
750 goto err_config;
751
752 idxd->major = idxd_cdev_get_major(idxd);
753
754 rc = perfmon_pmu_init(idxd);
755 if (rc < 0)
756 dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);
757
758 dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
759 return 0;
760
761 err_config:
762 idxd_cleanup_internals(idxd);
763 err:
764 if (device_pasid_enabled(idxd))
765 idxd_disable_system_pasid(idxd);
766 return rc;
767 }
768
static void idxd_cleanup(struct idxd_device *idxd)
770 {
771 perfmon_pmu_remove(idxd);
772 idxd_cleanup_interrupts(idxd);
773 idxd_cleanup_internals(idxd);
774 if (device_pasid_enabled(idxd))
775 idxd_disable_system_pasid(idxd);
776 }
777
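/*
 * idxd_bind() and idxd_unbind() look devices up by their configuration
 * device name on the driver's bus (e.g. "dsa0" for a device or "wq0.0" for a
 * work queue, as named elsewhere in this file) and attach or detach the
 * given driver. They are used below to rebuild bindings after an FLR.
 */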
778 /*
779 * Attach IDXD device to IDXD driver.
780 */
static int idxd_bind(struct device_driver *drv, const char *buf)
782 {
783 const struct bus_type *bus = drv->bus;
784 struct device *dev;
785 int err = -ENODEV;
786
787 dev = bus_find_device_by_name(bus, NULL, buf);
788 if (dev)
789 err = device_driver_attach(drv, dev);
790
791 put_device(dev);
792
793 return err;
794 }
795
796 /*
797 * Detach IDXD device from driver.
798 */
static void idxd_unbind(struct device_driver *drv, const char *buf)
800 {
801 const struct bus_type *bus = drv->bus;
802 struct device *dev;
803
804 dev = bus_find_device_by_name(bus, NULL, buf);
805 if (dev && dev->driver == drv)
806 device_release_driver(dev);
807
808 put_device(dev);
809 }
810
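/*
 * Free an array of saved configuration objects; a macro so the same helper
 * works for the group, engine, and wq pointer arrays alike.
 */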
811 #define idxd_free_saved_configs(saved_configs, count) \
812 do { \
813 int i; \
814 \
815 for (i = 0; i < (count); i++) \
816 kfree(saved_configs[i]); \
817 } while (0)
818
static void idxd_free_saved(struct idxd_group **saved_groups,
			    struct idxd_engine **saved_engines,
			    struct idxd_wq **saved_wqs,
			    struct idxd_device *idxd)
823 {
824 if (saved_groups)
825 idxd_free_saved_configs(saved_groups, idxd->max_groups);
826 if (saved_engines)
827 idxd_free_saved_configs(saved_engines, idxd->max_engines);
828 if (saved_wqs)
829 idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
830 }
831
832 /*
833 * Save IDXD device configurations including engines, groups, wqs etc.
834 * The saved configurations can be restored when needed.
835 */
static int idxd_device_config_save(struct idxd_device *idxd,
				   struct idxd_saved_states *idxd_saved)
838 {
839 struct device *dev = &idxd->pdev->dev;
840 int i;
841
842 memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));
843
844 if (idxd->evl) {
845 memcpy(&idxd_saved->saved_evl, idxd->evl,
846 sizeof(struct idxd_evl));
847 }
848
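/*
 * The __free(kfree) annotations below free these temporary buffers
 * automatically on early return; no_free_ptr() hands ownership over
 * once a pointer is stashed in a saved array or in idxd_saved.
 */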
849 struct idxd_group **saved_groups __free(kfree) =
850 kcalloc_node(idxd->max_groups,
851 sizeof(struct idxd_group *),
852 GFP_KERNEL, dev_to_node(dev));
853 if (!saved_groups)
854 return -ENOMEM;
855
856 for (i = 0; i < idxd->max_groups; i++) {
857 struct idxd_group *saved_group __free(kfree) =
858 kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
859 dev_to_node(dev));
860
861 if (!saved_group) {
862 /* Free saved groups */
863 idxd_free_saved(saved_groups, NULL, NULL, idxd);
864
865 return -ENOMEM;
866 }
867
868 memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
869 saved_groups[i] = no_free_ptr(saved_group);
870 }
871
872 struct idxd_engine **saved_engines =
873 kcalloc_node(idxd->max_engines,
874 sizeof(struct idxd_engine *),
875 GFP_KERNEL, dev_to_node(dev));
876 if (!saved_engines) {
877 /* Free saved groups */
878 idxd_free_saved(saved_groups, NULL, NULL, idxd);
879
880 return -ENOMEM;
881 }
882 for (i = 0; i < idxd->max_engines; i++) {
883 struct idxd_engine *saved_engine __free(kfree) =
884 kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
885 dev_to_node(dev));
886 if (!saved_engine) {
887 /* Free saved groups and engines */
888 idxd_free_saved(saved_groups, saved_engines, NULL,
889 idxd);
890
891 return -ENOMEM;
892 }
893
894 memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
895 saved_engines[i] = no_free_ptr(saved_engine);
896 }
897
898 unsigned long *saved_wq_enable_map __free(bitmap) =
899 bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
900 dev_to_node(dev));
901 if (!saved_wq_enable_map) {
902 /* Free saved groups and engines */
903 idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
904
905 return -ENOMEM;
906 }
907
908 bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);
909
910 struct idxd_wq **saved_wqs __free(kfree) =
911 kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
912 GFP_KERNEL, dev_to_node(dev));
913 if (!saved_wqs) {
914 /* Free saved groups and engines */
915 idxd_free_saved(saved_groups, saved_engines, NULL, idxd);
916
917 return -ENOMEM;
918 }
919
920 for (i = 0; i < idxd->max_wqs; i++) {
921 struct idxd_wq *saved_wq __free(kfree) =
922 kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
923 dev_to_node(dev));
924 struct idxd_wq *wq;
925
926 if (!saved_wq) {
927 /* Free saved groups, engines, and wqs */
928 idxd_free_saved(saved_groups, saved_engines, saved_wqs,
929 idxd);
930
931 return -ENOMEM;
932 }
933
934 if (!test_bit(i, saved_wq_enable_map))
935 continue;
936
937 wq = idxd->wqs[i];
938 mutex_lock(&wq->wq_lock);
939 memcpy(saved_wq, wq, sizeof(*saved_wq));
940 saved_wqs[i] = no_free_ptr(saved_wq);
941 mutex_unlock(&wq->wq_lock);
942 }
943
944 /* Save configurations */
945 idxd_saved->saved_groups = no_free_ptr(saved_groups);
946 idxd_saved->saved_engines = no_free_ptr(saved_engines);
947 idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
948 idxd_saved->saved_wqs = no_free_ptr(saved_wqs);
949
950 return 0;
951 }
952
953 /*
954 * Restore IDXD device configurations including engines, groups, wqs etc
955 * that were saved before.
956 */
static void idxd_device_config_restore(struct idxd_device *idxd,
				       struct idxd_saved_states *idxd_saved)
959 {
960 struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
961 int i;
962
963 idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;
964
965 idxd->evl->size = saved_evl->size;
966
967 for (i = 0; i < idxd->max_groups; i++) {
968 struct idxd_group *saved_group, *group;
969
970 saved_group = idxd_saved->saved_groups[i];
971 group = idxd->groups[i];
972
973 group->rdbufs_allowed = saved_group->rdbufs_allowed;
974 group->rdbufs_reserved = saved_group->rdbufs_reserved;
975 group->tc_a = saved_group->tc_a;
976 group->tc_b = saved_group->tc_b;
977 group->use_rdbuf_limit = saved_group->use_rdbuf_limit;
978
979 kfree(saved_group);
980 }
981 kfree(idxd_saved->saved_groups);
982
983 for (i = 0; i < idxd->max_engines; i++) {
984 struct idxd_engine *saved_engine, *engine;
985
986 saved_engine = idxd_saved->saved_engines[i];
987 engine = idxd->engines[i];
988
989 engine->group = saved_engine->group;
990
991 kfree(saved_engine);
992 }
993 kfree(idxd_saved->saved_engines);
994
995 bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
996 idxd->max_wqs);
997 bitmap_free(idxd_saved->saved_wq_enable_map);
998
999 for (i = 0; i < idxd->max_wqs; i++) {
1000 struct idxd_wq *saved_wq, *wq;
1001 size_t len;
1002
1003 if (!test_bit(i, idxd->wq_enable_map))
1004 continue;
1005
1006 saved_wq = idxd_saved->saved_wqs[i];
1007 wq = idxd->wqs[i];
1008
1009 mutex_lock(&wq->wq_lock);
1010
1011 wq->group = saved_wq->group;
1012 wq->flags = saved_wq->flags;
1013 wq->threshold = saved_wq->threshold;
1014 wq->size = saved_wq->size;
1015 wq->priority = saved_wq->priority;
1016 wq->type = saved_wq->type;
1017 len = strlen(saved_wq->name) + 1;
1018 strscpy(wq->name, saved_wq->name, len);
1019 wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
1020 wq->max_batch_size = saved_wq->max_batch_size;
1021 wq->enqcmds_retries = saved_wq->enqcmds_retries;
1022 wq->descs = saved_wq->descs;
1023 wq->idxd_chan = saved_wq->idxd_chan;
1024 len = strlen(saved_wq->driver_name) + 1;
1025 strscpy(wq->driver_name, saved_wq->driver_name, len);
1026
1027 mutex_unlock(&wq->wq_lock);
1028
1029 kfree(saved_wq);
1030 }
1031
1032 kfree(idxd_saved->saved_wqs);
1033 }
1034
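/*
 * FLR handling: idxd_reset_prepare() snapshots the device configuration and
 * PCI state before the reset, and idxd_reset_done() restores them and
 * rebinds the device and its enabled user WQs afterwards.
 */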
static void idxd_reset_prepare(struct pci_dev *pdev)
1036 {
1037 struct idxd_device *idxd = pci_get_drvdata(pdev);
1038 struct device *dev = &idxd->pdev->dev;
1039 const char *idxd_name;
1040 int rc;
1041
1042 idxd_name = dev_name(idxd_confdev(idxd));
1043
1044 struct idxd_saved_states *idxd_saved __free(kfree) =
1045 kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
1046 dev_to_node(&pdev->dev));
1047 if (!idxd_saved) {
1048 dev_err(dev, "HALT: no memory\n");
1049
1050 return;
1051 }
1052
1053 /* Save IDXD configurations. */
1054 rc = idxd_device_config_save(idxd, idxd_saved);
1055 if (rc < 0) {
1056 dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);
1057
1058 return;
1059 }
1060
1061 idxd->idxd_saved = no_free_ptr(idxd_saved);
1062
1063 /* Save PCI device state. */
1064 pci_save_state(idxd->pdev);
1065 }
1066
static void idxd_reset_done(struct pci_dev *pdev)
1068 {
1069 struct idxd_device *idxd = pci_get_drvdata(pdev);
1070 const char *idxd_name;
1071 struct device *dev;
1072 int rc, i;
1073
1074 if (!idxd->idxd_saved)
1075 return;
1076
1077 dev = &idxd->pdev->dev;
1078 idxd_name = dev_name(idxd_confdev(idxd));
1079
1080 /* Restore PCI device state. */
1081 pci_restore_state(idxd->pdev);
1082
1083 /* Unbind idxd device from driver. */
1084 idxd_unbind(&idxd_drv.drv, idxd_name);
1085
1086 /*
1087 * Probe PCI device without allocating or changing
1088 * idxd software data which keeps the same as before FLR.
1089 */
1090 idxd_pci_probe_alloc(idxd, NULL, NULL);
1091
1092 /* Restore IDXD configurations. */
1093 idxd_device_config_restore(idxd, idxd->idxd_saved);
1094
1095 /* Re-configure IDXD device if allowed. */
1096 if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
1097 rc = idxd_device_config(idxd);
1098 if (rc < 0) {
1099 dev_err(dev, "HALT: %s config fails\n", idxd_name);
1100 goto out;
1101 }
1102 }
1103
1104 /* Bind IDXD device to driver. */
1105 rc = idxd_bind(&idxd_drv.drv, idxd_name);
1106 if (rc < 0) {
1107 dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
1108 goto out;
1109 }
1110
1111 /* Bind enabled wq in the IDXD device to driver. */
1112 for (i = 0; i < idxd->max_wqs; i++) {
1113 if (test_bit(i, idxd->wq_enable_map)) {
1114 struct idxd_wq *wq = idxd->wqs[i];
1115 char wq_name[32];
1116
1117 wq->state = IDXD_WQ_DISABLED;
1118 sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
1119 /*
1120 * Bind to user driver depending on wq type.
1121 *
1122 * Currently only support user type WQ. Will support
1123 * kernel type WQ in the future.
1124 */
1125 if (wq->type == IDXD_WQT_USER)
1126 rc = idxd_bind(&idxd_user_drv.drv, wq_name);
1127 else
1128 rc = -EINVAL;
1129 if (rc < 0) {
1130 clear_bit(i, idxd->wq_enable_map);
1131 dev_err(dev,
1132 "HALT: unable to re-enable wq %s\n",
1133 dev_name(wq_confdev(wq)));
1134 }
1135 }
1136 }
1137 out:
1138 kfree(idxd->idxd_saved);
1139 }
1140
1141 static const struct pci_error_handlers idxd_error_handler = {
1142 .reset_prepare = idxd_reset_prepare,
1143 .reset_done = idxd_reset_done,
1144 };
1145
1146 /*
1147 * Probe idxd PCI device.
1148 * If idxd is not given, need to allocate idxd and set up its data.
1149 *
1150 * If idxd is given, idxd was allocated and setup already. Just need to
1151 * configure device without re-allocating and re-configuring idxd data.
1152 * This is useful for recovering from FLR.
1153 */
int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
			 const struct pci_device_id *id)
1156 {
1157 bool alloc_idxd = idxd ? false : true;
1158 struct idxd_driver_data *data;
1159 struct device *dev;
1160 int rc;
1161
1162 pdev = idxd ? idxd->pdev : pdev;
1163 dev = &pdev->dev;
1164 data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
1165 rc = pci_enable_device(pdev);
1166 if (rc)
1167 return rc;
1168
1169 if (alloc_idxd) {
1170 dev_dbg(dev, "Alloc IDXD context\n");
1171 idxd = idxd_alloc(pdev, data);
1172 if (!idxd) {
1173 rc = -ENOMEM;
1174 goto err_idxd_alloc;
1175 }
1176
1177 dev_dbg(dev, "Mapping BARs\n");
1178 idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
1179 if (!idxd->reg_base) {
1180 rc = -ENOMEM;
1181 goto err_iomap;
1182 }
1183
1184 dev_dbg(dev, "Set DMA masks\n");
1185 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1186 if (rc)
1187 goto err;
1188 }
1189
1190 dev_dbg(dev, "Set PCI master\n");
1191 pci_set_master(pdev);
1192 pci_set_drvdata(pdev, idxd);
1193
1194 if (alloc_idxd) {
1195 idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
1196 rc = idxd_probe(idxd);
1197 if (rc) {
1198 dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
1199 goto err;
1200 }
1201
1202 if (data->load_device_defaults) {
1203 rc = data->load_device_defaults(idxd);
1204 if (rc)
1205 dev_warn(dev, "IDXD loading device defaults failed\n");
1206 }
1207
1208 rc = idxd_register_devices(idxd);
1209 if (rc) {
1210 dev_err(dev, "IDXD sysfs setup failed\n");
1211 goto err_dev_register;
1212 }
1213
1214 rc = idxd_device_init_debugfs(idxd);
1215 if (rc)
1216 dev_warn(dev, "IDXD debugfs failed to setup\n");
1217 }
1218
1219 if (!alloc_idxd) {
1220 /* Release interrupts in the IDXD device. */
1221 idxd_cleanup_interrupts(idxd);
1222
1223 /* Re-enable interrupts in the IDXD device. */
1224 rc = idxd_setup_interrupts(idxd);
1225 if (rc)
1226 dev_warn(dev, "IDXD interrupts failed to setup\n");
1227 }
1228
1229 dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
1230 idxd->hw.version);
1231
1232 if (data)
1233 idxd->user_submission_safe = data->user_submission_safe;
1234
1235 return 0;
1236
1237 err_dev_register:
1238 idxd_cleanup(idxd);
1239 err:
1240 pci_iounmap(pdev, idxd->reg_base);
1241 err_iomap:
1242 idxd_free(idxd);
1243 err_idxd_alloc:
1244 pci_disable_device(pdev);
1245 return rc;
1246 }
1247
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1249 {
1250 return idxd_pci_probe_alloc(NULL, pdev, id);
1251 }
1252
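/*
 * Quiesce every enabled kernel-type work queue, letting in-flight kernel
 * descriptors drain; exported for use outside this file.
 */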
void idxd_wqs_quiesce(struct idxd_device *idxd)
1254 {
1255 struct idxd_wq *wq;
1256 int i;
1257
1258 for (i = 0; i < idxd->max_wqs; i++) {
1259 wq = idxd->wqs[i];
1260 if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
1261 idxd_wq_quiesce(wq);
1262 }
1263 }
1264
static void idxd_shutdown(struct pci_dev *pdev)
1266 {
1267 struct idxd_device *idxd = pci_get_drvdata(pdev);
1268 struct idxd_irq_entry *irq_entry;
1269 int rc;
1270
1271 rc = idxd_device_disable(idxd);
1272 if (rc)
1273 dev_err(&pdev->dev, "Disabling device failed\n");
1274
1275 irq_entry = &idxd->ie;
1276 synchronize_irq(irq_entry->vector);
1277 idxd_mask_error_interrupts(idxd);
1278 flush_workqueue(idxd->wq);
1279 }
1280
static void idxd_remove(struct pci_dev *pdev)
1282 {
1283 struct idxd_device *idxd = pci_get_drvdata(pdev);
1284
1285 idxd_unregister_devices(idxd);
1286 /*
1287 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
1288 * to the idxd context. The driver still needs those bits in order to do the rest of
1289 * the cleanup. However, we do need to unbound the idxd sub-driver. So take a ref
1290 * on the device here to hold off the freeing while allowing the idxd sub-driver
1291 * to unbind.
1292 */
1293 get_device(idxd_confdev(idxd));
1294 device_unregister(idxd_confdev(idxd));
1295 idxd_shutdown(pdev);
1296 idxd_device_remove_debugfs(idxd);
1297 perfmon_pmu_remove(idxd);
1298 idxd_cleanup_interrupts(idxd);
1299 if (device_pasid_enabled(idxd))
1300 idxd_disable_system_pasid(idxd);
1301 pci_iounmap(pdev, idxd->reg_base);
1302 put_device(idxd_confdev(idxd));
1303 pci_disable_device(pdev);
1304 }
1305
1306 static struct pci_driver idxd_pci_driver = {
1307 .name = DRV_NAME,
1308 .id_table = idxd_pci_tbl,
1309 .probe = idxd_pci_probe,
1310 .remove = idxd_remove,
1311 .shutdown = idxd_shutdown,
1312 .err_handler = &idxd_error_handler,
1313 };
1314
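/*
 * Module init registers the dsa-bus sub-drivers first, then the char dev
 * support, debugfs, and finally the PCI driver; the error path unwinds in
 * the reverse order.
 */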
static int __init idxd_init_module(void)
1316 {
1317 int err;
1318
1319 /*
1320 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
1321 * enumerating the device. We can not utilize it.
1322 */
1323 if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
1324 pr_warn("idxd driver failed to load without MOVDIR64B.\n");
1325 return -ENODEV;
1326 }
1327
1328 if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
1329 pr_warn("Platform does not have ENQCMD(S) support.\n");
1330 else
1331 support_enqcmd = true;
1332
1333 err = idxd_driver_register(&idxd_drv);
1334 if (err < 0)
1335 goto err_idxd_driver_register;
1336
1337 err = idxd_driver_register(&idxd_dmaengine_drv);
1338 if (err < 0)
1339 goto err_idxd_dmaengine_driver_register;
1340
1341 err = idxd_driver_register(&idxd_user_drv);
1342 if (err < 0)
1343 goto err_idxd_user_driver_register;
1344
1345 err = idxd_cdev_register();
1346 if (err)
1347 goto err_cdev_register;
1348
1349 err = idxd_init_debugfs();
1350 if (err)
1351 goto err_debugfs;
1352
1353 err = pci_register_driver(&idxd_pci_driver);
1354 if (err)
1355 goto err_pci_register;
1356
1357 return 0;
1358
1359 err_pci_register:
1360 idxd_remove_debugfs();
1361 err_debugfs:
1362 idxd_cdev_remove();
1363 err_cdev_register:
1364 idxd_driver_unregister(&idxd_user_drv);
1365 err_idxd_user_driver_register:
1366 idxd_driver_unregister(&idxd_dmaengine_drv);
1367 err_idxd_dmaengine_driver_register:
1368 idxd_driver_unregister(&idxd_drv);
1369 err_idxd_driver_register:
1370 return err;
1371 }
1372 module_init(idxd_init_module);
1373
static void __exit idxd_exit_module(void)
1375 {
1376 idxd_driver_unregister(&idxd_user_drv);
1377 idxd_driver_unregister(&idxd_dmaengine_drv);
1378 idxd_driver_unregister(&idxd_drv);
1379 pci_unregister_driver(&idxd_pci_driver);
1380 idxd_cdev_remove();
1381 idxd_remove_debugfs();
1382 }
1383 module_exit(idxd_exit_module);
1384