// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"
#include "perfmon.h"

MODULE_VERSION(IDXD_DRIVER_VERSION);
MODULE_DESCRIPTION("Intel Data Streaming Accelerator and In-Memory Analytics Accelerator common driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_IMPORT_NS("IDXD");

static bool sva = true;
module_param(sva, bool, 0644);
MODULE_PARM_DESC(sva, "Toggle SVA support on/off");

bool tc_override;
module_param(tc_override, bool, 0644);
MODULE_PARM_DESC(tc_override, "Override traffic class defaults");
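/*
 * Example usage: "modprobe idxd sva=0 tc_override=1". With permissions
 * 0644 both parameters are also exposed under
 * /sys/module/idxd/parameters/.
 */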

#define DRV_NAME "idxd"

bool support_enqcmd;
DEFINE_IDA(idxd_ida);

static struct idxd_driver_data idxd_driver_data[] = {
	[IDXD_TYPE_DSA] = {
		.name_prefix = "dsa",
		.type = IDXD_TYPE_DSA,
		.compl_size = sizeof(struct dsa_completion_record),
		.align = 32,
		.dev_type = &dsa_device_type,
		.evl_cr_off = offsetof(struct dsa_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct dsa_completion_record, status),
		.cr_result_off = offsetof(struct dsa_completion_record, result),
	},
	[IDXD_TYPE_IAX] = {
		.name_prefix = "iax",
		.type = IDXD_TYPE_IAX,
		.compl_size = sizeof(struct iax_completion_record),
		.align = 64,
		.dev_type = &iax_device_type,
		.evl_cr_off = offsetof(struct iax_evl_entry, cr),
		.user_submission_safe = false, /* See INTEL-SA-01084 security advisory */
		.cr_status_off = offsetof(struct iax_completion_record, status),
		.cr_result_off = offsetof(struct iax_completion_record, error_code),
		.load_device_defaults = idxd_load_iaa_device_defaults,
	},
};

static struct pci_device_id idxd_pci_tbl[] = {
	/* DSA ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_SPR0, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on GNR-D platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_GNRD, &idxd_driver_data[IDXD_TYPE_DSA]) },
	/* DSA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, DSA_DMR, &idxd_driver_data[IDXD_TYPE_DSA]) },

	/* IAX ver 1.0 platforms */
	{ PCI_DEVICE_DATA(INTEL, IAX_SPR0, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA on DMR platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
	/* IAA PTL platforms */
	{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);

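/*
 * MSI-X layout: vector 0 carries the misc/error interrupt and is
 * requested here; the per-wq vectors 1..max_wqs are only initialized at
 * this point and are requested later, when the corresponding wq is
 * enabled.
 */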
static int idxd_setup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct idxd_irq_entry *ie;
	int i, msixcnt;
	int rc = 0;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt < 0) {
		dev_err(dev, "Not MSI-X interrupt capable.\n");
		return -ENOSPC;
	}
	idxd->irq_cnt = msixcnt;

	rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
	if (rc != msixcnt) {
		dev_err(dev, "Failed enabling %d MSIX entries: %d\n", msixcnt, rc);
		return -ENOSPC;
	}
	dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);

	ie = idxd_get_ie(idxd, 0);
	ie->vector = pci_irq_vector(pdev, 0);
	rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to allocate misc interrupt.\n");
		goto err_misc_irq;
	}
	dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);

	for (i = 0; i < idxd->max_wqs; i++) {
		int msix_idx = i + 1;

		ie = idxd_get_ie(idxd, msix_idx);
		ie->id = msix_idx;
		ie->int_handle = INVALID_INT_HANDLE;
		ie->pasid = IOMMU_PASID_INVALID;

		spin_lock_init(&ie->list_lock);
		init_llist_head(&ie->pending_llist);
		INIT_LIST_HEAD(&ie->work_list);
	}

	idxd_unmask_error_interrupts(idxd);
	return 0;

err_misc_irq:
	idxd_mask_error_interrupts(idxd);
	pci_free_irq_vectors(pdev);
	dev_err(dev, "No usable interrupts\n");
	return rc;
}

static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct idxd_irq_entry *ie;
	int msixcnt;

	msixcnt = pci_msix_vec_count(pdev);
	if (msixcnt <= 0)
		return;

	ie = idxd_get_ie(idxd, 0);
	idxd_mask_error_interrupts(idxd);
	free_irq(ie->vector, ie);
	pci_free_irq_vectors(pdev);
}

static void idxd_clean_wqs(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);
	kfree(idxd->wqs);
}

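/*
 * Allocate the per-wq state and initialize each wq's conf_dev. The
 * conf_devs are only device_initialize()d here, so put_device() is the
 * proper teardown until they are registered with sysfs.
 */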
static int idxd_setup_wqs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct idxd_wq *wq;
	struct device *conf_dev;
	int i, rc;

	idxd->wqs = kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wqs)
		return -ENOMEM;

	idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->wq_enable_map) {
		rc = -ENOMEM;
		goto err_bitmap;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = kzalloc_node(sizeof(*wq), GFP_KERNEL, dev_to_node(dev));
		if (!wq) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&wq->idxd_dev, IDXD_DEV_WQ);
		conf_dev = wq_confdev(wq);
		wq->id = i;
		wq->idxd = idxd;
		device_initialize(wq_confdev(wq));
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_wq_device_type;
		rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id);
		if (rc < 0)
			goto err;

		mutex_init(&wq->wq_lock);
		init_waitqueue_head(&wq->err_queue);
		init_completion(&wq->wq_dead);
		init_completion(&wq->wq_resurrect);
		wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
		idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
		wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
		wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
		if (!wq->wqcfg) {
			rc = -ENOMEM;
			goto err;
		}

		if (idxd->hw.wq_cap.op_config) {
			wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL);
			if (!wq->opcap_bmap) {
				rc = -ENOMEM;
				goto err_opcap_bmap;
			}
			bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
		}
		mutex_init(&wq->uc_lock);
		xa_init(&wq->upasid_xa);
		idxd->wqs[i] = wq;
	}

	return 0;

err_opcap_bmap:
	kfree(wq->wqcfg);

err:
	put_device(conf_dev);
	kfree(wq);

	while (--i >= 0) {
		wq = idxd->wqs[i];
		if (idxd->hw.wq_cap.op_config)
			bitmap_free(wq->opcap_bmap);
		kfree(wq->wqcfg);
		conf_dev = wq_confdev(wq);
		put_device(conf_dev);
		kfree(wq);
	}
	bitmap_free(idxd->wq_enable_map);

err_bitmap:
	kfree(idxd->wqs);

	return rc;
}

static void idxd_clean_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *conf_dev;
	int i;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);
}

static int idxd_setup_engines(struct idxd_device *idxd)
{
	struct idxd_engine *engine;
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	int i, rc;

	idxd->engines = kcalloc_node(idxd->max_engines, sizeof(struct idxd_engine *),
				     GFP_KERNEL, dev_to_node(dev));
	if (!idxd->engines)
		return -ENOMEM;

	for (i = 0; i < idxd->max_engines; i++) {
		engine = kzalloc_node(sizeof(*engine), GFP_KERNEL, dev_to_node(dev));
		if (!engine) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&engine->idxd_dev, IDXD_DEV_ENGINE);
		conf_dev = engine_confdev(engine);
		engine->id = i;
		engine->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_engine_device_type;
		rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(engine);
			goto err;
		}

		idxd->engines[i] = engine;
	}

	return 0;

err:
	while (--i >= 0) {
		engine = idxd->engines[i];
		conf_dev = engine_confdev(engine);
		put_device(conf_dev);
		kfree(engine);
	}
	kfree(idxd->engines);

	return rc;
}

static void idxd_clean_groups(struct idxd_device *idxd)
{
	struct idxd_group *group;
	int i;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);
}

static int idxd_setup_groups(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	struct device *conf_dev;
	struct idxd_group *group;
	int i, rc;

	idxd->groups = kcalloc_node(idxd->max_groups, sizeof(struct idxd_group *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!idxd->groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		group = kzalloc_node(sizeof(*group), GFP_KERNEL, dev_to_node(dev));
		if (!group) {
			rc = -ENOMEM;
			goto err;
		}

		idxd_dev_set_type(&group->idxd_dev, IDXD_DEV_GROUP);
		conf_dev = group_confdev(group);
		group->id = i;
		group->idxd = idxd;
		device_initialize(conf_dev);
		conf_dev->parent = idxd_confdev(idxd);
		conf_dev->bus = &dsa_bus_type;
		conf_dev->type = &idxd_group_device_type;
		rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id);
		if (rc < 0) {
			put_device(conf_dev);
			kfree(group);
			goto err;
		}

		idxd->groups[i] = group;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		/*
		 * The default value is the same as the value of
		 * total read buffers in GRPCAP.
		 */
		group->rdbufs_allowed = idxd->max_rdbufs;
	}

	return 0;

err:
	while (--i >= 0) {
		group = idxd->groups[i];
		put_device(group_confdev(group));
		kfree(group);
	}
	kfree(idxd->groups);

	return rc;
}

static void idxd_cleanup_internals(struct idxd_device *idxd)
{
	idxd_clean_groups(idxd);
	idxd_clean_engines(idxd);
	idxd_clean_wqs(idxd);
	destroy_workqueue(idxd->wq);
}

static int idxd_init_evl(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	unsigned int evl_cache_size;
	struct idxd_evl *evl;
	const char *idxd_name;

	if (idxd->hw.gen_cap.evl_support == 0)
		return 0;

	evl = kzalloc_node(sizeof(*evl), GFP_KERNEL, dev_to_node(dev));
	if (!evl)
		return -ENOMEM;

	mutex_init(&evl->lock);
	evl->size = IDXD_EVL_SIZE_MIN;

	idxd_name = dev_name(idxd_confdev(idxd));
	evl_cache_size = sizeof(struct idxd_evl_fault) + evl_ent_size(idxd);
	/*
	 * Since the completion record in evl_cache will be copied to user
	 * space when handling a completion record page fault, the cache
	 * must be created suitable for user copy.
	 */
	idxd->evl_cache = kmem_cache_create_usercopy(idxd_name, evl_cache_size,
						     0, 0, 0, evl_cache_size,
						     NULL);
	if (!idxd->evl_cache) {
		kfree(evl);
		return -ENOMEM;
	}

	idxd->evl = evl;
	return 0;
}

static int idxd_setup_internals(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	init_waitqueue_head(&idxd->cmd_waitq);

	rc = idxd_setup_wqs(idxd);
	if (rc < 0)
		goto err_wqs;

	rc = idxd_setup_engines(idxd);
	if (rc < 0)
		goto err_engine;

	rc = idxd_setup_groups(idxd);
	if (rc < 0)
		goto err_group;

	idxd->wq = create_workqueue(dev_name(dev));
	if (!idxd->wq) {
		rc = -ENOMEM;
		goto err_wkq_create;
	}

	rc = idxd_init_evl(idxd);
	if (rc < 0)
		goto err_evl;

	return 0;

err_evl:
	destroy_workqueue(idxd->wq);
err_wkq_create:
	idxd_clean_groups(idxd);
err_group:
	idxd_clean_engines(idxd);
err_engine:
	idxd_clean_wqs(idxd);
err_wqs:
	return rc;
}

static void idxd_read_table_offsets(struct idxd_device *idxd)
{
	union offsets_reg offsets;
	struct device *dev = &idxd->pdev->dev;

	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}

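/*
 * Expand an array of u64 capability words into a single bitmap. For
 * example, val = {0x5, 0x1} with count == 2 sets bits 0, 2 and 64 of
 * bmap.
 */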
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count)
{
	int i, j, nr;

	for (i = 0, nr = 0; i < count; i++) {
		for (j = 0; j < BITS_PER_LONG_LONG; j++) {
			if (val[i] & BIT(j))
				set_bit(nr, bmap);
			nr++;
		}
	}
}

static void idxd_read_caps(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	/* reading generic capabilities */
	idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET);
	dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits);

	if (idxd->hw.gen_cap.cmd_cap) {
		idxd->hw.cmd_cap = ioread32(idxd->reg_base + IDXD_CMDCAP_OFFSET);
		dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
	}

	/* reading command capabilities */
	if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
		idxd->request_int_handles = true;

	idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
	dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
	idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
	dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
	if (idxd->hw.gen_cap.config_en)
		set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);

	/* reading group capabilities */
	idxd->hw.group_cap.bits =
		ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET);
	dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
	idxd->max_groups = idxd->hw.group_cap.num_groups;
	dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
	idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
	dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
	idxd->nr_rdbufs = idxd->max_rdbufs;

	/* read engine capabilities */
	idxd->hw.engine_cap.bits =
		ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET);
	dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits);
	idxd->max_engines = idxd->hw.engine_cap.num_engines;
	dev_dbg(dev, "max engines: %u\n", idxd->max_engines);

	/* read workqueue capabilities */
	idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET);
	dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits);
	idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size;
	dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size);
	idxd->max_wqs = idxd->hw.wq_cap.num_wqs;
	dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs);
	idxd->wqcfg_size = 1 << (idxd->hw.wq_cap.wqcfg_size + IDXD_WQCFG_MIN);
	dev_dbg(dev, "wqcfg size: %u\n", idxd->wqcfg_size);

	/* reading operation capabilities */
	for (i = 0; i < 4; i++) {
		idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base +
				IDXD_OPCAP_OFFSET + i * sizeof(u64));
		dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]);
	}
	multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4);

	/* read iaa cap */
	if (idxd->data->type == IDXD_TYPE_IAX && idxd->hw.version >= DEVICE_VERSION_2)
		idxd->hw.iaa_cap.bits = ioread64(idxd->reg_base + IDXD_IAACAP_OFFSET);
}

static void idxd_free(struct idxd_device *idxd)
{
	if (!idxd)
		return;

	put_device(idxd_confdev(idxd));
	bitmap_free(idxd->opcap_bmap);
	ida_free(&idxd_ida, idxd->id);
	kfree(idxd);
}

static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data)
{
	struct device *dev = &pdev->dev;
	struct device *conf_dev;
	struct idxd_device *idxd;
	int rc;

	idxd = kzalloc_node(sizeof(*idxd), GFP_KERNEL, dev_to_node(dev));
	if (!idxd)
		return NULL;

	conf_dev = idxd_confdev(idxd);
	idxd->pdev = pdev;
	idxd->data = data;
	idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type);
	idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL);
	if (idxd->id < 0)
		goto err_ida;

	idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev));
	if (!idxd->opcap_bmap)
		goto err_opcap;

	device_initialize(conf_dev);
	conf_dev->parent = dev;
	conf_dev->bus = &dsa_bus_type;
	conf_dev->type = idxd->data->dev_type;
	rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id);
	if (rc < 0)
		goto err_name;

	spin_lock_init(&idxd->dev_lock);
	spin_lock_init(&idxd->cmd_lock);

	return idxd;

err_name:
	put_device(conf_dev);
	bitmap_free(idxd->opcap_bmap);
err_opcap:
	ida_free(&idxd_ida, idxd->id);
err_ida:
	kfree(idxd);

	return NULL;
}

static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;
	ioasid_t pasid;
	int ret;

	/*
	 * Attach a global PASID to the DMA domain so that we can use ENQCMDS
	 * to submit work on buffers mapped by the DMA API.
	 */
	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return -EPERM;

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;

	/*
	 * The DMA domain is owned by the driver; it should support all valid
	 * types such as DMA-FQ, identity, etc.
	 */
	ret = iommu_attach_device_pasid(domain, dev, pasid, NULL);
	if (ret) {
		dev_err(dev, "failed to attach device pasid %d, domain type %d\n",
			pasid, domain->type);
		iommu_free_global_pasid(pasid);
		return ret;
	}

	/* Since we set user privilege for kernel DMA, enable completion IRQ */
	idxd_set_user_intr(idxd, 1);
	idxd->pasid = pasid;

	return ret;
}

static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iommu_domain *domain;

	domain = iommu_get_domain_for_dev(dev);
	if (!domain)
		return;

	iommu_detach_device_pasid(domain, dev, idxd->pasid);
	iommu_free_global_pasid(idxd->pasid);

	idxd_set_user_intr(idxd, 0);
	idxd->sva = NULL;
	idxd->pasid = IOMMU_PASID_INVALID;
}

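/*
 * Bring the device to a known state: reset it, optionally enable
 * PASID/SVA, read the capability and table-offset registers, allocate
 * the internal structures, load read-only configs if the device is not
 * configurable, and request interrupts. A perfmon failure is non-fatal.
 */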
static int idxd_probe(struct idxd_device *idxd)
{
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	int rc;

	dev_dbg(dev, "%s entered and resetting device\n", __func__);
	rc = idxd_device_init_reset(idxd);
	if (rc < 0)
		return rc;

	dev_dbg(dev, "IDXD reset complete\n");

	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM) && sva) {
		set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);

		rc = idxd_enable_system_pasid(idxd);
		if (rc)
			dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
		else
			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
	} else if (!sva) {
		dev_warn(dev, "User forced SVA off via module param.\n");
	}

	idxd_read_caps(idxd);
	idxd_read_table_offsets(idxd);

	rc = idxd_setup_internals(idxd);
	if (rc)
		goto err;

	/* If the configs are readonly, then load them from device */
	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		dev_dbg(dev, "Loading RO device config\n");
		rc = idxd_device_load_config(idxd);
		if (rc < 0)
			goto err_config;
	}

	rc = idxd_setup_interrupts(idxd);
	if (rc)
		goto err_config;

	idxd->major = idxd_cdev_get_major(idxd);

	rc = perfmon_pmu_init(idxd);
	if (rc < 0)
		dev_warn(dev, "Failed to initialize perfmon. No PMU support: %d\n", rc);

	dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id);
	return 0;

err_config:
	idxd_cleanup_internals(idxd);
err:
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
	return rc;
}

static void idxd_cleanup(struct idxd_device *idxd)
{
	perfmon_pmu_remove(idxd);
	idxd_cleanup_interrupts(idxd);
	idxd_cleanup_internals(idxd);
	if (device_pasid_enabled(idxd))
		idxd_disable_system_pasid(idxd);
}

/*
 * Attach IDXD device to IDXD driver.
 */
static int idxd_bind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;
	int err = -ENODEV;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev)
		err = device_driver_attach(drv, dev);

	put_device(dev);

	return err;
}

/*
 * Detach IDXD device from driver.
 */
static void idxd_unbind(struct device_driver *drv, const char *buf)
{
	const struct bus_type *bus = drv->bus;
	struct device *dev;

	dev = bus_find_device_by_name(bus, NULL, buf);
	if (dev && dev->driver == drv)
		device_release_driver(dev);

	put_device(dev);
}

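/*
 * Free the elements of a saved-configuration array. The array itself is
 * released by the caller, typically through __free(kfree).
 */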
#define idxd_free_saved_configs(saved_configs, count)	\
	do {						\
		int i;					\
							\
		for (i = 0; i < (count); i++)		\
			kfree(saved_configs[i]);	\
	} while (0)

static void idxd_free_saved(struct idxd_group **saved_groups,
			    struct idxd_engine **saved_engines,
			    struct idxd_wq **saved_wqs,
			    struct idxd_device *idxd)
{
	if (saved_groups)
		idxd_free_saved_configs(saved_groups, idxd->max_groups);
	if (saved_engines)
		idxd_free_saved_configs(saved_engines, idxd->max_engines);
	if (saved_wqs)
		idxd_free_saved_configs(saved_wqs, idxd->max_wqs);
}

/*
 * Save IDXD device configurations including engines, groups, wqs etc.
 * The saved configurations can be restored when needed.
 */
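/*
 * The locals below use scope-based cleanup: anything allocated with
 * __free(kfree) is released automatically on an early return, and
 * no_free_ptr() disarms that cleanup to hand ownership over to
 * idxd_saved once saving succeeds.
 */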
static int idxd_device_config_save(struct idxd_device *idxd,
				   struct idxd_saved_states *idxd_saved)
{
	struct device *dev = &idxd->pdev->dev;
	int i;

	memcpy(&idxd_saved->saved_idxd, idxd, sizeof(*idxd));

	if (idxd->evl) {
		memcpy(&idxd_saved->saved_evl, idxd->evl,
		       sizeof(struct idxd_evl));
	}

	struct idxd_group **saved_groups __free(kfree) =
		kcalloc_node(idxd->max_groups,
			     sizeof(struct idxd_group *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_groups)
		return -ENOMEM;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group __free(kfree) =
			kzalloc_node(sizeof(*saved_group), GFP_KERNEL,
				     dev_to_node(dev));

		if (!saved_group) {
			/* Free saved groups */
			idxd_free_saved(saved_groups, NULL, NULL, idxd);

			return -ENOMEM;
		}

		memcpy(saved_group, idxd->groups[i], sizeof(*saved_group));
		saved_groups[i] = no_free_ptr(saved_group);
	}

	struct idxd_engine **saved_engines __free(kfree) =
		kcalloc_node(idxd->max_engines,
			     sizeof(struct idxd_engine *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_engines) {
		/* Free saved groups */
		idxd_free_saved(saved_groups, NULL, NULL, idxd);

		return -ENOMEM;
	}
	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine __free(kfree) =
			kzalloc_node(sizeof(*saved_engine), GFP_KERNEL,
				     dev_to_node(dev));
		if (!saved_engine) {
			/* Free saved groups and engines */
			idxd_free_saved(saved_groups, saved_engines, NULL,
					idxd);

			return -ENOMEM;
		}

		memcpy(saved_engine, idxd->engines[i], sizeof(*saved_engine));
		saved_engines[i] = no_free_ptr(saved_engine);
	}

	unsigned long *saved_wq_enable_map __free(bitmap) =
		bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL,
				   dev_to_node(dev));
	if (!saved_wq_enable_map) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	bitmap_copy(saved_wq_enable_map, idxd->wq_enable_map, idxd->max_wqs);

	struct idxd_wq **saved_wqs __free(kfree) =
		kcalloc_node(idxd->max_wqs, sizeof(struct idxd_wq *),
			     GFP_KERNEL, dev_to_node(dev));
	if (!saved_wqs) {
		/* Free saved groups and engines */
		idxd_free_saved(saved_groups, saved_engines, NULL, idxd);

		return -ENOMEM;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq __free(kfree) =
			kzalloc_node(sizeof(*saved_wq), GFP_KERNEL,
				     dev_to_node(dev));
		struct idxd_wq *wq;

		if (!saved_wq) {
			/* Free saved groups, engines, and wqs */
			idxd_free_saved(saved_groups, saved_engines, saved_wqs,
					idxd);

			return -ENOMEM;
		}

		if (!test_bit(i, saved_wq_enable_map))
			continue;

		wq = idxd->wqs[i];
		mutex_lock(&wq->wq_lock);
		memcpy(saved_wq, wq, sizeof(*saved_wq));
		saved_wqs[i] = no_free_ptr(saved_wq);
		mutex_unlock(&wq->wq_lock);
	}

	/* Save configurations */
	idxd_saved->saved_groups = no_free_ptr(saved_groups);
	idxd_saved->saved_engines = no_free_ptr(saved_engines);
	idxd_saved->saved_wq_enable_map = no_free_ptr(saved_wq_enable_map);
	idxd_saved->saved_wqs = no_free_ptr(saved_wqs);

	return 0;
}

/*
 * Restore IDXD device configurations including engines, groups, wqs etc.
 * that were saved before.
 */
static void idxd_device_config_restore(struct idxd_device *idxd,
				       struct idxd_saved_states *idxd_saved)
{
	struct idxd_evl *saved_evl = &idxd_saved->saved_evl;
	int i;

	idxd->rdbuf_limit = idxd_saved->saved_idxd.rdbuf_limit;

	idxd->evl->size = saved_evl->size;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *saved_group, *group;

		saved_group = idxd_saved->saved_groups[i];
		group = idxd->groups[i];

		group->rdbufs_allowed = saved_group->rdbufs_allowed;
		group->rdbufs_reserved = saved_group->rdbufs_reserved;
		group->tc_a = saved_group->tc_a;
		group->tc_b = saved_group->tc_b;
		group->use_rdbuf_limit = saved_group->use_rdbuf_limit;

		kfree(saved_group);
	}
	kfree(idxd_saved->saved_groups);

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *saved_engine, *engine;

		saved_engine = idxd_saved->saved_engines[i];
		engine = idxd->engines[i];

		engine->group = saved_engine->group;

		kfree(saved_engine);
	}
	kfree(idxd_saved->saved_engines);

	bitmap_copy(idxd->wq_enable_map, idxd_saved->saved_wq_enable_map,
		    idxd->max_wqs);
	bitmap_free(idxd_saved->saved_wq_enable_map);

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *saved_wq, *wq;
		size_t len;

		if (!test_bit(i, idxd->wq_enable_map))
			continue;

		saved_wq = idxd_saved->saved_wqs[i];
		wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);

		wq->group = saved_wq->group;
		wq->flags = saved_wq->flags;
		wq->threshold = saved_wq->threshold;
		wq->size = saved_wq->size;
		wq->priority = saved_wq->priority;
		wq->type = saved_wq->type;
		len = strlen(saved_wq->name) + 1;
		strscpy(wq->name, saved_wq->name, len);
		wq->max_xfer_bytes = saved_wq->max_xfer_bytes;
		wq->max_batch_size = saved_wq->max_batch_size;
		wq->enqcmds_retries = saved_wq->enqcmds_retries;
		wq->descs = saved_wq->descs;
		wq->idxd_chan = saved_wq->idxd_chan;
		len = strlen(saved_wq->driver_name) + 1;
		strscpy(wq->driver_name, saved_wq->driver_name, len);

		mutex_unlock(&wq->wq_lock);

		kfree(saved_wq);
	}

	kfree(idxd_saved->saved_wqs);
}

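/*
 * FLR support: ->reset_prepare() snapshots the device configuration and
 * PCI state before the reset, and ->reset_done() restores both and
 * re-binds the device and its enabled wqs afterwards.
 */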
static void idxd_reset_prepare(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct device *dev = &idxd->pdev->dev;
	const char *idxd_name;
	int rc;

	idxd_name = dev_name(idxd_confdev(idxd));

	struct idxd_saved_states *idxd_saved __free(kfree) =
		kzalloc_node(sizeof(*idxd_saved), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!idxd_saved) {
		dev_err(dev, "HALT: no memory\n");

		return;
	}

	/* Save IDXD configurations. */
	rc = idxd_device_config_save(idxd, idxd_saved);
	if (rc < 0) {
		dev_err(dev, "HALT: cannot save %s configs\n", idxd_name);

		return;
	}

	idxd->idxd_saved = no_free_ptr(idxd_saved);

	/* Save PCI device state. */
	pci_save_state(idxd->pdev);
}

static void idxd_reset_done(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	const char *idxd_name;
	struct device *dev;
	int rc, i;

	if (!idxd->idxd_saved)
		return;

	dev = &idxd->pdev->dev;
	idxd_name = dev_name(idxd_confdev(idxd));

	/* Restore PCI device state. */
	pci_restore_state(idxd->pdev);

	/* Unbind idxd device from driver. */
	idxd_unbind(&idxd_drv.drv, idxd_name);

	/*
	 * Probe the PCI device without allocating or changing the idxd
	 * software data, which stays the same as before the FLR.
	 */
	idxd_pci_probe_alloc(idxd, NULL, NULL);

	/* Restore IDXD configurations. */
	idxd_device_config_restore(idxd, idxd->idxd_saved);

	/* Re-configure IDXD device if allowed. */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			dev_err(dev, "HALT: %s config fails\n", idxd_name);
			goto out;
		}
	}

	/* Bind IDXD device to driver. */
	rc = idxd_bind(&idxd_drv.drv, idxd_name);
	if (rc < 0) {
		dev_err(dev, "HALT: binding %s to driver fails\n", idxd_name);
		goto out;
	}

	/* Bind the enabled wqs in the IDXD device to their drivers. */
	for (i = 0; i < idxd->max_wqs; i++) {
		if (test_bit(i, idxd->wq_enable_map)) {
			struct idxd_wq *wq = idxd->wqs[i];
			char wq_name[32];

			wq->state = IDXD_WQ_DISABLED;
			sprintf(wq_name, "wq%d.%d", idxd->id, wq->id);
			/*
			 * Bind to user driver depending on wq type.
			 *
			 * Currently only user type WQ is supported. Kernel
			 * type WQ will be supported in the future.
			 */
			if (wq->type == IDXD_WQT_USER)
				rc = idxd_bind(&idxd_user_drv.drv, wq_name);
			else
				rc = -EINVAL;
			if (rc < 0) {
				clear_bit(i, idxd->wq_enable_map);
				dev_err(dev,
					"HALT: unable to re-enable wq %s\n",
					dev_name(wq_confdev(wq)));
			}
		}
	}
out:
	kfree(idxd->idxd_saved);
}

static const struct pci_error_handlers idxd_error_handler = {
	.reset_prepare = idxd_reset_prepare,
	.reset_done = idxd_reset_done,
};

/*
 * Probe idxd PCI device.
 * If idxd is not given, we need to allocate idxd and set up its data.
 *
 * If idxd is given, idxd was allocated and set up already. We just need
 * to configure the device without re-allocating and re-configuring the
 * idxd data. This is useful for recovering from FLR.
 */
int idxd_pci_probe_alloc(struct idxd_device *idxd, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	bool alloc_idxd = idxd ? false : true;
	struct idxd_driver_data *data;
	struct device *dev;
	int rc;

	pdev = idxd ? idxd->pdev : pdev;
	dev = &pdev->dev;
	data = id ? (struct idxd_driver_data *)id->driver_data : NULL;
	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	if (alloc_idxd) {
		dev_dbg(dev, "Alloc IDXD context\n");
		idxd = idxd_alloc(pdev, data);
		if (!idxd) {
			rc = -ENOMEM;
			goto err_idxd_alloc;
		}

		dev_dbg(dev, "Mapping BARs\n");
		idxd->reg_base = pci_iomap(pdev, IDXD_MMIO_BAR, 0);
		if (!idxd->reg_base) {
			rc = -ENOMEM;
			goto err_iomap;
		}

		dev_dbg(dev, "Set DMA masks\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc)
			goto err;
	}

	dev_dbg(dev, "Set PCI master\n");
	pci_set_master(pdev);
	pci_set_drvdata(pdev, idxd);

	if (alloc_idxd) {
		idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET);
		rc = idxd_probe(idxd);
		if (rc) {
			dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n");
			goto err;
		}

		if (data->load_device_defaults) {
			rc = data->load_device_defaults(idxd);
			if (rc)
				dev_warn(dev, "IDXD loading device defaults failed\n");
		}

		rc = idxd_register_devices(idxd);
		if (rc) {
			dev_err(dev, "IDXD sysfs setup failed\n");
			goto err_dev_register;
		}

		rc = idxd_device_init_debugfs(idxd);
		if (rc)
			dev_warn(dev, "IDXD debugfs failed to setup\n");
	}

	if (!alloc_idxd) {
		/* Release interrupts in the IDXD device. */
		idxd_cleanup_interrupts(idxd);

		/* Re-enable interrupts in the IDXD device. */
		rc = idxd_setup_interrupts(idxd);
		if (rc)
			dev_warn(dev, "IDXD interrupts failed to setup\n");
	}

	dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n",
		 idxd->hw.version);

	if (data)
		idxd->user_submission_safe = data->user_submission_safe;

	return 0;

err_dev_register:
	idxd_cleanup(idxd);
err:
	pci_iounmap(pdev, idxd->reg_base);
err_iomap:
	idxd_free(idxd);
err_idxd_alloc:
	pci_disable_device(pdev);
	return rc;
}

static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return idxd_pci_probe_alloc(NULL, pdev, id);
}

void idxd_wqs_quiesce(struct idxd_device *idxd)
{
	struct idxd_wq *wq;
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		if (wq->state == IDXD_WQ_ENABLED && wq->type == IDXD_WQT_KERNEL)
			idxd_wq_quiesce(wq);
	}
}

static void idxd_shutdown(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);
	struct idxd_irq_entry *irq_entry;
	int rc;

	rc = idxd_device_disable(idxd);
	if (rc)
		dev_err(&pdev->dev, "Disabling device failed\n");

	irq_entry = &idxd->ie;
	synchronize_irq(irq_entry->vector);
	idxd_mask_error_interrupts(idxd);
	flush_workqueue(idxd->wq);
}

static void idxd_remove(struct pci_dev *pdev)
{
	struct idxd_device *idxd = pci_get_drvdata(pdev);

	idxd_unregister_devices(idxd);
	/*
	 * When ->release() is called for the idxd->conf_dev, it frees all the memory related
	 * to the idxd context. The driver still needs those bits in order to do the rest of
	 * the cleanup. However, we do need to unbind the idxd sub-driver. So take a ref
	 * on the device here to hold off the freeing while allowing the idxd sub-driver
	 * to unbind.
	 */
	get_device(idxd_confdev(idxd));
	device_unregister(idxd_confdev(idxd));
	idxd_shutdown(pdev);
	idxd_device_remove_debugfs(idxd);
	idxd_cleanup(idxd);
	pci_iounmap(pdev, idxd->reg_base);
	put_device(idxd_confdev(idxd));
	idxd_free(idxd);
	pci_disable_device(pdev);
}

static struct pci_driver idxd_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= idxd_pci_tbl,
	.probe		= idxd_pci_probe,
	.remove		= idxd_remove,
	.shutdown	= idxd_shutdown,
	.err_handler	= &idxd_error_handler,
};

static int __init idxd_init_module(void)
{
	int err;

	/*
	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
	 * enumerating the device. We can not utilize it.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
		return -ENODEV;
	}

	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
		pr_warn("Platform does not have ENQCMD(S) support.\n");
	else
		support_enqcmd = true;

	err = idxd_driver_register(&idxd_drv);
	if (err < 0)
		goto err_idxd_driver_register;

	err = idxd_driver_register(&idxd_dmaengine_drv);
	if (err < 0)
		goto err_idxd_dmaengine_driver_register;

	err = idxd_driver_register(&idxd_user_drv);
	if (err < 0)
		goto err_idxd_user_driver_register;

	err = idxd_cdev_register();
	if (err)
		goto err_cdev_register;

	err = idxd_init_debugfs();
	if (err)
		goto err_debugfs;

	err = pci_register_driver(&idxd_pci_driver);
	if (err)
		goto err_pci_register;

	return 0;

err_pci_register:
	idxd_remove_debugfs();
err_debugfs:
	idxd_cdev_remove();
err_cdev_register:
	idxd_driver_unregister(&idxd_user_drv);
err_idxd_user_driver_register:
	idxd_driver_unregister(&idxd_dmaengine_drv);
err_idxd_dmaengine_driver_register:
	idxd_driver_unregister(&idxd_drv);
err_idxd_driver_register:
	return err;
}
module_init(idxd_init_module);

static void __exit idxd_exit_module(void)
{
	idxd_driver_unregister(&idxd_user_drv);
	idxd_driver_unregister(&idxd_dmaengine_drv);
	idxd_driver_unregister(&idxd_drv);
	pci_unregister_driver(&idxd_pci_driver);
	idxd_cdev_remove();
	idxd_remove_debugfs();
}
module_exit(idxd_exit_module);