xref: /linux/drivers/crypto/intel/iaa/iaa_crypto_main.c (revision 78c3925c048c752334873f56c3a3d1c9d53e0416)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
3 
4 #include <linux/init.h>
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/pci.h>
8 #include <linux/device.h>
9 #include <linux/iommu.h>
10 #include <uapi/linux/idxd.h>
11 #include <linux/highmem.h>
12 #include <linux/sched/smt.h>
13 #include <crypto/internal/acompress.h>
14 
15 #include "idxd.h"
16 #include "iaa_crypto.h"
17 #include "iaa_crypto_stats.h"
18 
19 #ifdef pr_fmt
20 #undef pr_fmt
21 #endif
22 
23 #define pr_fmt(fmt)			"idxd: " IDXD_SUBDRIVER_NAME ": " fmt
24 
25 #define IAA_ALG_PRIORITY               300
26 
27 /* number of iaa instances probed */
28 static unsigned int nr_iaa;
29 static unsigned int nr_cpus;
30 static unsigned int nr_nodes;
31 static unsigned int nr_cpus_per_node;
32 
33 /* Number of cpus sharing each iaa instance */
34 static unsigned int cpus_per_iaa;
35 
36 static struct crypto_comp *deflate_generic_tfm;
37 
38 /* Per-cpu lookup table for balanced wqs */
39 static struct wq_table_entry __percpu *wq_table;
40 
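/*
 * Round-robin through the wqs assigned to this cpu by the last
 * rebalance; returns NULL if no wq has been installed for this cpu.
 */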
41 static struct idxd_wq *wq_table_next_wq(int cpu)
42 {
43 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
44 
45 	if (++entry->cur_wq >= entry->n_wqs)
46 		entry->cur_wq = 0;
47 
48 	if (!entry->wqs[entry->cur_wq])
49 		return NULL;
50 
51 	pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
52 		 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id,
53 		 entry->wqs[entry->cur_wq]->id, cpu);
54 
55 	return entry->wqs[entry->cur_wq];
56 }
57 
58 static void wq_table_add(int cpu, struct idxd_wq *wq)
59 {
60 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
61 
62 	if (WARN_ON(entry->n_wqs == entry->max_wqs))
63 		return;
64 
65 	entry->wqs[entry->n_wqs++] = wq;
66 
67 	pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
68 		 entry->wqs[entry->n_wqs - 1]->idxd->id,
69 		 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
70 }
71 
72 static void wq_table_free_entry(int cpu)
73 {
74 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
75 
76 	kfree(entry->wqs);
77 	memset(entry, 0, sizeof(*entry));
78 }
79 
80 static void wq_table_clear_entry(int cpu)
81 {
82 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
83 
84 	entry->n_wqs = 0;
85 	entry->cur_wq = 0;
86 	memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *));
87 }
88 
89 LIST_HEAD(iaa_devices);
90 DEFINE_MUTEX(iaa_devices_lock);
91 
92 /* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
93 static bool iaa_crypto_enabled;
94 static bool iaa_crypto_registered;
95 
96 /* Verify results of IAA compress or not */
97 static bool iaa_verify_compress = true;
98 
99 static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
100 {
101 	return sprintf(buf, "%d\n", iaa_verify_compress);
102 }
103 
104 static ssize_t verify_compress_store(struct device_driver *driver,
105 				     const char *buf, size_t count)
106 {
107 	int ret = -EBUSY;
108 
109 	mutex_lock(&iaa_devices_lock);
110 
111 	if (iaa_crypto_enabled)
112 		goto out;
113 
114 	ret = kstrtobool(buf, &iaa_verify_compress);
115 	if (ret)
116 		goto out;
117 
118 	ret = count;
119 out:
120 	mutex_unlock(&iaa_devices_lock);
121 
122 	return ret;
123 }
124 static DRIVER_ATTR_RW(verify_compress);
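
/*
 * Example usage (sketch; assumes the usual sysfs location of this wq
 * sub-driver's attributes, /sys/bus/dsa/drivers/crypto/):
 *
 *	# echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress
 *
 * disables the decompress-and-CRC-check verification of each compress
 * operation.  Like sync_mode below, the value can only be changed
 * while iaa_crypto is disabled (the store returns -EBUSY otherwise).
 */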
125 
126 /*
127  * The iaa crypto driver supports three 'sync' methods determining how
128  * compressions and decompressions are performed:
129  *
130  * - sync:      the compression or decompression completes before
131  *              returning.  This is the mode used by the async crypto
132  *              interface when the sync mode is set to 'sync' and by
133  *              the sync crypto interface regardless of setting.
134  *
135  * - async:     the compression or decompression is submitted and returns
136  *              immediately.  Completion interrupts are not used so
137  *              the caller is responsible for polling the descriptor
138  *              for completion.  This mode is applicable to only the
139  *              async crypto interface and is ignored for anything
140  *              else.
141  *
142  * - async_irq: the compression or decompression is submitted and
143  *              returns immediately.  Completion interrupts are
144  *              enabled so the caller can wait for the completion and
145  *              yield to other threads.  When the compression or
146  *              decompression completes, the completion is signaled
147  *              and the caller awakened.  This mode is applicable to
148  *              only the async crypto interface and is ignored for
149  *              anything else.
150  *
151  * These modes can be set using the iaa_crypto sync_mode driver
152  * attribute.
153  */
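
/*
 * Example usage (sketch; assumes the driver attributes live under
 * /sys/bus/dsa/drivers/crypto/):
 *
 *	# echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
 *	# cat /sys/bus/dsa/drivers/crypto/sync_mode
 *	async_irq
 *
 * As with verify_compress, sync_mode can only be changed while
 * iaa_crypto is disabled (sync_mode_store() returns -EBUSY otherwise).
 */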
154 
155 /* Use async mode */
156 static bool async_mode;
157 /* Use interrupts */
158 static bool use_irq;
159 
160 /**
161  * set_iaa_sync_mode - Set IAA sync mode
162  * @name: The name of the sync mode
163  *
164  * Make the IAA sync mode named @name the current sync mode used by
165  * compression/decompression.
166  */
167 
168 static int set_iaa_sync_mode(const char *name)
169 {
170 	int ret = 0;
171 
172 	if (sysfs_streq(name, "sync")) {
173 		async_mode = false;
174 		use_irq = false;
175 	} else if (sysfs_streq(name, "async")) {
176 		async_mode = true;
177 		use_irq = false;
178 	} else if (sysfs_streq(name, "async_irq")) {
179 		async_mode = true;
180 		use_irq = true;
181 	} else {
182 		ret = -EINVAL;
183 	}
184 
185 	return ret;
186 }
187 
188 static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
189 {
190 	int ret = 0;
191 
192 	if (!async_mode && !use_irq)
193 		ret = sprintf(buf, "%s\n", "sync");
194 	else if (async_mode && !use_irq)
195 		ret = sprintf(buf, "%s\n", "async");
196 	else if (async_mode && use_irq)
197 		ret = sprintf(buf, "%s\n", "async_irq");
198 
199 	return ret;
200 }
201 
202 static ssize_t sync_mode_store(struct device_driver *driver,
203 			       const char *buf, size_t count)
204 {
205 	int ret = -EBUSY;
206 
207 	mutex_lock(&iaa_devices_lock);
208 
209 	if (iaa_crypto_enabled)
210 		goto out;
211 
212 	ret = set_iaa_sync_mode(buf);
213 	if (ret == 0)
214 		ret = count;
215 out:
216 	mutex_unlock(&iaa_devices_lock);
217 
218 	return ret;
219 }
220 static DRIVER_ATTR_RW(sync_mode);
221 
222 static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
223 
224 static int find_empty_iaa_compression_mode(void)
225 {
226 	int i = -EINVAL;
227 
228 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
229 		if (iaa_compression_modes[i])
230 			continue;
231 		break;
232 	}
233 
234 	return i < IAA_COMP_MODES_MAX ? i : -EINVAL;
235 }
236 
237 static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
238 {
239 	struct iaa_compression_mode *mode;
240 	int i;
241 
242 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
243 		mode = iaa_compression_modes[i];
244 		if (!mode)
245 			continue;
246 
247 		if (!strcmp(mode->name, name)) {
248 			*idx = i;
249 			return iaa_compression_modes[i];
250 		}
251 	}
252 
253 	return NULL;
254 }
255 
256 static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
257 {
258 	kfree(mode->name);
259 	kfree(mode->ll_table);
260 	kfree(mode->d_table);
261 
262 	kfree(mode);
263 }
264 
265 /*
266  * IAA Compression modes are defined by an ll_table and a d_table.
267  * These tables are typically generated and captured using statistics
268  * collected from running actual compress/decompress workloads.
269  *
270  * A module or other kernel code can add and remove compression modes
271  * with a given name using the exported @add_iaa_compression_mode()
272  * and @remove_iaa_compression_mode() functions.
273  *
274  * When a new compression mode is added, the tables are saved in a
275  * global compression mode list.  When IAA devices are added, a
276  * per-IAA device dma mapping is created for each IAA device, for each
277  * compression mode.  These are the tables used to do the actual
278  * compression/decompression and are unmapped if/when the devices are
279  * removed.  Currently, compression modes must be added before any
280  * device is added, and removed after all devices have been removed.
281  */
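
/*
 * Illustrative sketch only (not part of this driver): a module
 * supplying its own tables could register a custom mode before any
 * IAA device is probed, using hypothetical my_ll_table/my_d_table
 * arrays generated offline:
 *
 *	ret = add_iaa_compression_mode("my-mode",
 *				       my_ll_table, sizeof(my_ll_table),
 *				       my_d_table, sizeof(my_d_table),
 *				       NULL, NULL);
 *
 * and later tear the mode down, after all devices have been removed,
 * with remove_iaa_compression_mode("my-mode").
 */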
282 
283 /**
284  * remove_iaa_compression_mode - Remove an IAA compression mode
285  * @name: The name the compression mode will be known as
286  *
287  * Remove the IAA compression mode named @name.
288  */
289 void remove_iaa_compression_mode(const char *name)
290 {
291 	struct iaa_compression_mode *mode;
292 	int idx;
293 
294 	mutex_lock(&iaa_devices_lock);
295 
296 	if (!list_empty(&iaa_devices))
297 		goto out;
298 
299 	mode = find_iaa_compression_mode(name, &idx);
300 	if (mode) {
301 		free_iaa_compression_mode(mode);
302 		iaa_compression_modes[idx] = NULL;
303 	}
304 out:
305 	mutex_unlock(&iaa_devices_lock);
306 }
307 EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
308 
309 /**
310  * add_iaa_compression_mode - Add an IAA compression mode
311  * @name: The name the compression mode will be known as
312  * @ll_table: The ll table
313  * @ll_table_size: The ll table size in bytes
314  * @d_table: The d table
315  * @d_table_size: The d table size in bytes
316  * @init: Optional callback function to init the compression mode data
317  * @free: Optional callback function to free the compression mode data
318  *
319  * Add a new IAA compression mode named @name.
320  *
321  * Returns 0 if successful, errcode otherwise.
322  */
323 int add_iaa_compression_mode(const char *name,
324 			     const u32 *ll_table,
325 			     int ll_table_size,
326 			     const u32 *d_table,
327 			     int d_table_size,
328 			     iaa_dev_comp_init_fn_t init,
329 			     iaa_dev_comp_free_fn_t free)
330 {
331 	struct iaa_compression_mode *mode;
332 	int idx, ret = -ENOMEM;
333 
334 	mutex_lock(&iaa_devices_lock);
335 
336 	if (!list_empty(&iaa_devices)) {
337 		ret = -EBUSY;
338 		goto out;
339 	}
340 
341 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
342 	if (!mode)
343 		goto out;
344 
345 	mode->name = kstrdup(name, GFP_KERNEL);
346 	if (!mode->name)
347 		goto free;
348 
349 	if (ll_table) {
350 		mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
351 		if (!mode->ll_table)
352 			goto free;
353 		memcpy(mode->ll_table, ll_table, ll_table_size);
354 		mode->ll_table_size = ll_table_size;
355 	}
356 
357 	if (d_table) {
358 		mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
359 		if (!mode->d_table)
360 			goto free;
361 		memcpy(mode->d_table, d_table, d_table_size);
362 		mode->d_table_size = d_table_size;
363 	}
364 
365 	mode->init = init;
366 	mode->free = free;
367 
368 	idx = find_empty_iaa_compression_mode();
369 	if (idx < 0)
370 		goto free;
371 
372 	pr_debug("IAA compression mode %s added at idx %d\n",
373 		 mode->name, idx);
374 
375 	iaa_compression_modes[idx] = mode;
376 
377 	ret = 0;
378 out:
379 	mutex_unlock(&iaa_devices_lock);
380 
381 	return ret;
382 free:
383 	free_iaa_compression_mode(mode);
384 	goto out;
385 }
386 EXPORT_SYMBOL_GPL(add_iaa_compression_mode);
387 
388 static struct iaa_device_compression_mode *
389 get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
390 {
391 	return iaa_device->compression_modes[idx];
392 }
393 
394 static void free_device_compression_mode(struct iaa_device *iaa_device,
395 					 struct iaa_device_compression_mode *device_mode)
396 {
397 	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
398 	struct device *dev = &iaa_device->idxd->pdev->dev;
399 
400 	kfree(device_mode->name);
401 
402 	if (device_mode->aecs_comp_table)
403 		dma_free_coherent(dev, size, device_mode->aecs_comp_table,
404 				  device_mode->aecs_comp_table_dma_addr);
405 	kfree(device_mode);
406 }
407 
408 #define IDXD_OP_FLAG_AECS_RW_TGLS       0x400000
409 #define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
410 #define IAX_AECS_COMPRESS_FLAG	(IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
411 #define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
412 #define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
413 						IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
414 						IDXD_OP_FLAG_AECS_RW_TGLS)
415 
416 static int check_completion(struct device *dev,
417 			    struct iax_completion_record *comp,
418 			    bool compress,
419 			    bool only_once);
420 
421 static int init_device_compression_mode(struct iaa_device *iaa_device,
422 					struct iaa_compression_mode *mode,
423 					int idx, struct idxd_wq *wq)
424 {
425 	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
426 	struct device *dev = &iaa_device->idxd->pdev->dev;
427 	struct iaa_device_compression_mode *device_mode;
428 	int ret = -ENOMEM;
429 
430 	device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL);
431 	if (!device_mode)
432 		return -ENOMEM;
433 
434 	device_mode->name = kstrdup(mode->name, GFP_KERNEL);
435 	if (!device_mode->name)
436 		goto free;
437 
438 	device_mode->aecs_comp_table = dma_alloc_coherent(dev, size,
439 							  &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL);
440 	if (!device_mode->aecs_comp_table)
441 		goto free;
442 
443 	/* Add Huffman table to aecs */
444 	memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
445 	memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
446 	memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
447 
448 	if (mode->init) {
449 		ret = mode->init(device_mode);
450 		if (ret)
451 			goto free;
452 	}
453 
454 	/* mode index should match iaa_compression_modes idx */
455 	iaa_device->compression_modes[idx] = device_mode;
456 
457 	pr_debug("IAA %s compression mode initialized for iaa device %d\n",
458 		 mode->name, iaa_device->idxd->id);
459 
460 	ret = 0;
461 out:
462 	return ret;
463 free:
464 	pr_debug("IAA %s compression mode initialization failed for iaa device %d\n",
465 		 mode->name, iaa_device->idxd->id);
466 
467 	free_device_compression_mode(iaa_device, device_mode);
468 	goto out;
469 }
470 
471 static int init_device_compression_modes(struct iaa_device *iaa_device,
472 					 struct idxd_wq *wq)
473 {
474 	struct iaa_compression_mode *mode;
475 	int i, ret = 0;
476 
477 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
478 		mode = iaa_compression_modes[i];
479 		if (!mode)
480 			continue;
481 
482 		ret = init_device_compression_mode(iaa_device, mode, i, wq);
483 		if (ret)
484 			break;
485 	}
486 
487 	return ret;
488 }
489 
490 static void remove_device_compression_modes(struct iaa_device *iaa_device)
491 {
492 	struct iaa_device_compression_mode *device_mode;
493 	int i;
494 
495 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
496 		device_mode = iaa_device->compression_modes[i];
497 		if (!device_mode)
498 			continue;
499 
500 		if (iaa_compression_modes[i]->free)
501 			iaa_compression_modes[i]->free(device_mode);
502 		free_device_compression_mode(iaa_device, device_mode);
503 		iaa_device->compression_modes[i] = NULL;
504 	}
505 }
506 
507 static struct iaa_device *iaa_device_alloc(void)
508 {
509 	struct iaa_device *iaa_device;
510 
511 	iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL);
512 	if (!iaa_device)
513 		return NULL;
514 
515 	INIT_LIST_HEAD(&iaa_device->wqs);
516 
517 	return iaa_device;
518 }
519 
520 static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
521 {
522 	struct iaa_wq *iaa_wq;
523 
524 	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
525 		if (iaa_wq->wq == wq)
526 			return true;
527 	}
528 
529 	return false;
530 }
531 
532 static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
533 {
534 	struct iaa_device *iaa_device;
535 
536 	iaa_device = iaa_device_alloc();
537 	if (!iaa_device)
538 		return NULL;
539 
540 	iaa_device->idxd = idxd;
541 
542 	list_add_tail(&iaa_device->list, &iaa_devices);
543 
544 	nr_iaa++;
545 
546 	return iaa_device;
547 }
548 
549 static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
550 {
551 	int ret = 0;
552 
553 	ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
554 	if (ret)
555 		return ret;
556 
557 	return ret;
558 }
559 
560 static void del_iaa_device(struct iaa_device *iaa_device)
561 {
562 	list_del(&iaa_device->list);
563 
564 	nr_iaa--;
565 }
566 
567 static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
568 		      struct iaa_wq **new_wq)
569 {
570 	struct idxd_device *idxd = iaa_device->idxd;
571 	struct pci_dev *pdev = idxd->pdev;
572 	struct device *dev = &pdev->dev;
573 	struct iaa_wq *iaa_wq;
574 
575 	iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
576 	if (!iaa_wq)
577 		return -ENOMEM;
578 
579 	iaa_wq->wq = wq;
580 	iaa_wq->iaa_device = iaa_device;
581 	idxd_wq_set_private(wq, iaa_wq);
582 
583 	list_add_tail(&iaa_wq->list, &iaa_device->wqs);
584 
585 	iaa_device->n_wq++;
586 
587 	if (new_wq)
588 		*new_wq = iaa_wq;
589 
590 	dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n",
591 		wq->id, iaa_device->idxd->id, iaa_device->n_wq);
592 
593 	return 0;
594 }
595 
596 static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
597 {
598 	struct idxd_device *idxd = iaa_device->idxd;
599 	struct pci_dev *pdev = idxd->pdev;
600 	struct device *dev = &pdev->dev;
601 	struct iaa_wq *iaa_wq;
602 
603 	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
604 		if (iaa_wq->wq == wq) {
605 			list_del(&iaa_wq->list);
606 			iaa_device->n_wq--;
607 
608 			dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n",
609 				wq->id, iaa_device->idxd->id,
610 				iaa_device->n_wq, nr_iaa);
611 
612 			if (iaa_device->n_wq == 0)
613 				del_iaa_device(iaa_device);
614 			break;
615 		}
616 	}
617 }
618 
619 static void clear_wq_table(void)
620 {
621 	int cpu;
622 
623 	for (cpu = 0; cpu < nr_cpus; cpu++)
624 		wq_table_clear_entry(cpu);
625 
626 	pr_debug("cleared wq table\n");
627 }
628 
629 static void free_iaa_device(struct iaa_device *iaa_device)
630 {
631 	if (!iaa_device)
632 		return;
633 
634 	remove_device_compression_modes(iaa_device);
635 	kfree(iaa_device);
636 }
637 
638 static void __free_iaa_wq(struct iaa_wq *iaa_wq)
639 {
640 	struct iaa_device *iaa_device;
641 
642 	if (!iaa_wq)
643 		return;
644 
645 	iaa_device = iaa_wq->iaa_device;
646 	if (iaa_device->n_wq == 0)
647 		free_iaa_device(iaa_wq->iaa_device);
648 }
649 
650 static void free_iaa_wq(struct iaa_wq *iaa_wq)
651 {
652 	struct idxd_wq *wq;
653 
654 	__free_iaa_wq(iaa_wq);
655 
656 	wq = iaa_wq->wq;
657 
658 	kfree(iaa_wq);
659 	idxd_wq_set_private(wq, NULL);
660 }
661 
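/*
 * Per-request reference counting on the iaa_wq: iaa_wq_get() fails
 * with -ENODEV once a removal is pending, and iaa_wq_put() performs
 * the deferred free when the last in-flight request drops its
 * reference after the wq has been marked for removal.
 */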
662 static int iaa_wq_get(struct idxd_wq *wq)
663 {
664 	struct idxd_device *idxd = wq->idxd;
665 	struct iaa_wq *iaa_wq;
666 	int ret = 0;
667 
668 	spin_lock(&idxd->dev_lock);
669 	iaa_wq = idxd_wq_get_private(wq);
670 	if (iaa_wq && !iaa_wq->remove) {
671 		iaa_wq->ref++;
672 		idxd_wq_get(wq);
673 	} else {
674 		ret = -ENODEV;
675 	}
676 	spin_unlock(&idxd->dev_lock);
677 
678 	return ret;
679 }
680 
681 static int iaa_wq_put(struct idxd_wq *wq)
682 {
683 	struct idxd_device *idxd = wq->idxd;
684 	struct iaa_wq *iaa_wq;
685 	bool free = false;
686 	int ret = 0;
687 
688 	spin_lock(&idxd->dev_lock);
689 	iaa_wq = idxd_wq_get_private(wq);
690 	if (iaa_wq) {
691 		iaa_wq->ref--;
692 		if (iaa_wq->ref == 0 && iaa_wq->remove) {
693 			idxd_wq_set_private(wq, NULL);
694 			free = true;
695 		}
696 		idxd_wq_put(wq);
697 	} else {
698 		ret = -ENODEV;
699 	}
700 	spin_unlock(&idxd->dev_lock);
701 	if (free) {
702 		__free_iaa_wq(iaa_wq);
703 		kfree(iaa_wq);
704 	}
705 
706 	return ret;
707 }
708 
709 static void free_wq_table(void)
710 {
711 	int cpu;
712 
713 	for (cpu = 0; cpu < nr_cpus; cpu++)
714 		wq_table_free_entry(cpu);
715 
716 	free_percpu(wq_table);
717 
718 	pr_debug("freed wq table\n");
719 }
720 
721 static int alloc_wq_table(int max_wqs)
722 {
723 	struct wq_table_entry *entry;
724 	int cpu;
725 
726 	wq_table = alloc_percpu(struct wq_table_entry);
727 	if (!wq_table)
728 		return -ENOMEM;
729 
730 	for (cpu = 0; cpu < nr_cpus; cpu++) {
731 		entry = per_cpu_ptr(wq_table, cpu);
732 		entry->wqs = kcalloc(max_wqs, sizeof(struct idxd_wq *), GFP_KERNEL);
733 		if (!entry->wqs) {
734 			free_wq_table();
735 			return -ENOMEM;
736 		}
737 
738 		entry->max_wqs = max_wqs;
739 	}
740 
741 	pr_debug("initialized wq table\n");
742 
743 	return 0;
744 }
745 
746 static int save_iaa_wq(struct idxd_wq *wq)
747 {
748 	struct iaa_device *iaa_device, *found = NULL;
749 	struct idxd_device *idxd;
750 	struct pci_dev *pdev;
751 	struct device *dev;
752 	int ret = 0;
753 
754 	list_for_each_entry(iaa_device, &iaa_devices, list) {
755 		if (iaa_device->idxd == wq->idxd) {
756 			idxd = iaa_device->idxd;
757 			pdev = idxd->pdev;
758 			dev = &pdev->dev;
759 			/*
760 			 * Check to see that we don't already have this wq.
761 			 * Shouldn't happen but we don't control probing.
762 			 */
763 			if (iaa_has_wq(iaa_device, wq)) {
764 				dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
765 					iaa_device);
766 				goto out;
767 			}
768 
769 			found = iaa_device;
770 
771 			ret = add_iaa_wq(iaa_device, wq, NULL);
772 			if (ret)
773 				goto out;
774 
775 			break;
776 		}
777 	}
778 
779 	if (!found) {
780 		struct iaa_device *new_device;
781 		struct iaa_wq *new_wq;
782 
783 		new_device = add_iaa_device(wq->idxd);
784 		if (!new_device) {
785 			ret = -ENOMEM;
786 			goto out;
787 		}
788 
789 		ret = add_iaa_wq(new_device, wq, &new_wq);
790 		if (ret) {
791 			del_iaa_device(new_device);
792 			free_iaa_device(new_device);
793 			goto out;
794 		}
795 
796 		ret = init_iaa_device(new_device, new_wq);
797 		if (ret) {
798 			del_iaa_wq(new_device, new_wq->wq);
799 			del_iaa_device(new_device);
800 			free_iaa_wq(new_wq);
801 			goto out;
802 		}
803 	}
804 
805 	if (WARN_ON(nr_iaa == 0))
806 		return -EINVAL;
807 
808 	cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
809 out:
810 	return ret;
811 }
812 
813 static void remove_iaa_wq(struct idxd_wq *wq)
814 {
815 	struct iaa_device *iaa_device;
816 
817 	list_for_each_entry(iaa_device, &iaa_devices, list) {
818 		if (iaa_has_wq(iaa_device, wq)) {
819 			del_iaa_wq(iaa_device, wq);
820 			break;
821 		}
822 	}
823 
824 	if (nr_iaa)
825 		cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
826 	else
827 		cpus_per_iaa = 0;
828 }
829 
830 static int wq_table_add_wqs(int iaa, int cpu)
831 {
832 	struct iaa_device *iaa_device, *found_device = NULL;
833 	int ret = 0, cur_iaa = 0, n_wqs_added = 0;
834 	struct idxd_device *idxd;
835 	struct iaa_wq *iaa_wq;
836 	struct pci_dev *pdev;
837 	struct device *dev;
838 
839 	list_for_each_entry(iaa_device, &iaa_devices, list) {
840 		idxd = iaa_device->idxd;
841 		pdev = idxd->pdev;
842 		dev = &pdev->dev;
843 
844 		if (cur_iaa != iaa) {
845 			cur_iaa++;
846 			continue;
847 		}
848 
849 		found_device = iaa_device;
850 		dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
851 			found_device->idxd->id, cur_iaa);
852 		break;
853 	}
854 
855 	if (!found_device) {
856 		found_device = list_first_entry_or_null(&iaa_devices,
857 							struct iaa_device, list);
858 		if (!found_device) {
859 			pr_debug("couldn't find any iaa devices with wqs!\n");
860 			ret = -EINVAL;
861 			goto out;
862 		}
863 		cur_iaa = 0;
864 
865 		idxd = found_device->idxd;
866 		pdev = idxd->pdev;
867 		dev = &pdev->dev;
868 		dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
869 			found_device->idxd->id, cur_iaa);
870 	}
871 
872 	list_for_each_entry(iaa_wq, &found_device->wqs, list) {
873 		wq_table_add(cpu, iaa_wq->wq);
874 		pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
875 			 cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
876 		n_wqs_added++;
877 	}
878 
879 	if (!n_wqs_added) {
880 		pr_debug("couldn't find any iaa wqs!\n");
881 		ret = -EINVAL;
882 		goto out;
883 	}
884 out:
885 	return ret;
886 }
887 
888 /*
889  * Rebalance the wq table so that given a cpu, it's easy to find the
890  * closest IAA instance.  The idea is to try to choose the most
891  * appropriate IAA instance for a caller and spread available
892  * workqueues around to clients.
893  */
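/*
 * Illustrative numbers only: with 2 nodes, 64 cpus per node and 8 IAA
 * instances, cpus_per_iaa = (2 * 64) / 8 = 16, so within each node the
 * first 16 cpus (in node cpumask order) share the wqs of one IAA
 * instance, the next 16 the next instance, and so on.
 */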
894 static void rebalance_wq_table(void)
895 {
896 	const struct cpumask *node_cpus;
897 	int node, cpu, iaa = -1;
898 
899 	if (nr_iaa == 0)
900 		return;
901 
902 	pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n",
903 		 nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa);
904 
905 	clear_wq_table();
906 
907 	if (nr_iaa == 1) {
908 		for (cpu = 0; cpu < nr_cpus; cpu++) {
909 			if (WARN_ON(wq_table_add_wqs(0, cpu))) {
910 				pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
911 				return;
912 			}
913 		}
914 
915 		return;
916 	}
917 
918 	for_each_node_with_cpus(node) {
919 		node_cpus = cpumask_of_node(node);
920 
921 		for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
922 			int node_cpu = cpumask_nth(cpu, node_cpus);
923 
924 			if (WARN_ON(node_cpu >= nr_cpu_ids)) {
925 				pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
926 				return;
927 			}
928 
929 			if ((cpu % cpus_per_iaa) == 0)
930 				iaa++;
931 
932 			if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
933 				pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
934 				return;
935 			}
936 		}
937 	}
938 }
939 
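/*
 * Poll the completion record written by the hardware.  With @only_once
 * a still-pending status returns -EAGAIN instead of busy-waiting.
 * Hardware errors are mapped to -ETIMEDOUT for a watchdog expiry,
 * -E2BIG for a compress destination overflow, -EOVERFLOW for a
 * decompress destination overflow, and -EINVAL for anything else.
 */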
940 static inline int check_completion(struct device *dev,
941 				   struct iax_completion_record *comp,
942 				   bool compress,
943 				   bool only_once)
944 {
945 	char *op_str = compress ? "compress" : "decompress";
946 	int ret = 0;
947 
948 	while (!comp->status) {
949 		if (only_once)
950 			return -EAGAIN;
951 		cpu_relax();
952 	}
953 
954 	if (comp->status != IAX_COMP_SUCCESS) {
955 		if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) {
956 			ret = -ETIMEDOUT;
957 			dev_dbg(dev, "%s timed out, size=0x%x\n",
958 				op_str, comp->output_size);
959 			update_completion_timeout_errs();
960 			goto out;
961 		}
962 
963 		if (comp->status == IAA_ANALYTICS_ERROR &&
964 		    comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) {
965 			ret = -E2BIG;
966 			dev_dbg(dev, "compressed > uncompressed size,"
967 				" not compressing, size=0x%x\n",
968 				comp->output_size);
969 			update_completion_comp_buf_overflow_errs();
970 			goto out;
971 		}
972 
973 		if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) {
974 			ret = -EOVERFLOW;
975 			goto out;
976 		}
977 
978 		ret = -EINVAL;
979 		dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n",
980 			op_str, comp->status, comp->error_code, comp->output_size);
981 		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0);
982 		update_completion_einval_errs();
983 
984 		goto out;
985 	}
986 out:
987 	return ret;
988 }
989 
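/*
 * Software fallback path: decompress @req using the kernel's generic
 * deflate implementation.  Used when the hardware reports
 * IAA_ANALYTICS_ERROR on a decompress.
 */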
990 static int deflate_generic_decompress(struct acomp_req *req)
991 {
992 	void *src, *dst;
993 	int ret;
994 
995 	src = kmap_local_page(sg_page(req->src)) + req->src->offset;
996 	dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
997 
998 	ret = crypto_comp_decompress(deflate_generic_tfm,
999 				     src, req->slen, dst, &req->dlen);
1000 
1001 	kunmap_local(src);
1002 	kunmap_local(dst);
1003 
1004 	update_total_sw_decomp_calls();
1005 
1006 	return ret;
1007 }
1008 
1009 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
1010 				struct acomp_req *req,
1011 				dma_addr_t *src_addr, dma_addr_t *dst_addr);
1012 
1013 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
1014 			       struct idxd_wq *wq,
1015 			       dma_addr_t src_addr, unsigned int slen,
1016 			       dma_addr_t dst_addr, unsigned int *dlen,
1017 			       u32 compression_crc);
1018 
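/*
 * Completion callback for descriptors submitted in async_irq mode,
 * invoked via the idxd sub-driver's ->desc_complete() hook.  Checks
 * the completion record, falls back to software decompress on an
 * analytics error, optionally runs compress verification, updates the
 * stats and finally completes the acomp request.
 */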
1019 static void iaa_desc_complete(struct idxd_desc *idxd_desc,
1020 			      enum idxd_complete_type comp_type,
1021 			      bool free_desc, void *__ctx,
1022 			      u32 *status)
1023 {
1024 	struct iaa_device_compression_mode *active_compression_mode;
1025 	struct iaa_compression_ctx *compression_ctx;
1026 	struct crypto_ctx *ctx = __ctx;
1027 	struct iaa_device *iaa_device;
1028 	struct idxd_device *idxd;
1029 	struct iaa_wq *iaa_wq;
1030 	struct pci_dev *pdev;
1031 	struct device *dev;
1032 	int ret, err = 0;
1033 
1034 	compression_ctx = crypto_tfm_ctx(ctx->tfm);
1035 
1036 	iaa_wq = idxd_wq_get_private(idxd_desc->wq);
1037 	iaa_device = iaa_wq->iaa_device;
1038 	idxd = iaa_device->idxd;
1039 	pdev = idxd->pdev;
1040 	dev = &pdev->dev;
1041 
1042 	active_compression_mode = get_iaa_device_compression_mode(iaa_device,
1043 								  compression_ctx->mode);
1044 	dev_dbg(dev, "%s: compression mode %s,"
1045 		" ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
1046 		active_compression_mode->name,
1047 		ctx->src_addr, ctx->dst_addr);
1048 
1049 	ret = check_completion(dev, idxd_desc->iax_completion,
1050 			       ctx->compress, false);
1051 	if (ret) {
1052 		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
1053 		if (!ctx->compress &&
1054 		    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
1055 			pr_warn("%s: falling back to deflate-generic decompress, "
1056 				"analytics error code %x\n", __func__,
1057 				idxd_desc->iax_completion->error_code);
1058 			ret = deflate_generic_decompress(ctx->req);
1059 			if (ret) {
1060 				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
1061 					__func__, ret);
1062 				err = -EIO;
1063 				goto err;
1064 			}
1065 		} else {
1066 			err = -EIO;
1067 			goto err;
1068 		}
1069 	} else {
1070 		ctx->req->dlen = idxd_desc->iax_completion->output_size;
1071 	}
1072 
1073 	/* Update stats */
1074 	if (ctx->compress) {
1075 		update_total_comp_bytes_out(ctx->req->dlen);
1076 		update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
1077 	} else {
1078 		update_total_decomp_bytes_in(ctx->req->dlen);
1079 		update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
1080 	}
1081 
1082 	if (ctx->compress && compression_ctx->verify_compress) {
1083 		dma_addr_t src_addr, dst_addr;
1084 		u32 compression_crc;
1085 
1086 		compression_crc = idxd_desc->iax_completion->crc;
1087 
1088 		ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
1089 		if (ret) {
1090 			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
1091 			err = -EIO;
1092 			goto out;
1093 		}
1094 
1095 		ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
1096 					  ctx->req->slen, dst_addr, &ctx->req->dlen,
1097 					  compression_crc);
1098 		if (ret) {
1099 			dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
1100 			err = -EIO;
1101 		}
1102 
1103 		dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
1104 		dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
1105 
1106 		goto out;
1107 	}
1108 err:
1109 	dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
1110 	dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
1111 out:
1112 	if (ret != 0)
1113 		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
1114 
1115 	if (ctx->req->base.complete)
1116 		acomp_request_complete(ctx->req, err);
1117 
1118 	if (free_desc)
1119 		idxd_free_desc(idxd_desc->wq, idxd_desc);
1120 	iaa_wq_put(idxd_desc->wq);
1121 }
1122 
1123 static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
1124 			struct idxd_wq *wq,
1125 			dma_addr_t src_addr, unsigned int slen,
1126 			dma_addr_t dst_addr, unsigned int *dlen,
1127 			u32 *compression_crc,
1128 			bool disable_async)
1129 {
1130 	struct iaa_device_compression_mode *active_compression_mode;
1131 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1132 	struct iaa_device *iaa_device;
1133 	struct idxd_desc *idxd_desc;
1134 	struct iax_hw_desc *desc;
1135 	struct idxd_device *idxd;
1136 	struct iaa_wq *iaa_wq;
1137 	struct pci_dev *pdev;
1138 	struct device *dev;
1139 	int ret = 0;
1140 
1141 	iaa_wq = idxd_wq_get_private(wq);
1142 	iaa_device = iaa_wq->iaa_device;
1143 	idxd = iaa_device->idxd;
1144 	pdev = idxd->pdev;
1145 	dev = &pdev->dev;
1146 
1147 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1148 
1149 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1150 	if (IS_ERR(idxd_desc)) {
1151 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1152 		dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc));
1153 		return PTR_ERR(idxd_desc);
1154 	}
1155 	desc = idxd_desc->iax_hw;
1156 
1157 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
1158 		IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC;
1159 	desc->opcode = IAX_OPCODE_COMPRESS;
1160 	desc->compr_flags = IAA_COMP_FLAGS;
1161 	desc->priv = 0;
1162 
1163 	desc->src1_addr = (u64)src_addr;
1164 	desc->src1_size = slen;
1165 	desc->dst_addr = (u64)dst_addr;
1166 	desc->max_dst_size = *dlen;
1167 	desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr;
1168 	desc->src2_size = sizeof(struct aecs_comp_table_record);
1169 	desc->completion_addr = idxd_desc->compl_dma;
1170 
1171 	if (ctx->use_irq && !disable_async) {
1172 		desc->flags |= IDXD_OP_FLAG_RCI;
1173 
1174 		idxd_desc->crypto.req = req;
1175 		idxd_desc->crypto.tfm = tfm;
1176 		idxd_desc->crypto.src_addr = src_addr;
1177 		idxd_desc->crypto.dst_addr = dst_addr;
1178 		idxd_desc->crypto.compress = true;
1179 
1180 		dev_dbg(dev, "%s use_async_irq: compression mode %s,"
1181 			" src_addr %llx, dst_addr %llx\n", __func__,
1182 			active_compression_mode->name,
1183 			src_addr, dst_addr);
1184 	} else if (ctx->async_mode && !disable_async)
1185 		req->base.data = idxd_desc;
1186 
1187 	dev_dbg(dev, "%s: compression mode %s,"
1188 		" desc->src1_addr %llx, desc->src1_size %d,"
1189 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1190 		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
1191 		active_compression_mode->name,
1192 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1193 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1194 
1195 	ret = idxd_submit_desc(wq, idxd_desc);
1196 	if (ret) {
1197 		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
1198 		goto err;
1199 	}
1200 
1201 	/* Update stats */
1202 	update_total_comp_calls();
1203 	update_wq_comp_calls(wq);
1204 
1205 	if (ctx->async_mode && !disable_async) {
1206 		ret = -EINPROGRESS;
1207 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
1208 		goto out;
1209 	}
1210 
1211 	ret = check_completion(dev, idxd_desc->iax_completion, true, false);
1212 	if (ret) {
1213 		dev_dbg(dev, "check_completion failed ret=%d\n", ret);
1214 		goto err;
1215 	}
1216 
1217 	*dlen = idxd_desc->iax_completion->output_size;
1218 
1219 	/* Update stats */
1220 	update_total_comp_bytes_out(*dlen);
1221 	update_wq_comp_bytes(wq, *dlen);
1222 
1223 	*compression_crc = idxd_desc->iax_completion->crc;
1224 
1225 	if (!ctx->async_mode || disable_async)
1226 		idxd_free_desc(wq, idxd_desc);
1227 out:
1228 	return ret;
1229 err:
1230 	idxd_free_desc(wq, idxd_desc);
1231 	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
1232 
1233 	goto out;
1234 }
1235 
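/*
 * For the verify pass the data flows in the opposite direction (the
 * just-produced dst is decompressed back against src), so unmap the
 * buffers and remap them with the DMA directions swapped.
 */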
1236 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
1237 				struct acomp_req *req,
1238 				dma_addr_t *src_addr, dma_addr_t *dst_addr)
1239 {
1240 	int ret = 0;
1241 	int nr_sgs;
1242 
1243 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1244 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1245 
1246 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1247 	if (nr_sgs <= 0 || nr_sgs > 1) {
1248 		dev_dbg(dev, "verify: couldn't map src sg for iaa device %d,"
1249 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1250 			iaa_wq->wq->id, ret);
1251 		ret = -EIO;
1252 		goto out;
1253 	}
1254 	*src_addr = sg_dma_address(req->src);
1255 	dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1256 		" req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs,
1257 		req->src, req->slen, sg_dma_len(req->src));
1258 
1259 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
1260 	if (nr_sgs <= 0 || nr_sgs > 1) {
1261 		dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d,"
1262 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1263 			iaa_wq->wq->id, ret);
1264 		ret = -EIO;
1265 		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1266 		goto out;
1267 	}
1268 	*dst_addr = sg_dma_address(req->dst);
1269 	dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1270 		" req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs,
1271 		req->dst, req->dlen, sg_dma_len(req->dst));
1272 out:
1273 	return ret;
1274 }
1275 
1276 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
1277 			       struct idxd_wq *wq,
1278 			       dma_addr_t src_addr, unsigned int slen,
1279 			       dma_addr_t dst_addr, unsigned int *dlen,
1280 			       u32 compression_crc)
1281 {
1282 	struct iaa_device_compression_mode *active_compression_mode;
1283 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1284 	struct iaa_device *iaa_device;
1285 	struct idxd_desc *idxd_desc;
1286 	struct iax_hw_desc *desc;
1287 	struct idxd_device *idxd;
1288 	struct iaa_wq *iaa_wq;
1289 	struct pci_dev *pdev;
1290 	struct device *dev;
1291 	int ret = 0;
1292 
1293 	iaa_wq = idxd_wq_get_private(wq);
1294 	iaa_device = iaa_wq->iaa_device;
1295 	idxd = iaa_device->idxd;
1296 	pdev = idxd->pdev;
1297 	dev = &pdev->dev;
1298 
1299 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1300 
1301 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1302 	if (IS_ERR(idxd_desc)) {
1303 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1304 		dev_dbg(dev, "iaa compress failed: ret=%ld\n",
1305 			PTR_ERR(idxd_desc));
1306 		return PTR_ERR(idxd_desc);
1307 	}
1308 	desc = idxd_desc->iax_hw;
1309 
1310 	/* Verify (optional) - decompress and check crc, suppress dest write */
1311 
1312 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
1313 	desc->opcode = IAX_OPCODE_DECOMPRESS;
1314 	desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT;
1315 	desc->priv = 0;
1316 
1317 	desc->src1_addr = (u64)dst_addr;
1318 	desc->src1_size = *dlen;
1319 	desc->dst_addr = (u64)src_addr;
1320 	desc->max_dst_size = slen;
1321 	desc->completion_addr = idxd_desc->compl_dma;
1322 
1323 	dev_dbg(dev, "(verify) compression mode %s,"
1324 		" desc->src1_addr %llx, desc->src1_size %d,"
1325 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1326 		" desc->src2_addr %llx, desc->src2_size %d\n",
1327 		active_compression_mode->name,
1328 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1329 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1330 
1331 	ret = idxd_submit_desc(wq, idxd_desc);
1332 	if (ret) {
1333 		dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret);
1334 		goto err;
1335 	}
1336 
1337 	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
1338 	if (ret) {
1339 		dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret);
1340 		goto err;
1341 	}
1342 
1343 	if (compression_crc != idxd_desc->iax_completion->crc) {
1344 		ret = -EINVAL;
1345 		dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
1346 			" comp=0x%x, decomp=0x%x\n", compression_crc,
1347 			idxd_desc->iax_completion->crc);
1348 		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
1349 			       8, 1, idxd_desc->iax_completion, 64, 0);
1350 		goto err;
1351 	}
1352 
1353 	idxd_free_desc(wq, idxd_desc);
1354 out:
1355 	return ret;
1356 err:
1357 	idxd_free_desc(wq, idxd_desc);
1358 	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
1359 
1360 	goto out;
1361 }
1362 
1363 static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
1364 			  struct idxd_wq *wq,
1365 			  dma_addr_t src_addr, unsigned int slen,
1366 			  dma_addr_t dst_addr, unsigned int *dlen,
1367 			  bool disable_async)
1368 {
1369 	struct iaa_device_compression_mode *active_compression_mode;
1370 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1371 	struct iaa_device *iaa_device;
1372 	struct idxd_desc *idxd_desc;
1373 	struct iax_hw_desc *desc;
1374 	struct idxd_device *idxd;
1375 	struct iaa_wq *iaa_wq;
1376 	struct pci_dev *pdev;
1377 	struct device *dev;
1378 	int ret = 0;
1379 
1380 	iaa_wq = idxd_wq_get_private(wq);
1381 	iaa_device = iaa_wq->iaa_device;
1382 	idxd = iaa_device->idxd;
1383 	pdev = idxd->pdev;
1384 	dev = &pdev->dev;
1385 
1386 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1387 
1388 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1389 	if (IS_ERR(idxd_desc)) {
1390 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1391 		dev_dbg(dev, "iaa decompress failed: ret=%ld\n",
1392 			PTR_ERR(idxd_desc));
1393 		return PTR_ERR(idxd_desc);
1394 	}
1395 	desc = idxd_desc->iax_hw;
1396 
1397 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
1398 	desc->opcode = IAX_OPCODE_DECOMPRESS;
1399 	desc->max_dst_size = PAGE_SIZE;
1400 	desc->decompr_flags = IAA_DECOMP_FLAGS;
1401 	desc->priv = 0;
1402 
1403 	desc->src1_addr = (u64)src_addr;
1404 	desc->dst_addr = (u64)dst_addr;
1405 	desc->max_dst_size = *dlen;
1406 	desc->src1_size = slen;
1407 	desc->completion_addr = idxd_desc->compl_dma;
1408 
1409 	if (ctx->use_irq && !disable_async) {
1410 		desc->flags |= IDXD_OP_FLAG_RCI;
1411 
1412 		idxd_desc->crypto.req = req;
1413 		idxd_desc->crypto.tfm = tfm;
1414 		idxd_desc->crypto.src_addr = src_addr;
1415 		idxd_desc->crypto.dst_addr = dst_addr;
1416 		idxd_desc->crypto.compress = false;
1417 
1418 		dev_dbg(dev, "%s: use_async_irq compression mode %s,"
1419 			" src_addr %llx, dst_addr %llx\n", __func__,
1420 			active_compression_mode->name,
1421 			src_addr, dst_addr);
1422 	} else if (ctx->async_mode && !disable_async)
1423 		req->base.data = idxd_desc;
1424 
1425 	dev_dbg(dev, "%s: decompression mode %s,"
1426 		" desc->src1_addr %llx, desc->src1_size %d,"
1427 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1428 		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
1429 		active_compression_mode->name,
1430 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1431 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1432 
1433 	ret = idxd_submit_desc(wq, idxd_desc);
1434 	if (ret) {
1435 		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
1436 		goto err;
1437 	}
1438 
1439 	/* Update stats */
1440 	update_total_decomp_calls();
1441 	update_wq_decomp_calls(wq);
1442 
1443 	if (ctx->async_mode && !disable_async) {
1444 		ret = -EINPROGRESS;
1445 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
1446 		goto out;
1447 	}
1448 
1449 	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
1450 	if (ret) {
1451 		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
1452 		if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
1453 			pr_warn("%s: falling back to deflate-generic decompress, "
1454 				"analytics error code %x\n", __func__,
1455 				idxd_desc->iax_completion->error_code);
1456 			ret = deflate_generic_decompress(req);
1457 			if (ret) {
1458 				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
1459 					__func__, ret);
1460 				goto err;
1461 			}
1462 		} else {
1463 			goto err;
1464 		}
1465 	} else {
1466 		req->dlen = idxd_desc->iax_completion->output_size;
1467 	}
1468 
1469 	*dlen = req->dlen;
1470 
1471 	if (!ctx->async_mode || disable_async)
1472 		idxd_free_desc(wq, idxd_desc);
1473 
1474 	/* Update stats */
1475 	update_total_decomp_bytes_in(slen);
1476 	update_wq_decomp_bytes(wq, slen);
1477 out:
1478 	return ret;
1479 err:
1480 	idxd_free_desc(wq, idxd_desc);
1481 	dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret);
1482 
1483 	goto out;
1484 }
1485 
1486 static int iaa_comp_acompress(struct acomp_req *req)
1487 {
1488 	struct iaa_compression_ctx *compression_ctx;
1489 	struct crypto_tfm *tfm = req->base.tfm;
1490 	dma_addr_t src_addr, dst_addr;
1491 	bool disable_async = false;
1492 	int nr_sgs, cpu, ret = 0;
1493 	struct iaa_wq *iaa_wq;
1494 	u32 compression_crc;
1495 	struct idxd_wq *wq;
1496 	struct device *dev;
1497 	u64 start_time_ns;
1498 	int order = -1;
1499 
1500 	compression_ctx = crypto_tfm_ctx(tfm);
1501 
1502 	if (!iaa_crypto_enabled) {
1503 		pr_debug("iaa_crypto disabled, not compressing\n");
1504 		return -ENODEV;
1505 	}
1506 
1507 	if (!req->src || !req->slen) {
1508 		pr_debug("invalid src, not compressing\n");
1509 		return -EINVAL;
1510 	}
1511 
1512 	cpu = get_cpu();
1513 	wq = wq_table_next_wq(cpu);
1514 	put_cpu();
1515 	if (!wq) {
1516 		pr_debug("no wq configured for cpu=%d\n", cpu);
1517 		return -ENODEV;
1518 	}
1519 
1520 	ret = iaa_wq_get(wq);
1521 	if (ret) {
1522 		pr_debug("no wq available for cpu=%d\n", cpu);
1523 		return -ENODEV;
1524 	}
1525 
1526 	iaa_wq = idxd_wq_get_private(wq);
1527 
1528 	if (!req->dst) {
1529 		gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
1530 
1531 		/* incompressible data will always be < 2 * slen */
1532 		req->dlen = 2 * req->slen;
1533 		order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
1534 		req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
1535 		if (!req->dst) {
1536 			ret = -ENOMEM;
1537 			order = -1;
1538 			goto out;
1539 		}
1540 		disable_async = true;
1541 	}
1542 
1543 	dev = &wq->idxd->pdev->dev;
1544 
1545 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1546 	if (nr_sgs <= 0 || nr_sgs > 1) {
1547 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1548 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1549 			iaa_wq->wq->id, ret);
1550 		ret = -EIO;
1551 		goto out;
1552 	}
1553 	src_addr = sg_dma_address(req->src);
1554 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1555 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1556 		req->src, req->slen, sg_dma_len(req->src));
1557 
1558 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1559 	if (nr_sgs <= 0 || nr_sgs > 1) {
1560 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1561 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1562 			iaa_wq->wq->id, ret);
1563 		ret = -EIO;
1564 		goto err_map_dst;
1565 	}
1566 	dst_addr = sg_dma_address(req->dst);
1567 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1568 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1569 		req->dst, req->dlen, sg_dma_len(req->dst));
1570 
1571 	start_time_ns = iaa_get_ts();
1572 	ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
1573 			   &req->dlen, &compression_crc, disable_async);
1574 	update_max_comp_delay_ns(start_time_ns);
1575 	if (ret == -EINPROGRESS)
1576 		return ret;
1577 
1578 	if (!ret && compression_ctx->verify_compress) {
1579 		ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr);
1580 		if (ret) {
1581 			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
1582 			goto out;
1583 		}
1584 
1585 		ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
1586 					  dst_addr, &req->dlen, compression_crc);
1587 		if (ret)
1588 			dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
1589 
1590 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
1591 		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1592 
1593 		goto out;
1594 	}
1595 
1596 	if (ret)
1597 		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
1598 
1599 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1600 err_map_dst:
1601 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1602 out:
1603 	iaa_wq_put(wq);
1604 
1605 	if (order >= 0)
1606 		sgl_free_order(req->dst, order);
1607 
1608 	return ret;
1609 }
1610 
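/*
 * Decompress path for requests with no destination buffer: allocate
 * one sized at 4x the source and, on -EOVERFLOW, double it and retry
 * until the result fits or CRYPTO_ACOMP_DST_MAX is exceeded.
 */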
1611 static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
1612 {
1613 	gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1614 		GFP_KERNEL : GFP_ATOMIC;
1615 	struct crypto_tfm *tfm = req->base.tfm;
1616 	dma_addr_t src_addr, dst_addr;
1617 	int nr_sgs, cpu, ret = 0;
1618 	struct iaa_wq *iaa_wq;
1619 	struct device *dev;
1620 	struct idxd_wq *wq;
1621 	u64 start_time_ns;
1622 	int order = -1;
1623 
1624 	cpu = get_cpu();
1625 	wq = wq_table_next_wq(cpu);
1626 	put_cpu();
1627 	if (!wq) {
1628 		pr_debug("no wq configured for cpu=%d\n", cpu);
1629 		return -ENODEV;
1630 	}
1631 
1632 	ret = iaa_wq_get(wq);
1633 	if (ret) {
1634 		pr_debug("no wq available for cpu=%d\n", cpu);
1635 		return -ENODEV;
1636 	}
1637 
1638 	iaa_wq = idxd_wq_get_private(wq);
1639 
1640 	dev = &wq->idxd->pdev->dev;
1641 
1642 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1643 	if (nr_sgs <= 0 || nr_sgs > 1) {
1644 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1645 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1646 			iaa_wq->wq->id, ret);
1647 		ret = -EIO;
1648 		goto out;
1649 	}
1650 	src_addr = sg_dma_address(req->src);
1651 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1652 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1653 		req->src, req->slen, sg_dma_len(req->src));
1654 
1655 	req->dlen = 4 * req->slen; /* start with ~avg comp ratio */
1656 alloc_dest:
1657 	order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
1658 	req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
1659 	if (!req->dst) {
1660 		ret = -ENOMEM;
1661 		order = -1;
1662 		goto out;
1663 	}
1664 
1665 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1666 	if (nr_sgs <= 0 || nr_sgs > 1) {
1667 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1668 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1669 			iaa_wq->wq->id, ret);
1670 		ret = -EIO;
1671 		goto err_map_dst;
1672 	}
1673 
1674 	dst_addr = sg_dma_address(req->dst);
1675 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1676 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1677 		req->dst, req->dlen, sg_dma_len(req->dst));
1678 	start_time_ns = iaa_get_ts();
1679 	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
1680 			     dst_addr, &req->dlen, true);
1681 	update_max_decomp_delay_ns(start_time_ns);
1682 	if (ret == -EOVERFLOW) {
1683 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1684 		req->dlen *= 2;
1685 		if (req->dlen > CRYPTO_ACOMP_DST_MAX)
1686 			goto err_map_dst;
1687 		goto alloc_dest;
1688 	}
1689 
1690 	if (ret != 0)
1691 		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
1692 
1693 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1694 err_map_dst:
1695 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1696 out:
1697 	iaa_wq_put(wq);
1698 
1699 	if (order >= 0)
1700 		sgl_free_order(req->dst, order);
1701 
1702 	return ret;
1703 }
1704 
1705 static int iaa_comp_adecompress(struct acomp_req *req)
1706 {
1707 	struct crypto_tfm *tfm = req->base.tfm;
1708 	dma_addr_t src_addr, dst_addr;
1709 	int nr_sgs, cpu, ret = 0;
1710 	struct iaa_wq *iaa_wq;
1711 	struct device *dev;
1712 	u64 start_time_ns;
1713 	struct idxd_wq *wq;
1714 
1715 	if (!iaa_crypto_enabled) {
1716 		pr_debug("iaa_crypto disabled, not decompressing\n");
1717 		return -ENODEV;
1718 	}
1719 
1720 	if (!req->src || !req->slen) {
1721 		pr_debug("invalid src, not decompressing\n");
1722 		return -EINVAL;
1723 	}
1724 
1725 	if (!req->dst)
1726 		return iaa_comp_adecompress_alloc_dest(req);
1727 
1728 	cpu = get_cpu();
1729 	wq = wq_table_next_wq(cpu);
1730 	put_cpu();
1731 	if (!wq) {
1732 		pr_debug("no wq configured for cpu=%d\n", cpu);
1733 		return -ENODEV;
1734 	}
1735 
1736 	ret = iaa_wq_get(wq);
1737 	if (ret) {
1738 		pr_debug("no wq available for cpu=%d\n", cpu);
1739 		return -ENODEV;
1740 	}
1741 
1742 	iaa_wq = idxd_wq_get_private(wq);
1743 
1744 	dev = &wq->idxd->pdev->dev;
1745 
1746 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1747 	if (nr_sgs <= 0 || nr_sgs > 1) {
1748 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1749 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1750 			iaa_wq->wq->id, ret);
1751 		ret = -EIO;
1752 		goto out;
1753 	}
1754 	src_addr = sg_dma_address(req->src);
1755 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1756 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1757 		req->src, req->slen, sg_dma_len(req->src));
1758 
1759 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1760 	if (nr_sgs <= 0 || nr_sgs > 1) {
1761 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1762 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1763 			iaa_wq->wq->id, ret);
1764 		ret = -EIO;
1765 		goto err_map_dst;
1766 	}
1767 	dst_addr = sg_dma_address(req->dst);
1768 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1769 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1770 		req->dst, req->dlen, sg_dma_len(req->dst));
1771 
1772 	start_time_ns = iaa_get_ts();
1773 	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
1774 			     dst_addr, &req->dlen, false);
1775 	update_max_decomp_delay_ns(start_time_ns);
1776 	if (ret == -EINPROGRESS)
1777 		return ret;
1778 
1779 	if (ret != 0)
1780 		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
1781 
1782 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1783 err_map_dst:
1784 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1785 out:
1786 	iaa_wq_put(wq);
1787 
1788 	return ret;
1789 }
1790 
1791 static void compression_ctx_init(struct iaa_compression_ctx *ctx)
1792 {
1793 	ctx->verify_compress = iaa_verify_compress;
1794 	ctx->async_mode = async_mode;
1795 	ctx->use_irq = use_irq;
1796 }
1797 
1798 static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
1799 {
1800 	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
1801 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1802 
1803 	compression_ctx_init(ctx);
1804 
1805 	ctx->mode = IAA_MODE_FIXED;
1806 
1807 	return 0;
1808 }
1809 
1810 static void dst_free(struct scatterlist *sgl)
1811 {
1812 	/*
1813 	 * Called for req->dst = NULL cases but we free elsewhere
1814 	 * using sgl_free_order().
1815 	 */
1816 }
1817 
1818 static struct acomp_alg iaa_acomp_fixed_deflate = {
1819 	.init			= iaa_comp_init_fixed,
1820 	.compress		= iaa_comp_acompress,
1821 	.decompress		= iaa_comp_adecompress,
1822 	.dst_free               = dst_free,
1823 	.base			= {
1824 		.cra_name		= "deflate",
1825 		.cra_driver_name	= "deflate-iaa",
1826 		.cra_flags		= CRYPTO_ALG_ASYNC,
1827 		.cra_ctxsize		= sizeof(struct iaa_compression_ctx),
1828 		.cra_module		= THIS_MODULE,
1829 		.cra_priority		= IAA_ALG_PRIORITY,
1830 	}
1831 };
1832 
1833 static int iaa_register_compression_device(void)
1834 {
1835 	int ret;
1836 
1837 	ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
1838 	if (ret) {
1839 		pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
1840 		goto out;
1841 	}
1842 
1843 	iaa_crypto_registered = true;
1844 out:
1845 	return ret;
1846 }
1847 
1848 static int iaa_unregister_compression_device(void)
1849 {
1850 	if (iaa_crypto_registered)
1851 		crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
1852 
1853 	return 0;
1854 }
1855 
1856 static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
1857 {
1858 	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
1859 	struct idxd_device *idxd = wq->idxd;
1860 	struct idxd_driver_data *data = idxd->data;
1861 	struct device *dev = &idxd_dev->conf_dev;
1862 	bool first_wq = false;
1863 	int ret = 0;
1864 
1865 	if (idxd->state != IDXD_DEV_ENABLED)
1866 		return -ENXIO;
1867 
1868 	if (data->type != IDXD_TYPE_IAX)
1869 		return -ENODEV;
1870 
1871 	mutex_lock(&wq->wq_lock);
1872 
1873 	if (idxd_wq_get_private(wq)) {
1874 		mutex_unlock(&wq->wq_lock);
1875 		return -EBUSY;
1876 	}
1877 
1878 	if (!idxd_wq_driver_name_match(wq, dev)) {
1879 		dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n",
1880 			idxd->id, wq->id, wq->driver_name, dev->driver->name);
1881 		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
1882 		ret = -ENODEV;
1883 		goto err;
1884 	}
1885 
1886 	wq->type = IDXD_WQT_KERNEL;
1887 
1888 	ret = idxd_drv_enable_wq(wq);
1889 	if (ret < 0) {
1890 		dev_dbg(dev, "enable wq %d.%d failed: %d\n",
1891 			idxd->id, wq->id, ret);
1892 		ret = -ENXIO;
1893 		goto err;
1894 	}
1895 
1896 	mutex_lock(&iaa_devices_lock);
1897 
1898 	if (list_empty(&iaa_devices)) {
1899 		ret = alloc_wq_table(wq->idxd->max_wqs);
1900 		if (ret)
1901 			goto err_alloc;
1902 		first_wq = true;
1903 	}
1904 
1905 	ret = save_iaa_wq(wq);
1906 	if (ret)
1907 		goto err_save;
1908 
1909 	rebalance_wq_table();
1910 
1911 	if (first_wq) {
1912 		iaa_crypto_enabled = true;
1913 		ret = iaa_register_compression_device();
1914 		if (ret != 0) {
1915 			iaa_crypto_enabled = false;
1916 			dev_dbg(dev, "IAA compression device registration failed\n");
1917 			goto err_register;
1918 		}
1919 		try_module_get(THIS_MODULE);
1920 
1921 		pr_info("iaa_crypto now ENABLED\n");
1922 	}
1923 
1924 	mutex_unlock(&iaa_devices_lock);
1925 out:
1926 	mutex_unlock(&wq->wq_lock);
1927 
1928 	return ret;
1929 
1930 err_register:
1931 	remove_iaa_wq(wq);
1932 	free_iaa_wq(idxd_wq_get_private(wq));
1933 err_save:
1934 	if (first_wq)
1935 		free_wq_table();
1936 err_alloc:
1937 	mutex_unlock(&iaa_devices_lock);
1938 	idxd_drv_disable_wq(wq);
1939 err:
1940 	wq->type = IDXD_WQT_NONE;
1941 
1942 	goto out;
1943 }
1944 
1945 static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
1946 {
1947 	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
1948 	struct idxd_device *idxd = wq->idxd;
1949 	struct iaa_wq *iaa_wq;
1950 	bool free = false;
1951 
1952 	idxd_wq_quiesce(wq);
1953 
1954 	mutex_lock(&wq->wq_lock);
1955 	mutex_lock(&iaa_devices_lock);
1956 
1957 	remove_iaa_wq(wq);
1958 
1959 	spin_lock(&idxd->dev_lock);
1960 	iaa_wq = idxd_wq_get_private(wq);
1961 	if (!iaa_wq) {
1962 		spin_unlock(&idxd->dev_lock);
1963 		pr_err("%s: no iaa_wq available to remove\n", __func__);
1964 		goto out;
1965 	}
1966 
1967 	if (iaa_wq->ref) {
1968 		iaa_wq->remove = true;
1969 	} else {
1970 		wq = iaa_wq->wq;
1971 		idxd_wq_set_private(wq, NULL);
1972 		free = true;
1973 	}
1974 	spin_unlock(&idxd->dev_lock);
1975 	if (free) {
1976 		__free_iaa_wq(iaa_wq);
1977 		kfree(iaa_wq);
1978 	}
1979 
1980 	idxd_drv_disable_wq(wq);
1981 	rebalance_wq_table();
1982 
1983 	if (nr_iaa == 0) {
1984 		iaa_crypto_enabled = false;
1985 		free_wq_table();
1986 		module_put(THIS_MODULE);
1987 
1988 		pr_info("iaa_crypto now DISABLED\n");
1989 	}
1990 out:
1991 	mutex_unlock(&iaa_devices_lock);
1992 	mutex_unlock(&wq->wq_lock);
1993 }
1994 
1995 static enum idxd_dev_type dev_types[] = {
1996 	IDXD_DEV_WQ,
1997 	IDXD_DEV_NONE,
1998 };
1999 
2000 static struct idxd_device_driver iaa_crypto_driver = {
2001 	.probe = iaa_crypto_probe,
2002 	.remove = iaa_crypto_remove,
2003 	.name = IDXD_SUBDRIVER_NAME,
2004 	.type = dev_types,
2005 	.desc_complete = iaa_desc_complete,
2006 };
2007 
2008 static int __init iaa_crypto_init_module(void)
2009 {
2010 	int ret = 0;
2011 	int node;
2012 
2013 	nr_cpus = num_online_cpus();
2014 	for_each_node_with_cpus(node)
2015 		nr_nodes++;
2016 	if (!nr_nodes) {
2017 		pr_err("IAA couldn't find any nodes with cpus\n");
2018 		return -ENODEV;
2019 	}
2020 	nr_cpus_per_node = nr_cpus / nr_nodes;
2021 
2022 	if (crypto_has_comp("deflate-generic", 0, 0))
2023 		deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
2024 
2025 	if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
2026 		pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
2027 		       "deflate-generic", PTR_ERR(deflate_generic_tfm));
2028 		return -ENOMEM;
2029 	}
2030 
2031 	ret = iaa_aecs_init_fixed();
2032 	if (ret < 0) {
2033 		pr_debug("IAA fixed compression mode init failed\n");
2034 		goto err_aecs_init;
2035 	}
2036 
2037 	ret = idxd_driver_register(&iaa_crypto_driver);
2038 	if (ret) {
2039 		pr_debug("IAA wq sub-driver registration failed\n");
2040 		goto err_driver_reg;
2041 	}
2042 
2043 	ret = driver_create_file(&iaa_crypto_driver.drv,
2044 				 &driver_attr_verify_compress);
2045 	if (ret) {
2046 		pr_debug("IAA verify_compress attr creation failed\n");
2047 		goto err_verify_attr_create;
2048 	}
2049 
2050 	ret = driver_create_file(&iaa_crypto_driver.drv,
2051 				 &driver_attr_sync_mode);
2052 	if (ret) {
2053 		pr_debug("IAA sync mode attr creation failed\n");
2054 		goto err_sync_attr_create;
2055 	}
2056 
2057 	if (iaa_crypto_debugfs_init())
2058 		pr_warn("debugfs init failed, stats not available\n");
2059 
2060 	pr_debug("initialized\n");
2061 out:
2062 	return ret;
2063 
2064 err_sync_attr_create:
2065 	driver_remove_file(&iaa_crypto_driver.drv,
2066 			   &driver_attr_verify_compress);
2067 err_verify_attr_create:
2068 	idxd_driver_unregister(&iaa_crypto_driver);
2069 err_driver_reg:
2070 	iaa_aecs_cleanup_fixed();
2071 err_aecs_init:
2072 	crypto_free_comp(deflate_generic_tfm);
2073 
2074 	goto out;
2075 }
2076 
2077 static void __exit iaa_crypto_cleanup_module(void)
2078 {
2079 	if (iaa_unregister_compression_device())
2080 		pr_debug("IAA compression device unregister failed\n");
2081 
2082 	iaa_crypto_debugfs_cleanup();
2083 	driver_remove_file(&iaa_crypto_driver.drv,
2084 			   &driver_attr_sync_mode);
2085 	driver_remove_file(&iaa_crypto_driver.drv,
2086 			   &driver_attr_verify_compress);
2087 	idxd_driver_unregister(&iaa_crypto_driver);
2088 	iaa_aecs_cleanup_fixed();
2089 	crypto_free_comp(deflate_generic_tfm);
2090 
2091 	pr_debug("cleaned up\n");
2092 }
2093 
2094 MODULE_IMPORT_NS(IDXD);
2095 MODULE_LICENSE("GPL");
2096 MODULE_ALIAS_IDXD_DEVICE(0);
2097 MODULE_AUTHOR("Intel Corporation");
2098 MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver");
2099 
2100 module_init(iaa_crypto_init_module);
2101 module_exit(iaa_crypto_cleanup_module);
2102