1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
3 
4 #include <linux/init.h>
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/pci.h>
8 #include <linux/device.h>
9 #include <linux/iommu.h>
10 #include <uapi/linux/idxd.h>
11 #include <linux/highmem.h>
12 #include <linux/sched/smt.h>
13 #include <crypto/internal/acompress.h>
14 
15 #include "idxd.h"
16 #include "iaa_crypto.h"
17 #include "iaa_crypto_stats.h"
18 
19 #ifdef pr_fmt
20 #undef pr_fmt
21 #endif
22 
23 #define pr_fmt(fmt)			"idxd: " IDXD_SUBDRIVER_NAME ": " fmt
24 
25 #define IAA_ALG_PRIORITY               300
26 
27 /* number of iaa instances probed */
28 static unsigned int nr_iaa;
29 static unsigned int nr_cpus;
30 static unsigned int nr_nodes;
31 static unsigned int nr_cpus_per_node;
32 
33 /* Number of physical cpus sharing each iaa instance */
34 static unsigned int cpus_per_iaa;
35 
36 static struct crypto_comp *deflate_generic_tfm;
37 
38 /* Per-cpu lookup table for balanced wqs */
39 static struct wq_table_entry __percpu *wq_table;
40 
41 static struct idxd_wq *wq_table_next_wq(int cpu)
42 {
43 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
44 
45 	if (++entry->cur_wq >= entry->n_wqs)
46 		entry->cur_wq = 0;
47 
48 	if (!entry->wqs[entry->cur_wq])
49 		return NULL;
50 
51 	pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
52 		 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id,
53 		 entry->wqs[entry->cur_wq]->id, cpu);
54 
55 	return entry->wqs[entry->cur_wq];
56 }
57 
58 static void wq_table_add(int cpu, struct idxd_wq *wq)
59 {
60 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
61 
62 	if (WARN_ON(entry->n_wqs == entry->max_wqs))
63 		return;
64 
65 	entry->wqs[entry->n_wqs++] = wq;
66 
67 	pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
68 		 entry->wqs[entry->n_wqs - 1]->idxd->id,
69 		 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
70 }
71 
72 static void wq_table_free_entry(int cpu)
73 {
74 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
75 
76 	kfree(entry->wqs);
77 	memset(entry, 0, sizeof(*entry));
78 }
79 
80 static void wq_table_clear_entry(int cpu)
81 {
82 	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
83 
84 	entry->n_wqs = 0;
85 	entry->cur_wq = 0;
86 	memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *));
87 }
88 
89 LIST_HEAD(iaa_devices);
90 DEFINE_MUTEX(iaa_devices_lock);
91 
92 /* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
93 static bool iaa_crypto_enabled;
94 static bool iaa_crypto_registered;
95 
96 /* Verify results of IAA compress or not */
97 static bool iaa_verify_compress = true;
98 
99 static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
100 {
101 	return sprintf(buf, "%d\n", iaa_verify_compress);
102 }
103 
104 static ssize_t verify_compress_store(struct device_driver *driver,
105 				     const char *buf, size_t count)
106 {
107 	int ret = -EBUSY;
108 
109 	mutex_lock(&iaa_devices_lock);
110 
111 	if (iaa_crypto_enabled)
112 		goto out;
113 
114 	ret = kstrtobool(buf, &iaa_verify_compress);
115 	if (ret)
116 		goto out;
117 
118 	ret = count;
119 out:
120 	mutex_unlock(&iaa_devices_lock);
121 
122 	return ret;
123 }
124 static DRIVER_ATTR_RW(verify_compress);
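/*
 * Illustrative usage only (not part of the driver): verify_compress is
 * a driver attribute, so assuming the default "crypto" driver name on
 * the dsa bus it would typically be toggled from userspace before any
 * wqs are bound, e.g.:
 *
 *   echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress
 *
 * The store above returns -EBUSY once iaa_crypto_enabled is set; the
 * exact sysfs path is an assumption based on the registered driver name.
 */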
125 
126 /*
127  * The iaa crypto driver supports three 'sync' methods determining how
128  * compressions and decompressions are performed:
129  *
130  * - sync:      the compression or decompression completes before
131  *              returning.  This is the mode used by the async crypto
132  *              interface when the sync mode is set to 'sync' and by
133  *              the sync crypto interface regardless of setting.
134  *
135  * - async:     the compression or decompression is submitted and returns
136  *              immediately.  Completion interrupts are not used so
137  *              the caller is responsible for polling the descriptor
138  *              for completion.  This mode is applicable to only the
139  *              async crypto interface and is ignored for anything
140  *              else.
141  *
142  * - async_irq: the compression or decompression is submitted and
143  *              returns immediately.  Completion interrupts are
144  *              enabled so the caller can wait for the completion and
145  *              yield to other threads.  When the compression or
146  *              decompression completes, the completion is signaled
147  *              and the caller awakened.  This mode is applicable to
148  *              only the async crypto interface and is ignored for
149  *              anything else.
150  *
151  * These modes can be set using the iaa_crypto sync_mode driver
152  * attribute.
153  */
154 
155 /* Use async mode */
156 static bool async_mode;
157 /* Use interrupts */
158 static bool use_irq;
159 
160 /**
161  * set_iaa_sync_mode - Set IAA sync mode
162  * @name: The name of the sync mode
163  *
164  * Make the IAA sync mode named @name the current sync mode used by
165  * compression/decompression.
166  */
167 
168 static int set_iaa_sync_mode(const char *name)
169 {
170 	int ret = 0;
171 
172 	if (sysfs_streq(name, "sync")) {
173 		async_mode = false;
174 		use_irq = false;
175 	} else if (sysfs_streq(name, "async")) {
176 		async_mode = true;
177 		use_irq = false;
178 	} else if (sysfs_streq(name, "async_irq")) {
179 		async_mode = true;
180 		use_irq = true;
181 	} else {
182 		ret = -EINVAL;
183 	}
184 
185 	return ret;
186 }
187 
188 static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
189 {
190 	int ret = 0;
191 
192 	if (!async_mode && !use_irq)
193 		ret = sprintf(buf, "%s\n", "sync");
194 	else if (async_mode && !use_irq)
195 		ret = sprintf(buf, "%s\n", "async");
196 	else if (async_mode && use_irq)
197 		ret = sprintf(buf, "%s\n", "async_irq");
198 
199 	return ret;
200 }
201 
202 static ssize_t sync_mode_store(struct device_driver *driver,
203 			       const char *buf, size_t count)
204 {
205 	int ret = -EBUSY;
206 
207 	mutex_lock(&iaa_devices_lock);
208 
209 	if (iaa_crypto_enabled)
210 		goto out;
211 
212 	ret = set_iaa_sync_mode(buf);
213 	if (ret == 0)
214 		ret = count;
215 out:
216 	mutex_unlock(&iaa_devices_lock);
217 
218 	return ret;
219 }
220 static DRIVER_ATTR_RW(sync_mode);
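/*
 * Illustrative usage only: like verify_compress above, sync_mode can
 * only be changed while no IAA wqs are in use, e.g. (path assumed from
 * the default "crypto" driver name on the dsa bus):
 *
 *   echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
 *
 * Writes of "sync", "async" or "async_irq" map onto the async_mode and
 * use_irq flags exactly as set_iaa_sync_mode() does.
 */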
221 
222 static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
223 
224 static int find_empty_iaa_compression_mode(void)
225 {
226 	int i = -EINVAL;
227 
228 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
229 		if (iaa_compression_modes[i])
230 			continue;
231 		break;
232 	}
233 
234 	return i;
235 }
236 
237 static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
238 {
239 	struct iaa_compression_mode *mode;
240 	int i;
241 
242 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
243 		mode = iaa_compression_modes[i];
244 		if (!mode)
245 			continue;
246 
247 		if (!strcmp(mode->name, name)) {
248 			*idx = i;
249 			return iaa_compression_modes[i];
250 		}
251 	}
252 
253 	return NULL;
254 }
255 
256 static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
257 {
258 	kfree(mode->name);
259 	kfree(mode->ll_table);
260 	kfree(mode->d_table);
261 
262 	kfree(mode);
263 }
264 
265 /*
266  * IAA Compression modes are defined by an ll_table and a d_table.
267  * These tables are typically generated and captured using statistics
268  * collected from running actual compress/decompress workloads.
269  *
270  * A module or other kernel code can add and remove compression modes
271  * with a given name using the exported @add_iaa_compression_mode()
272  * and @remove_iaa_compression_mode() functions.
273  *
274  * When a new compression mode is added, the tables are saved in a
275  * global compression mode list.  When IAA devices are added, a
276  * per-IAA device dma mapping is created for each IAA device, for each
277  * compression mode.  These are the tables used to do the actual
278  * compression/decompression and are unmapped if/when the devices are
279  * removed.  Currently, compression modes must be added before any
280  * device is added, and removed after all devices have been removed.
281  */
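/*
 * Minimal usage sketch (hypothetical mode name and table, not part of
 * this driver): a module supplying its own canned Huffman tables would
 * register them once at init time and drop them on exit, e.g.:
 *
 *   static const u32 my_ll_table[] = { ... };
 *
 *   ret = add_iaa_compression_mode("my-mode", my_ll_table,
 *                                  sizeof(my_ll_table), NULL, 0,
 *                                  NULL, NULL);
 *   ...
 *   remove_iaa_compression_mode("my-mode");
 *
 * Both calls must happen while no IAA devices are probed, per the
 * constraint described above.
 */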
282 
283 /**
284  * remove_iaa_compression_mode - Remove an IAA compression mode
285  * @name: The name of the compression mode to remove
286  *
287  * Remove the IAA compression mode named @name.
288  */
289 void remove_iaa_compression_mode(const char *name)
290 {
291 	struct iaa_compression_mode *mode;
292 	int idx;
293 
294 	mutex_lock(&iaa_devices_lock);
295 
296 	if (!list_empty(&iaa_devices))
297 		goto out;
298 
299 	mode = find_iaa_compression_mode(name, &idx);
300 	if (mode) {
301 		free_iaa_compression_mode(mode);
302 		iaa_compression_modes[idx] = NULL;
303 	}
304 out:
305 	mutex_unlock(&iaa_devices_lock);
306 }
307 EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
308 
309 /**
310  * add_iaa_compression_mode - Add an IAA compression mode
311  * @name: The name the compression mode will be known as
312  * @ll_table: The ll table
313  * @ll_table_size: The ll table size in bytes
314  * @d_table: The d table
315  * @d_table_size: The d table size in bytes
316  * @init: Optional callback function to init the compression mode data
317  * @free: Optional callback function to free the compression mode data
318  *
319  * Add a new IAA compression mode named @name.
320  *
321  * Returns 0 if successful, errcode otherwise.
322  */
323 int add_iaa_compression_mode(const char *name,
324 			     const u32 *ll_table,
325 			     int ll_table_size,
326 			     const u32 *d_table,
327 			     int d_table_size,
328 			     iaa_dev_comp_init_fn_t init,
329 			     iaa_dev_comp_free_fn_t free)
330 {
331 	struct iaa_compression_mode *mode;
332 	int idx, ret = -ENOMEM;
333 
334 	mutex_lock(&iaa_devices_lock);
335 
336 	if (!list_empty(&iaa_devices)) {
337 		ret = -EBUSY;
338 		goto out;
339 	}
340 
341 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
342 	if (!mode)
343 		goto out;
344 
345 	mode->name = kstrdup(name, GFP_KERNEL);
346 	if (!mode->name)
347 		goto free;
348 
349 	if (ll_table) {
350 		mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL);
351 		if (!mode->ll_table)
352 			goto free;
353 		mode->ll_table_size = ll_table_size;
354 	}
355 
356 	if (d_table) {
357 		mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL);
358 		if (!mode->d_table)
359 			goto free;
360 		mode->d_table_size = d_table_size;
361 	}
362 
363 	mode->init = init;
364 	mode->free = free;
365 
366 	idx = find_empty_iaa_compression_mode();
367 	if (idx < 0)
368 		goto free;
369 
370 	pr_debug("IAA compression mode %s added at idx %d\n",
371 		 mode->name, idx);
372 
373 	iaa_compression_modes[idx] = mode;
374 
375 	ret = 0;
376 out:
377 	mutex_unlock(&iaa_devices_lock);
378 
379 	return ret;
380 free:
381 	free_iaa_compression_mode(mode);
382 	goto out;
383 }
384 EXPORT_SYMBOL_GPL(add_iaa_compression_mode);
385 
386 static struct iaa_device_compression_mode *
387 get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
388 {
389 	return iaa_device->compression_modes[idx];
390 }
391 
392 static void free_device_compression_mode(struct iaa_device *iaa_device,
393 					 struct iaa_device_compression_mode *device_mode)
394 {
395 	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
396 	struct device *dev = &iaa_device->idxd->pdev->dev;
397 
398 	kfree(device_mode->name);
399 
400 	if (device_mode->aecs_comp_table)
401 		dma_free_coherent(dev, size, device_mode->aecs_comp_table,
402 				  device_mode->aecs_comp_table_dma_addr);
403 	kfree(device_mode);
404 }
405 
406 #define IDXD_OP_FLAG_AECS_RW_TGLS       0x400000
407 #define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
408 #define IAX_AECS_COMPRESS_FLAG	(IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
409 #define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
410 #define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
411 						IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
412 						IDXD_OP_FLAG_AECS_RW_TGLS)
413 
414 static int check_completion(struct device *dev,
415 			    struct iax_completion_record *comp,
416 			    bool compress,
417 			    bool only_once);
418 
419 static int init_device_compression_mode(struct iaa_device *iaa_device,
420 					struct iaa_compression_mode *mode,
421 					int idx, struct idxd_wq *wq)
422 {
423 	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
424 	struct device *dev = &iaa_device->idxd->pdev->dev;
425 	struct iaa_device_compression_mode *device_mode;
426 	int ret = -ENOMEM;
427 
428 	device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL);
429 	if (!device_mode)
430 		return -ENOMEM;
431 
432 	device_mode->name = kstrdup(mode->name, GFP_KERNEL);
433 	if (!device_mode->name)
434 		goto free;
435 
436 	device_mode->aecs_comp_table = dma_alloc_coherent(dev, size,
437 							  &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL);
438 	if (!device_mode->aecs_comp_table)
439 		goto free;
440 
441 	/* Add Huffman table to aecs */
442 	memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
443 	memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
444 	memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
445 
446 	if (mode->init) {
447 		ret = mode->init(device_mode);
448 		if (ret)
449 			goto free;
450 	}
451 
452 	/* mode index should match iaa_compression_modes idx */
453 	iaa_device->compression_modes[idx] = device_mode;
454 
455 	pr_debug("IAA %s compression mode initialized for iaa device %d\n",
456 		 mode->name, iaa_device->idxd->id);
457 
458 	ret = 0;
459 out:
460 	return ret;
461 free:
462 	pr_debug("IAA %s compression mode initialization failed for iaa device %d\n",
463 		 mode->name, iaa_device->idxd->id);
464 
465 	free_device_compression_mode(iaa_device, device_mode);
466 	goto out;
467 }
468 
469 static int init_device_compression_modes(struct iaa_device *iaa_device,
470 					 struct idxd_wq *wq)
471 {
472 	struct iaa_compression_mode *mode;
473 	int i, ret = 0;
474 
475 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
476 		mode = iaa_compression_modes[i];
477 		if (!mode)
478 			continue;
479 
480 		ret = init_device_compression_mode(iaa_device, mode, i, wq);
481 		if (ret)
482 			break;
483 	}
484 
485 	return ret;
486 }
487 
488 static void remove_device_compression_modes(struct iaa_device *iaa_device)
489 {
490 	struct iaa_device_compression_mode *device_mode;
491 	int i;
492 
493 	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
494 		device_mode = iaa_device->compression_modes[i];
495 		if (!device_mode)
496 			continue;
497 
498 		if (iaa_compression_modes[i]->free)
499 			iaa_compression_modes[i]->free(device_mode);
500 		free_device_compression_mode(iaa_device, device_mode);
501 		iaa_device->compression_modes[i] = NULL;
502 	}
503 }
504 
505 static struct iaa_device *iaa_device_alloc(void)
506 {
507 	struct iaa_device *iaa_device;
508 
509 	iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL);
510 	if (!iaa_device)
511 		return NULL;
512 
513 	INIT_LIST_HEAD(&iaa_device->wqs);
514 
515 	return iaa_device;
516 }
517 
518 static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
519 {
520 	struct iaa_wq *iaa_wq;
521 
522 	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
523 		if (iaa_wq->wq == wq)
524 			return true;
525 	}
526 
527 	return false;
528 }
529 
530 static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
531 {
532 	struct iaa_device *iaa_device;
533 
534 	iaa_device = iaa_device_alloc();
535 	if (!iaa_device)
536 		return NULL;
537 
538 	iaa_device->idxd = idxd;
539 
540 	list_add_tail(&iaa_device->list, &iaa_devices);
541 
542 	nr_iaa++;
543 
544 	return iaa_device;
545 }
546 
547 static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
548 {
549 	int ret = 0;
550 
551 	ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
552 	if (ret)
553 		return ret;
554 
555 	return ret;
556 }
557 
558 static void del_iaa_device(struct iaa_device *iaa_device)
559 {
560 	list_del(&iaa_device->list);
561 
562 	nr_iaa--;
563 }
564 
565 static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
566 		      struct iaa_wq **new_wq)
567 {
568 	struct idxd_device *idxd = iaa_device->idxd;
569 	struct pci_dev *pdev = idxd->pdev;
570 	struct device *dev = &pdev->dev;
571 	struct iaa_wq *iaa_wq;
572 
573 	iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
574 	if (!iaa_wq)
575 		return -ENOMEM;
576 
577 	iaa_wq->wq = wq;
578 	iaa_wq->iaa_device = iaa_device;
579 	idxd_wq_set_private(wq, iaa_wq);
580 
581 	list_add_tail(&iaa_wq->list, &iaa_device->wqs);
582 
583 	iaa_device->n_wq++;
584 
585 	if (new_wq)
586 		*new_wq = iaa_wq;
587 
588 	dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n",
589 		wq->id, iaa_device->idxd->id, iaa_device->n_wq);
590 
591 	return 0;
592 }
593 
594 static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
595 {
596 	struct idxd_device *idxd = iaa_device->idxd;
597 	struct pci_dev *pdev = idxd->pdev;
598 	struct device *dev = &pdev->dev;
599 	struct iaa_wq *iaa_wq;
600 
601 	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
602 		if (iaa_wq->wq == wq) {
603 			list_del(&iaa_wq->list);
604 			iaa_device->n_wq--;
605 
606 			dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n",
607 				wq->id, iaa_device->idxd->id,
608 				iaa_device->n_wq, nr_iaa);
609 
610 			if (iaa_device->n_wq == 0)
611 				del_iaa_device(iaa_device);
612 			break;
613 		}
614 	}
615 }
616 
617 static void clear_wq_table(void)
618 {
619 	int cpu;
620 
621 	for (cpu = 0; cpu < nr_cpus; cpu++)
622 		wq_table_clear_entry(cpu);
623 
624 	pr_debug("cleared wq table\n");
625 }
626 
627 static void free_iaa_device(struct iaa_device *iaa_device)
628 {
629 	if (!iaa_device)
630 		return;
631 
632 	remove_device_compression_modes(iaa_device);
633 	kfree(iaa_device);
634 }
635 
636 static void __free_iaa_wq(struct iaa_wq *iaa_wq)
637 {
638 	struct iaa_device *iaa_device;
639 
640 	if (!iaa_wq)
641 		return;
642 
643 	iaa_device = iaa_wq->iaa_device;
644 	if (iaa_device->n_wq == 0)
645 		free_iaa_device(iaa_wq->iaa_device);
646 }
647 
648 static void free_iaa_wq(struct iaa_wq *iaa_wq)
649 {
650 	struct idxd_wq *wq;
651 
652 	__free_iaa_wq(iaa_wq);
653 
654 	wq = iaa_wq->wq;
655 
656 	kfree(iaa_wq);
657 	idxd_wq_set_private(wq, NULL);
658 }
659 
660 static int iaa_wq_get(struct idxd_wq *wq)
661 {
662 	struct idxd_device *idxd = wq->idxd;
663 	struct iaa_wq *iaa_wq;
664 	int ret = 0;
665 
666 	spin_lock(&idxd->dev_lock);
667 	iaa_wq = idxd_wq_get_private(wq);
668 	if (iaa_wq && !iaa_wq->remove) {
669 		iaa_wq->ref++;
670 		idxd_wq_get(wq);
671 	} else {
672 		ret = -ENODEV;
673 	}
674 	spin_unlock(&idxd->dev_lock);
675 
676 	return ret;
677 }
678 
679 static int iaa_wq_put(struct idxd_wq *wq)
680 {
681 	struct idxd_device *idxd = wq->idxd;
682 	struct iaa_wq *iaa_wq;
683 	bool free = false;
684 	int ret = 0;
685 
686 	spin_lock(&idxd->dev_lock);
687 	iaa_wq = idxd_wq_get_private(wq);
688 	if (iaa_wq) {
689 		iaa_wq->ref--;
690 		if (iaa_wq->ref == 0 && iaa_wq->remove) {
691 			idxd_wq_set_private(wq, NULL);
692 			free = true;
693 		}
694 		idxd_wq_put(wq);
695 	} else {
696 		ret = -ENODEV;
697 	}
698 	spin_unlock(&idxd->dev_lock);
699 	if (free) {
700 		__free_iaa_wq(iaa_wq);
701 		kfree(iaa_wq);
702 	}
703 
704 	return ret;
705 }
706 
707 static void free_wq_table(void)
708 {
709 	int cpu;
710 
711 	for (cpu = 0; cpu < nr_cpus; cpu++)
712 		wq_table_free_entry(cpu);
713 
714 	free_percpu(wq_table);
715 
716 	pr_debug("freed wq table\n");
717 }
718 
719 static int alloc_wq_table(int max_wqs)
720 {
721 	struct wq_table_entry *entry;
722 	int cpu;
723 
724 	wq_table = alloc_percpu(struct wq_table_entry);
725 	if (!wq_table)
726 		return -ENOMEM;
727 
728 	for (cpu = 0; cpu < nr_cpus; cpu++) {
729 		entry = per_cpu_ptr(wq_table, cpu);
730 		entry->wqs = kcalloc(max_wqs, sizeof(struct idxd_wq *), GFP_KERNEL);
731 		if (!entry->wqs) {
732 			free_wq_table();
733 			return -ENOMEM;
734 		}
735 
736 		entry->max_wqs = max_wqs;
737 	}
738 
739 	pr_debug("initialized wq table\n");
740 
741 	return 0;
742 }
743 
744 static int save_iaa_wq(struct idxd_wq *wq)
745 {
746 	struct iaa_device *iaa_device, *found = NULL;
747 	struct idxd_device *idxd;
748 	struct pci_dev *pdev;
749 	struct device *dev;
750 	int ret = 0;
751 
752 	list_for_each_entry(iaa_device, &iaa_devices, list) {
753 		if (iaa_device->idxd == wq->idxd) {
754 			idxd = iaa_device->idxd;
755 			pdev = idxd->pdev;
756 			dev = &pdev->dev;
757 			/*
758 			 * Check to see that we don't already have this wq.
759 			 * Shouldn't happen but we don't control probing.
760 			 */
761 			if (iaa_has_wq(iaa_device, wq)) {
762 				dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
763 					iaa_device);
764 				goto out;
765 			}
766 
767 			found = iaa_device;
768 
769 			ret = add_iaa_wq(iaa_device, wq, NULL);
770 			if (ret)
771 				goto out;
772 
773 			break;
774 		}
775 	}
776 
777 	if (!found) {
778 		struct iaa_device *new_device;
779 		struct iaa_wq *new_wq;
780 
781 		new_device = add_iaa_device(wq->idxd);
782 		if (!new_device) {
783 			ret = -ENOMEM;
784 			goto out;
785 		}
786 
787 		ret = add_iaa_wq(new_device, wq, &new_wq);
788 		if (ret) {
789 			del_iaa_device(new_device);
790 			free_iaa_device(new_device);
791 			goto out;
792 		}
793 
794 		ret = init_iaa_device(new_device, new_wq);
795 		if (ret) {
796 			del_iaa_wq(new_device, new_wq->wq);
797 			del_iaa_device(new_device);
798 			free_iaa_wq(new_wq);
799 			goto out;
800 		}
801 	}
802 
803 	if (WARN_ON(nr_iaa == 0))
804 		return -EINVAL;
805 
806 	cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
807 	if (!cpus_per_iaa)
808 		cpus_per_iaa = 1;
809 out:
810 	return ret;
811 }
812 
813 static void remove_iaa_wq(struct idxd_wq *wq)
814 {
815 	struct iaa_device *iaa_device;
816 
817 	list_for_each_entry(iaa_device, &iaa_devices, list) {
818 		if (iaa_has_wq(iaa_device, wq)) {
819 			del_iaa_wq(iaa_device, wq);
820 			break;
821 		}
822 	}
823 
824 	if (nr_iaa) {
825 		cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
826 		if (!cpus_per_iaa)
827 			cpus_per_iaa = 1;
828 	} else
829 		cpus_per_iaa = 1;
830 }
831 
832 static int wq_table_add_wqs(int iaa, int cpu)
833 {
834 	struct iaa_device *iaa_device, *found_device = NULL;
835 	int ret = 0, cur_iaa = 0, n_wqs_added = 0;
836 	struct idxd_device *idxd;
837 	struct iaa_wq *iaa_wq;
838 	struct pci_dev *pdev;
839 	struct device *dev;
840 
841 	list_for_each_entry(iaa_device, &iaa_devices, list) {
842 		idxd = iaa_device->idxd;
843 		pdev = idxd->pdev;
844 		dev = &pdev->dev;
845 
846 		if (cur_iaa != iaa) {
847 			cur_iaa++;
848 			continue;
849 		}
850 
851 		found_device = iaa_device;
852 		dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
853 			found_device->idxd->id, cur_iaa);
854 		break;
855 	}
856 
857 	if (!found_device) {
858 		found_device = list_first_entry_or_null(&iaa_devices,
859 							struct iaa_device, list);
860 		if (!found_device) {
861 			pr_debug("couldn't find any iaa devices with wqs!\n");
862 			ret = -EINVAL;
863 			goto out;
864 		}
865 		cur_iaa = 0;
866 
867 		idxd = found_device->idxd;
868 		pdev = idxd->pdev;
869 		dev = &pdev->dev;
870 		dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
871 			found_device->idxd->id, cur_iaa);
872 	}
873 
874 	list_for_each_entry(iaa_wq, &found_device->wqs, list) {
875 		wq_table_add(cpu, iaa_wq->wq);
876 		pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
877 			 cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
878 		n_wqs_added++;
879 	}
880 
881 	if (!n_wqs_added) {
882 		pr_debug("couldn't find any iaa wqs!\n");
883 		ret = -EINVAL;
884 		goto out;
885 	}
886 out:
887 	return ret;
888 }
889 
890 /*
891  * Rebalance the wq table so that given a cpu, it's easy to find the
892  * closest IAA instance.  The idea is to try to choose the most
893  * appropriate IAA instance for a caller and spread available
894  * workqueues around to clients.
895  */
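/*
 * Worked example (numbers assumed for illustration): with 2 nodes,
 * 64 cpus per node and 8 IAA instances, cpus_per_iaa is
 * (2 * 64) / 8 = 16, so the first 16 cpus of a node are given the wqs
 * of one IAA instance, the next 16 cpus the wqs of the next instance,
 * and so on, with the IAA index continuing across nodes.
 */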
896 static void rebalance_wq_table(void)
897 {
898 	const struct cpumask *node_cpus;
899 	int node, cpu, iaa = -1;
900 
901 	if (nr_iaa == 0)
902 		return;
903 
904 	pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n",
905 		 nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa);
906 
907 	clear_wq_table();
908 
909 	if (nr_iaa == 1) {
910 		for (cpu = 0; cpu < nr_cpus; cpu++) {
911 			if (WARN_ON(wq_table_add_wqs(0, cpu))) {
912 				pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
913 				return;
914 			}
915 		}
916 
917 		return;
918 	}
919 
920 	for_each_node_with_cpus(node) {
921 		node_cpus = cpumask_of_node(node);
922 
923 		for (cpu = 0; cpu <  cpumask_weight(node_cpus); cpu++) {
924 			int node_cpu = cpumask_nth(cpu, node_cpus);
925 
926 			if (WARN_ON(node_cpu >= nr_cpu_ids)) {
927 				pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
928 				return;
929 			}
930 
931 			if ((cpu % cpus_per_iaa) == 0)
932 				iaa++;
933 
934 			if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
935 				pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
936 				return;
937 			}
938 		}
939 	}
940 }
941 
942 static inline int check_completion(struct device *dev,
943 				   struct iax_completion_record *comp,
944 				   bool compress,
945 				   bool only_once)
946 {
947 	char *op_str = compress ? "compress" : "decompress";
948 	int ret = 0;
949 
950 	while (!comp->status) {
951 		if (only_once)
952 			return -EAGAIN;
953 		cpu_relax();
954 	}
955 
956 	if (comp->status != IAX_COMP_SUCCESS) {
957 		if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) {
958 			ret = -ETIMEDOUT;
959 			dev_dbg(dev, "%s timed out, size=0x%x\n",
960 				op_str, comp->output_size);
961 			update_completion_timeout_errs();
962 			goto out;
963 		}
964 
965 		if (comp->status == IAA_ANALYTICS_ERROR &&
966 		    comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) {
967 			ret = -E2BIG;
968 			dev_dbg(dev, "compressed > uncompressed size,"
969 				" not compressing, size=0x%x\n",
970 				comp->output_size);
971 			update_completion_comp_buf_overflow_errs();
972 			goto out;
973 		}
974 
975 		if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) {
976 			ret = -EOVERFLOW;
977 			goto out;
978 		}
979 
980 		ret = -EINVAL;
981 		dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n",
982 			op_str, comp->status, comp->error_code, comp->output_size);
983 		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0);
984 		update_completion_einval_errs();
985 
986 		goto out;
987 	}
988 out:
989 	return ret;
990 }
991 
992 static int deflate_generic_decompress(struct acomp_req *req)
993 {
994 	void *src, *dst;
995 	int ret;
996 
997 	src = kmap_local_page(sg_page(req->src)) + req->src->offset;
998 	dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
999 
1000 	ret = crypto_comp_decompress(deflate_generic_tfm,
1001 				     src, req->slen, dst, &req->dlen);
1002 
1003 	kunmap_local(src);
1004 	kunmap_local(dst);
1005 
1006 	update_total_sw_decomp_calls();
1007 
1008 	return ret;
1009 }
1010 
1011 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
1012 				struct acomp_req *req,
1013 				dma_addr_t *src_addr, dma_addr_t *dst_addr);
1014 
1015 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
1016 			       struct idxd_wq *wq,
1017 			       dma_addr_t src_addr, unsigned int slen,
1018 			       dma_addr_t dst_addr, unsigned int *dlen,
1019 			       u32 compression_crc);
1020 
1021 static void iaa_desc_complete(struct idxd_desc *idxd_desc,
1022 			      enum idxd_complete_type comp_type,
1023 			      bool free_desc, void *__ctx,
1024 			      u32 *status)
1025 {
1026 	struct iaa_device_compression_mode *active_compression_mode;
1027 	struct iaa_compression_ctx *compression_ctx;
1028 	struct crypto_ctx *ctx = __ctx;
1029 	struct iaa_device *iaa_device;
1030 	struct idxd_device *idxd;
1031 	struct iaa_wq *iaa_wq;
1032 	struct pci_dev *pdev;
1033 	struct device *dev;
1034 	int ret, err = 0;
1035 
1036 	compression_ctx = crypto_tfm_ctx(ctx->tfm);
1037 
1038 	iaa_wq = idxd_wq_get_private(idxd_desc->wq);
1039 	iaa_device = iaa_wq->iaa_device;
1040 	idxd = iaa_device->idxd;
1041 	pdev = idxd->pdev;
1042 	dev = &pdev->dev;
1043 
1044 	active_compression_mode = get_iaa_device_compression_mode(iaa_device,
1045 								  compression_ctx->mode);
1046 	dev_dbg(dev, "%s: compression mode %s,"
1047 		" ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
1048 		active_compression_mode->name,
1049 		ctx->src_addr, ctx->dst_addr);
1050 
1051 	ret = check_completion(dev, idxd_desc->iax_completion,
1052 			       ctx->compress, false);
1053 	if (ret) {
1054 		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
1055 		if (!ctx->compress &&
1056 		    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
1057 			pr_warn("%s: falling back to deflate-generic decompress, "
1058 				"analytics error code %x\n", __func__,
1059 				idxd_desc->iax_completion->error_code);
1060 			ret = deflate_generic_decompress(ctx->req);
1061 			if (ret) {
1062 				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
1063 					__func__, ret);
1064 				err = -EIO;
1065 				goto err;
1066 			}
1067 		} else {
1068 			err = -EIO;
1069 			goto err;
1070 		}
1071 	} else {
1072 		ctx->req->dlen = idxd_desc->iax_completion->output_size;
1073 	}
1074 
1075 	/* Update stats */
1076 	if (ctx->compress) {
1077 		update_total_comp_bytes_out(ctx->req->dlen);
1078 		update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
1079 	} else {
1080 		update_total_decomp_bytes_in(ctx->req->slen);
1081 		update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen);
1082 	}
1083 
1084 	if (ctx->compress && compression_ctx->verify_compress) {
1085 		dma_addr_t src_addr, dst_addr;
1086 		u32 compression_crc;
1087 
1088 		compression_crc = idxd_desc->iax_completion->crc;
1089 
1090 		ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
1091 		if (ret) {
1092 			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
1093 			err = -EIO;
1094 			goto out;
1095 		}
1096 
1097 		ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
1098 					  ctx->req->slen, dst_addr, &ctx->req->dlen,
1099 					  compression_crc);
1100 		if (ret) {
1101 			dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
1102 			err = -EIO;
1103 		}
1104 
1105 		dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
1106 		dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
1107 
1108 		goto out;
1109 	}
1110 err:
1111 	dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
1112 	dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
1113 out:
1114 	if (ret != 0)
1115 		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
1116 
1117 	if (ctx->req->base.complete)
1118 		acomp_request_complete(ctx->req, err);
1119 
1120 	if (free_desc)
1121 		idxd_free_desc(idxd_desc->wq, idxd_desc);
1122 	iaa_wq_put(idxd_desc->wq);
1123 }
1124 
1125 static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
1126 			struct idxd_wq *wq,
1127 			dma_addr_t src_addr, unsigned int slen,
1128 			dma_addr_t dst_addr, unsigned int *dlen,
1129 			u32 *compression_crc,
1130 			bool disable_async)
1131 {
1132 	struct iaa_device_compression_mode *active_compression_mode;
1133 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1134 	struct iaa_device *iaa_device;
1135 	struct idxd_desc *idxd_desc;
1136 	struct iax_hw_desc *desc;
1137 	struct idxd_device *idxd;
1138 	struct iaa_wq *iaa_wq;
1139 	struct pci_dev *pdev;
1140 	struct device *dev;
1141 	int ret = 0;
1142 
1143 	iaa_wq = idxd_wq_get_private(wq);
1144 	iaa_device = iaa_wq->iaa_device;
1145 	idxd = iaa_device->idxd;
1146 	pdev = idxd->pdev;
1147 	dev = &pdev->dev;
1148 
1149 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1150 
1151 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1152 	if (IS_ERR(idxd_desc)) {
1153 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1154 		dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc));
1155 		return PTR_ERR(idxd_desc);
1156 	}
1157 	desc = idxd_desc->iax_hw;
1158 
1159 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
1160 		IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC;
1161 	desc->opcode = IAX_OPCODE_COMPRESS;
1162 	desc->compr_flags = IAA_COMP_FLAGS;
1163 	desc->priv = 0;
1164 
1165 	desc->src1_addr = (u64)src_addr;
1166 	desc->src1_size = slen;
1167 	desc->dst_addr = (u64)dst_addr;
1168 	desc->max_dst_size = *dlen;
1169 	desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr;
1170 	desc->src2_size = sizeof(struct aecs_comp_table_record);
1171 	desc->completion_addr = idxd_desc->compl_dma;
1172 
1173 	if (ctx->use_irq && !disable_async) {
1174 		desc->flags |= IDXD_OP_FLAG_RCI;
1175 
1176 		idxd_desc->crypto.req = req;
1177 		idxd_desc->crypto.tfm = tfm;
1178 		idxd_desc->crypto.src_addr = src_addr;
1179 		idxd_desc->crypto.dst_addr = dst_addr;
1180 		idxd_desc->crypto.compress = true;
1181 
1182 		dev_dbg(dev, "%s use_async_irq: compression mode %s,"
1183 			" src_addr %llx, dst_addr %llx\n", __func__,
1184 			active_compression_mode->name,
1185 			src_addr, dst_addr);
1186 	} else if (ctx->async_mode && !disable_async)
1187 		req->base.data = idxd_desc;
1188 
1189 	dev_dbg(dev, "%s: compression mode %s,"
1190 		" desc->src1_addr %llx, desc->src1_size %d,"
1191 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1192 		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
1193 		active_compression_mode->name,
1194 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1195 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1196 
1197 	ret = idxd_submit_desc(wq, idxd_desc);
1198 	if (ret) {
1199 		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
1200 		goto err;
1201 	}
1202 
1203 	/* Update stats */
1204 	update_total_comp_calls();
1205 	update_wq_comp_calls(wq);
1206 
1207 	if (ctx->async_mode && !disable_async) {
1208 		ret = -EINPROGRESS;
1209 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
1210 		goto out;
1211 	}
1212 
1213 	ret = check_completion(dev, idxd_desc->iax_completion, true, false);
1214 	if (ret) {
1215 		dev_dbg(dev, "check_completion failed ret=%d\n", ret);
1216 		goto err;
1217 	}
1218 
1219 	*dlen = idxd_desc->iax_completion->output_size;
1220 
1221 	/* Update stats */
1222 	update_total_comp_bytes_out(*dlen);
1223 	update_wq_comp_bytes(wq, *dlen);
1224 
1225 	*compression_crc = idxd_desc->iax_completion->crc;
1226 
1227 	if (!ctx->async_mode || disable_async)
1228 		idxd_free_desc(wq, idxd_desc);
1229 out:
1230 	return ret;
1231 err:
1232 	idxd_free_desc(wq, idxd_desc);
1233 	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
1234 
1235 	goto out;
1236 }
1237 
1238 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
1239 				struct acomp_req *req,
1240 				dma_addr_t *src_addr, dma_addr_t *dst_addr)
1241 {
1242 	int ret = 0;
1243 	int nr_sgs;
1244 
1245 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1246 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1247 
1248 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1249 	if (nr_sgs <= 0 || nr_sgs > 1) {
1250 		dev_dbg(dev, "verify: couldn't map src sg for iaa device %d,"
1251 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1252 			iaa_wq->wq->id, ret);
1253 		ret = -EIO;
1254 		goto out;
1255 	}
1256 	*src_addr = sg_dma_address(req->src);
1257 	dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1258 		" req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs,
1259 		req->src, req->slen, sg_dma_len(req->src));
1260 
1261 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
1262 	if (nr_sgs <= 0 || nr_sgs > 1) {
1263 		dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d,"
1264 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1265 			iaa_wq->wq->id, ret);
1266 		ret = -EIO;
1267 		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1268 		goto out;
1269 	}
1270 	*dst_addr = sg_dma_address(req->dst);
1271 	dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1272 		" req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs,
1273 		req->dst, req->dlen, sg_dma_len(req->dst));
1274 out:
1275 	return ret;
1276 }
1277 
1278 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
1279 			       struct idxd_wq *wq,
1280 			       dma_addr_t src_addr, unsigned int slen,
1281 			       dma_addr_t dst_addr, unsigned int *dlen,
1282 			       u32 compression_crc)
1283 {
1284 	struct iaa_device_compression_mode *active_compression_mode;
1285 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1286 	struct iaa_device *iaa_device;
1287 	struct idxd_desc *idxd_desc;
1288 	struct iax_hw_desc *desc;
1289 	struct idxd_device *idxd;
1290 	struct iaa_wq *iaa_wq;
1291 	struct pci_dev *pdev;
1292 	struct device *dev;
1293 	int ret = 0;
1294 
1295 	iaa_wq = idxd_wq_get_private(wq);
1296 	iaa_device = iaa_wq->iaa_device;
1297 	idxd = iaa_device->idxd;
1298 	pdev = idxd->pdev;
1299 	dev = &pdev->dev;
1300 
1301 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1302 
1303 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1304 	if (IS_ERR(idxd_desc)) {
1305 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1306 		dev_dbg(dev, "iaa compress failed: ret=%ld\n",
1307 			PTR_ERR(idxd_desc));
1308 		return PTR_ERR(idxd_desc);
1309 	}
1310 	desc = idxd_desc->iax_hw;
1311 
1312 	/* Verify (optional) - decompress and check crc, suppress dest write */
1313 
1314 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
1315 	desc->opcode = IAX_OPCODE_DECOMPRESS;
1316 	desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT;
1317 	desc->priv = 0;
1318 
1319 	desc->src1_addr = (u64)dst_addr;
1320 	desc->src1_size = *dlen;
1321 	desc->dst_addr = (u64)src_addr;
1322 	desc->max_dst_size = slen;
1323 	desc->completion_addr = idxd_desc->compl_dma;
1324 
1325 	dev_dbg(dev, "(verify) compression mode %s,"
1326 		" desc->src1_addr %llx, desc->src1_size %d,"
1327 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1328 		" desc->src2_addr %llx, desc->src2_size %d\n",
1329 		active_compression_mode->name,
1330 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1331 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1332 
1333 	ret = idxd_submit_desc(wq, idxd_desc);
1334 	if (ret) {
1335 		dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret);
1336 		goto err;
1337 	}
1338 
1339 	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
1340 	if (ret) {
1341 		dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret);
1342 		goto err;
1343 	}
1344 
1345 	if (compression_crc != idxd_desc->iax_completion->crc) {
1346 		ret = -EINVAL;
1347 		dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
1348 			" comp=0x%x, decomp=0x%x\n", compression_crc,
1349 			idxd_desc->iax_completion->crc);
1350 		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
1351 			       8, 1, idxd_desc->iax_completion, 64, 0);
1352 		goto err;
1353 	}
1354 
1355 	idxd_free_desc(wq, idxd_desc);
1356 out:
1357 	return ret;
1358 err:
1359 	idxd_free_desc(wq, idxd_desc);
1360 	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
1361 
1362 	goto out;
1363 }
1364 
1365 static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
1366 			  struct idxd_wq *wq,
1367 			  dma_addr_t src_addr, unsigned int slen,
1368 			  dma_addr_t dst_addr, unsigned int *dlen,
1369 			  bool disable_async)
1370 {
1371 	struct iaa_device_compression_mode *active_compression_mode;
1372 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1373 	struct iaa_device *iaa_device;
1374 	struct idxd_desc *idxd_desc;
1375 	struct iax_hw_desc *desc;
1376 	struct idxd_device *idxd;
1377 	struct iaa_wq *iaa_wq;
1378 	struct pci_dev *pdev;
1379 	struct device *dev;
1380 	int ret = 0;
1381 
1382 	iaa_wq = idxd_wq_get_private(wq);
1383 	iaa_device = iaa_wq->iaa_device;
1384 	idxd = iaa_device->idxd;
1385 	pdev = idxd->pdev;
1386 	dev = &pdev->dev;
1387 
1388 	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
1389 
1390 	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
1391 	if (IS_ERR(idxd_desc)) {
1392 		dev_dbg(dev, "idxd descriptor allocation failed\n");
1393 		dev_dbg(dev, "iaa decompress failed: ret=%ld\n",
1394 			PTR_ERR(idxd_desc));
1395 		return PTR_ERR(idxd_desc);
1396 	}
1397 	desc = idxd_desc->iax_hw;
1398 
1399 	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
1400 	desc->opcode = IAX_OPCODE_DECOMPRESS;
1401 	desc->max_dst_size = PAGE_SIZE;
1402 	desc->decompr_flags = IAA_DECOMP_FLAGS;
1403 	desc->priv = 0;
1404 
1405 	desc->src1_addr = (u64)src_addr;
1406 	desc->dst_addr = (u64)dst_addr;
1407 	desc->max_dst_size = *dlen;
1408 	desc->src1_size = slen;
1409 	desc->completion_addr = idxd_desc->compl_dma;
1410 
1411 	if (ctx->use_irq && !disable_async) {
1412 		desc->flags |= IDXD_OP_FLAG_RCI;
1413 
1414 		idxd_desc->crypto.req = req;
1415 		idxd_desc->crypto.tfm = tfm;
1416 		idxd_desc->crypto.src_addr = src_addr;
1417 		idxd_desc->crypto.dst_addr = dst_addr;
1418 		idxd_desc->crypto.compress = false;
1419 
1420 		dev_dbg(dev, "%s: use_async_irq compression mode %s,"
1421 			" src_addr %llx, dst_addr %llx\n", __func__,
1422 			active_compression_mode->name,
1423 			src_addr, dst_addr);
1424 	} else if (ctx->async_mode && !disable_async)
1425 		req->base.data = idxd_desc;
1426 
1427 	dev_dbg(dev, "%s: decompression mode %s,"
1428 		" desc->src1_addr %llx, desc->src1_size %d,"
1429 		" desc->dst_addr %llx, desc->max_dst_size %d,"
1430 		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
1431 		active_compression_mode->name,
1432 		desc->src1_addr, desc->src1_size, desc->dst_addr,
1433 		desc->max_dst_size, desc->src2_addr, desc->src2_size);
1434 
1435 	ret = idxd_submit_desc(wq, idxd_desc);
1436 	if (ret) {
1437 		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
1438 		goto err;
1439 	}
1440 
1441 	/* Update stats */
1442 	update_total_decomp_calls();
1443 	update_wq_decomp_calls(wq);
1444 
1445 	if (ctx->async_mode && !disable_async) {
1446 		ret = -EINPROGRESS;
1447 		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
1448 		goto out;
1449 	}
1450 
1451 	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
1452 	if (ret) {
1453 		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
1454 		if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
1455 			pr_warn("%s: falling back to deflate-generic decompress, "
1456 				"analytics error code %x\n", __func__,
1457 				idxd_desc->iax_completion->error_code);
1458 			ret = deflate_generic_decompress(req);
1459 			if (ret) {
1460 				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
1461 					__func__, ret);
1462 				goto err;
1463 			}
1464 		} else {
1465 			goto err;
1466 		}
1467 	} else {
1468 		req->dlen = idxd_desc->iax_completion->output_size;
1469 	}
1470 
1471 	*dlen = req->dlen;
1472 
1473 	if (!ctx->async_mode || disable_async)
1474 		idxd_free_desc(wq, idxd_desc);
1475 
1476 	/* Update stats */
1477 	update_total_decomp_bytes_in(slen);
1478 	update_wq_decomp_bytes(wq, slen);
1479 out:
1480 	return ret;
1481 err:
1482 	idxd_free_desc(wq, idxd_desc);
1483 	dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret);
1484 
1485 	goto out;
1486 }
1487 
1488 static int iaa_comp_acompress(struct acomp_req *req)
1489 {
1490 	struct iaa_compression_ctx *compression_ctx;
1491 	struct crypto_tfm *tfm = req->base.tfm;
1492 	dma_addr_t src_addr, dst_addr;
1493 	bool disable_async = false;
1494 	int nr_sgs, cpu, ret = 0;
1495 	struct iaa_wq *iaa_wq;
1496 	u32 compression_crc;
1497 	struct idxd_wq *wq;
1498 	struct device *dev;
1499 	int order = -1;
1500 
1501 	compression_ctx = crypto_tfm_ctx(tfm);
1502 
1503 	if (!iaa_crypto_enabled) {
1504 		pr_debug("iaa_crypto disabled, not compressing\n");
1505 		return -ENODEV;
1506 	}
1507 
1508 	if (!req->src || !req->slen) {
1509 		pr_debug("invalid src, not compressing\n");
1510 		return -EINVAL;
1511 	}
1512 
1513 	cpu = get_cpu();
1514 	wq = wq_table_next_wq(cpu);
1515 	put_cpu();
1516 	if (!wq) {
1517 		pr_debug("no wq configured for cpu=%d\n", cpu);
1518 		return -ENODEV;
1519 	}
1520 
1521 	ret = iaa_wq_get(wq);
1522 	if (ret) {
1523 		pr_debug("no wq available for cpu=%d\n", cpu);
1524 		return -ENODEV;
1525 	}
1526 
1527 	iaa_wq = idxd_wq_get_private(wq);
1528 
1529 	if (!req->dst) {
1530 		gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
1531 
1532 		/* incompressible data will always be < 2 * slen */
1533 		req->dlen = 2 * req->slen;
1534 		order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
1535 		req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
1536 		if (!req->dst) {
1537 			ret = -ENOMEM;
1538 			order = -1;
1539 			goto out;
1540 		}
1541 		disable_async = true;
1542 	}
1543 
1544 	dev = &wq->idxd->pdev->dev;
1545 
1546 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1547 	if (nr_sgs <= 0 || nr_sgs > 1) {
1548 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1549 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1550 			iaa_wq->wq->id, ret);
1551 		ret = -EIO;
1552 		goto out;
1553 	}
1554 	src_addr = sg_dma_address(req->src);
1555 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1556 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1557 		req->src, req->slen, sg_dma_len(req->src));
1558 
1559 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1560 	if (nr_sgs <= 0 || nr_sgs > 1) {
1561 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1562 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1563 			iaa_wq->wq->id, ret);
1564 		ret = -EIO;
1565 		goto err_map_dst;
1566 	}
1567 	dst_addr = sg_dma_address(req->dst);
1568 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1569 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1570 		req->dst, req->dlen, sg_dma_len(req->dst));
1571 
1572 	ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
1573 			   &req->dlen, &compression_crc, disable_async);
1574 	if (ret == -EINPROGRESS)
1575 		return ret;
1576 
1577 	if (!ret && compression_ctx->verify_compress) {
1578 		ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr);
1579 		if (ret) {
1580 			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
1581 			goto out;
1582 		}
1583 
1584 		ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
1585 					  dst_addr, &req->dlen, compression_crc);
1586 		if (ret)
1587 			dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
1588 
1589 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
1590 		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
1591 
1592 		goto out;
1593 	}
1594 
1595 	if (ret)
1596 		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
1597 
1598 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1599 err_map_dst:
1600 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1601 out:
1602 	iaa_wq_put(wq);
1603 
1604 	if (order >= 0)
1605 		sgl_free_order(req->dst, order);
1606 
1607 	return ret;
1608 }
1609 
1610 static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
1611 {
1612 	gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1613 		GFP_KERNEL : GFP_ATOMIC;
1614 	struct crypto_tfm *tfm = req->base.tfm;
1615 	dma_addr_t src_addr, dst_addr;
1616 	int nr_sgs, cpu, ret = 0;
1617 	struct iaa_wq *iaa_wq;
1618 	struct device *dev;
1619 	struct idxd_wq *wq;
1620 	int order = -1;
1621 
1622 	cpu = get_cpu();
1623 	wq = wq_table_next_wq(cpu);
1624 	put_cpu();
1625 	if (!wq) {
1626 		pr_debug("no wq configured for cpu=%d\n", cpu);
1627 		return -ENODEV;
1628 	}
1629 
1630 	ret = iaa_wq_get(wq);
1631 	if (ret) {
1632 		pr_debug("no wq available for cpu=%d\n", cpu);
1633 		return -ENODEV;
1634 	}
1635 
1636 	iaa_wq = idxd_wq_get_private(wq);
1637 
1638 	dev = &wq->idxd->pdev->dev;
1639 
1640 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1641 	if (nr_sgs <= 0 || nr_sgs > 1) {
1642 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1643 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1644 			iaa_wq->wq->id, ret);
1645 		ret = -EIO;
1646 		goto out;
1647 	}
1648 	src_addr = sg_dma_address(req->src);
1649 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1650 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1651 		req->src, req->slen, sg_dma_len(req->src));
1652 
1653 	req->dlen = 4 * req->slen; /* start with ~avg comp ratio */
1654 alloc_dest:
1655 	order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
1656 	req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
1657 	if (!req->dst) {
1658 		ret = -ENOMEM;
1659 		order = -1;
1660 		goto out;
1661 	}
1662 
1663 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1664 	if (nr_sgs <= 0 || nr_sgs > 1) {
1665 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1666 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1667 			iaa_wq->wq->id, ret);
1668 		ret = -EIO;
1669 		goto err_map_dst;
1670 	}
1671 
1672 	dst_addr = sg_dma_address(req->dst);
1673 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1674 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1675 		req->dst, req->dlen, sg_dma_len(req->dst));
1676 	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
1677 			     dst_addr, &req->dlen, true);
1678 	if (ret == -EOVERFLOW) {
1679 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1680 		req->dlen *= 2;
1681 		if (req->dlen > CRYPTO_ACOMP_DST_MAX)
1682 			goto err_map_dst;
1683 		goto alloc_dest;
1684 	}
1685 
1686 	if (ret != 0)
1687 		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
1688 
1689 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1690 err_map_dst:
1691 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1692 out:
1693 	iaa_wq_put(wq);
1694 
1695 	if (order >= 0)
1696 		sgl_free_order(req->dst, order);
1697 
1698 	return ret;
1699 }
1700 
1701 static int iaa_comp_adecompress(struct acomp_req *req)
1702 {
1703 	struct crypto_tfm *tfm = req->base.tfm;
1704 	dma_addr_t src_addr, dst_addr;
1705 	int nr_sgs, cpu, ret = 0;
1706 	struct iaa_wq *iaa_wq;
1707 	struct device *dev;
1708 	struct idxd_wq *wq;
1709 
1710 	if (!iaa_crypto_enabled) {
1711 		pr_debug("iaa_crypto disabled, not decompressing\n");
1712 		return -ENODEV;
1713 	}
1714 
1715 	if (!req->src || !req->slen) {
1716 		pr_debug("invalid src, not decompressing\n");
1717 		return -EINVAL;
1718 	}
1719 
1720 	if (!req->dst)
1721 		return iaa_comp_adecompress_alloc_dest(req);
1722 
1723 	cpu = get_cpu();
1724 	wq = wq_table_next_wq(cpu);
1725 	put_cpu();
1726 	if (!wq) {
1727 		pr_debug("no wq configured for cpu=%d\n", cpu);
1728 		return -ENODEV;
1729 	}
1730 
1731 	ret = iaa_wq_get(wq);
1732 	if (ret) {
1733 		pr_debug("no wq available for cpu=%d\n", cpu);
1734 		return -ENODEV;
1735 	}
1736 
1737 	iaa_wq = idxd_wq_get_private(wq);
1738 
1739 	dev = &wq->idxd->pdev->dev;
1740 
1741 	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1742 	if (nr_sgs <= 0 || nr_sgs > 1) {
1743 		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
1744 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1745 			iaa_wq->wq->id, ret);
1746 		ret = -EIO;
1747 		goto out;
1748 	}
1749 	src_addr = sg_dma_address(req->src);
1750 	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
1751 		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
1752 		req->src, req->slen, sg_dma_len(req->src));
1753 
1754 	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1755 	if (nr_sgs <= 0 || nr_sgs > 1) {
1756 		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
1757 			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
1758 			iaa_wq->wq->id, ret);
1759 		ret = -EIO;
1760 		goto err_map_dst;
1761 	}
1762 	dst_addr = sg_dma_address(req->dst);
1763 	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
1764 		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
1765 		req->dst, req->dlen, sg_dma_len(req->dst));
1766 
1767 	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
1768 			     dst_addr, &req->dlen, false);
1769 	if (ret == -EINPROGRESS)
1770 		return ret;
1771 
1772 	if (ret != 0)
1773 		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
1774 
1775 	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
1776 err_map_dst:
1777 	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
1778 out:
1779 	iaa_wq_put(wq);
1780 
1781 	return ret;
1782 }
1783 
1784 static void compression_ctx_init(struct iaa_compression_ctx *ctx)
1785 {
1786 	ctx->verify_compress = iaa_verify_compress;
1787 	ctx->async_mode = async_mode;
1788 	ctx->use_irq = use_irq;
1789 }
1790 
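/*
 * acomp init callback for the fixed-mode deflate algorithm: initialize
 * the context from the driver defaults and select the fixed Huffman
 * tables (IAA_MODE_FIXED).
 */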
1791 static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
1792 {
1793 	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
1794 	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
1795 
1796 	compression_ctx_init(ctx);
1797 
1798 	ctx->mode = IAA_MODE_FIXED;
1799 
1800 	return 0;
1801 }
1802 
1803 static void dst_free(struct scatterlist *sgl)
1804 {
1805 	/*
1806 	 * Called for the req->dst == NULL case; nothing to do here,
1807 	 * since the buffer is freed elsewhere with sgl_free_order().
1808 	 */
1809 }
1810 
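/*
 * The "deflate" acomp algorithm backed by IAA fixed-mode compression.
 * The relatively high cra_priority (IAA_ALG_PRIORITY) is intended to
 * make it preferred over software deflate implementations.
 */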
1811 static struct acomp_alg iaa_acomp_fixed_deflate = {
1812 	.init			= iaa_comp_init_fixed,
1813 	.compress		= iaa_comp_acompress,
1814 	.decompress		= iaa_comp_adecompress,
1815 	.dst_free               = dst_free,
1816 	.base			= {
1817 		.cra_name		= "deflate",
1818 		.cra_driver_name	= "deflate-iaa",
1819 		.cra_flags		= CRYPTO_ALG_ASYNC,
1820 		.cra_ctxsize		= sizeof(struct iaa_compression_ctx),
1821 		.cra_module		= THIS_MODULE,
1822 		.cra_priority		= IAA_ALG_PRIORITY,
1823 	}
1824 };
1825 
1826 static int iaa_register_compression_device(void)
1827 {
1828 	int ret;
1829 
1830 	ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
1831 	if (ret) {
1832 		pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
1833 		goto out;
1834 	}
1835 
1836 	iaa_crypto_registered = true;
1837 out:
1838 	return ret;
1839 }
1840 
1841 static int iaa_unregister_compression_device(void)
1842 {
1843 	if (iaa_crypto_registered)
1844 		crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
1845 
1846 	return 0;
1847 }
1848 
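/*
 * idxd sub-driver probe: called when an enabled IAA (IDXD_TYPE_IAX) wq
 * whose driver_name matches this driver is bound to it.  The wq is
 * enabled as a kernel wq, saved on the iaa_devices list and the
 * per-cpu wq table is rebalanced.  When the first wq arrives, the
 * deflate-iaa algorithm is registered and crypto use is enabled.
 */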
1849 static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
1850 {
1851 	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
1852 	struct idxd_device *idxd = wq->idxd;
1853 	struct idxd_driver_data *data = idxd->data;
1854 	struct device *dev = &idxd_dev->conf_dev;
1855 	bool first_wq = false;
1856 	int ret = 0;
1857 
1858 	if (idxd->state != IDXD_DEV_ENABLED)
1859 		return -ENXIO;
1860 
1861 	if (data->type != IDXD_TYPE_IAX)
1862 		return -ENODEV;
1863 
1864 	mutex_lock(&wq->wq_lock);
1865 
1866 	if (idxd_wq_get_private(wq)) {
1867 		mutex_unlock(&wq->wq_lock);
1868 		return -EBUSY;
1869 	}
1870 
1871 	if (!idxd_wq_driver_name_match(wq, dev)) {
1872 		dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n",
1873 			idxd->id, wq->id, wq->driver_name, dev->driver->name);
1874 		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
1875 		ret = -ENODEV;
1876 		goto err;
1877 	}
1878 
1879 	wq->type = IDXD_WQT_KERNEL;
1880 
1881 	ret = idxd_drv_enable_wq(wq);
1882 	if (ret < 0) {
1883 		dev_dbg(dev, "enable wq %d.%d failed: %d\n",
1884 			idxd->id, wq->id, ret);
1885 		ret = -ENXIO;
1886 		goto err;
1887 	}
1888 
1889 	mutex_lock(&iaa_devices_lock);
1890 
1891 	if (list_empty(&iaa_devices)) {
1892 		ret = alloc_wq_table(wq->idxd->max_wqs);
1893 		if (ret)
1894 			goto err_alloc;
1895 		first_wq = true;
1896 	}
1897 
1898 	ret = save_iaa_wq(wq);
1899 	if (ret)
1900 		goto err_save;
1901 
1902 	rebalance_wq_table();
1903 
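	/*
	 * Registering the algorithm only makes sense once at least one
	 * wq exists; do it when the first wq shows up and take a module
	 * reference so the module stays pinned while the algorithm is
	 * registered.
	 */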
1904 	if (first_wq) {
1905 		iaa_crypto_enabled = true;
1906 		ret = iaa_register_compression_device();
1907 		if (ret != 0) {
1908 			iaa_crypto_enabled = false;
1909 			dev_dbg(dev, "IAA compression device registration failed\n");
1910 			goto err_register;
1911 		}
1912 		try_module_get(THIS_MODULE);
1913 
1914 		pr_info("iaa_crypto now ENABLED\n");
1915 	}
1916 
1917 	mutex_unlock(&iaa_devices_lock);
1918 out:
1919 	mutex_unlock(&wq->wq_lock);
1920 
1921 	return ret;
1922 
1923 err_register:
1924 	remove_iaa_wq(wq);
1925 	free_iaa_wq(idxd_wq_get_private(wq));
1926 err_save:
1927 	if (first_wq)
1928 		free_wq_table();
1929 err_alloc:
1930 	mutex_unlock(&iaa_devices_lock);
1931 	idxd_drv_disable_wq(wq);
1932 err:
1933 	wq->type = IDXD_WQT_NONE;
1934 
1935 	goto out;
1936 }
1937 
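/*
 * idxd sub-driver remove: quiesce the wq, drop it from the iaa_devices
 * list and rebalance the per-cpu wq table.  When the last IAA wq goes
 * away, crypto use is disabled, the wq table is freed and the module
 * reference taken at probe time is dropped.
 */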
1938 static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
1939 {
1940 	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
1941 	struct idxd_device *idxd = wq->idxd;
1942 	struct iaa_wq *iaa_wq;
1943 	bool free = false;
1944 
1945 	idxd_wq_quiesce(wq);
1946 
1947 	mutex_lock(&wq->wq_lock);
1948 	mutex_lock(&iaa_devices_lock);
1949 
1950 	remove_iaa_wq(wq);
1951 
1952 	spin_lock(&idxd->dev_lock);
1953 	iaa_wq = idxd_wq_get_private(wq);
1954 	if (!iaa_wq) {
1955 		spin_unlock(&idxd->dev_lock);
1956 		pr_err("%s: no iaa_wq available to remove\n", __func__);
1957 		goto out;
1958 	}
1959 
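	/*
	 * If the wq still has outstanding users, just mark it for
	 * removal and let the last reference drop free it; otherwise
	 * detach and free it here.
	 */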
1960 	if (iaa_wq->ref) {
1961 		iaa_wq->remove = true;
1962 	} else {
1963 		wq = iaa_wq->wq;
1964 		idxd_wq_set_private(wq, NULL);
1965 		free = true;
1966 	}
1967 	spin_unlock(&idxd->dev_lock);
1968 	if (free) {
1969 		__free_iaa_wq(iaa_wq);
1970 		kfree(iaa_wq);
1971 	}
1972 
1973 	idxd_drv_disable_wq(wq);
1974 	rebalance_wq_table();
1975 
1976 	if (nr_iaa == 0) {
1977 		iaa_crypto_enabled = false;
1978 		free_wq_table();
1979 		module_put(THIS_MODULE);
1980 
1981 		pr_info("iaa_crypto now DISABLED\n");
1982 	}
1983 out:
1984 	mutex_unlock(&iaa_devices_lock);
1985 	mutex_unlock(&wq->wq_lock);
1986 }
1987 
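/*
 * This sub-driver binds to idxd wq devices only; descriptor
 * completions are routed to iaa_desc_complete().
 */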
1988 static enum idxd_dev_type dev_types[] = {
1989 	IDXD_DEV_WQ,
1990 	IDXD_DEV_NONE,
1991 };
1992 
1993 static struct idxd_device_driver iaa_crypto_driver = {
1994 	.probe = iaa_crypto_probe,
1995 	.remove = iaa_crypto_remove,
1996 	.name = IDXD_SUBDRIVER_NAME,
1997 	.type = dev_types,
1998 	.desc_complete = iaa_desc_complete,
1999 };
2000 
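/*
 * Module init: record the cpu/node topology used to spread wqs across
 * cpus, allocate the generic software deflate tfm the driver relies
 * on, set up the fixed-mode AECS, then register the idxd sub-driver
 * along with its sysfs attributes and debugfs stats.
 */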
2001 static int __init iaa_crypto_init_module(void)
2002 {
2003 	int ret = 0;
2004 	int node;
2005 
2006 	nr_cpus = num_possible_cpus();
2007 	for_each_node_with_cpus(node)
2008 		nr_nodes++;
2009 	if (!nr_nodes) {
2010 		pr_err("IAA couldn't find any nodes with cpus\n");
2011 		return -ENODEV;
2012 	}
2013 	nr_cpus_per_node = nr_cpus / nr_nodes;
2014 
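	/*
	 * Fail module init if the generic software deflate tfm cannot
	 * be allocated.
	 */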
2015 	if (crypto_has_comp("deflate-generic", 0, 0))
2016 		deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
2017 
2018 	if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
2019 		pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
2020 		       "deflate-generic", PTR_ERR(deflate_generic_tfm));
2021 		return -ENOMEM;
2022 	}
2023 
2024 	ret = iaa_aecs_init_fixed();
2025 	if (ret < 0) {
2026 		pr_debug("IAA fixed compression mode init failed\n");
2027 		goto err_aecs_init;
2028 	}
2029 
2030 	ret = idxd_driver_register(&iaa_crypto_driver);
2031 	if (ret) {
2032 		pr_debug("IAA wq sub-driver registration failed\n");
2033 		goto err_driver_reg;
2034 	}
2035 
2036 	ret = driver_create_file(&iaa_crypto_driver.drv,
2037 				 &driver_attr_verify_compress);
2038 	if (ret) {
2039 		pr_debug("IAA verify_compress attr creation failed\n");
2040 		goto err_verify_attr_create;
2041 	}
2042 
2043 	ret = driver_create_file(&iaa_crypto_driver.drv,
2044 				 &driver_attr_sync_mode);
2045 	if (ret) {
2046 		pr_debug("IAA sync mode attr creation failed\n");
2047 		goto err_sync_attr_create;
2048 	}
2049 
2050 	if (iaa_crypto_debugfs_init())
2051 		pr_warn("debugfs init failed, stats not available\n");
2052 
2053 	pr_debug("initialized\n");
2054 out:
2055 	return ret;
2056 
2057 err_sync_attr_create:
2058 	driver_remove_file(&iaa_crypto_driver.drv,
2059 			   &driver_attr_verify_compress);
2060 err_verify_attr_create:
2061 	idxd_driver_unregister(&iaa_crypto_driver);
2062 err_driver_reg:
2063 	iaa_aecs_cleanup_fixed();
2064 err_aecs_init:
2065 	crypto_free_comp(deflate_generic_tfm);
2066 
2067 	goto out;
2068 }
2069 
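/*
 * Module exit: unregister the compression device if it is still
 * registered, then tear everything down in the reverse order of
 * iaa_crypto_init_module().
 */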
2070 static void __exit iaa_crypto_cleanup_module(void)
2071 {
2072 	if (iaa_unregister_compression_device())
2073 		pr_debug("IAA compression device unregister failed\n");
2074 
2075 	iaa_crypto_debugfs_cleanup();
2076 	driver_remove_file(&iaa_crypto_driver.drv,
2077 			   &driver_attr_sync_mode);
2078 	driver_remove_file(&iaa_crypto_driver.drv,
2079 			   &driver_attr_verify_compress);
2080 	idxd_driver_unregister(&iaa_crypto_driver);
2081 	iaa_aecs_cleanup_fixed();
2082 	crypto_free_comp(deflate_generic_tfm);
2083 
2084 	pr_debug("cleaned up\n");
2085 }
2086 
2087 MODULE_IMPORT_NS(IDXD);
2088 MODULE_LICENSE("GPL");
2089 MODULE_ALIAS_IDXD_DEVICE(0);
2090 MODULE_AUTHOR("Intel Corporation");
2091 MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver");
2092 
2093 module_init(iaa_crypto_init_module);
2094 module_exit(iaa_crypto_cleanup_module);
2095