1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2021 Intel Corporation. All rights rsvd. */ 3 4 #include <linux/init.h> 5 #include <linux/kernel.h> 6 #include <linux/module.h> 7 #include <linux/pci.h> 8 #include <linux/device.h> 9 #include <linux/iommu.h> 10 #include <uapi/linux/idxd.h> 11 #include <linux/highmem.h> 12 #include <linux/sched/smt.h> 13 #include <crypto/internal/acompress.h> 14 15 #include "idxd.h" 16 #include "iaa_crypto.h" 17 #include "iaa_crypto_stats.h" 18 19 #ifdef pr_fmt 20 #undef pr_fmt 21 #endif 22 23 #define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt 24 25 #define IAA_ALG_PRIORITY 300 26 27 /* number of iaa instances probed */ 28 static unsigned int nr_iaa; 29 static unsigned int nr_cpus; 30 static unsigned int nr_nodes; 31 static unsigned int nr_cpus_per_node; 32 33 /* Number of physical cpus sharing each iaa instance */ 34 static unsigned int cpus_per_iaa; 35 36 /* Per-cpu lookup table for balanced wqs */ 37 static struct wq_table_entry __percpu *wq_table; 38 39 static struct idxd_wq *wq_table_next_wq(int cpu) 40 { 41 struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); 42 43 if (++entry->cur_wq >= entry->n_wqs) 44 entry->cur_wq = 0; 45 46 if (!entry->wqs[entry->cur_wq]) 47 return NULL; 48 49 pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__, 50 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id, 51 entry->wqs[entry->cur_wq]->id, cpu); 52 53 return entry->wqs[entry->cur_wq]; 54 } 55 56 static void wq_table_add(int cpu, struct idxd_wq *wq) 57 { 58 struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); 59 60 if (WARN_ON(entry->n_wqs == entry->max_wqs)) 61 return; 62 63 entry->wqs[entry->n_wqs++] = wq; 64 65 pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__, 66 entry->wqs[entry->n_wqs - 1]->idxd->id, 67 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu); 68 } 69 70 static void wq_table_free_entry(int cpu) 71 { 72 struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); 73 74 kfree(entry->wqs); 75 memset(entry, 0, sizeof(*entry)); 76 } 77 78 static void wq_table_clear_entry(int cpu) 79 { 80 struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu); 81 82 entry->n_wqs = 0; 83 entry->cur_wq = 0; 84 memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); 85 } 86 87 LIST_HEAD(iaa_devices); 88 DEFINE_MUTEX(iaa_devices_lock); 89 90 /* If enabled, IAA hw crypto algos are registered, unavailable otherwise */ 91 static bool iaa_crypto_enabled; 92 static bool iaa_crypto_registered; 93 94 /* Verify results of IAA compress or not */ 95 static bool iaa_verify_compress = true; 96 97 static ssize_t verify_compress_show(struct device_driver *driver, char *buf) 98 { 99 return sprintf(buf, "%d\n", iaa_verify_compress); 100 } 101 102 static ssize_t verify_compress_store(struct device_driver *driver, 103 const char *buf, size_t count) 104 { 105 int ret = -EBUSY; 106 107 mutex_lock(&iaa_devices_lock); 108 109 if (iaa_crypto_enabled) 110 goto out; 111 112 ret = kstrtobool(buf, &iaa_verify_compress); 113 if (ret) 114 goto out; 115 116 ret = count; 117 out: 118 mutex_unlock(&iaa_devices_lock); 119 120 return ret; 121 } 122 static DRIVER_ATTR_RW(verify_compress); 123 124 /* 125 * The iaa crypto driver supports three 'sync' methods determining how 126 * compressions and decompressions are performed: 127 * 128 * - sync: the compression or decompression completes before 129 * returning. 
This is the mode used by the async crypto 130 * interface when the sync mode is set to 'sync' and by 131 * the sync crypto interface regardless of setting. 132 * 133 * - async: the compression or decompression is submitted and returns 134 * immediately. Completion interrupts are not used so 135 * the caller is responsible for polling the descriptor 136 * for completion. This mode is applicable to only the 137 * async crypto interface and is ignored for anything 138 * else. 139 * 140 * - async_irq: the compression or decompression is submitted and 141 * returns immediately. Completion interrupts are 142 * enabled so the caller can wait for the completion and 143 * yield to other threads. When the compression or 144 * decompression completes, the completion is signaled 145 * and the caller awakened. This mode is applicable to 146 * only the async crypto interface and is ignored for 147 * anything else. 148 * 149 * These modes can be set using the iaa_crypto sync_mode driver 150 * attribute. 151 */ 152 153 /* Use async mode */ 154 static bool async_mode; 155 /* Use interrupts */ 156 static bool use_irq; 157 158 /** 159 * set_iaa_sync_mode - Set IAA sync mode 160 * @name: The name of the sync mode 161 * 162 * Make the IAA sync mode named @name the current sync mode used by 163 * compression/decompression. 164 */ 165 166 static int set_iaa_sync_mode(const char *name) 167 { 168 int ret = 0; 169 170 if (sysfs_streq(name, "sync")) { 171 async_mode = false; 172 use_irq = false; 173 } else if (sysfs_streq(name, "async")) { 174 async_mode = false; 175 use_irq = false; 176 } else if (sysfs_streq(name, "async_irq")) { 177 async_mode = true; 178 use_irq = true; 179 } else { 180 ret = -EINVAL; 181 } 182 183 return ret; 184 } 185 186 static ssize_t sync_mode_show(struct device_driver *driver, char *buf) 187 { 188 int ret = 0; 189 190 if (!async_mode && !use_irq) 191 ret = sprintf(buf, "%s\n", "sync"); 192 else if (async_mode && !use_irq) 193 ret = sprintf(buf, "%s\n", "async"); 194 else if (async_mode && use_irq) 195 ret = sprintf(buf, "%s\n", "async_irq"); 196 197 return ret; 198 } 199 200 static ssize_t sync_mode_store(struct device_driver *driver, 201 const char *buf, size_t count) 202 { 203 int ret = -EBUSY; 204 205 mutex_lock(&iaa_devices_lock); 206 207 if (iaa_crypto_enabled) 208 goto out; 209 210 ret = set_iaa_sync_mode(buf); 211 if (ret == 0) 212 ret = count; 213 out: 214 mutex_unlock(&iaa_devices_lock); 215 216 return ret; 217 } 218 static DRIVER_ATTR_RW(sync_mode); 219 220 static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX]; 221 222 static int find_empty_iaa_compression_mode(void) 223 { 224 int i = -EINVAL; 225 226 for (i = 0; i < IAA_COMP_MODES_MAX; i++) { 227 if (iaa_compression_modes[i]) 228 continue; 229 break; 230 } 231 232 return i; 233 } 234 235 static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx) 236 { 237 struct iaa_compression_mode *mode; 238 int i; 239 240 for (i = 0; i < IAA_COMP_MODES_MAX; i++) { 241 mode = iaa_compression_modes[i]; 242 if (!mode) 243 continue; 244 245 if (!strcmp(mode->name, name)) { 246 *idx = i; 247 return iaa_compression_modes[i]; 248 } 249 } 250 251 return NULL; 252 } 253 254 static void free_iaa_compression_mode(struct iaa_compression_mode *mode) 255 { 256 kfree(mode->name); 257 kfree(mode->ll_table); 258 kfree(mode->d_table); 259 260 kfree(mode); 261 } 262 263 /* 264 * IAA Compression modes are defined by an ll_table and a d_table. 
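 * (The ll and d tables are the DEFLATE literal/length and distance
 * Huffman code tables; init_device_compression_mode() below copies
 * them into each device's AECS before use.)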
 * These tables are typically generated and captured using statistics
 * collected from running actual compress/decompress workloads.
 *
 * A module or other kernel code can add and remove compression modes
 * with a given name using the exported @add_iaa_compression_mode()
 * and @remove_iaa_compression_mode() functions.
 *
 * When a new compression mode is added, the tables are saved in a
 * global compression mode list. When IAA devices are added, a
 * per-IAA device dma mapping is created for each IAA device, for each
 * compression mode. These are the tables used to do the actual
 * compression/decompression and are unmapped if/when the devices are
 * removed. Currently, compression modes must be added before any
 * device is added, and removed after all devices have been removed.
 */

/**
 * remove_iaa_compression_mode - Remove an IAA compression mode
 * @name: The name the compression mode will be known as
 *
 * Remove the IAA compression mode named @name.
 */
void remove_iaa_compression_mode(const char *name)
{
	struct iaa_compression_mode *mode;
	int idx;

	mutex_lock(&iaa_devices_lock);

	if (!list_empty(&iaa_devices))
		goto out;

	mode = find_iaa_compression_mode(name, &idx);
	if (mode) {
		free_iaa_compression_mode(mode);
		iaa_compression_modes[idx] = NULL;
	}
out:
	mutex_unlock(&iaa_devices_lock);
}
EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);

/**
 * add_iaa_compression_mode - Add an IAA compression mode
 * @name: The name the compression mode will be known as
 * @ll_table: The ll table
 * @ll_table_size: The ll table size in bytes
 * @d_table: The d table
 * @d_table_size: The d table size in bytes
 * @init: Optional callback function to init the compression mode data
 * @free: Optional callback function to free the compression mode data
 *
 * Add a new IAA compression mode named @name.
 *
 * Returns 0 if successful, errcode otherwise.
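 *
 * A minimal usage sketch, assuming a hypothetical caller with its own
 * "canned" tables (not defined in this file), added before any IAA
 * device is probed:
 *
 *	ret = add_iaa_compression_mode("canned",
 *				       canned_ll_table,
 *				       sizeof(canned_ll_table),
 *				       canned_d_table,
 *				       sizeof(canned_d_table),
 *				       NULL, NULL);
 *	if (ret)
 *		pr_err("couldn't add canned compression mode\n");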
320 */ 321 int add_iaa_compression_mode(const char *name, 322 const u32 *ll_table, 323 int ll_table_size, 324 const u32 *d_table, 325 int d_table_size, 326 iaa_dev_comp_init_fn_t init, 327 iaa_dev_comp_free_fn_t free) 328 { 329 struct iaa_compression_mode *mode; 330 int idx, ret = -ENOMEM; 331 332 mutex_lock(&iaa_devices_lock); 333 334 if (!list_empty(&iaa_devices)) { 335 ret = -EBUSY; 336 goto out; 337 } 338 339 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 340 if (!mode) 341 goto out; 342 343 mode->name = kstrdup(name, GFP_KERNEL); 344 if (!mode->name) 345 goto free; 346 347 if (ll_table) { 348 mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL); 349 if (!mode->ll_table) 350 goto free; 351 mode->ll_table_size = ll_table_size; 352 } 353 354 if (d_table) { 355 mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL); 356 if (!mode->d_table) 357 goto free; 358 mode->d_table_size = d_table_size; 359 } 360 361 mode->init = init; 362 mode->free = free; 363 364 idx = find_empty_iaa_compression_mode(); 365 if (idx < 0) 366 goto free; 367 368 pr_debug("IAA compression mode %s added at idx %d\n", 369 mode->name, idx); 370 371 iaa_compression_modes[idx] = mode; 372 373 ret = 0; 374 out: 375 mutex_unlock(&iaa_devices_lock); 376 377 return ret; 378 free: 379 free_iaa_compression_mode(mode); 380 goto out; 381 } 382 EXPORT_SYMBOL_GPL(add_iaa_compression_mode); 383 384 static struct iaa_device_compression_mode * 385 get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx) 386 { 387 return iaa_device->compression_modes[idx]; 388 } 389 390 static void free_device_compression_mode(struct iaa_device *iaa_device, 391 struct iaa_device_compression_mode *device_mode) 392 { 393 size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; 394 struct device *dev = &iaa_device->idxd->pdev->dev; 395 396 kfree(device_mode->name); 397 398 if (device_mode->aecs_comp_table) 399 dma_free_coherent(dev, size, device_mode->aecs_comp_table, 400 device_mode->aecs_comp_table_dma_addr); 401 kfree(device_mode); 402 } 403 404 #define IDXD_OP_FLAG_AECS_RW_TGLS 0x400000 405 #define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC) 406 #define IAX_AECS_COMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS) 407 #define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS) 408 #define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \ 409 IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \ 410 IDXD_OP_FLAG_AECS_RW_TGLS) 411 412 static int check_completion(struct device *dev, 413 struct iax_completion_record *comp, 414 bool compress, 415 bool only_once); 416 417 static int init_device_compression_mode(struct iaa_device *iaa_device, 418 struct iaa_compression_mode *mode, 419 int idx, struct idxd_wq *wq) 420 { 421 size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN; 422 struct device *dev = &iaa_device->idxd->pdev->dev; 423 struct iaa_device_compression_mode *device_mode; 424 int ret = -ENOMEM; 425 426 device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL); 427 if (!device_mode) 428 return -ENOMEM; 429 430 device_mode->name = kstrdup(mode->name, GFP_KERNEL); 431 if (!device_mode->name) 432 goto free; 433 434 device_mode->aecs_comp_table = dma_alloc_coherent(dev, size, 435 &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL); 436 if (!device_mode->aecs_comp_table) 437 goto free; 438 439 /* Add Huffman table to aecs */ 440 memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); 441 memcpy(device_mode->aecs_comp_table->ll_sym, 
mode->ll_table, mode->ll_table_size); 442 memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); 443 444 if (mode->init) { 445 ret = mode->init(device_mode); 446 if (ret) 447 goto free; 448 } 449 450 /* mode index should match iaa_compression_modes idx */ 451 iaa_device->compression_modes[idx] = device_mode; 452 453 pr_debug("IAA %s compression mode initialized for iaa device %d\n", 454 mode->name, iaa_device->idxd->id); 455 456 ret = 0; 457 out: 458 return ret; 459 free: 460 pr_debug("IAA %s compression mode initialization failed for iaa device %d\n", 461 mode->name, iaa_device->idxd->id); 462 463 free_device_compression_mode(iaa_device, device_mode); 464 goto out; 465 } 466 467 static int init_device_compression_modes(struct iaa_device *iaa_device, 468 struct idxd_wq *wq) 469 { 470 struct iaa_compression_mode *mode; 471 int i, ret = 0; 472 473 for (i = 0; i < IAA_COMP_MODES_MAX; i++) { 474 mode = iaa_compression_modes[i]; 475 if (!mode) 476 continue; 477 478 ret = init_device_compression_mode(iaa_device, mode, i, wq); 479 if (ret) 480 break; 481 } 482 483 return ret; 484 } 485 486 static void remove_device_compression_modes(struct iaa_device *iaa_device) 487 { 488 struct iaa_device_compression_mode *device_mode; 489 int i; 490 491 for (i = 0; i < IAA_COMP_MODES_MAX; i++) { 492 device_mode = iaa_device->compression_modes[i]; 493 if (!device_mode) 494 continue; 495 496 if (iaa_compression_modes[i]->free) 497 iaa_compression_modes[i]->free(device_mode); 498 free_device_compression_mode(iaa_device, device_mode); 499 iaa_device->compression_modes[i] = NULL; 500 } 501 } 502 503 static struct iaa_device *iaa_device_alloc(void) 504 { 505 struct iaa_device *iaa_device; 506 507 iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL); 508 if (!iaa_device) 509 return NULL; 510 511 INIT_LIST_HEAD(&iaa_device->wqs); 512 513 return iaa_device; 514 } 515 516 static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) 517 { 518 struct iaa_wq *iaa_wq; 519 520 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { 521 if (iaa_wq->wq == wq) 522 return true; 523 } 524 525 return false; 526 } 527 528 static struct iaa_device *add_iaa_device(struct idxd_device *idxd) 529 { 530 struct iaa_device *iaa_device; 531 532 iaa_device = iaa_device_alloc(); 533 if (!iaa_device) 534 return NULL; 535 536 iaa_device->idxd = idxd; 537 538 list_add_tail(&iaa_device->list, &iaa_devices); 539 540 nr_iaa++; 541 542 return iaa_device; 543 } 544 545 static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq) 546 { 547 int ret = 0; 548 549 ret = init_device_compression_modes(iaa_device, iaa_wq->wq); 550 if (ret) 551 return ret; 552 553 return ret; 554 } 555 556 static void del_iaa_device(struct iaa_device *iaa_device) 557 { 558 list_del(&iaa_device->list); 559 560 nr_iaa--; 561 } 562 563 static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq, 564 struct iaa_wq **new_wq) 565 { 566 struct idxd_device *idxd = iaa_device->idxd; 567 struct pci_dev *pdev = idxd->pdev; 568 struct device *dev = &pdev->dev; 569 struct iaa_wq *iaa_wq; 570 571 iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL); 572 if (!iaa_wq) 573 return -ENOMEM; 574 575 iaa_wq->wq = wq; 576 iaa_wq->iaa_device = iaa_device; 577 idxd_wq_set_private(wq, iaa_wq); 578 579 list_add_tail(&iaa_wq->list, &iaa_device->wqs); 580 581 iaa_device->n_wq++; 582 583 if (new_wq) 584 *new_wq = iaa_wq; 585 586 dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n", 587 wq->id, iaa_device->idxd->id, iaa_device->n_wq); 588 589 
return 0; 590 } 591 592 static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq) 593 { 594 struct idxd_device *idxd = iaa_device->idxd; 595 struct pci_dev *pdev = idxd->pdev; 596 struct device *dev = &pdev->dev; 597 struct iaa_wq *iaa_wq; 598 599 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { 600 if (iaa_wq->wq == wq) { 601 list_del(&iaa_wq->list); 602 iaa_device->n_wq--; 603 604 dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n", 605 wq->id, iaa_device->idxd->id, 606 iaa_device->n_wq, nr_iaa); 607 608 if (iaa_device->n_wq == 0) 609 del_iaa_device(iaa_device); 610 break; 611 } 612 } 613 } 614 615 static void clear_wq_table(void) 616 { 617 int cpu; 618 619 for (cpu = 0; cpu < nr_cpus; cpu++) 620 wq_table_clear_entry(cpu); 621 622 pr_debug("cleared wq table\n"); 623 } 624 625 static void free_iaa_device(struct iaa_device *iaa_device) 626 { 627 if (!iaa_device) 628 return; 629 630 remove_device_compression_modes(iaa_device); 631 kfree(iaa_device); 632 } 633 634 static void __free_iaa_wq(struct iaa_wq *iaa_wq) 635 { 636 struct iaa_device *iaa_device; 637 638 if (!iaa_wq) 639 return; 640 641 iaa_device = iaa_wq->iaa_device; 642 if (iaa_device->n_wq == 0) 643 free_iaa_device(iaa_wq->iaa_device); 644 } 645 646 static void free_iaa_wq(struct iaa_wq *iaa_wq) 647 { 648 struct idxd_wq *wq; 649 650 __free_iaa_wq(iaa_wq); 651 652 wq = iaa_wq->wq; 653 654 kfree(iaa_wq); 655 idxd_wq_set_private(wq, NULL); 656 } 657 658 static int iaa_wq_get(struct idxd_wq *wq) 659 { 660 struct idxd_device *idxd = wq->idxd; 661 struct iaa_wq *iaa_wq; 662 int ret = 0; 663 664 spin_lock(&idxd->dev_lock); 665 iaa_wq = idxd_wq_get_private(wq); 666 if (iaa_wq && !iaa_wq->remove) { 667 iaa_wq->ref++; 668 idxd_wq_get(wq); 669 } else { 670 ret = -ENODEV; 671 } 672 spin_unlock(&idxd->dev_lock); 673 674 return ret; 675 } 676 677 static int iaa_wq_put(struct idxd_wq *wq) 678 { 679 struct idxd_device *idxd = wq->idxd; 680 struct iaa_wq *iaa_wq; 681 bool free = false; 682 int ret = 0; 683 684 spin_lock(&idxd->dev_lock); 685 iaa_wq = idxd_wq_get_private(wq); 686 if (iaa_wq) { 687 iaa_wq->ref--; 688 if (iaa_wq->ref == 0 && iaa_wq->remove) { 689 idxd_wq_set_private(wq, NULL); 690 free = true; 691 } 692 idxd_wq_put(wq); 693 } else { 694 ret = -ENODEV; 695 } 696 spin_unlock(&idxd->dev_lock); 697 if (free) { 698 __free_iaa_wq(iaa_wq); 699 kfree(iaa_wq); 700 } 701 702 return ret; 703 } 704 705 static void free_wq_table(void) 706 { 707 int cpu; 708 709 for (cpu = 0; cpu < nr_cpus; cpu++) 710 wq_table_free_entry(cpu); 711 712 free_percpu(wq_table); 713 714 pr_debug("freed wq table\n"); 715 } 716 717 static int alloc_wq_table(int max_wqs) 718 { 719 struct wq_table_entry *entry; 720 int cpu; 721 722 wq_table = alloc_percpu(struct wq_table_entry); 723 if (!wq_table) 724 return -ENOMEM; 725 726 for (cpu = 0; cpu < nr_cpus; cpu++) { 727 entry = per_cpu_ptr(wq_table, cpu); 728 entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL); 729 if (!entry->wqs) { 730 free_wq_table(); 731 return -ENOMEM; 732 } 733 734 entry->max_wqs = max_wqs; 735 } 736 737 pr_debug("initialized wq table\n"); 738 739 return 0; 740 } 741 742 static int save_iaa_wq(struct idxd_wq *wq) 743 { 744 struct iaa_device *iaa_device, *found = NULL; 745 struct idxd_device *idxd; 746 struct pci_dev *pdev; 747 struct device *dev; 748 int ret = 0; 749 750 list_for_each_entry(iaa_device, &iaa_devices, list) { 751 if (iaa_device->idxd == wq->idxd) { 752 idxd = iaa_device->idxd; 753 pdev = idxd->pdev; 754 dev = &pdev->dev; 755 /* 756 * Check 
			 * to see that we don't already have this wq.
			 * Shouldn't happen but we don't control probing.
			 */
			if (iaa_has_wq(iaa_device, wq)) {
				dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
					iaa_device);
				goto out;
			}

			found = iaa_device;

			ret = add_iaa_wq(iaa_device, wq, NULL);
			if (ret)
				goto out;

			break;
		}
	}

	if (!found) {
		struct iaa_device *new_device;
		struct iaa_wq *new_wq;

		new_device = add_iaa_device(wq->idxd);
		if (!new_device) {
			ret = -ENOMEM;
			goto out;
		}

		ret = add_iaa_wq(new_device, wq, &new_wq);
		if (ret) {
			del_iaa_device(new_device);
			free_iaa_device(new_device);
			goto out;
		}

		ret = init_iaa_device(new_device, new_wq);
		if (ret) {
			del_iaa_wq(new_device, new_wq->wq);
			del_iaa_device(new_device);
			free_iaa_wq(new_wq);
			goto out;
		}
	}

	if (WARN_ON(nr_iaa == 0))
		return -EINVAL;

	cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
	if (!cpus_per_iaa)
		cpus_per_iaa = 1;
out:
	return ret;
}

static void remove_iaa_wq(struct idxd_wq *wq)
{
	struct iaa_device *iaa_device;

	list_for_each_entry(iaa_device, &iaa_devices, list) {
		if (iaa_has_wq(iaa_device, wq)) {
			del_iaa_wq(iaa_device, wq);
			break;
		}
	}

	if (nr_iaa) {
		cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
		if (!cpus_per_iaa)
			cpus_per_iaa = 1;
	} else
		cpus_per_iaa = 1;
}

static int wq_table_add_wqs(int iaa, int cpu)
{
	struct iaa_device *iaa_device, *found_device = NULL;
	int ret = 0, cur_iaa = 0, n_wqs_added = 0;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;

	list_for_each_entry(iaa_device, &iaa_devices, list) {
		idxd = iaa_device->idxd;
		pdev = idxd->pdev;
		dev = &pdev->dev;

		if (cur_iaa != iaa) {
			cur_iaa++;
			continue;
		}

		found_device = iaa_device;
		dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
			found_device->idxd->id, cur_iaa);
		break;
	}

	if (!found_device) {
		found_device = list_first_entry_or_null(&iaa_devices,
							struct iaa_device, list);
		if (!found_device) {
			pr_debug("couldn't find any iaa devices with wqs!\n");
			ret = -EINVAL;
			goto out;
		}
		cur_iaa = 0;

		idxd = found_device->idxd;
		pdev = idxd->pdev;
		dev = &pdev->dev;
		dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
			found_device->idxd->id, cur_iaa);
	}

	list_for_each_entry(iaa_wq, &found_device->wqs, list) {
		wq_table_add(cpu, iaa_wq->wq);
		pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
			 cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
		n_wqs_added++;
	}

	if (!n_wqs_added) {
		pr_debug("couldn't find any iaa wqs!\n");
		ret = -EINVAL;
		goto out;
	}
out:
	return ret;
}

/*
 * Rebalance the wq table so that given a cpu, it's easy to find the
 * closest IAA instance. The idea is to try to choose the most
 * appropriate IAA instance for a caller and spread available
 * workqueues around to clients.
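 *
 * Each IAA instance ends up shared by cpus_per_iaa consecutive cpus of
 * a node, where cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa,
 * clamped to a minimum of 1. For example, with 2 nodes, 64 cpus per
 * node and 4 IAA instances, each instance serves (2 * 64) / 4 = 32
 * cpus.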
893 */ 894 static void rebalance_wq_table(void) 895 { 896 const struct cpumask *node_cpus; 897 int node, cpu, iaa = -1; 898 899 if (nr_iaa == 0) 900 return; 901 902 pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n", 903 nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa); 904 905 clear_wq_table(); 906 907 if (nr_iaa == 1) { 908 for (cpu = 0; cpu < nr_cpus; cpu++) { 909 if (WARN_ON(wq_table_add_wqs(0, cpu))) { 910 pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu); 911 return; 912 } 913 } 914 915 return; 916 } 917 918 for_each_node_with_cpus(node) { 919 node_cpus = cpumask_of_node(node); 920 921 for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) { 922 int node_cpu = cpumask_nth(cpu, node_cpus); 923 924 if (WARN_ON(node_cpu >= nr_cpu_ids)) { 925 pr_debug("node_cpu %d doesn't exist!\n", node_cpu); 926 return; 927 } 928 929 if ((cpu % cpus_per_iaa) == 0) 930 iaa++; 931 932 if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) { 933 pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu); 934 return; 935 } 936 } 937 } 938 } 939 940 static inline int check_completion(struct device *dev, 941 struct iax_completion_record *comp, 942 bool compress, 943 bool only_once) 944 { 945 char *op_str = compress ? "compress" : "decompress"; 946 int status_checks = 0; 947 int ret = 0; 948 949 while (!comp->status) { 950 if (only_once) 951 return -EAGAIN; 952 cpu_relax(); 953 if (status_checks++ >= IAA_COMPLETION_TIMEOUT) { 954 /* Something is wrong with the hw, disable it. */ 955 dev_err(dev, "%s completion timed out - " 956 "assuming broken hw, iaa_crypto now DISABLED\n", 957 op_str); 958 iaa_crypto_enabled = false; 959 ret = -ETIMEDOUT; 960 goto out; 961 } 962 } 963 964 if (comp->status != IAX_COMP_SUCCESS) { 965 if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) { 966 ret = -ETIMEDOUT; 967 dev_dbg(dev, "%s timed out, size=0x%x\n", 968 op_str, comp->output_size); 969 update_completion_timeout_errs(); 970 goto out; 971 } 972 973 if (comp->status == IAA_ANALYTICS_ERROR && 974 comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) { 975 ret = -E2BIG; 976 dev_dbg(dev, "compressed > uncompressed size," 977 " not compressing, size=0x%x\n", 978 comp->output_size); 979 update_completion_comp_buf_overflow_errs(); 980 goto out; 981 } 982 983 if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) { 984 ret = -EOVERFLOW; 985 goto out; 986 } 987 988 ret = -EINVAL; 989 dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n", 990 op_str, comp->status, comp->error_code, comp->output_size); 991 print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0); 992 update_completion_einval_errs(); 993 994 goto out; 995 } 996 out: 997 return ret; 998 } 999 1000 static int deflate_generic_decompress(struct acomp_req *req) 1001 { 1002 ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req)); 1003 int ret; 1004 1005 acomp_request_set_callback(fbreq, 0, NULL, NULL); 1006 acomp_request_set_params(fbreq, req->src, req->dst, req->slen, 1007 req->dlen); 1008 ret = crypto_acomp_decompress(fbreq); 1009 req->dlen = fbreq->dlen; 1010 1011 update_total_sw_decomp_calls(); 1012 1013 return ret; 1014 } 1015 1016 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, 1017 struct acomp_req *req, 1018 dma_addr_t *src_addr, dma_addr_t *dst_addr); 1019 1020 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, 1021 struct idxd_wq *wq, 1022 dma_addr_t src_addr, unsigned int slen, 1023 dma_addr_t dst_addr, unsigned int *dlen); 1024 1025 static void 
iaa_desc_complete(struct idxd_desc *idxd_desc, 1026 enum idxd_complete_type comp_type, 1027 bool free_desc, void *__ctx, 1028 u32 *status) 1029 { 1030 struct iaa_device_compression_mode *active_compression_mode; 1031 struct iaa_compression_ctx *compression_ctx; 1032 struct crypto_ctx *ctx = __ctx; 1033 struct iaa_device *iaa_device; 1034 struct idxd_device *idxd; 1035 struct iaa_wq *iaa_wq; 1036 struct pci_dev *pdev; 1037 struct device *dev; 1038 int ret, err = 0; 1039 1040 compression_ctx = crypto_tfm_ctx(ctx->tfm); 1041 1042 iaa_wq = idxd_wq_get_private(idxd_desc->wq); 1043 iaa_device = iaa_wq->iaa_device; 1044 idxd = iaa_device->idxd; 1045 pdev = idxd->pdev; 1046 dev = &pdev->dev; 1047 1048 active_compression_mode = get_iaa_device_compression_mode(iaa_device, 1049 compression_ctx->mode); 1050 dev_dbg(dev, "%s: compression mode %s," 1051 " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__, 1052 active_compression_mode->name, 1053 ctx->src_addr, ctx->dst_addr); 1054 1055 ret = check_completion(dev, idxd_desc->iax_completion, 1056 ctx->compress, false); 1057 if (ret) { 1058 dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); 1059 if (!ctx->compress && 1060 idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { 1061 pr_warn("%s: falling back to deflate-generic decompress, " 1062 "analytics error code %x\n", __func__, 1063 idxd_desc->iax_completion->error_code); 1064 ret = deflate_generic_decompress(ctx->req); 1065 if (ret) { 1066 dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", 1067 __func__, ret); 1068 err = -EIO; 1069 goto err; 1070 } 1071 } else { 1072 err = -EIO; 1073 goto err; 1074 } 1075 } else { 1076 ctx->req->dlen = idxd_desc->iax_completion->output_size; 1077 } 1078 1079 /* Update stats */ 1080 if (ctx->compress) { 1081 update_total_comp_bytes_out(ctx->req->dlen); 1082 update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); 1083 } else { 1084 update_total_decomp_bytes_in(ctx->req->slen); 1085 update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen); 1086 } 1087 1088 if (ctx->compress && compression_ctx->verify_compress) { 1089 u32 *compression_crc = acomp_request_ctx(ctx->req); 1090 dma_addr_t src_addr, dst_addr; 1091 1092 *compression_crc = idxd_desc->iax_completion->crc; 1093 1094 ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); 1095 if (ret) { 1096 dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); 1097 err = -EIO; 1098 goto out; 1099 } 1100 1101 ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, 1102 ctx->req->slen, dst_addr, &ctx->req->dlen); 1103 if (ret) { 1104 dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret); 1105 err = -EIO; 1106 } 1107 1108 dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE); 1109 dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE); 1110 1111 goto out; 1112 } 1113 err: 1114 dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE); 1115 dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE); 1116 out: 1117 if (ret != 0) 1118 dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); 1119 1120 if (ctx->req->base.complete) 1121 acomp_request_complete(ctx->req, err); 1122 1123 if (free_desc) 1124 idxd_free_desc(idxd_desc->wq, idxd_desc); 1125 iaa_wq_put(idxd_desc->wq); 1126 } 1127 1128 static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, 1129 struct idxd_wq *wq, 1130 dma_addr_t src_addr, unsigned int slen, 1131 dma_addr_t dst_addr, unsigned int *dlen) 1132 { 1133 
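	/*
	 * Build an IAX_OPCODE_COMPRESS descriptor whose src2 points at the
	 * AECS table for this context's compression mode, submit it, then
	 * either poll the completion record here (sync) or return
	 * -EINPROGRESS for the async paths (irq completions are delivered
	 * via iaa_desc_complete()).
	 */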
struct iaa_device_compression_mode *active_compression_mode; 1134 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); 1135 u32 *compression_crc = acomp_request_ctx(req); 1136 struct iaa_device *iaa_device; 1137 struct idxd_desc *idxd_desc; 1138 struct iax_hw_desc *desc; 1139 struct idxd_device *idxd; 1140 struct iaa_wq *iaa_wq; 1141 struct pci_dev *pdev; 1142 struct device *dev; 1143 int ret = 0; 1144 1145 iaa_wq = idxd_wq_get_private(wq); 1146 iaa_device = iaa_wq->iaa_device; 1147 idxd = iaa_device->idxd; 1148 pdev = idxd->pdev; 1149 dev = &pdev->dev; 1150 1151 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); 1152 1153 idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); 1154 if (IS_ERR(idxd_desc)) { 1155 dev_dbg(dev, "idxd descriptor allocation failed\n"); 1156 dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc)); 1157 return PTR_ERR(idxd_desc); 1158 } 1159 desc = idxd_desc->iax_hw; 1160 1161 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | 1162 IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC; 1163 desc->opcode = IAX_OPCODE_COMPRESS; 1164 desc->compr_flags = IAA_COMP_FLAGS; 1165 desc->priv = 0; 1166 1167 desc->src1_addr = (u64)src_addr; 1168 desc->src1_size = slen; 1169 desc->dst_addr = (u64)dst_addr; 1170 desc->max_dst_size = *dlen; 1171 desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr; 1172 desc->src2_size = sizeof(struct aecs_comp_table_record); 1173 desc->completion_addr = idxd_desc->compl_dma; 1174 1175 if (ctx->use_irq) { 1176 desc->flags |= IDXD_OP_FLAG_RCI; 1177 1178 idxd_desc->crypto.req = req; 1179 idxd_desc->crypto.tfm = tfm; 1180 idxd_desc->crypto.src_addr = src_addr; 1181 idxd_desc->crypto.dst_addr = dst_addr; 1182 idxd_desc->crypto.compress = true; 1183 1184 dev_dbg(dev, "%s use_async_irq: compression mode %s," 1185 " src_addr %llx, dst_addr %llx\n", __func__, 1186 active_compression_mode->name, 1187 src_addr, dst_addr); 1188 } else if (ctx->async_mode) 1189 req->base.data = idxd_desc; 1190 1191 dev_dbg(dev, "%s: compression mode %s," 1192 " desc->src1_addr %llx, desc->src1_size %d," 1193 " desc->dst_addr %llx, desc->max_dst_size %d," 1194 " desc->src2_addr %llx, desc->src2_size %d\n", __func__, 1195 active_compression_mode->name, 1196 desc->src1_addr, desc->src1_size, desc->dst_addr, 1197 desc->max_dst_size, desc->src2_addr, desc->src2_size); 1198 1199 ret = idxd_submit_desc(wq, idxd_desc); 1200 if (ret) { 1201 dev_dbg(dev, "submit_desc failed ret=%d\n", ret); 1202 goto err; 1203 } 1204 1205 /* Update stats */ 1206 update_total_comp_calls(); 1207 update_wq_comp_calls(wq); 1208 1209 if (ctx->async_mode) { 1210 ret = -EINPROGRESS; 1211 dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); 1212 goto out; 1213 } 1214 1215 ret = check_completion(dev, idxd_desc->iax_completion, true, false); 1216 if (ret) { 1217 dev_dbg(dev, "check_completion failed ret=%d\n", ret); 1218 goto err; 1219 } 1220 1221 *dlen = idxd_desc->iax_completion->output_size; 1222 1223 /* Update stats */ 1224 update_total_comp_bytes_out(*dlen); 1225 update_wq_comp_bytes(wq, *dlen); 1226 1227 *compression_crc = idxd_desc->iax_completion->crc; 1228 1229 if (!ctx->async_mode) 1230 idxd_free_desc(wq, idxd_desc); 1231 out: 1232 return ret; 1233 err: 1234 idxd_free_desc(wq, idxd_desc); 1235 dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); 1236 1237 goto out; 1238 } 1239 1240 static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq, 1241 struct acomp_req *req, 1242 dma_addr_t *src_addr, dma_addr_t *dst_addr) 1243 { 1244 int ret = 0; 
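	/*
	 * The verify pass decompresses the just-produced output (with the
	 * destination write suppressed) to check its CRC, so drop the
	 * original mappings and remap src/dst with the DMA directions
	 * swapped.
	 */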
1245 int nr_sgs; 1246 1247 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); 1248 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); 1249 1250 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); 1251 if (nr_sgs <= 0 || nr_sgs > 1) { 1252 dev_dbg(dev, "verify: couldn't map src sg for iaa device %d," 1253 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1254 iaa_wq->wq->id, ret); 1255 ret = -EIO; 1256 goto out; 1257 } 1258 *src_addr = sg_dma_address(req->src); 1259 dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," 1260 " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs, 1261 req->src, req->slen, sg_dma_len(req->src)); 1262 1263 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); 1264 if (nr_sgs <= 0 || nr_sgs > 1) { 1265 dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d," 1266 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1267 iaa_wq->wq->id, ret); 1268 ret = -EIO; 1269 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); 1270 goto out; 1271 } 1272 *dst_addr = sg_dma_address(req->dst); 1273 dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," 1274 " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs, 1275 req->dst, req->dlen, sg_dma_len(req->dst)); 1276 out: 1277 return ret; 1278 } 1279 1280 static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req, 1281 struct idxd_wq *wq, 1282 dma_addr_t src_addr, unsigned int slen, 1283 dma_addr_t dst_addr, unsigned int *dlen) 1284 { 1285 struct iaa_device_compression_mode *active_compression_mode; 1286 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); 1287 u32 *compression_crc = acomp_request_ctx(req); 1288 struct iaa_device *iaa_device; 1289 struct idxd_desc *idxd_desc; 1290 struct iax_hw_desc *desc; 1291 struct idxd_device *idxd; 1292 struct iaa_wq *iaa_wq; 1293 struct pci_dev *pdev; 1294 struct device *dev; 1295 int ret = 0; 1296 1297 iaa_wq = idxd_wq_get_private(wq); 1298 iaa_device = iaa_wq->iaa_device; 1299 idxd = iaa_device->idxd; 1300 pdev = idxd->pdev; 1301 dev = &pdev->dev; 1302 1303 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); 1304 1305 idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); 1306 if (IS_ERR(idxd_desc)) { 1307 dev_dbg(dev, "idxd descriptor allocation failed\n"); 1308 dev_dbg(dev, "iaa compress failed: ret=%ld\n", 1309 PTR_ERR(idxd_desc)); 1310 return PTR_ERR(idxd_desc); 1311 } 1312 desc = idxd_desc->iax_hw; 1313 1314 /* Verify (optional) - decompress and check crc, suppress dest write */ 1315 1316 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; 1317 desc->opcode = IAX_OPCODE_DECOMPRESS; 1318 desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT; 1319 desc->priv = 0; 1320 1321 desc->src1_addr = (u64)dst_addr; 1322 desc->src1_size = *dlen; 1323 desc->dst_addr = (u64)src_addr; 1324 desc->max_dst_size = slen; 1325 desc->completion_addr = idxd_desc->compl_dma; 1326 1327 dev_dbg(dev, "(verify) compression mode %s," 1328 " desc->src1_addr %llx, desc->src1_size %d," 1329 " desc->dst_addr %llx, desc->max_dst_size %d," 1330 " desc->src2_addr %llx, desc->src2_size %d\n", 1331 active_compression_mode->name, 1332 desc->src1_addr, desc->src1_size, desc->dst_addr, 1333 desc->max_dst_size, desc->src2_addr, desc->src2_size); 1334 1335 ret = idxd_submit_desc(wq, idxd_desc); 1336 if (ret) { 1337 dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret); 1338 goto err; 1339 } 1340 1341 ret = check_completion(dev, 
idxd_desc->iax_completion, false, false); 1342 if (ret) { 1343 dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret); 1344 goto err; 1345 } 1346 1347 if (*compression_crc != idxd_desc->iax_completion->crc) { 1348 ret = -EINVAL; 1349 dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:" 1350 " comp=0x%x, decomp=0x%x\n", *compression_crc, 1351 idxd_desc->iax_completion->crc); 1352 print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 1353 8, 1, idxd_desc->iax_completion, 64, 0); 1354 goto err; 1355 } 1356 1357 idxd_free_desc(wq, idxd_desc); 1358 out: 1359 return ret; 1360 err: 1361 idxd_free_desc(wq, idxd_desc); 1362 dev_dbg(dev, "iaa compress failed: ret=%d\n", ret); 1363 1364 goto out; 1365 } 1366 1367 static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, 1368 struct idxd_wq *wq, 1369 dma_addr_t src_addr, unsigned int slen, 1370 dma_addr_t dst_addr, unsigned int *dlen, 1371 bool disable_async) 1372 { 1373 struct iaa_device_compression_mode *active_compression_mode; 1374 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); 1375 struct iaa_device *iaa_device; 1376 struct idxd_desc *idxd_desc; 1377 struct iax_hw_desc *desc; 1378 struct idxd_device *idxd; 1379 struct iaa_wq *iaa_wq; 1380 struct pci_dev *pdev; 1381 struct device *dev; 1382 int ret = 0; 1383 1384 iaa_wq = idxd_wq_get_private(wq); 1385 iaa_device = iaa_wq->iaa_device; 1386 idxd = iaa_device->idxd; 1387 pdev = idxd->pdev; 1388 dev = &pdev->dev; 1389 1390 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); 1391 1392 idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); 1393 if (IS_ERR(idxd_desc)) { 1394 dev_dbg(dev, "idxd descriptor allocation failed\n"); 1395 dev_dbg(dev, "iaa decompress failed: ret=%ld\n", 1396 PTR_ERR(idxd_desc)); 1397 return PTR_ERR(idxd_desc); 1398 } 1399 desc = idxd_desc->iax_hw; 1400 1401 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; 1402 desc->opcode = IAX_OPCODE_DECOMPRESS; 1403 desc->max_dst_size = PAGE_SIZE; 1404 desc->decompr_flags = IAA_DECOMP_FLAGS; 1405 desc->priv = 0; 1406 1407 desc->src1_addr = (u64)src_addr; 1408 desc->dst_addr = (u64)dst_addr; 1409 desc->max_dst_size = *dlen; 1410 desc->src1_size = slen; 1411 desc->completion_addr = idxd_desc->compl_dma; 1412 1413 if (ctx->use_irq && !disable_async) { 1414 desc->flags |= IDXD_OP_FLAG_RCI; 1415 1416 idxd_desc->crypto.req = req; 1417 idxd_desc->crypto.tfm = tfm; 1418 idxd_desc->crypto.src_addr = src_addr; 1419 idxd_desc->crypto.dst_addr = dst_addr; 1420 idxd_desc->crypto.compress = false; 1421 1422 dev_dbg(dev, "%s: use_async_irq compression mode %s," 1423 " src_addr %llx, dst_addr %llx\n", __func__, 1424 active_compression_mode->name, 1425 src_addr, dst_addr); 1426 } else if (ctx->async_mode && !disable_async) 1427 req->base.data = idxd_desc; 1428 1429 dev_dbg(dev, "%s: decompression mode %s," 1430 " desc->src1_addr %llx, desc->src1_size %d," 1431 " desc->dst_addr %llx, desc->max_dst_size %d," 1432 " desc->src2_addr %llx, desc->src2_size %d\n", __func__, 1433 active_compression_mode->name, 1434 desc->src1_addr, desc->src1_size, desc->dst_addr, 1435 desc->max_dst_size, desc->src2_addr, desc->src2_size); 1436 1437 ret = idxd_submit_desc(wq, idxd_desc); 1438 if (ret) { 1439 dev_dbg(dev, "submit_desc failed ret=%d\n", ret); 1440 goto err; 1441 } 1442 1443 /* Update stats */ 1444 update_total_decomp_calls(); 1445 update_wq_decomp_calls(wq); 1446 1447 if (ctx->async_mode && !disable_async) { 1448 ret = -EINPROGRESS; 1449 dev_dbg(dev, "%s: returning -EINPROGRESS\n", 
__func__); 1450 goto out; 1451 } 1452 1453 ret = check_completion(dev, idxd_desc->iax_completion, false, false); 1454 if (ret) { 1455 dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret); 1456 if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { 1457 pr_warn("%s: falling back to deflate-generic decompress, " 1458 "analytics error code %x\n", __func__, 1459 idxd_desc->iax_completion->error_code); 1460 ret = deflate_generic_decompress(req); 1461 if (ret) { 1462 dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", 1463 __func__, ret); 1464 goto err; 1465 } 1466 } else { 1467 goto err; 1468 } 1469 } else { 1470 req->dlen = idxd_desc->iax_completion->output_size; 1471 } 1472 1473 *dlen = req->dlen; 1474 1475 if (!ctx->async_mode || disable_async) 1476 idxd_free_desc(wq, idxd_desc); 1477 1478 /* Update stats */ 1479 update_total_decomp_bytes_in(slen); 1480 update_wq_decomp_bytes(wq, slen); 1481 out: 1482 return ret; 1483 err: 1484 idxd_free_desc(wq, idxd_desc); 1485 dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret); 1486 1487 goto out; 1488 } 1489 1490 static int iaa_comp_acompress(struct acomp_req *req) 1491 { 1492 struct iaa_compression_ctx *compression_ctx; 1493 struct crypto_tfm *tfm = req->base.tfm; 1494 dma_addr_t src_addr, dst_addr; 1495 int nr_sgs, cpu, ret = 0; 1496 struct iaa_wq *iaa_wq; 1497 struct idxd_wq *wq; 1498 struct device *dev; 1499 1500 compression_ctx = crypto_tfm_ctx(tfm); 1501 1502 if (!iaa_crypto_enabled) { 1503 pr_debug("iaa_crypto disabled, not compressing\n"); 1504 return -ENODEV; 1505 } 1506 1507 if (!req->src || !req->slen) { 1508 pr_debug("invalid src, not compressing\n"); 1509 return -EINVAL; 1510 } 1511 1512 cpu = get_cpu(); 1513 wq = wq_table_next_wq(cpu); 1514 put_cpu(); 1515 if (!wq) { 1516 pr_debug("no wq configured for cpu=%d\n", cpu); 1517 return -ENODEV; 1518 } 1519 1520 ret = iaa_wq_get(wq); 1521 if (ret) { 1522 pr_debug("no wq available for cpu=%d\n", cpu); 1523 return -ENODEV; 1524 } 1525 1526 iaa_wq = idxd_wq_get_private(wq); 1527 1528 dev = &wq->idxd->pdev->dev; 1529 1530 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); 1531 if (nr_sgs <= 0 || nr_sgs > 1) { 1532 dev_dbg(dev, "couldn't map src sg for iaa device %d," 1533 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1534 iaa_wq->wq->id, ret); 1535 ret = -EIO; 1536 goto out; 1537 } 1538 src_addr = sg_dma_address(req->src); 1539 dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," 1540 " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, 1541 req->src, req->slen, sg_dma_len(req->src)); 1542 1543 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); 1544 if (nr_sgs <= 0 || nr_sgs > 1) { 1545 dev_dbg(dev, "couldn't map dst sg for iaa device %d," 1546 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1547 iaa_wq->wq->id, ret); 1548 ret = -EIO; 1549 goto err_map_dst; 1550 } 1551 dst_addr = sg_dma_address(req->dst); 1552 dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," 1553 " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, 1554 req->dst, req->dlen, sg_dma_len(req->dst)); 1555 1556 ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, 1557 &req->dlen); 1558 if (ret == -EINPROGRESS) 1559 return ret; 1560 1561 if (!ret && compression_ctx->verify_compress) { 1562 ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr); 1563 if (ret) { 1564 dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret); 1565 goto out; 1566 } 1567 1568 ret = iaa_compress_verify(tfm, req, wq, 
src_addr, req->slen, 1569 dst_addr, &req->dlen); 1570 if (ret) 1571 dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret); 1572 1573 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); 1574 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); 1575 1576 goto out; 1577 } 1578 1579 if (ret) 1580 dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret); 1581 1582 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); 1583 err_map_dst: 1584 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); 1585 out: 1586 iaa_wq_put(wq); 1587 1588 return ret; 1589 } 1590 1591 static int iaa_comp_adecompress(struct acomp_req *req) 1592 { 1593 struct crypto_tfm *tfm = req->base.tfm; 1594 dma_addr_t src_addr, dst_addr; 1595 int nr_sgs, cpu, ret = 0; 1596 struct iaa_wq *iaa_wq; 1597 struct device *dev; 1598 struct idxd_wq *wq; 1599 1600 if (!iaa_crypto_enabled) { 1601 pr_debug("iaa_crypto disabled, not decompressing\n"); 1602 return -ENODEV; 1603 } 1604 1605 if (!req->src || !req->slen) { 1606 pr_debug("invalid src, not decompressing\n"); 1607 return -EINVAL; 1608 } 1609 1610 cpu = get_cpu(); 1611 wq = wq_table_next_wq(cpu); 1612 put_cpu(); 1613 if (!wq) { 1614 pr_debug("no wq configured for cpu=%d\n", cpu); 1615 return -ENODEV; 1616 } 1617 1618 ret = iaa_wq_get(wq); 1619 if (ret) { 1620 pr_debug("no wq available for cpu=%d\n", cpu); 1621 return -ENODEV; 1622 } 1623 1624 iaa_wq = idxd_wq_get_private(wq); 1625 1626 dev = &wq->idxd->pdev->dev; 1627 1628 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); 1629 if (nr_sgs <= 0 || nr_sgs > 1) { 1630 dev_dbg(dev, "couldn't map src sg for iaa device %d," 1631 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1632 iaa_wq->wq->id, ret); 1633 ret = -EIO; 1634 goto out; 1635 } 1636 src_addr = sg_dma_address(req->src); 1637 dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," 1638 " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, 1639 req->src, req->slen, sg_dma_len(req->src)); 1640 1641 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); 1642 if (nr_sgs <= 0 || nr_sgs > 1) { 1643 dev_dbg(dev, "couldn't map dst sg for iaa device %d," 1644 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, 1645 iaa_wq->wq->id, ret); 1646 ret = -EIO; 1647 goto err_map_dst; 1648 } 1649 dst_addr = sg_dma_address(req->dst); 1650 dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," 1651 " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, 1652 req->dst, req->dlen, sg_dma_len(req->dst)); 1653 1654 ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, 1655 dst_addr, &req->dlen, false); 1656 if (ret == -EINPROGRESS) 1657 return ret; 1658 1659 if (ret != 0) 1660 dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); 1661 1662 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); 1663 err_map_dst: 1664 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); 1665 out: 1666 iaa_wq_put(wq); 1667 1668 return ret; 1669 } 1670 1671 static void compression_ctx_init(struct iaa_compression_ctx *ctx) 1672 { 1673 ctx->verify_compress = iaa_verify_compress; 1674 ctx->async_mode = async_mode; 1675 ctx->use_irq = use_irq; 1676 } 1677 1678 static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) 1679 { 1680 struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); 1681 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); 1682 1683 compression_ctx_init(ctx); 1684 1685 ctx->mode = IAA_MODE_FIXED; 1686 1687 return 0; 1688 } 1689 1690 static struct 
acomp_alg iaa_acomp_fixed_deflate = { 1691 .init = iaa_comp_init_fixed, 1692 .compress = iaa_comp_acompress, 1693 .decompress = iaa_comp_adecompress, 1694 .reqsize = sizeof(u32), 1695 .base = { 1696 .cra_name = "deflate", 1697 .cra_driver_name = "deflate-iaa", 1698 .cra_flags = CRYPTO_ALG_ASYNC, 1699 .cra_ctxsize = sizeof(struct iaa_compression_ctx), 1700 .cra_module = THIS_MODULE, 1701 .cra_priority = IAA_ALG_PRIORITY, 1702 } 1703 }; 1704 1705 static int iaa_register_compression_device(void) 1706 { 1707 int ret; 1708 1709 ret = crypto_register_acomp(&iaa_acomp_fixed_deflate); 1710 if (ret) { 1711 pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret); 1712 goto out; 1713 } 1714 1715 iaa_crypto_registered = true; 1716 out: 1717 return ret; 1718 } 1719 1720 static int iaa_unregister_compression_device(void) 1721 { 1722 if (iaa_crypto_registered) 1723 crypto_unregister_acomp(&iaa_acomp_fixed_deflate); 1724 1725 return 0; 1726 } 1727 1728 static int iaa_crypto_probe(struct idxd_dev *idxd_dev) 1729 { 1730 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 1731 struct idxd_device *idxd = wq->idxd; 1732 struct idxd_driver_data *data = idxd->data; 1733 struct device *dev = &idxd_dev->conf_dev; 1734 bool first_wq = false; 1735 int ret = 0; 1736 1737 if (idxd->state != IDXD_DEV_ENABLED) 1738 return -ENXIO; 1739 1740 if (data->type != IDXD_TYPE_IAX) 1741 return -ENODEV; 1742 1743 mutex_lock(&wq->wq_lock); 1744 1745 if (idxd_wq_get_private(wq)) { 1746 mutex_unlock(&wq->wq_lock); 1747 return -EBUSY; 1748 } 1749 1750 if (!idxd_wq_driver_name_match(wq, dev)) { 1751 dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n", 1752 idxd->id, wq->id, wq->driver_name, dev->driver->name); 1753 idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 1754 ret = -ENODEV; 1755 goto err; 1756 } 1757 1758 wq->type = IDXD_WQT_KERNEL; 1759 1760 ret = idxd_drv_enable_wq(wq); 1761 if (ret < 0) { 1762 dev_dbg(dev, "enable wq %d.%d failed: %d\n", 1763 idxd->id, wq->id, ret); 1764 ret = -ENXIO; 1765 goto err; 1766 } 1767 1768 mutex_lock(&iaa_devices_lock); 1769 1770 if (list_empty(&iaa_devices)) { 1771 ret = alloc_wq_table(wq->idxd->max_wqs); 1772 if (ret) 1773 goto err_alloc; 1774 first_wq = true; 1775 } 1776 1777 ret = save_iaa_wq(wq); 1778 if (ret) 1779 goto err_save; 1780 1781 rebalance_wq_table(); 1782 1783 if (first_wq) { 1784 iaa_crypto_enabled = true; 1785 ret = iaa_register_compression_device(); 1786 if (ret != 0) { 1787 iaa_crypto_enabled = false; 1788 dev_dbg(dev, "IAA compression device registration failed\n"); 1789 goto err_register; 1790 } 1791 try_module_get(THIS_MODULE); 1792 1793 pr_info("iaa_crypto now ENABLED\n"); 1794 } 1795 1796 mutex_unlock(&iaa_devices_lock); 1797 out: 1798 mutex_unlock(&wq->wq_lock); 1799 1800 return ret; 1801 1802 err_register: 1803 remove_iaa_wq(wq); 1804 free_iaa_wq(idxd_wq_get_private(wq)); 1805 err_save: 1806 if (first_wq) 1807 free_wq_table(); 1808 err_alloc: 1809 mutex_unlock(&iaa_devices_lock); 1810 idxd_drv_disable_wq(wq); 1811 err: 1812 wq->type = IDXD_WQT_NONE; 1813 1814 goto out; 1815 } 1816 1817 static void iaa_crypto_remove(struct idxd_dev *idxd_dev) 1818 { 1819 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 1820 struct idxd_device *idxd = wq->idxd; 1821 struct iaa_wq *iaa_wq; 1822 bool free = false; 1823 1824 idxd_wq_quiesce(wq); 1825 1826 mutex_lock(&wq->wq_lock); 1827 mutex_lock(&iaa_devices_lock); 1828 1829 remove_iaa_wq(wq); 1830 1831 spin_lock(&idxd->dev_lock); 1832 iaa_wq = idxd_wq_get_private(wq); 1833 if (!iaa_wq) { 1834 
spin_unlock(&idxd->dev_lock); 1835 pr_err("%s: no iaa_wq available to remove\n", __func__); 1836 goto out; 1837 } 1838 1839 if (iaa_wq->ref) { 1840 iaa_wq->remove = true; 1841 } else { 1842 wq = iaa_wq->wq; 1843 idxd_wq_set_private(wq, NULL); 1844 free = true; 1845 } 1846 spin_unlock(&idxd->dev_lock); 1847 if (free) { 1848 __free_iaa_wq(iaa_wq); 1849 kfree(iaa_wq); 1850 } 1851 1852 idxd_drv_disable_wq(wq); 1853 rebalance_wq_table(); 1854 1855 if (nr_iaa == 0) { 1856 iaa_crypto_enabled = false; 1857 free_wq_table(); 1858 module_put(THIS_MODULE); 1859 1860 pr_info("iaa_crypto now DISABLED\n"); 1861 } 1862 out: 1863 mutex_unlock(&iaa_devices_lock); 1864 mutex_unlock(&wq->wq_lock); 1865 } 1866 1867 static enum idxd_dev_type dev_types[] = { 1868 IDXD_DEV_WQ, 1869 IDXD_DEV_NONE, 1870 }; 1871 1872 static struct idxd_device_driver iaa_crypto_driver = { 1873 .probe = iaa_crypto_probe, 1874 .remove = iaa_crypto_remove, 1875 .name = IDXD_SUBDRIVER_NAME, 1876 .type = dev_types, 1877 .desc_complete = iaa_desc_complete, 1878 }; 1879 1880 static int __init iaa_crypto_init_module(void) 1881 { 1882 int ret = 0; 1883 int node; 1884 1885 nr_cpus = num_possible_cpus(); 1886 for_each_node_with_cpus(node) 1887 nr_nodes++; 1888 if (!nr_nodes) { 1889 pr_err("IAA couldn't find any nodes with cpus\n"); 1890 return -ENODEV; 1891 } 1892 nr_cpus_per_node = nr_cpus / nr_nodes; 1893 1894 ret = iaa_aecs_init_fixed(); 1895 if (ret < 0) { 1896 pr_debug("IAA fixed compression mode init failed\n"); 1897 goto err_aecs_init; 1898 } 1899 1900 ret = idxd_driver_register(&iaa_crypto_driver); 1901 if (ret) { 1902 pr_debug("IAA wq sub-driver registration failed\n"); 1903 goto err_driver_reg; 1904 } 1905 1906 ret = driver_create_file(&iaa_crypto_driver.drv, 1907 &driver_attr_verify_compress); 1908 if (ret) { 1909 pr_debug("IAA verify_compress attr creation failed\n"); 1910 goto err_verify_attr_create; 1911 } 1912 1913 ret = driver_create_file(&iaa_crypto_driver.drv, 1914 &driver_attr_sync_mode); 1915 if (ret) { 1916 pr_debug("IAA sync mode attr creation failed\n"); 1917 goto err_sync_attr_create; 1918 } 1919 1920 if (iaa_crypto_debugfs_init()) 1921 pr_warn("debugfs init failed, stats not available\n"); 1922 1923 pr_debug("initialized\n"); 1924 out: 1925 return ret; 1926 1927 err_sync_attr_create: 1928 driver_remove_file(&iaa_crypto_driver.drv, 1929 &driver_attr_verify_compress); 1930 err_verify_attr_create: 1931 idxd_driver_unregister(&iaa_crypto_driver); 1932 err_driver_reg: 1933 iaa_aecs_cleanup_fixed(); 1934 err_aecs_init: 1935 1936 goto out; 1937 } 1938 1939 static void __exit iaa_crypto_cleanup_module(void) 1940 { 1941 if (iaa_unregister_compression_device()) 1942 pr_debug("IAA compression device unregister failed\n"); 1943 1944 iaa_crypto_debugfs_cleanup(); 1945 driver_remove_file(&iaa_crypto_driver.drv, 1946 &driver_attr_sync_mode); 1947 driver_remove_file(&iaa_crypto_driver.drv, 1948 &driver_attr_verify_compress); 1949 idxd_driver_unregister(&iaa_crypto_driver); 1950 iaa_aecs_cleanup_fixed(); 1951 1952 pr_debug("cleaned up\n"); 1953 } 1954 1955 MODULE_IMPORT_NS("IDXD"); 1956 MODULE_LICENSE("GPL"); 1957 MODULE_ALIAS_IDXD_DEVICE(0); 1958 MODULE_AUTHOR("Intel Corporation"); 1959 MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver"); 1960 1961 module_init(iaa_crypto_init_module); 1962 module_exit(iaa_crypto_cleanup_module); 1963