// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/highmem.h>
#include <linux/sched/smt.h>
#include <crypto/internal/acompress.h>

#include "idxd.h"
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt)	"idxd: " IDXD_SUBDRIVER_NAME ": " fmt

#define IAA_ALG_PRIORITY	300

/* number of iaa instances probed */
static unsigned int nr_iaa;
static unsigned int nr_cpus;
static unsigned int nr_nodes;
static unsigned int nr_cpus_per_node;

/* Number of physical cpus sharing each iaa instance */
static unsigned int cpus_per_iaa;

/* Per-cpu lookup table for balanced wqs */
static struct wq_table_entry __percpu *wq_table;

static struct idxd_wq *wq_table_next_wq(int cpu)
{
	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);

	if (++entry->cur_wq >= entry->n_wqs)
		entry->cur_wq = 0;

	if (!entry->wqs[entry->cur_wq])
		return NULL;

	pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
		 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id,
		 entry->wqs[entry->cur_wq]->id, cpu);

	return entry->wqs[entry->cur_wq];
}

static void wq_table_add(int cpu, struct idxd_wq *wq)
{
	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);

	if (WARN_ON(entry->n_wqs == entry->max_wqs))
		return;

	entry->wqs[entry->n_wqs++] = wq;

	pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
		 entry->wqs[entry->n_wqs - 1]->idxd->id,
		 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
}

static void wq_table_free_entry(int cpu)
{
	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);

	kfree(entry->wqs);
	memset(entry, 0, sizeof(*entry));
}

static void wq_table_clear_entry(int cpu)
{
	struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);

	entry->n_wqs = 0;
	entry->cur_wq = 0;
	memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *));
}

LIST_HEAD(iaa_devices);
DEFINE_MUTEX(iaa_devices_lock);

/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
static bool iaa_crypto_enabled;
static bool iaa_crypto_registered;

/* Verify results of IAA compress or not */
static bool iaa_verify_compress = true;

static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
{
	return sprintf(buf, "%d\n", iaa_verify_compress);
}

static ssize_t verify_compress_store(struct device_driver *driver,
				     const char *buf, size_t count)
{
	int ret = -EBUSY;

	mutex_lock(&iaa_devices_lock);

	if (iaa_crypto_enabled)
		goto out;

	ret = kstrtobool(buf, &iaa_verify_compress);
	if (ret)
		goto out;

	ret = count;
out:
	mutex_unlock(&iaa_devices_lock);

	return ret;
}
static DRIVER_ATTR_RW(verify_compress);
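
/*
 * Illustrative usage (not part of the driver): the verify_compress
 * attribute above is typically exposed via the dsa bus driver sysfs
 * path, e.g.
 *
 *	echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress
 *
 * and, as verify_compress_store() enforces, can only be changed while
 * iaa_crypto is not yet enabled (i.e. before any IAA wqs are bound).
 */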

/*
 * The iaa crypto driver supports three 'sync' methods determining how
 * compressions and decompressions are performed:
 *
 * - sync:      the compression or decompression completes before
 *              returning.  This is the mode used by the async crypto
 *              interface when the sync mode is set to 'sync' and by
 *              the sync crypto interface regardless of setting.
 *
 * - async:     the compression or decompression is submitted and returns
 *              immediately.  Completion interrupts are not used so
 *              the caller is responsible for polling the descriptor
 *              for completion.  This mode is applicable to only the
 *              async crypto interface and is ignored for anything
 *              else.
 *
 * - async_irq: the compression or decompression is submitted and
 *              returns immediately.  Completion interrupts are
 *              enabled so the caller can wait for the completion and
 *              yield to other threads.  When the compression or
 *              decompression completes, the completion is signaled
 *              and the caller awakened.  This mode is applicable to
 *              only the async crypto interface and is ignored for
 *              anything else.
 *
 * These modes can be set using the iaa_crypto sync_mode driver
 * attribute.
 */

/* Use async mode */
static bool async_mode;
/* Use interrupts */
static bool use_irq;

/**
 * set_iaa_sync_mode - Set IAA sync mode
 * @name: The name of the sync mode
 *
 * Make the IAA sync mode named @name the current sync mode used by
 * compression/decompression.
 */

static int set_iaa_sync_mode(const char *name)
{
	int ret = 0;

	if (sysfs_streq(name, "sync")) {
		async_mode = false;
		use_irq = false;
	} else if (sysfs_streq(name, "async")) {
		async_mode = false;
		use_irq = false;
	} else if (sysfs_streq(name, "async_irq")) {
		async_mode = true;
		use_irq = true;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
{
	int ret = 0;

	if (!async_mode && !use_irq)
		ret = sprintf(buf, "%s\n", "sync");
	else if (async_mode && !use_irq)
		ret = sprintf(buf, "%s\n", "async");
	else if (async_mode && use_irq)
		ret = sprintf(buf, "%s\n", "async_irq");

	return ret;
}

static ssize_t sync_mode_store(struct device_driver *driver,
			       const char *buf, size_t count)
{
	int ret = -EBUSY;

	mutex_lock(&iaa_devices_lock);

	if (iaa_crypto_enabled)
		goto out;

	ret = set_iaa_sync_mode(buf);
	if (ret == 0)
		ret = count;
out:
	mutex_unlock(&iaa_devices_lock);

	return ret;
}
static DRIVER_ATTR_RW(sync_mode);
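
/*
 * Illustrative usage (not part of the driver): the sync_mode attribute
 * is typically set via the dsa bus driver sysfs path, e.g.
 *
 *	echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
 *
 * Like verify_compress, sync_mode can only be changed while iaa_crypto
 * is not yet enabled; sync_mode_store() returns -EBUSY otherwise.
 */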

static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];

static int find_empty_iaa_compression_mode(void)
{
	int i = -EINVAL;

	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
		if (iaa_compression_modes[i])
			continue;
		break;
	}

	return i;
}

static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
{
	struct iaa_compression_mode *mode;
	int i;

	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
		mode = iaa_compression_modes[i];
		if (!mode)
			continue;

		if (!strcmp(mode->name, name)) {
			*idx = i;
			return iaa_compression_modes[i];
		}
	}

	return NULL;
}

static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
{
	kfree(mode->name);
	kfree(mode->ll_table);
	kfree(mode->d_table);

	kfree(mode);
}

/*
 * IAA Compression modes are defined by an ll_table and a d_table.
 * These tables are typically generated and captured using statistics
 * collected from running actual compress/decompress workloads.
 *
 * A module or other kernel code can add and remove compression modes
 * with a given name using the exported @add_iaa_compression_mode()
 * and @remove_iaa_compression_mode() functions.
 *
 * When a new compression mode is added, the tables are saved in a
 * global compression mode list.  When IAA devices are added, a
 * per-IAA device dma mapping is created for each IAA device, for each
 * compression mode.  These are the tables used to do the actual
 * compression/decompression and are unmapped if/when the devices are
 * removed.  Currently, compression modes must be added before any
 * device is added, and removed after all devices have been removed.
 */
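
/*
 * Minimal sketch of how a caller might use the exported API described
 * above (the table contents and names here are hypothetical, not taken
 * from this driver):
 *
 *	static const u32 my_ll_table[] = { ... };
 *	static const u32 my_d_table[] = { ... };
 *
 *	ret = add_iaa_compression_mode("my-mode",
 *				       my_ll_table, sizeof(my_ll_table),
 *				       my_d_table, sizeof(my_d_table),
 *				       NULL, NULL);
 *
 * and later, after all IAA devices have been removed:
 *
 *	remove_iaa_compression_mode("my-mode");
 *
 * The init/free callbacks are optional, per the kernel-doc below.
 */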

/**
 * remove_iaa_compression_mode - Remove an IAA compression mode
 * @name: The name the compression mode will be known as
 *
 * Remove the IAA compression mode named @name.
 */
void remove_iaa_compression_mode(const char *name)
{
	struct iaa_compression_mode *mode;
	int idx;

	mutex_lock(&iaa_devices_lock);

	if (!list_empty(&iaa_devices))
		goto out;

	mode = find_iaa_compression_mode(name, &idx);
	if (mode) {
		free_iaa_compression_mode(mode);
		iaa_compression_modes[idx] = NULL;
	}
out:
	mutex_unlock(&iaa_devices_lock);
}
EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);

/**
 * add_iaa_compression_mode - Add an IAA compression mode
 * @name: The name the compression mode will be known as
 * @ll_table: The ll table
 * @ll_table_size: The ll table size in bytes
 * @d_table: The d table
 * @d_table_size: The d table size in bytes
 * @init: Optional callback function to init the compression mode data
 * @free: Optional callback function to free the compression mode data
 *
 * Add a new IAA compression mode named @name.
 *
 * Returns 0 if successful, errcode otherwise.
 */
int add_iaa_compression_mode(const char *name,
			     const u32 *ll_table,
			     int ll_table_size,
			     const u32 *d_table,
			     int d_table_size,
			     iaa_dev_comp_init_fn_t init,
			     iaa_dev_comp_free_fn_t free)
{
	struct iaa_compression_mode *mode;
	int idx, ret = -ENOMEM;

	mutex_lock(&iaa_devices_lock);

	if (!list_empty(&iaa_devices)) {
		ret = -EBUSY;
		goto out;
	}

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		goto out;

	mode->name = kstrdup(name, GFP_KERNEL);
	if (!mode->name)
		goto free;

	if (ll_table) {
		mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL);
		if (!mode->ll_table)
			goto free;
		mode->ll_table_size = ll_table_size;
	}

	if (d_table) {
		mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL);
		if (!mode->d_table)
			goto free;
		mode->d_table_size = d_table_size;
	}

	mode->init = init;
	mode->free = free;

	idx = find_empty_iaa_compression_mode();
	if (idx < 0)
		goto free;

	pr_debug("IAA compression mode %s added at idx %d\n",
		 mode->name, idx);

	iaa_compression_modes[idx] = mode;

	ret = 0;
out:
	mutex_unlock(&iaa_devices_lock);

	return ret;
free:
	free_iaa_compression_mode(mode);
	goto out;
}
EXPORT_SYMBOL_GPL(add_iaa_compression_mode);

static struct iaa_device_compression_mode *
get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
{
	return iaa_device->compression_modes[idx];
}

static void free_device_compression_mode(struct iaa_device *iaa_device,
					 struct iaa_device_compression_mode *device_mode)
{
	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
	struct device *dev = &iaa_device->idxd->pdev->dev;

	kfree(device_mode->name);

	if (device_mode->aecs_comp_table)
		dma_free_coherent(dev, size, device_mode->aecs_comp_table,
				  device_mode->aecs_comp_table_dma_addr);
	kfree(device_mode);
}

#define IDXD_OP_FLAG_AECS_RW_TGLS	0x400000
#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
#define IAX_AECS_COMPRESS_FLAG	(IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
			   IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
			   IDXD_OP_FLAG_AECS_RW_TGLS)

static int check_completion(struct device *dev,
			    struct iax_completion_record *comp,
			    bool compress,
			    bool only_once);

static int init_device_compression_mode(struct iaa_device *iaa_device,
					struct iaa_compression_mode *mode,
					int idx, struct idxd_wq *wq)
{
	size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
	struct device *dev = &iaa_device->idxd->pdev->dev;
	struct iaa_device_compression_mode *device_mode;
	int ret = -ENOMEM;

	device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL);
	if (!device_mode)
		return -ENOMEM;

	device_mode->name = kstrdup(mode->name, GFP_KERNEL);
	if (!device_mode->name)
		goto free;

	device_mode->aecs_comp_table = dma_alloc_coherent(dev, size,
							  &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL);
	if (!device_mode->aecs_comp_table)
		goto free;

	/* Add Huffman table to aecs */
	memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
	memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
	memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);

	if (mode->init) {
		ret = mode->init(device_mode);
		if (ret)
			goto free;
	}

	/* mode index should match iaa_compression_modes idx */
	iaa_device->compression_modes[idx] = device_mode;

	pr_debug("IAA %s compression mode initialized for iaa device %d\n",
		 mode->name, iaa_device->idxd->id);

	ret = 0;
out:
	return ret;
free:
	pr_debug("IAA %s compression mode initialization failed for iaa device %d\n",
		 mode->name, iaa_device->idxd->id);

	free_device_compression_mode(iaa_device, device_mode);
	goto out;
}

static int init_device_compression_modes(struct iaa_device *iaa_device,
					 struct idxd_wq *wq)
{
	struct iaa_compression_mode *mode;
	int i, ret = 0;

	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
		mode = iaa_compression_modes[i];
		if (!mode)
			continue;

		ret = init_device_compression_mode(iaa_device, mode, i, wq);
		if (ret)
			break;
	}

	return ret;
}

static void remove_device_compression_modes(struct iaa_device *iaa_device)
{
	struct iaa_device_compression_mode *device_mode;
	int i;

	for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
		device_mode = iaa_device->compression_modes[i];
		if (!device_mode)
			continue;

		if (iaa_compression_modes[i]->free)
			iaa_compression_modes[i]->free(device_mode);
		free_device_compression_mode(iaa_device, device_mode);
		iaa_device->compression_modes[i] = NULL;
	}
}

static struct iaa_device *iaa_device_alloc(void)
{
	struct iaa_device *iaa_device;

	iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL);
	if (!iaa_device)
		return NULL;

	INIT_LIST_HEAD(&iaa_device->wqs);

	return iaa_device;
}

static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
{
	struct iaa_wq *iaa_wq;

	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
		if (iaa_wq->wq == wq)
			return true;
	}

	return false;
}

static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
{
	struct iaa_device *iaa_device;

	iaa_device = iaa_device_alloc();
	if (!iaa_device)
		return NULL;

	iaa_device->idxd = idxd;

	list_add_tail(&iaa_device->list, &iaa_devices);

	nr_iaa++;

	return iaa_device;
}

static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
{
	int ret = 0;

	ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
	if (ret)
		return ret;

	return ret;
}

static void del_iaa_device(struct iaa_device *iaa_device)
{
	list_del(&iaa_device->list);

	nr_iaa--;
}

static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
		      struct iaa_wq **new_wq)
{
	struct idxd_device *idxd = iaa_device->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iaa_wq *iaa_wq;

	iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
	if (!iaa_wq)
		return -ENOMEM;

	iaa_wq->wq = wq;
	iaa_wq->iaa_device = iaa_device;
	idxd_wq_set_private(wq, iaa_wq);

	list_add_tail(&iaa_wq->list, &iaa_device->wqs);

	iaa_device->n_wq++;

	if (new_wq)
		*new_wq = iaa_wq;

	dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n",
		wq->id, iaa_device->idxd->id, iaa_device->n_wq);

	return 0;
}

static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
{
	struct idxd_device *idxd = iaa_device->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;
	struct iaa_wq *iaa_wq;

	list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
		if (iaa_wq->wq == wq) {
			list_del(&iaa_wq->list);
			iaa_device->n_wq--;

			dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n",
				wq->id, iaa_device->idxd->id,
				iaa_device->n_wq, nr_iaa);

			if (iaa_device->n_wq == 0)
				del_iaa_device(iaa_device);
			break;
		}
	}
}

static void clear_wq_table(void)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		wq_table_clear_entry(cpu);

	pr_debug("cleared wq table\n");
}

static void free_iaa_device(struct iaa_device *iaa_device)
{
	if (!iaa_device)
		return;

	remove_device_compression_modes(iaa_device);
	kfree(iaa_device);
}

static void __free_iaa_wq(struct iaa_wq *iaa_wq)
{
	struct iaa_device *iaa_device;

	if (!iaa_wq)
		return;

	iaa_device = iaa_wq->iaa_device;
	if (iaa_device->n_wq == 0)
		free_iaa_device(iaa_wq->iaa_device);
}

static void free_iaa_wq(struct iaa_wq *iaa_wq)
{
	struct idxd_wq *wq;

	__free_iaa_wq(iaa_wq);

	wq = iaa_wq->wq;

	kfree(iaa_wq);
	idxd_wq_set_private(wq, NULL);
}

static int iaa_wq_get(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct iaa_wq *iaa_wq;
	int ret = 0;

	spin_lock(&idxd->dev_lock);
	iaa_wq = idxd_wq_get_private(wq);
	if (iaa_wq && !iaa_wq->remove) {
		iaa_wq->ref++;
		idxd_wq_get(wq);
	} else {
		ret = -ENODEV;
	}
	spin_unlock(&idxd->dev_lock);

	return ret;
}

static int iaa_wq_put(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct iaa_wq *iaa_wq;
	bool free = false;
	int ret = 0;

	spin_lock(&idxd->dev_lock);
	iaa_wq = idxd_wq_get_private(wq);
	if (iaa_wq) {
		iaa_wq->ref--;
		if (iaa_wq->ref == 0 && iaa_wq->remove) {
			idxd_wq_set_private(wq, NULL);
			free = true;
		}
		idxd_wq_put(wq);
	} else {
		ret = -ENODEV;
	}
	spin_unlock(&idxd->dev_lock);
	if (free) {
		__free_iaa_wq(iaa_wq);
		kfree(iaa_wq);
	}

	return ret;
}
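
/*
 * Note (summary of the refcounting above, added for clarity): users take
 * a reference with iaa_wq_get() before submitting a descriptor to a wq
 * and drop it with iaa_wq_put() when the operation is done.  If the wq
 * is being removed (iaa_wq->remove set by iaa_crypto_remove()), the
 * iaa_wq is freed here once the last reference is dropped.
 */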

static void free_wq_table(void)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		wq_table_free_entry(cpu);

	free_percpu(wq_table);

	pr_debug("freed wq table\n");
}

static int alloc_wq_table(int max_wqs)
{
	struct wq_table_entry *entry;
	int cpu;

	wq_table = alloc_percpu(struct wq_table_entry);
	if (!wq_table)
		return -ENOMEM;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		entry = per_cpu_ptr(wq_table, cpu);
		entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
		if (!entry->wqs) {
			free_wq_table();
			return -ENOMEM;
		}

		entry->max_wqs = max_wqs;
	}

	pr_debug("initialized wq table\n");

	return 0;
}

static int save_iaa_wq(struct idxd_wq *wq)
{
	struct iaa_device *iaa_device, *found = NULL;
	struct idxd_device *idxd;
	struct pci_dev *pdev;
	struct device *dev;
	int ret = 0;

	list_for_each_entry(iaa_device, &iaa_devices, list) {
		if (iaa_device->idxd == wq->idxd) {
			idxd = iaa_device->idxd;
			pdev = idxd->pdev;
			dev = &pdev->dev;
			/*
			 * Check to see that we don't already have this wq.
			 * Shouldn't happen but we don't control probing.
			 */
			if (iaa_has_wq(iaa_device, wq)) {
				dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
					iaa_device);
				goto out;
			}

			found = iaa_device;

			ret = add_iaa_wq(iaa_device, wq, NULL);
			if (ret)
				goto out;

			break;
		}
	}

	if (!found) {
		struct iaa_device *new_device;
		struct iaa_wq *new_wq;

		new_device = add_iaa_device(wq->idxd);
		if (!new_device) {
			ret = -ENOMEM;
			goto out;
		}

		ret = add_iaa_wq(new_device, wq, &new_wq);
		if (ret) {
			del_iaa_device(new_device);
			free_iaa_device(new_device);
			goto out;
		}

		ret = init_iaa_device(new_device, new_wq);
		if (ret) {
			del_iaa_wq(new_device, new_wq->wq);
			del_iaa_device(new_device);
			free_iaa_wq(new_wq);
			goto out;
		}
	}

	if (WARN_ON(nr_iaa == 0))
		return -EINVAL;

	cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
	if (!cpus_per_iaa)
		cpus_per_iaa = 1;
out:
	return 0;
}

static void remove_iaa_wq(struct idxd_wq *wq)
{
	struct iaa_device *iaa_device;

	list_for_each_entry(iaa_device, &iaa_devices, list) {
		if (iaa_has_wq(iaa_device, wq)) {
			del_iaa_wq(iaa_device, wq);
			break;
		}
	}

	if (nr_iaa) {
		cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
		if (!cpus_per_iaa)
			cpus_per_iaa = 1;
	} else
		cpus_per_iaa = 1;
}

static int wq_table_add_wqs(int iaa, int cpu)
{
	struct iaa_device *iaa_device, *found_device = NULL;
	int ret = 0, cur_iaa = 0, n_wqs_added = 0;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;

	list_for_each_entry(iaa_device, &iaa_devices, list) {
		idxd = iaa_device->idxd;
		pdev = idxd->pdev;
		dev = &pdev->dev;

		if (cur_iaa != iaa) {
			cur_iaa++;
			continue;
		}

		found_device = iaa_device;
		dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
			found_device->idxd->id, cur_iaa);
		break;
	}

	if (!found_device) {
		found_device = list_first_entry_or_null(&iaa_devices,
							struct iaa_device, list);
		if (!found_device) {
			pr_debug("couldn't find any iaa devices with wqs!\n");
			ret = -EINVAL;
			goto out;
		}
		cur_iaa = 0;

		idxd = found_device->idxd;
		pdev = idxd->pdev;
		dev = &pdev->dev;
		dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
			found_device->idxd->id, cur_iaa);
	}

	list_for_each_entry(iaa_wq, &found_device->wqs, list) {
		wq_table_add(cpu, iaa_wq->wq);
		pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
			 cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
		n_wqs_added++;
	}

	if (!n_wqs_added) {
		pr_debug("couldn't find any iaa wqs!\n");
		ret = -EINVAL;
		goto out;
	}
out:
	return ret;
}

/*
 * Rebalance the wq table so that given a cpu, it's easy to find the
 * closest IAA instance.  The idea is to try to choose the most
 * appropriate IAA instance for a caller and spread available
 * workqueues around to clients.
 */
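
/*
 * Illustrative example (numbers are hypothetical, not from the code):
 * with nr_nodes = 2, nr_cpus_per_node = 8 and nr_iaa = 4, cpus_per_iaa
 * is (2 * 8) / 4 = 4, so the first 4 cpus of node 0 get the wqs of IAA
 * instance 0, the next 4 those of instance 1, and likewise instances 2
 * and 3 serve the cpus of node 1.
 */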
static void rebalance_wq_table(void)
{
	const struct cpumask *node_cpus;
	int node, cpu, iaa = -1;

	if (nr_iaa == 0)
		return;

	pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n",
		 nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa);

	clear_wq_table();

	if (nr_iaa == 1) {
		for (cpu = 0; cpu < nr_cpus; cpu++) {
			if (WARN_ON(wq_table_add_wqs(0, cpu))) {
				pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
				return;
			}
		}

		return;
	}

	for_each_node_with_cpus(node) {
		node_cpus = cpumask_of_node(node);

		for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
			int node_cpu = cpumask_nth(cpu, node_cpus);

			if (WARN_ON(node_cpu >= nr_cpu_ids)) {
				pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
				return;
			}

			if ((cpu % cpus_per_iaa) == 0)
				iaa++;

			if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
				pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
				return;
			}
		}
	}
}

static inline int check_completion(struct device *dev,
				   struct iax_completion_record *comp,
				   bool compress,
				   bool only_once)
{
	char *op_str = compress ? "compress" : "decompress";
	int status_checks = 0;
	int ret = 0;

	while (!comp->status) {
		if (only_once)
			return -EAGAIN;
		cpu_relax();
		if (status_checks++ >= IAA_COMPLETION_TIMEOUT) {
			/* Something is wrong with the hw, disable it. */
			dev_err(dev, "%s completion timed out - "
				"assuming broken hw, iaa_crypto now DISABLED\n",
				op_str);
			iaa_crypto_enabled = false;
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	if (comp->status != IAX_COMP_SUCCESS) {
		if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) {
			ret = -ETIMEDOUT;
			dev_dbg(dev, "%s timed out, size=0x%x\n",
				op_str, comp->output_size);
			update_completion_timeout_errs();
			goto out;
		}

		if (comp->status == IAA_ANALYTICS_ERROR &&
		    comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) {
			ret = -E2BIG;
			dev_dbg(dev, "compressed > uncompressed size,"
				" not compressing, size=0x%x\n",
				comp->output_size);
			update_completion_comp_buf_overflow_errs();
			goto out;
		}

		if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) {
			ret = -EOVERFLOW;
			goto out;
		}

		ret = -EINVAL;
		dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n",
			op_str, comp->status, comp->error_code, comp->output_size);
		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0);
		update_completion_einval_errs();

		goto out;
	}
out:
	return ret;
}

static int deflate_generic_decompress(struct acomp_req *req)
{
	ACOMP_REQUEST_ON_STACK(fbreq, crypto_acomp_reqtfm(req));
	int ret;

	acomp_request_set_callback(fbreq, 0, NULL, NULL);
	acomp_request_set_params(fbreq, req->src, req->dst, req->slen,
				 req->dlen);
	ret = crypto_acomp_decompress(fbreq);
	req->dlen = fbreq->dlen;

	update_total_sw_decomp_calls();

	return ret;
}

static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
				struct acomp_req *req,
				dma_addr_t *src_addr, dma_addr_t *dst_addr);

static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
			       struct idxd_wq *wq,
			       dma_addr_t src_addr, unsigned int slen,
			       dma_addr_t dst_addr, unsigned int *dlen);

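/*
 * Descriptive note (added): iaa_desc_complete() is the idxd sub-driver
 * completion callback (wired up via iaa_crypto_driver.desc_complete
 * below).  It runs when an interrupt-mode ('async_irq') descriptor
 * finishes: it checks the completion record, falls back to the software
 * deflate-generic decompressor on analytics errors, optionally verifies
 * compressed output, and finally completes the acomp request.
 */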
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
			      enum idxd_complete_type comp_type,
			      bool free_desc, void *__ctx,
			      u32 *status)
{
	struct iaa_device_compression_mode *active_compression_mode;
	struct iaa_compression_ctx *compression_ctx;
	struct crypto_ctx *ctx = __ctx;
	struct iaa_device *iaa_device;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;
	int ret, err = 0;

	compression_ctx = crypto_tfm_ctx(ctx->tfm);

	iaa_wq = idxd_wq_get_private(idxd_desc->wq);
	iaa_device = iaa_wq->iaa_device;
	idxd = iaa_device->idxd;
	pdev = idxd->pdev;
	dev = &pdev->dev;

	active_compression_mode = get_iaa_device_compression_mode(iaa_device,
								  compression_ctx->mode);
	dev_dbg(dev, "%s: compression mode %s,"
		" ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
		active_compression_mode->name,
		ctx->src_addr, ctx->dst_addr);

	ret = check_completion(dev, idxd_desc->iax_completion,
			       ctx->compress, false);
	if (ret) {
		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
		if (!ctx->compress &&
		    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
			pr_warn("%s: falling back to deflate-generic decompress, "
				"analytics error code %x\n", __func__,
				idxd_desc->iax_completion->error_code);
			ret = deflate_generic_decompress(ctx->req);
			if (ret) {
				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
					__func__, ret);
				err = -EIO;
				goto err;
			}
		} else {
			err = -EIO;
			goto err;
		}
	} else {
		ctx->req->dlen = idxd_desc->iax_completion->output_size;
	}

	/* Update stats */
	if (ctx->compress) {
		update_total_comp_bytes_out(ctx->req->dlen);
		update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
	} else {
		update_total_decomp_bytes_in(ctx->req->slen);
		update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen);
	}

	if (ctx->compress && compression_ctx->verify_compress) {
		u32 *compression_crc = acomp_request_ctx(ctx->req);
		dma_addr_t src_addr, dst_addr;

		*compression_crc = idxd_desc->iax_completion->crc;

		ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
		if (ret) {
			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
			err = -EIO;
			goto out;
		}

		ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
					  ctx->req->slen, dst_addr, &ctx->req->dlen);
		if (ret) {
			dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
			err = -EIO;
		}

		dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
		dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);

		goto out;
	}
err:
	dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
	dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
out:
	if (ret != 0)
		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);

	if (ctx->req->base.complete)
		acomp_request_complete(ctx->req, err);

	if (free_desc)
		idxd_free_desc(idxd_desc->wq, idxd_desc);
	iaa_wq_put(idxd_desc->wq);
}

static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
			struct idxd_wq *wq,
			dma_addr_t src_addr, unsigned int slen,
			dma_addr_t dst_addr, unsigned int *dlen)
{
	struct iaa_device_compression_mode *active_compression_mode;
	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *compression_crc = acomp_request_ctx(req);
	struct iaa_device *iaa_device;
	struct idxd_desc *idxd_desc;
	struct iax_hw_desc *desc;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;
	int ret = 0;

	iaa_wq = idxd_wq_get_private(wq);
	iaa_device = iaa_wq->iaa_device;
	idxd = iaa_device->idxd;
	pdev = idxd->pdev;
	dev = &pdev->dev;

	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);

	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(idxd_desc)) {
		dev_dbg(dev, "idxd descriptor allocation failed\n");
		dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc));
		return PTR_ERR(idxd_desc);
	}
	desc = idxd_desc->iax_hw;

	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
		IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC;
	desc->opcode = IAX_OPCODE_COMPRESS;
	desc->compr_flags = IAA_COMP_FLAGS;
	desc->priv = 0;

	desc->src1_addr = (u64)src_addr;
	desc->src1_size = slen;
	desc->dst_addr = (u64)dst_addr;
	desc->max_dst_size = *dlen;
	desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr;
	desc->src2_size = sizeof(struct aecs_comp_table_record);
	desc->completion_addr = idxd_desc->compl_dma;

	if (ctx->use_irq) {
		desc->flags |= IDXD_OP_FLAG_RCI;

		idxd_desc->crypto.req = req;
		idxd_desc->crypto.tfm = tfm;
		idxd_desc->crypto.src_addr = src_addr;
		idxd_desc->crypto.dst_addr = dst_addr;
		idxd_desc->crypto.compress = true;

		dev_dbg(dev, "%s use_async_irq: compression mode %s,"
			" src_addr %llx, dst_addr %llx\n", __func__,
			active_compression_mode->name,
			src_addr, dst_addr);
	}

	dev_dbg(dev, "%s: compression mode %s,"
		" desc->src1_addr %llx, desc->src1_size %d,"
		" desc->dst_addr %llx, desc->max_dst_size %d,"
		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
		active_compression_mode->name,
		desc->src1_addr, desc->src1_size, desc->dst_addr,
		desc->max_dst_size, desc->src2_addr, desc->src2_size);

	ret = idxd_submit_desc(wq, idxd_desc);
	if (ret) {
		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
		goto err;
	}

	/* Update stats */
	update_total_comp_calls();
	update_wq_comp_calls(wq);

	if (ctx->async_mode) {
		ret = -EINPROGRESS;
		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
		goto out;
	}

	ret = check_completion(dev, idxd_desc->iax_completion, true, false);
	if (ret) {
		dev_dbg(dev, "check_completion failed ret=%d\n", ret);
		goto err;
	}

	*dlen = idxd_desc->iax_completion->output_size;

	/* Update stats */
	update_total_comp_bytes_out(*dlen);
	update_wq_comp_bytes(wq, *dlen);

	*compression_crc = idxd_desc->iax_completion->crc;

	if (!ctx->async_mode)
		idxd_free_desc(wq, idxd_desc);
out:
	return ret;
err:
	idxd_free_desc(wq, idxd_desc);
	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);

	goto out;
}

static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
				struct acomp_req *req,
				dma_addr_t *src_addr, dma_addr_t *dst_addr)
{
	int ret = 0;
	int nr_sgs;

	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);

	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "verify: couldn't map src sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		goto out;
	}
	*src_addr = sg_dma_address(req->src);
	dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
		" req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs,
		req->src, req->slen, sg_dma_len(req->src));

	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
		goto out;
	}
	*dst_addr = sg_dma_address(req->dst);
	dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
		" req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs,
		req->dst, req->dlen, sg_dma_len(req->dst));
out:
	return ret;
}

static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
			       struct idxd_wq *wq,
			       dma_addr_t src_addr, unsigned int slen,
			       dma_addr_t dst_addr, unsigned int *dlen)
{
	struct iaa_device_compression_mode *active_compression_mode;
	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *compression_crc = acomp_request_ctx(req);
	struct iaa_device *iaa_device;
	struct idxd_desc *idxd_desc;
	struct iax_hw_desc *desc;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;
	int ret = 0;

	iaa_wq = idxd_wq_get_private(wq);
	iaa_device = iaa_wq->iaa_device;
	idxd = iaa_device->idxd;
	pdev = idxd->pdev;
	dev = &pdev->dev;

	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);

	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(idxd_desc)) {
		dev_dbg(dev, "idxd descriptor allocation failed\n");
		dev_dbg(dev, "iaa compress failed: ret=%ld\n",
			PTR_ERR(idxd_desc));
		return PTR_ERR(idxd_desc);
	}
	desc = idxd_desc->iax_hw;

	/* Verify (optional) - decompress and check crc, suppress dest write */

	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
	desc->opcode = IAX_OPCODE_DECOMPRESS;
	desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT;
	desc->priv = 0;

	desc->src1_addr = (u64)dst_addr;
	desc->src1_size = *dlen;
	desc->dst_addr = (u64)src_addr;
	desc->max_dst_size = slen;
	desc->completion_addr = idxd_desc->compl_dma;

	dev_dbg(dev, "(verify) compression mode %s,"
		" desc->src1_addr %llx, desc->src1_size %d,"
		" desc->dst_addr %llx, desc->max_dst_size %d,"
		" desc->src2_addr %llx, desc->src2_size %d\n",
		active_compression_mode->name,
		desc->src1_addr, desc->src1_size, desc->dst_addr,
		desc->max_dst_size, desc->src2_addr, desc->src2_size);

	ret = idxd_submit_desc(wq, idxd_desc);
	if (ret) {
		dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret);
		goto err;
	}

	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
	if (ret) {
		dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret);
		goto err;
	}

	if (*compression_crc != idxd_desc->iax_completion->crc) {
		ret = -EINVAL;
		dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
			" comp=0x%x, decomp=0x%x\n", *compression_crc,
			idxd_desc->iax_completion->crc);
		print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
			       8, 1, idxd_desc->iax_completion, 64, 0);
		goto err;
	}

	idxd_free_desc(wq, idxd_desc);
out:
	return ret;
err:
	idxd_free_desc(wq, idxd_desc);
	dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);

	goto out;
}

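/*
 * Descriptive note (added): iaa_decompress() builds and submits an
 * IAX_OPCODE_DECOMPRESS descriptor.  In 'async_irq' mode it returns
 * -EINPROGRESS and lets iaa_desc_complete() finish the request;
 * otherwise it polls the completion record and, on an analytics error,
 * falls back to the software deflate-generic decompressor.
 */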
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
			  struct idxd_wq *wq,
			  dma_addr_t src_addr, unsigned int slen,
			  dma_addr_t dst_addr, unsigned int *dlen,
			  bool disable_async)
{
	struct iaa_device_compression_mode *active_compression_mode;
	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct iaa_device *iaa_device;
	struct idxd_desc *idxd_desc;
	struct iax_hw_desc *desc;
	struct idxd_device *idxd;
	struct iaa_wq *iaa_wq;
	struct pci_dev *pdev;
	struct device *dev;
	int ret = 0;

	iaa_wq = idxd_wq_get_private(wq);
	iaa_device = iaa_wq->iaa_device;
	idxd = iaa_device->idxd;
	pdev = idxd->pdev;
	dev = &pdev->dev;

	active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);

	idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(idxd_desc)) {
		dev_dbg(dev, "idxd descriptor allocation failed\n");
		dev_dbg(dev, "iaa decompress failed: ret=%ld\n",
			PTR_ERR(idxd_desc));
		return PTR_ERR(idxd_desc);
	}
	desc = idxd_desc->iax_hw;

	desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
	desc->opcode = IAX_OPCODE_DECOMPRESS;
	desc->max_dst_size = PAGE_SIZE;
	desc->decompr_flags = IAA_DECOMP_FLAGS;
	desc->priv = 0;

	desc->src1_addr = (u64)src_addr;
	desc->dst_addr = (u64)dst_addr;
	desc->max_dst_size = *dlen;
	desc->src1_size = slen;
	desc->completion_addr = idxd_desc->compl_dma;

	if (ctx->use_irq && !disable_async) {
		desc->flags |= IDXD_OP_FLAG_RCI;

		idxd_desc->crypto.req = req;
		idxd_desc->crypto.tfm = tfm;
		idxd_desc->crypto.src_addr = src_addr;
		idxd_desc->crypto.dst_addr = dst_addr;
		idxd_desc->crypto.compress = false;

		dev_dbg(dev, "%s: use_async_irq compression mode %s,"
			" src_addr %llx, dst_addr %llx\n", __func__,
			active_compression_mode->name,
			src_addr, dst_addr);
	}

	dev_dbg(dev, "%s: decompression mode %s,"
		" desc->src1_addr %llx, desc->src1_size %d,"
		" desc->dst_addr %llx, desc->max_dst_size %d,"
		" desc->src2_addr %llx, desc->src2_size %d\n", __func__,
		active_compression_mode->name,
		desc->src1_addr, desc->src1_size, desc->dst_addr,
		desc->max_dst_size, desc->src2_addr, desc->src2_size);

	ret = idxd_submit_desc(wq, idxd_desc);
	if (ret) {
		dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
		goto err;
	}

	/* Update stats */
	update_total_decomp_calls();
	update_wq_decomp_calls(wq);

	if (ctx->async_mode && !disable_async) {
		ret = -EINPROGRESS;
		dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
		goto out;
	}

	ret = check_completion(dev, idxd_desc->iax_completion, false, false);
	if (ret) {
		dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
		if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
			pr_warn("%s: falling back to deflate-generic decompress, "
				"analytics error code %x\n", __func__,
				idxd_desc->iax_completion->error_code);
			ret = deflate_generic_decompress(req);
			if (ret) {
				dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
					__func__, ret);
				goto err;
			}
		} else {
			goto err;
		}
	} else {
		req->dlen = idxd_desc->iax_completion->output_size;
	}

	*dlen = req->dlen;

	if (!ctx->async_mode || disable_async)
		idxd_free_desc(wq, idxd_desc);

	/* Update stats */
	update_total_decomp_bytes_in(slen);
	update_wq_decomp_bytes(wq, slen);
out:
	return ret;
err:
	idxd_free_desc(wq, idxd_desc);
	dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret);

	goto out;
}

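/*
 * Descriptive note (added): iaa_comp_acompress() and
 * iaa_comp_adecompress() below are the acomp .compress/.decompress
 * entry points.  Each picks the next wq for the current cpu from the
 * per-cpu wq table, takes a wq reference, DMA-maps the src/dst
 * scatterlists (single-entry only), and hands off to iaa_compress() or
 * iaa_decompress().  In 'async_irq' mode they return -EINPROGRESS and
 * the mappings and wq reference are released in iaa_desc_complete().
 */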
static int iaa_comp_acompress(struct acomp_req *req)
{
	struct iaa_compression_ctx *compression_ctx;
	struct crypto_tfm *tfm = req->base.tfm;
	dma_addr_t src_addr, dst_addr;
	int nr_sgs, cpu, ret = 0;
	struct iaa_wq *iaa_wq;
	struct idxd_wq *wq;
	struct device *dev;

	compression_ctx = crypto_tfm_ctx(tfm);

	if (!iaa_crypto_enabled) {
		pr_debug("iaa_crypto disabled, not compressing\n");
		return -ENODEV;
	}

	if (!req->src || !req->slen) {
		pr_debug("invalid src, not compressing\n");
		return -EINVAL;
	}

	cpu = get_cpu();
	wq = wq_table_next_wq(cpu);
	put_cpu();
	if (!wq) {
		pr_debug("no wq configured for cpu=%d\n", cpu);
		return -ENODEV;
	}

	ret = iaa_wq_get(wq);
	if (ret) {
		pr_debug("no wq available for cpu=%d\n", cpu);
		return -ENODEV;
	}

	iaa_wq = idxd_wq_get_private(wq);

	dev = &wq->idxd->pdev->dev;

	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		goto out;
	}
	src_addr = sg_dma_address(req->src);
	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
		req->src, req->slen, sg_dma_len(req->src));

	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		goto err_map_dst;
	}
	dst_addr = sg_dma_address(req->dst);
	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
		req->dst, req->dlen, sg_dma_len(req->dst));

	ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
			   &req->dlen);
	if (ret == -EINPROGRESS)
		return ret;

	if (!ret && compression_ctx->verify_compress) {
		ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr);
		if (ret) {
			dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
			goto out;
		}

		ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
					  dst_addr, &req->dlen);
		if (ret)
			dev_dbg(dev, "asynchronous compress verification failed ret=%d\n",
				ret);

		dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);

		goto out;
	}

	if (ret)
		dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);

	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
err_map_dst:
	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
out:
	iaa_wq_put(wq);

	return ret;
}

static int iaa_comp_adecompress(struct acomp_req *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	dma_addr_t src_addr, dst_addr;
	int nr_sgs, cpu, ret = 0;
	struct iaa_wq *iaa_wq;
	struct device *dev;
	struct idxd_wq *wq;

	if (!iaa_crypto_enabled) {
		pr_debug("iaa_crypto disabled, not decompressing\n");
		return -ENODEV;
	}

	if (!req->src || !req->slen) {
		pr_debug("invalid src, not decompressing\n");
		return -EINVAL;
	}

	cpu = get_cpu();
	wq = wq_table_next_wq(cpu);
	put_cpu();
	if (!wq) {
		pr_debug("no wq configured for cpu=%d\n", cpu);
		return -ENODEV;
	}

	ret = iaa_wq_get(wq);
	if (ret) {
		pr_debug("no wq available for cpu=%d\n", cpu);
		return -ENODEV;
	}

	iaa_wq = idxd_wq_get_private(wq);

	dev = &wq->idxd->pdev->dev;

	nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "couldn't map src sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		goto out;
	}
	src_addr = sg_dma_address(req->src);
	dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
		" req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
		req->src, req->slen, sg_dma_len(req->src));

	nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > 1) {
		dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
			" wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
			iaa_wq->wq->id, ret);
		ret = -EIO;
		goto err_map_dst;
	}
	dst_addr = sg_dma_address(req->dst);
	dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
		" req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
		req->dst, req->dlen, sg_dma_len(req->dst));

	ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
			     dst_addr, &req->dlen, false);
	if (ret == -EINPROGRESS)
		return ret;

	if (ret != 0)
		dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);

	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
err_map_dst:
	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
out:
	iaa_wq_put(wq);

	return ret;
}

static void compression_ctx_init(struct iaa_compression_ctx *ctx)
{
	ctx->verify_compress = iaa_verify_compress;
	ctx->async_mode = async_mode;
	ctx->use_irq = use_irq;
}

static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	compression_ctx_init(ctx);

	ctx->mode = IAA_MODE_FIXED;

	return 0;
}

static struct acomp_alg iaa_acomp_fixed_deflate = {
	.init			= iaa_comp_init_fixed,
	.compress		= iaa_comp_acompress,
	.decompress		= iaa_comp_adecompress,
	.reqsize		= sizeof(u32),
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-iaa",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_ctxsize		= sizeof(struct iaa_compression_ctx),
		.cra_module		= THIS_MODULE,
		.cra_priority		= IAA_ALG_PRIORITY,
	}
};

static int iaa_register_compression_device(void)
{
	int ret;

	ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
	if (ret) {
		pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
		goto out;
	}

	iaa_crypto_registered = true;
out:
	return ret;
}

static int iaa_unregister_compression_device(void)
{
	if (iaa_crypto_registered)
		crypto_unregister_acomp(&iaa_acomp_fixed_deflate);

	return 0;
}

static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	struct idxd_driver_data *data = idxd->data;
	struct device *dev = &idxd_dev->conf_dev;
	bool first_wq = false;
	int ret = 0;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	if (data->type != IDXD_TYPE_IAX)
		return -ENODEV;

	mutex_lock(&wq->wq_lock);

	if (idxd_wq_get_private(wq)) {
		mutex_unlock(&wq->wq_lock);
		return -EBUSY;
	}

	if (!idxd_wq_driver_name_match(wq, dev)) {
		dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n",
			idxd->id, wq->id, wq->driver_name, dev->driver->name);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
		ret = -ENODEV;
		goto err;
	}

	wq->type = IDXD_WQT_KERNEL;

	ret = idxd_drv_enable_wq(wq);
	if (ret < 0) {
		dev_dbg(dev, "enable wq %d.%d failed: %d\n",
			idxd->id, wq->id, ret);
		ret = -ENXIO;
		goto err;
	}

	mutex_lock(&iaa_devices_lock);

	if (list_empty(&iaa_devices)) {
		ret = alloc_wq_table(wq->idxd->max_wqs);
		if (ret)
			goto err_alloc;
		first_wq = true;
	}

	ret = save_iaa_wq(wq);
	if (ret)
		goto err_save;

	rebalance_wq_table();

	if (first_wq) {
		iaa_crypto_enabled = true;
		ret = iaa_register_compression_device();
		if (ret != 0) {
			iaa_crypto_enabled = false;
			dev_dbg(dev, "IAA compression device registration failed\n");
			goto err_register;
		}
		try_module_get(THIS_MODULE);

		pr_info("iaa_crypto now ENABLED\n");
	}

	mutex_unlock(&iaa_devices_lock);
out:
	mutex_unlock(&wq->wq_lock);

	return ret;

err_register:
	remove_iaa_wq(wq);
	free_iaa_wq(idxd_wq_get_private(wq));
err_save:
	if (first_wq)
		free_wq_table();
err_alloc:
	mutex_unlock(&iaa_devices_lock);
	idxd_drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;

	goto out;
}

static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	struct iaa_wq *iaa_wq;
	bool free = false;

	idxd_wq_quiesce(wq);

	mutex_lock(&wq->wq_lock);
	mutex_lock(&iaa_devices_lock);

	remove_iaa_wq(wq);

	spin_lock(&idxd->dev_lock);
	iaa_wq = idxd_wq_get_private(wq);
	if (!iaa_wq) {
		spin_unlock(&idxd->dev_lock);
		pr_err("%s: no iaa_wq available to remove\n", __func__);
		goto out;
	}

	if (iaa_wq->ref) {
		iaa_wq->remove = true;
	} else {
		wq = iaa_wq->wq;
		idxd_wq_set_private(wq, NULL);
		free = true;
	}
	spin_unlock(&idxd->dev_lock);
	if (free) {
		__free_iaa_wq(iaa_wq);
		kfree(iaa_wq);
	}

	idxd_drv_disable_wq(wq);
	rebalance_wq_table();

	if (nr_iaa == 0) {
		iaa_crypto_enabled = false;
		free_wq_table();
		module_put(THIS_MODULE);

		pr_info("iaa_crypto now DISABLED\n");
	}
out:
	mutex_unlock(&iaa_devices_lock);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

static struct idxd_device_driver iaa_crypto_driver = {
	.probe = iaa_crypto_probe,
	.remove = iaa_crypto_remove,
	.name = IDXD_SUBDRIVER_NAME,
	.type = dev_types,
	.desc_complete = iaa_desc_complete,
};

static int __init iaa_crypto_init_module(void)
{
	int ret = 0;
	int node;

	nr_cpus = num_possible_cpus();
	for_each_node_with_cpus(node)
		nr_nodes++;
	if (!nr_nodes) {
		pr_err("IAA couldn't find any nodes with cpus\n");
		return -ENODEV;
	}
	nr_cpus_per_node = nr_cpus / nr_nodes;

	ret = iaa_aecs_init_fixed();
	if (ret < 0) {
		pr_debug("IAA fixed compression mode init failed\n");
		goto err_aecs_init;
	}

	ret = idxd_driver_register(&iaa_crypto_driver);
	if (ret) {
		pr_debug("IAA wq sub-driver registration failed\n");
		goto err_driver_reg;
	}

	ret = driver_create_file(&iaa_crypto_driver.drv,
				 &driver_attr_verify_compress);
	if (ret) {
		pr_debug("IAA verify_compress attr creation failed\n");
		goto err_verify_attr_create;
	}

	ret = driver_create_file(&iaa_crypto_driver.drv,
				 &driver_attr_sync_mode);
	if (ret) {
		pr_debug("IAA sync mode attr creation failed\n");
		goto err_sync_attr_create;
	}

	if (iaa_crypto_debugfs_init())
		pr_warn("debugfs init failed, stats not available\n");

	pr_debug("initialized\n");
out:
	return ret;

err_sync_attr_create:
	driver_remove_file(&iaa_crypto_driver.drv,
			   &driver_attr_verify_compress);
err_verify_attr_create:
	idxd_driver_unregister(&iaa_crypto_driver);
err_driver_reg:
	iaa_aecs_cleanup_fixed();
err_aecs_init:

	goto out;
}

static void __exit iaa_crypto_cleanup_module(void)
{
	if (iaa_unregister_compression_device())
		pr_debug("IAA compression device unregister failed\n");

	iaa_crypto_debugfs_cleanup();
	driver_remove_file(&iaa_crypto_driver.drv,
			   &driver_attr_sync_mode);
	driver_remove_file(&iaa_crypto_driver.drv,
			   &driver_attr_verify_compress);
	idxd_driver_unregister(&iaa_crypto_driver);
	iaa_aecs_cleanup_fixed();

	pr_debug("cleaned up\n");
}

MODULE_IMPORT_NS("IDXD");
MODULE_LICENSE("GPL");
MODULE_ALIAS_IDXD_DEVICE(0);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver");

module_init(iaa_crypto_init_module);
module_exit(iaa_crypto_cleanup_module);