Matching lines from the Intel IAA crypto driver (drivers/crypto/intel/iaa/iaa_crypto_main.c); each entry shows its source line number, the excerpted code, and the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/iommu.h>
36 /* Per-cpu lookup table for balanced wqs */
43 if (++entry->cur_wq >= entry->n_wqs) in wq_table_next_wq()
44 entry->cur_wq = 0; in wq_table_next_wq()
46 if (!entry->wqs[entry->cur_wq]) in wq_table_next_wq()
50 entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id, in wq_table_next_wq()
51 entry->wqs[entry->cur_wq]->id, cpu); in wq_table_next_wq()
53 return entry->wqs[entry->cur_wq]; in wq_table_next_wq()
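The wq_table_next_wq() lines above implement a simple round-robin pick over a per-CPU array of work queues: advance cur_wq, wrap at n_wqs, and return the selected entry. Below is a minimal userspace model of that selection; the struct fields mirror the excerpt, while the idxd_wq stand-in, the main() harness, and the dropped per_cpu_ptr() indirection are simplifications, not the driver's real code.

/*
 * Minimal userspace model of the round-robin pick shown above.  The
 * struct fields mirror the excerpt (wqs, max_wqs, n_wqs, cur_wq); the
 * idxd_wq stand-in and main() harness are hypothetical, and the kernel's
 * per_cpu_ptr() indirection is dropped.
 */
#include <stdio.h>

struct idxd_wq { int id; };		/* stand-in for the kernel struct */

struct wq_table_entry {
	struct idxd_wq **wqs;
	int max_wqs;
	int n_wqs;
	int cur_wq;
};

/* Advance the cursor, wrap at n_wqs, and return the selected wq. */
static struct idxd_wq *wq_table_next_wq(struct wq_table_entry *entry)
{
	if (++entry->cur_wq >= entry->n_wqs)
		entry->cur_wq = 0;

	if (!entry->wqs[entry->cur_wq])
		return NULL;

	return entry->wqs[entry->cur_wq];
}

int main(void)
{
	struct idxd_wq a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
	struct idxd_wq *wqs[] = { &a, &b, &c };
	struct wq_table_entry entry = { .wqs = wqs, .max_wqs = 3, .n_wqs = 3 };

	for (int i = 0; i < 6; i++)
		printf("pick wq %d\n", wq_table_next_wq(&entry)->id);

	return 0;
}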
60 if (WARN_ON(entry->n_wqs == entry->max_wqs)) in wq_table_add()
63 entry->wqs[entry->n_wqs++] = wq; in wq_table_add()
66 entry->wqs[entry->n_wqs - 1]->idxd->id, in wq_table_add()
67 entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu); in wq_table_add()
74 kfree(entry->wqs); in wq_table_free_entry()
82 entry->n_wqs = 0; in wq_table_clear_entry()
83 entry->cur_wq = 0; in wq_table_clear_entry()
84 memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *)); in wq_table_clear_entry()
105 int ret = -EBUSY; in verify_compress_store()
128 * - sync: the compression or decompression completes before
133 * - async: the compression or decompression is submitted and returns
140 * - async_irq: the compression or decompression is submitted and
159 * set_iaa_sync_mode - Set IAA sync mode
180 ret = -EINVAL; in set_iaa_sync_mode()
203 int ret = -EBUSY; in sync_mode_store()
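The comment fragments above describe the driver's three submission modes (sync, async, async_irq), and set_iaa_sync_mode() selects among them, returning -EINVAL for anything else. The sketch below is a hedged reconstruction of that choice as a mapping onto the two flags the later excerpts test (ctx->async_mode and ctx->use_irq); strcmp() stands in for the kernel's sysfs string helpers, and the exact flag assignments are inferred from the mode descriptions rather than quoted from the driver.

/*
 * Hedged reconstruction of the sync-mode selection described above.
 * strcmp() replaces the kernel string helpers, and the two booleans
 * correspond to the ctx->async_mode / ctx->use_irq flags consulted in
 * the compress/decompress excerpts further down.
 */
#include <errno.h>
#include <stdbool.h>
#include <string.h>

static bool async_mode;	/* submit and return -EINPROGRESS instead of waiting */
static bool use_irq;	/* request a completion interrupt (IDXD_OP_FLAG_RCI) */

static int set_iaa_sync_mode(const char *name)
{
	if (!strcmp(name, "sync")) {		/* complete before returning */
		async_mode = false;
		use_irq = false;
	} else if (!strcmp(name, "async")) {	/* submit; caller polls for completion */
		async_mode = true;
		use_irq = false;
	} else if (!strcmp(name, "async_irq")) { /* submit; completion signaled by IRQ */
		async_mode = true;
		use_irq = true;
	} else {
		return -EINVAL;
	}

	return 0;
}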
224 int i = -EINVAL; in find_empty_iaa_compression_mode()
245 if (!strcmp(mode->name, name)) { in find_iaa_compression_mode()
256 kfree(mode->name); in free_iaa_compression_mode()
257 kfree(mode->ll_table); in free_iaa_compression_mode()
258 kfree(mode->d_table); in free_iaa_compression_mode()
274 * per-IAA device dma mapping is created for each IAA device, for each
282 * remove_iaa_compression_mode - Remove an IAA compression mode
308 * add_iaa_compression_mode - Add an IAA compression mode
330 int idx, ret = -ENOMEM; in add_iaa_compression_mode()
335 ret = -EBUSY; in add_iaa_compression_mode()
343 mode->name = kstrdup(name, GFP_KERNEL); in add_iaa_compression_mode()
344 if (!mode->name) in add_iaa_compression_mode()
348 mode->ll_table = kmemdup(ll_table, ll_table_size, GFP_KERNEL); in add_iaa_compression_mode()
349 if (!mode->ll_table) in add_iaa_compression_mode()
351 mode->ll_table_size = ll_table_size; in add_iaa_compression_mode()
355 mode->d_table = kmemdup(d_table, d_table_size, GFP_KERNEL); in add_iaa_compression_mode()
356 if (!mode->d_table) in add_iaa_compression_mode()
358 mode->d_table_size = d_table_size; in add_iaa_compression_mode()
361 mode->init = init; in add_iaa_compression_mode()
362 mode->free = free; in add_iaa_compression_mode()
369 mode->name, idx); in add_iaa_compression_mode()
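add_iaa_compression_mode() above takes ownership of copies of the caller's name and Huffman tables (kstrdup()/kmemdup()) and records optional init/free callbacks. A hedged usage sketch follows; the parameter order is inferred from the excerpt, and the mode name, table contents, and callback are hypothetical placeholders. The fragment assumes the driver-internal iaa_crypto.h types and is not standalone code.

/*
 * Hedged usage sketch for add_iaa_compression_mode().  Parameter order is
 * inferred from the excerpt (name, ll_table + size, d_table + size, init
 * and free callbacks).  The tables and callback below are placeholders,
 * not the driver's real mode data.
 */
static u32 example_ll_table[] = { 0 /* literal/length Huffman codes */ };
static u32 example_d_table[]  = { 0 /* distance Huffman codes */ };

static int example_mode_init(struct iaa_device_compression_mode *device_mode)
{
	/* finish per-device setup of device_mode->aecs_comp_table if needed */
	return 0;
}

static int register_example_mode(void)
{
	return add_iaa_compression_mode("example",
					example_ll_table, sizeof(example_ll_table),
					example_d_table, sizeof(example_d_table),
					example_mode_init, NULL /* free */);
}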
387 return iaa_device->compression_modes[idx]; in get_iaa_device_compression_mode()
394 struct device *dev = &iaa_device->idxd->pdev->dev; in free_device_compression_mode()
396 kfree(device_mode->name); in free_device_compression_mode()
398 if (device_mode->aecs_comp_table) in free_device_compression_mode()
399 dma_free_coherent(dev, size, device_mode->aecs_comp_table, in free_device_compression_mode()
400 device_mode->aecs_comp_table_dma_addr); in free_device_compression_mode()
422 struct device *dev = &iaa_device->idxd->pdev->dev; in init_device_compression_mode()
424 int ret = -ENOMEM; in init_device_compression_mode()
428 return -ENOMEM; in init_device_compression_mode()
430 device_mode->name = kstrdup(mode->name, GFP_KERNEL); in init_device_compression_mode()
431 if (!device_mode->name) in init_device_compression_mode()
434 device_mode->aecs_comp_table = dma_alloc_coherent(dev, size, in init_device_compression_mode()
435 &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL); in init_device_compression_mode()
436 if (!device_mode->aecs_comp_table) in init_device_compression_mode()
440 memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table)); in init_device_compression_mode()
441 memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size); in init_device_compression_mode()
442 memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size); in init_device_compression_mode()
444 if (mode->init) { in init_device_compression_mode()
445 ret = mode->init(device_mode); in init_device_compression_mode()
451 iaa_device->compression_modes[idx] = device_mode; in init_device_compression_mode()
454 mode->name, iaa_device->idxd->id); in init_device_compression_mode()
461 mode->name, iaa_device->idxd->id); in init_device_compression_mode()
492 device_mode = iaa_device->compression_modes[i]; in remove_device_compression_modes()
496 if (iaa_compression_modes[i]->free) in remove_device_compression_modes()
497 iaa_compression_modes[i]->free(device_mode); in remove_device_compression_modes()
499 iaa_device->compression_modes[i] = NULL; in remove_device_compression_modes()
511 INIT_LIST_HEAD(&iaa_device->wqs); in iaa_device_alloc()
520 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { in iaa_has_wq()
521 if (iaa_wq->wq == wq) in iaa_has_wq()
536 iaa_device->idxd = idxd; in add_iaa_device()
538 list_add_tail(&iaa_device->list, &iaa_devices); in add_iaa_device()
549 ret = init_device_compression_modes(iaa_device, iaa_wq->wq); in init_iaa_device()
558 list_del(&iaa_device->list); in del_iaa_device()
560 nr_iaa--; in del_iaa_device()
566 struct idxd_device *idxd = iaa_device->idxd; in add_iaa_wq()
567 struct pci_dev *pdev = idxd->pdev; in add_iaa_wq()
568 struct device *dev = &pdev->dev; in add_iaa_wq()
573 return -ENOMEM; in add_iaa_wq()
575 iaa_wq->wq = wq; in add_iaa_wq()
576 iaa_wq->iaa_device = iaa_device; in add_iaa_wq()
579 list_add_tail(&iaa_wq->list, &iaa_device->wqs); in add_iaa_wq()
581 iaa_device->n_wq++; in add_iaa_wq()
587 wq->id, iaa_device->idxd->id, iaa_device->n_wq); in add_iaa_wq()
594 struct idxd_device *idxd = iaa_device->idxd; in del_iaa_wq()
595 struct pci_dev *pdev = idxd->pdev; in del_iaa_wq()
596 struct device *dev = &pdev->dev; in del_iaa_wq()
599 list_for_each_entry(iaa_wq, &iaa_device->wqs, list) { in del_iaa_wq()
600 if (iaa_wq->wq == wq) { in del_iaa_wq()
601 list_del(&iaa_wq->list); in del_iaa_wq()
602 iaa_device->n_wq--; in del_iaa_wq()
605 wq->id, iaa_device->idxd->id, in del_iaa_wq()
606 iaa_device->n_wq, nr_iaa); in del_iaa_wq()
608 if (iaa_device->n_wq == 0) in del_iaa_wq()
641 iaa_device = iaa_wq->iaa_device; in __free_iaa_wq()
642 if (iaa_device->n_wq == 0) in __free_iaa_wq()
643 free_iaa_device(iaa_wq->iaa_device); in __free_iaa_wq()
652 wq = iaa_wq->wq; in free_iaa_wq()
660 struct idxd_device *idxd = wq->idxd; in iaa_wq_get()
664 spin_lock(&idxd->dev_lock); in iaa_wq_get()
666 if (iaa_wq && !iaa_wq->remove) { in iaa_wq_get()
667 iaa_wq->ref++; in iaa_wq_get()
670 ret = -ENODEV; in iaa_wq_get()
672 spin_unlock(&idxd->dev_lock); in iaa_wq_get()
679 struct idxd_device *idxd = wq->idxd; in iaa_wq_put()
684 spin_lock(&idxd->dev_lock); in iaa_wq_put()
687 iaa_wq->ref--; in iaa_wq_put()
688 if (iaa_wq->ref == 0 && iaa_wq->remove) { in iaa_wq_put()
694 ret = -ENODEV; in iaa_wq_put()
696 spin_unlock(&idxd->dev_lock); in iaa_wq_put()
724 return -ENOMEM; in alloc_wq_table()
728 entry->wqs = kcalloc(max_wqs, sizeof(*entry->wqs), GFP_KERNEL); in alloc_wq_table()
729 if (!entry->wqs) { in alloc_wq_table()
731 return -ENOMEM; in alloc_wq_table()
734 entry->max_wqs = max_wqs; in alloc_wq_table()
751 if (iaa_device->idxd == wq->idxd) { in save_iaa_wq()
752 idxd = iaa_device->idxd; in save_iaa_wq()
753 pdev = idxd->pdev; in save_iaa_wq()
754 dev = &pdev->dev; in save_iaa_wq()
779 new_device = add_iaa_device(wq->idxd); in save_iaa_wq()
781 ret = -ENOMEM; in save_iaa_wq()
794 del_iaa_wq(new_device, new_wq->wq); in save_iaa_wq()
802 return -EINVAL; in save_iaa_wq()
840 idxd = iaa_device->idxd; in wq_table_add_wqs()
841 pdev = idxd->pdev; in wq_table_add_wqs()
842 dev = &pdev->dev; in wq_table_add_wqs()
851 found_device->idxd->id, cur_iaa); in wq_table_add_wqs()
860 ret = -EINVAL; in wq_table_add_wqs()
865 idxd = found_device->idxd; in wq_table_add_wqs()
866 pdev = idxd->pdev; in wq_table_add_wqs()
867 dev = &pdev->dev; in wq_table_add_wqs()
869 found_device->idxd->id, cur_iaa); in wq_table_add_wqs()
872 list_for_each_entry(iaa_wq, &found_device->wqs, list) { in wq_table_add_wqs()
873 wq_table_add(cpu, iaa_wq->wq); in wq_table_add_wqs()
875 cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id); in wq_table_add_wqs()
881 ret = -EINVAL; in wq_table_add_wqs()
942 while (!comp->status) { in check_completion()
944 return -EAGAIN; in check_completion()
948 dev_err(dev, "%s completion timed out - " in check_completion()
952 ret = -ETIMEDOUT; in check_completion()
957 if (comp->status != IAX_COMP_SUCCESS) { in check_completion()
958 if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) { in check_completion()
959 ret = -ETIMEDOUT; in check_completion()
961 op_str, comp->output_size); in check_completion()
966 if (comp->status == IAA_ANALYTICS_ERROR && in check_completion()
967 comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) { in check_completion()
968 ret = -E2BIG; in check_completion()
971 comp->output_size); in check_completion()
976 if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) { in check_completion()
977 ret = -EOVERFLOW; in check_completion()
981 ret = -EINVAL; in check_completion()
983 op_str, comp->status, comp->error_code, comp->output_size); in check_completion()
984 print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0); in check_completion()
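check_completion() above polls the completion record and translates hardware status into errno values: watchdog expiry becomes -ETIMEDOUT, a compression buffer overflow becomes -E2BIG, a decompression buffer overflow becomes -EOVERFLOW, and anything else is reported as -EINVAL alongside a hex dump of the record. The helper below condenses that mapping into a standalone sketch; the enum values are placeholders rather than the hardware encodings, and the poll/timeout loop and the -EAGAIN "check once" path are omitted.

/*
 * Condensed userspace model of the status-to-errno mapping in the
 * check_completion() excerpt.  Enum values are placeholders for
 * illustration only; the real encodings live in the driver/hardware
 * headers.
 */
#include <errno.h>
#include <stdbool.h>

enum iaa_status {			/* placeholder values, not the hardware encoding */
	IAX_COMP_SUCCESS = 1,
	IAA_ANALYTICS_ERROR,
	IAA_ERROR_DECOMP_BUF_OVERFLOW,
	IAA_ERROR_WATCHDOG_EXPIRED,
};

enum iaa_error_code {			/* placeholder value */
	IAA_ERROR_COMP_BUF_OVERFLOW = 1,
};

static int status_to_errno(enum iaa_status status, int error_code, bool compress)
{
	if (status == IAX_COMP_SUCCESS)
		return 0;
	if (status == IAA_ERROR_WATCHDOG_EXPIRED)
		return -ETIMEDOUT;		/* hardware watchdog fired */
	if (status == IAA_ANALYTICS_ERROR &&
	    error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress)
		return -E2BIG;			/* compressed output did not fit */
	if (status == IAA_ERROR_DECOMP_BUF_OVERFLOW)
		return -EOVERFLOW;		/* decompressed output did not fit */
	return -EINVAL;				/* anything else: report and fail */
}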
999 req->dlen = fbreq->dlen; in deflate_generic_decompress()
1022 struct crypto_ctx *ctx = __ctx; in iaa_desc_complete() local
1030 compression_ctx = crypto_tfm_ctx(ctx->tfm); in iaa_desc_complete()
1032 iaa_wq = idxd_wq_get_private(idxd_desc->wq); in iaa_desc_complete()
1033 iaa_device = iaa_wq->iaa_device; in iaa_desc_complete()
1034 idxd = iaa_device->idxd; in iaa_desc_complete()
1035 pdev = idxd->pdev; in iaa_desc_complete()
1036 dev = &pdev->dev; in iaa_desc_complete()
1039 compression_ctx->mode); in iaa_desc_complete()
1041 " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__, in iaa_desc_complete()
1042 active_compression_mode->name, in iaa_desc_complete()
1043 ctx->src_addr, ctx->dst_addr); in iaa_desc_complete()
1045 ret = check_completion(dev, idxd_desc->iax_completion, in iaa_desc_complete()
1046 ctx->compress, false); in iaa_desc_complete()
1049 if (!ctx->compress && in iaa_desc_complete()
1050 idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { in iaa_desc_complete()
1051 pr_warn("%s: falling back to deflate-generic decompress, " in iaa_desc_complete()
1053 idxd_desc->iax_completion->error_code); in iaa_desc_complete()
1054 ret = deflate_generic_decompress(ctx->req); in iaa_desc_complete()
1056 dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", in iaa_desc_complete()
1058 err = -EIO; in iaa_desc_complete()
1062 err = -EIO; in iaa_desc_complete()
1066 ctx->req->dlen = idxd_desc->iax_completion->output_size; in iaa_desc_complete()
1070 if (ctx->compress) { in iaa_desc_complete()
1071 update_total_comp_bytes_out(ctx->req->dlen); in iaa_desc_complete()
1072 update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen); in iaa_desc_complete()
1074 update_total_decomp_bytes_in(ctx->req->slen); in iaa_desc_complete()
1075 update_wq_decomp_bytes(iaa_wq->wq, ctx->req->slen); in iaa_desc_complete()
1078 if (ctx->compress && compression_ctx->verify_compress) { in iaa_desc_complete()
1079 u32 *compression_crc = acomp_request_ctx(ctx->req); in iaa_desc_complete()
1082 *compression_crc = idxd_desc->iax_completion->crc; in iaa_desc_complete()
1084 ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr); in iaa_desc_complete()
1087 err = -EIO; in iaa_desc_complete()
1091 ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr, in iaa_desc_complete()
1092 ctx->req->slen, dst_addr, &ctx->req->dlen); in iaa_desc_complete()
1095 err = -EIO; in iaa_desc_complete()
1098 dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE); in iaa_desc_complete()
1099 dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE); in iaa_desc_complete()
1104 dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE); in iaa_desc_complete()
1105 dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE); in iaa_desc_complete()
1110 if (ctx->req->base.complete) in iaa_desc_complete()
1111 acomp_request_complete(ctx->req, err); in iaa_desc_complete()
1114 idxd_free_desc(idxd_desc->wq, idxd_desc); in iaa_desc_complete()
1115 iaa_wq_put(idxd_desc->wq); in iaa_desc_complete()
1124 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); in iaa_compress() local
1136 iaa_device = iaa_wq->iaa_device; in iaa_compress()
1137 idxd = iaa_device->idxd; in iaa_compress()
1138 pdev = idxd->pdev; in iaa_compress()
1139 dev = &pdev->dev; in iaa_compress()
1141 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); in iaa_compress()
1149 desc = idxd_desc->iax_hw; in iaa_compress()
1151 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | in iaa_compress()
1153 desc->opcode = IAX_OPCODE_COMPRESS; in iaa_compress()
1154 desc->compr_flags = IAA_COMP_FLAGS; in iaa_compress()
1155 desc->priv = 0; in iaa_compress()
1157 desc->src1_addr = (u64)src_addr; in iaa_compress()
1158 desc->src1_size = slen; in iaa_compress()
1159 desc->dst_addr = (u64)dst_addr; in iaa_compress()
1160 desc->max_dst_size = *dlen; in iaa_compress()
1161 desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr; in iaa_compress()
1162 desc->src2_size = sizeof(struct aecs_comp_table_record); in iaa_compress()
1163 desc->completion_addr = idxd_desc->compl_dma; in iaa_compress()
1165 if (ctx->use_irq) { in iaa_compress()
1166 desc->flags |= IDXD_OP_FLAG_RCI; in iaa_compress()
1168 idxd_desc->crypto.req = req; in iaa_compress()
1169 idxd_desc->crypto.tfm = tfm; in iaa_compress()
1170 idxd_desc->crypto.src_addr = src_addr; in iaa_compress()
1171 idxd_desc->crypto.dst_addr = dst_addr; in iaa_compress()
1172 idxd_desc->crypto.compress = true; in iaa_compress()
1176 active_compression_mode->name, in iaa_compress()
1181 " desc->src1_addr %llx, desc->src1_size %d," in iaa_compress()
1182 " desc->dst_addr %llx, desc->max_dst_size %d," in iaa_compress()
1183 " desc->src2_addr %llx, desc->src2_size %d\n", __func__, in iaa_compress()
1184 active_compression_mode->name, in iaa_compress()
1185 desc->src1_addr, desc->src1_size, desc->dst_addr, in iaa_compress()
1186 desc->max_dst_size, desc->src2_addr, desc->src2_size); in iaa_compress()
1198 if (ctx->async_mode) { in iaa_compress()
1199 ret = -EINPROGRESS; in iaa_compress()
1200 dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); in iaa_compress()
1204 ret = check_completion(dev, idxd_desc->iax_completion, true, false); in iaa_compress()
1210 *dlen = idxd_desc->iax_completion->output_size; in iaa_compress()
1216 *compression_crc = idxd_desc->iax_completion->crc; in iaa_compress()
1218 if (!ctx->async_mode) in iaa_compress()
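In iaa_compress() above, the submission path depends on the context flags: ctx->use_irq adds IDXD_OP_FLAG_RCI and stashes the request, tfm, and DMA addresses for the iaa_desc_complete() callback; ctx->async_mode makes the call return -EINPROGRESS instead of waiting; otherwise check_completion() polls inline and the output size and CRC are read back. The fragment below models only that branch, with reduced stand-in types rather than the idxd definitions.

/*
 * Reduced model of the submission-path branch in the excerpt above.  The
 * flag bit and the two structs are stand-ins; only the control flow is
 * the point.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define IDXD_OP_FLAG_RCI	0x1	/* placeholder bit: request completion interrupt */

struct fake_desc { uint32_t flags; void *callback_data; };
struct fake_ctx  { bool async_mode; bool use_irq; };

/* Returns 0 when the caller should poll inline, -EINPROGRESS otherwise. */
static int submit_path(struct fake_ctx *ctx, struct fake_desc *desc, void *req)
{
	if (ctx->use_irq) {
		desc->flags |= IDXD_OP_FLAG_RCI;	/* completion IRQ runs iaa_desc_complete() */
		desc->callback_data = req;		/* excerpt stashes req/tfm/src/dst addresses */
	}

	/* ... the descriptor is submitted to the work queue here ... */

	if (ctx->async_mode)
		return -EINPROGRESS;			/* caller is completed asynchronously */

	return 0;					/* sync mode: check_completion() polls */
}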
1236 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); in iaa_remap_for_verify()
1237 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); in iaa_remap_for_verify()
1239 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); in iaa_remap_for_verify()
1242 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_remap_for_verify()
1243 iaa_wq->wq->id, ret); in iaa_remap_for_verify()
1244 ret = -EIO; in iaa_remap_for_verify()
1247 *src_addr = sg_dma_address(req->src); in iaa_remap_for_verify()
1248 dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," in iaa_remap_for_verify()
1249 " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs, in iaa_remap_for_verify()
1250 req->src, req->slen, sg_dma_len(req->src)); in iaa_remap_for_verify()
1252 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); in iaa_remap_for_verify()
1255 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_remap_for_verify()
1256 iaa_wq->wq->id, ret); in iaa_remap_for_verify()
1257 ret = -EIO; in iaa_remap_for_verify()
1258 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); in iaa_remap_for_verify()
1261 *dst_addr = sg_dma_address(req->dst); in iaa_remap_for_verify()
1262 dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," in iaa_remap_for_verify()
1263 " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs, in iaa_remap_for_verify()
1264 req->dst, req->dlen, sg_dma_len(req->dst)); in iaa_remap_for_verify()
1275 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); in iaa_compress_verify() local
1287 iaa_device = iaa_wq->iaa_device; in iaa_compress_verify()
1288 idxd = iaa_device->idxd; in iaa_compress_verify()
1289 pdev = idxd->pdev; in iaa_compress_verify()
1290 dev = &pdev->dev; in iaa_compress_verify()
1292 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); in iaa_compress_verify()
1301 desc = idxd_desc->iax_hw; in iaa_compress_verify()
1303 /* Verify (optional) - decompress and check crc, suppress dest write */ in iaa_compress_verify()
1305 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; in iaa_compress_verify()
1306 desc->opcode = IAX_OPCODE_DECOMPRESS; in iaa_compress_verify()
1307 desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT; in iaa_compress_verify()
1308 desc->priv = 0; in iaa_compress_verify()
1310 desc->src1_addr = (u64)dst_addr; in iaa_compress_verify()
1311 desc->src1_size = *dlen; in iaa_compress_verify()
1312 desc->dst_addr = (u64)src_addr; in iaa_compress_verify()
1313 desc->max_dst_size = slen; in iaa_compress_verify()
1314 desc->completion_addr = idxd_desc->compl_dma; in iaa_compress_verify()
1317 " desc->src1_addr %llx, desc->src1_size %d," in iaa_compress_verify()
1318 " desc->dst_addr %llx, desc->max_dst_size %d," in iaa_compress_verify()
1319 " desc->src2_addr %llx, desc->src2_size %d\n", in iaa_compress_verify()
1320 active_compression_mode->name, in iaa_compress_verify()
1321 desc->src1_addr, desc->src1_size, desc->dst_addr, in iaa_compress_verify()
1322 desc->max_dst_size, desc->src2_addr, desc->src2_size); in iaa_compress_verify()
1330 ret = check_completion(dev, idxd_desc->iax_completion, false, false); in iaa_compress_verify()
1336 if (*compression_crc != idxd_desc->iax_completion->crc) { in iaa_compress_verify()
1337 ret = -EINVAL; in iaa_compress_verify()
1340 idxd_desc->iax_completion->crc); in iaa_compress_verify()
1341 print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, in iaa_compress_verify()
1342 8, 1, idxd_desc->iax_completion, 64, 0); in iaa_compress_verify()
1362 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); in iaa_decompress() local
1373 iaa_device = iaa_wq->iaa_device; in iaa_decompress()
1374 idxd = iaa_device->idxd; in iaa_decompress()
1375 pdev = idxd->pdev; in iaa_decompress()
1376 dev = &pdev->dev; in iaa_decompress()
1378 active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode); in iaa_decompress()
1387 desc = idxd_desc->iax_hw; in iaa_decompress()
1389 desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC; in iaa_decompress()
1390 desc->opcode = IAX_OPCODE_DECOMPRESS; in iaa_decompress()
1391 desc->max_dst_size = PAGE_SIZE; in iaa_decompress()
1392 desc->decompr_flags = IAA_DECOMP_FLAGS; in iaa_decompress()
1393 desc->priv = 0; in iaa_decompress()
1395 desc->src1_addr = (u64)src_addr; in iaa_decompress()
1396 desc->dst_addr = (u64)dst_addr; in iaa_decompress()
1397 desc->max_dst_size = *dlen; in iaa_decompress()
1398 desc->src1_size = slen; in iaa_decompress()
1399 desc->completion_addr = idxd_desc->compl_dma; in iaa_decompress()
1401 if (ctx->use_irq) { in iaa_decompress()
1402 desc->flags |= IDXD_OP_FLAG_RCI; in iaa_decompress()
1404 idxd_desc->crypto.req = req; in iaa_decompress()
1405 idxd_desc->crypto.tfm = tfm; in iaa_decompress()
1406 idxd_desc->crypto.src_addr = src_addr; in iaa_decompress()
1407 idxd_desc->crypto.dst_addr = dst_addr; in iaa_decompress()
1408 idxd_desc->crypto.compress = false; in iaa_decompress()
1412 active_compression_mode->name, in iaa_decompress()
1417 " desc->src1_addr %llx, desc->src1_size %d," in iaa_decompress()
1418 " desc->dst_addr %llx, desc->max_dst_size %d," in iaa_decompress()
1419 " desc->src2_addr %llx, desc->src2_size %d\n", __func__, in iaa_decompress()
1420 active_compression_mode->name, in iaa_decompress()
1421 desc->src1_addr, desc->src1_size, desc->dst_addr, in iaa_decompress()
1422 desc->max_dst_size, desc->src2_addr, desc->src2_size); in iaa_decompress()
1434 if (ctx->async_mode) { in iaa_decompress()
1435 ret = -EINPROGRESS; in iaa_decompress()
1436 dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); in iaa_decompress()
1440 ret = check_completion(dev, idxd_desc->iax_completion, false, false); in iaa_decompress()
1443 if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) { in iaa_decompress()
1444 pr_warn("%s: falling back to deflate-generic decompress, " in iaa_decompress()
1446 idxd_desc->iax_completion->error_code); in iaa_decompress()
1449 dev_dbg(dev, "%s: deflate-generic failed ret=%d\n", in iaa_decompress()
1457 req->dlen = idxd_desc->iax_completion->output_size; in iaa_decompress()
1460 *dlen = req->dlen; in iaa_decompress()
1462 if (!ctx->async_mode) in iaa_decompress()
1480 struct crypto_tfm *tfm = req->base.tfm; in iaa_comp_acompress()
1491 return -ENODEV; in iaa_comp_acompress()
1494 if (!req->src || !req->slen) { in iaa_comp_acompress()
1496 return -EINVAL; in iaa_comp_acompress()
1504 return -ENODEV; in iaa_comp_acompress()
1510 return -ENODEV; in iaa_comp_acompress()
1515 dev = &wq->idxd->pdev->dev; in iaa_comp_acompress()
1517 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); in iaa_comp_acompress()
1520 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_comp_acompress()
1521 iaa_wq->wq->id, ret); in iaa_comp_acompress()
1522 ret = -EIO; in iaa_comp_acompress()
1525 src_addr = sg_dma_address(req->src); in iaa_comp_acompress()
1526 dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," in iaa_comp_acompress()
1527 " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, in iaa_comp_acompress()
1528 req->src, req->slen, sg_dma_len(req->src)); in iaa_comp_acompress()
1530 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); in iaa_comp_acompress()
1533 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_comp_acompress()
1534 iaa_wq->wq->id, ret); in iaa_comp_acompress()
1535 ret = -EIO; in iaa_comp_acompress()
1538 dst_addr = sg_dma_address(req->dst); in iaa_comp_acompress()
1539 dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," in iaa_comp_acompress()
1540 " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, in iaa_comp_acompress()
1541 req->dst, req->dlen, sg_dma_len(req->dst)); in iaa_comp_acompress()
1543 ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, in iaa_comp_acompress()
1544 &req->dlen); in iaa_comp_acompress()
1545 if (ret == -EINPROGRESS) in iaa_comp_acompress()
1548 if (!ret && compression_ctx->verify_compress) { in iaa_comp_acompress()
1555 ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen, in iaa_comp_acompress()
1556 dst_addr, &req->dlen); in iaa_comp_acompress()
1560 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE); in iaa_comp_acompress()
1561 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE); in iaa_comp_acompress()
1569 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); in iaa_comp_acompress()
1571 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); in iaa_comp_acompress()
1580 struct crypto_tfm *tfm = req->base.tfm; in iaa_comp_adecompress()
1589 return -ENODEV; in iaa_comp_adecompress()
1592 if (!req->src || !req->slen) { in iaa_comp_adecompress()
1594 return -EINVAL; in iaa_comp_adecompress()
1602 return -ENODEV; in iaa_comp_adecompress()
1608 return -ENODEV; in iaa_comp_adecompress()
1613 dev = &wq->idxd->pdev->dev; in iaa_comp_adecompress()
1615 nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); in iaa_comp_adecompress()
1618 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_comp_adecompress()
1619 iaa_wq->wq->id, ret); in iaa_comp_adecompress()
1620 ret = -EIO; in iaa_comp_adecompress()
1623 src_addr = sg_dma_address(req->src); in iaa_comp_adecompress()
1624 dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," in iaa_comp_adecompress()
1625 " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, in iaa_comp_adecompress()
1626 req->src, req->slen, sg_dma_len(req->src)); in iaa_comp_adecompress()
1628 nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); in iaa_comp_adecompress()
1631 " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, in iaa_comp_adecompress()
1632 iaa_wq->wq->id, ret); in iaa_comp_adecompress()
1633 ret = -EIO; in iaa_comp_adecompress()
1636 dst_addr = sg_dma_address(req->dst); in iaa_comp_adecompress()
1637 dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," in iaa_comp_adecompress()
1638 " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, in iaa_comp_adecompress()
1639 req->dst, req->dlen, sg_dma_len(req->dst)); in iaa_comp_adecompress()
1641 ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, in iaa_comp_adecompress()
1642 dst_addr, &req->dlen); in iaa_comp_adecompress()
1643 if (ret == -EINPROGRESS) in iaa_comp_adecompress()
1649 dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); in iaa_comp_adecompress()
1651 dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); in iaa_comp_adecompress()
1658 static void compression_ctx_init(struct iaa_compression_ctx *ctx) in compression_ctx_init() argument
1660 ctx->verify_compress = iaa_verify_compress; in compression_ctx_init()
1661 ctx->async_mode = async_mode; in compression_ctx_init()
1662 ctx->use_irq = use_irq; in compression_ctx_init()
1668 struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); in iaa_comp_init_fixed() local
1670 compression_ctx_init(ctx); in iaa_comp_init_fixed()
1672 ctx->mode = IAA_MODE_FIXED; in iaa_comp_init_fixed()
1683 .cra_driver_name = "deflate-iaa",
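The algorithm is registered under cra_driver_name "deflate-iaa". Below is a hedged sketch of how an in-kernel consumer might request it through the crypto acomp API; the function name, contiguous kmalloc'd buffers, and abbreviated error handling are assumptions for illustration, not taken from the driver.

/*
 * Hedged sketch of an in-kernel consumer requesting "deflate-iaa" through
 * the crypto acomp API.  Buffers are assumed to be contiguous kernel
 * memory and error paths are abbreviated.
 */
#include <crypto/acompress.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_iaa_compress(void *src, unsigned int slen,
				void *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	struct scatterlist sg_src, sg_dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("deflate-iaa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		crypto_free_acomp(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	/* In async_irq mode the submit returns -EINPROGRESS; crypto_wait_req() sleeps. */
	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

	acomp_request_free(req);
	crypto_free_acomp(tfm);
	return ret;
}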
1718 struct idxd_device *idxd = wq->idxd; in iaa_crypto_probe()
1719 struct idxd_driver_data *data = idxd->data; in iaa_crypto_probe()
1720 struct device *dev = &idxd_dev->conf_dev; in iaa_crypto_probe()
1724 if (idxd->state != IDXD_DEV_ENABLED) in iaa_crypto_probe()
1725 return -ENXIO; in iaa_crypto_probe()
1727 if (data->type != IDXD_TYPE_IAX) in iaa_crypto_probe()
1728 return -ENODEV; in iaa_crypto_probe()
1730 mutex_lock(&wq->wq_lock); in iaa_crypto_probe()
1733 mutex_unlock(&wq->wq_lock); in iaa_crypto_probe()
1734 return -EBUSY; in iaa_crypto_probe()
1739 idxd->id, wq->id, wq->driver_name, dev->driver->name); in iaa_crypto_probe()
1740 idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; in iaa_crypto_probe()
1741 ret = -ENODEV; in iaa_crypto_probe()
1745 wq->type = IDXD_WQT_KERNEL; in iaa_crypto_probe()
1750 idxd->id, wq->id, ret); in iaa_crypto_probe()
1751 ret = -ENXIO; in iaa_crypto_probe()
1758 ret = alloc_wq_table(wq->idxd->max_wqs); in iaa_crypto_probe()
1785 mutex_unlock(&wq->wq_lock); in iaa_crypto_probe()
1799 wq->type = IDXD_WQT_NONE; in iaa_crypto_probe()
1807 struct idxd_device *idxd = wq->idxd; in iaa_crypto_remove()
1813 mutex_lock(&wq->wq_lock); in iaa_crypto_remove()
1818 spin_lock(&idxd->dev_lock); in iaa_crypto_remove()
1821 spin_unlock(&idxd->dev_lock); in iaa_crypto_remove()
1826 if (iaa_wq->ref) { in iaa_crypto_remove()
1827 iaa_wq->remove = true; in iaa_crypto_remove()
1829 wq = iaa_wq->wq; in iaa_crypto_remove()
1833 spin_unlock(&idxd->dev_lock); in iaa_crypto_remove()
1851 mutex_unlock(&wq->wq_lock); in iaa_crypto_remove()
1877 return -ENODEV; in iaa_crypto_init_module()
1889 pr_debug("IAA wq sub-driver registration failed\n"); in iaa_crypto_init_module()