Lines matching "num-irq-priority-bits" (tokenized as: num, irq, priority, bits); all hits are from the Linux kernel's drivers/dma/idxd/device.c. Each fragment below is tagged with its containing function.

// SPDX-License-Identifier: GPL-2.0
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/irq.h>
/* Interrupt control bits */

/* in idxd_unmask_error_interrupts() */
	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	...
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

/* in idxd_mask_error_interrupts() */
	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	...
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
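
/*
 * Both helpers are read-modify-write sequences on GENCTRL: the elided
 * middle lines set (unmask) or clear (mask) the software-error and
 * halt-state interrupt enable fields before the value is written back.
 */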

/* in free_hw_descs() */
	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->hw_descs[i]);

	kfree(wq->hw_descs);

static int alloc_hw_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	...
	wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *),
				    GFP_KERNEL, dev_to_node(dev));
	if (!wq->hw_descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]),
					       GFP_KERNEL, dev_to_node(dev));
		if (!wq->hw_descs[i]) {
			free_hw_descs(wq);
			return -ENOMEM;
		}
	}

/* in free_descs() */
	for (i = 0; i < wq->num_descs; i++)
		kfree(wq->descs[i]);

	kfree(wq->descs);

static int alloc_descs(struct idxd_wq *wq, int num)
{
	struct device *dev = &wq->idxd->pdev->dev;
	...
	wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *),
				 GFP_KERNEL, dev_to_node(dev));
	if (!wq->descs)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]),
					    GFP_KERNEL, dev_to_node(dev));
		if (!wq->descs[i]) {
			free_descs(wq);
			return -ENOMEM;
		}
	}
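
/*
 * Two parallel, NUMA-local arrays are kept per WQ: wq->hw_descs holds
 * the hardware descriptors (struct dsa_hw_desc) that get submitted to
 * the device, while wq->descs holds the driver's software bookkeeping
 * (struct idxd_desc) wrapping them; idxd_wq_alloc_resources() below
 * pairs the two up index by index.
 */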

/* WQ control bits */

/* in idxd_wq_alloc_resources() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	num_descs = wq_dedicated(wq) ? wq->size : wq->threshold;
	wq->num_descs = num_descs;
	...
	wq->compls_size = num_descs * idxd->data->compl_size;
	wq->compls = dma_alloc_coherent(dev, wq->compls_size, &wq->compls_addr, GFP_KERNEL);
	if (!wq->compls) {
		rc = -ENOMEM;
		...
	}
	...
	rc = sbitmap_queue_init_node(&wq->sbq, num_descs, -1, false, GFP_KERNEL,
				     dev_to_node(dev));
	...
	for (i = 0; i < num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		desc->hw = wq->hw_descs[i];
		if (idxd->data->type == IDXD_TYPE_DSA)
			desc->completion = &wq->compls[i];
		else if (idxd->data->type == IDXD_TYPE_IAX)
			desc->iax_completion = &wq->iax_compls[i];
		desc->compl_dma = wq->compls_addr + idxd->data->compl_size * i;
		desc->id = i;
		desc->wq = wq;
		desc->cpu = -1;
	}
	...
	/* error unwind */
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
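
/*
 * Completion records are carved out of one coherent DMA buffer: record
 * i sits at wq->compls_addr + compl_size * i, so a descriptor's
 * compl_dma is computed without a per-descriptor mapping. DSA and IAX
 * share this layout and differ only in the record format.
 */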

/* in idxd_wq_free_resources() */
	struct device *dev = &wq->idxd->pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return;
	...
	dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
	sbitmap_queue_free(&wq->sbq);

/* in idxd_wq_enable() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	if (wq->state == IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d already enabled\n", wq->id);
		return 0;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_WQ, wq->id, &status);
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_WQ_ENABLED)
		return -ENXIO;

	wq->state = IDXD_WQ_ENABLED;
	set_bit(wq->id, idxd->wq_enable_map);
	dev_dbg(dev, "WQ %d enabled\n", wq->id);

/* in idxd_wq_disable() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	dev_dbg(dev, "Disabling WQ %d\n", wq->id);

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return 0;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
	if (status != IDXD_CMDSTS_SUCCESS)
		return -ENXIO;
	...
	clear_bit(wq->id, idxd->wq_enable_map);
	wq->state = IDXD_WQ_DISABLED;
	dev_dbg(dev, "WQ %d disabled\n", wq->id);

/* in idxd_wq_drain() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	dev_dbg(dev, "Draining WQ %d\n", wq->id);
	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_WQ, operand, NULL);

/* in idxd_wq_reset() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	if (wq->state != IDXD_WQ_ENABLED) {
		dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state);
		return;
	}

	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
	idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, NULL);
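
/*
 * WQ command operand encoding, shared by the disable/drain/reset
 * fragments above: for wq->id == 21,
 *   BIT(21 % 16) | ((21 / 16) << 16) == BIT(5) | (1 << 16) == 0x10020
 * i.e. bits 15:0 carry a one-hot mask within a bank of 16 WQs and
 * bits 31:16 select the bank.
 */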

/* in idxd_wq_map_portal() */
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;

	start = pci_resource_start(pdev, IDXD_WQ_BAR);
	start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);

	wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
	if (!wq->portal)
		return -ENOMEM;

/* in idxd_wq_unmap_portal() */
	struct device *dev = &wq->idxd->pdev->dev;

	devm_iounmap(dev, wq->portal);
	wq->portal = NULL;
	wq->portal_offset = 0;

/* in idxd_wqs_unmap_portal() */
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->portal)
			idxd_wq_unmap_portal(wq);
	}

/* in __idxd_wq_set_pasid_locked() */
	struct idxd_device *idxd = wq->idxd;
	...
	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 1;
	wqcfg.pasid = pasid;
	wq->wqcfg->bits[WQCFG_PASID_IDX] = wqcfg.bits[WQCFG_PASID_IDX];
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);

/* in idxd_wq_disable_pasid() */
	struct idxd_device *idxd = wq->idxd;
	...
	offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
	spin_lock(&idxd->dev_lock);
	wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
	wqcfg.pasid_en = 0;
	wqcfg.pasid = 0;
	iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
	spin_unlock(&idxd->dev_lock);
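
/*
 * The PASID field lives in a single 32-bit word of WQCFG
 * (WQCFG_PASID_IDX), so both paths do a locked read-modify-write of
 * just that word under dev_lock; the set path also refreshes the
 * driver's wqcfg shadow copy to match the hardware.
 */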

/* in idxd_wq_disable_cleanup() */
	struct idxd_device *idxd = wq->idxd;

	lockdep_assert_held(&wq->wq_lock);
	wq->state = IDXD_WQ_DISABLED;
	memset(wq->wqcfg, 0, idxd->wqcfg_size);
	wq->type = IDXD_WQT_NONE;
	wq->threshold = 0;
	wq->priority = 0;
	wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
	wq->flags = 0;
	memset(wq->name, 0, WQ_NAME_SIZE);
	wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
	if (wq->opcap_bmap)
		bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);

/* in idxd_wq_device_reset_cleanup() */
	lockdep_assert_held(&wq->wq_lock);
	...
	wq->size = 0;
	wq->group = NULL;

/* in idxd_wq_ref_release() */
	complete(&wq->wq_dead);

/* in idxd_wq_init_percpu_ref() */
	memset(&wq->wq_active, 0, sizeof(wq->wq_active));
	rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
			     PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
	if (rc < 0)
		return rc;
	reinit_completion(&wq->wq_dead);
	reinit_completion(&wq->wq_resurrect);

/* in __idxd_wq_quiesce() */
	lockdep_assert_held(&wq->wq_lock);
	reinit_completion(&wq->wq_resurrect);
	percpu_ref_kill(&wq->wq_active);
	complete_all(&wq->wq_resurrect);
	wait_for_completion(&wq->wq_dead);

/* in idxd_wq_quiesce() */
	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	mutex_unlock(&wq->wq_lock);
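
/*
 * Quiesce protocol: kill the per-CPU reference so new submitters fail,
 * let anyone blocked on wq_resurrect proceed, then wait for the last
 * in-flight reference to drop -- idxd_wq_ref_release() signals wq_dead.
 */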

/* Device control bits */

/* in idxd_is_enabled() */
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

/* in idxd_device_is_halted() */
	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);

/* in idxd_device_init_reset() */
	struct device *dev = &idxd->pdev->dev;

	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		return -ENXIO;
	}
	...
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) &
	       IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	spin_unlock(&idxd->cmd_lock);

/* in idxd_cmd_exec() */
	if (idxd_device_is_halted(idxd)) {
		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
		...
	}
	...
	spin_lock_irqsave(&idxd->cmd_lock, flags);
	wait_event_lock_irq(idxd->cmd_waitq,
			    !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
			    idxd->cmd_lock);

	dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n",
		__func__, cmd_code, operand);

	idxd->cmd_status = 0;
	__set_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	idxd->cmd_done = &done;
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	/* release the lock and sleep until the command completes via interrupt */
	spin_unlock_irqrestore(&idxd->cmd_lock, flags);
	wait_for_completion(&done);
	stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_lock(&idxd->cmd_lock);
	if (status)
		*status = stat;
	idxd->cmd_status = stat & GENMASK(7, 0);

	__clear_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags);
	wake_up(&idxd->cmd_waitq);
	spin_unlock(&idxd->cmd_lock);
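
/*
 * Command submission is serialized by IDXD_FLAG_CMD_RUNNING: a caller
 * sleeps on cmd_waitq until no command is in flight, writes the CMD
 * register, then blocks on a completion that the command-done interrupt
 * signals. The low byte of CMDSTS (GENMASK(7, 0)) is cached in
 * idxd->cmd_status as the most recent error code.
 */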

/* in idxd_device_enable() */
	struct device *dev = &idxd->pdev->dev;

	if (idxd_is_enabled(idxd)) {
		dev_dbg(dev, "Device already enabled\n");
		return -ENXIO;
	}

	idxd_cmd_exec(idxd, IDXD_CMD_ENABLE_DEVICE, 0, &status);
	if (status != IDXD_CMDSTS_SUCCESS &&
	    status != IDXD_CMDSTS_ERR_DEV_ENABLED)
		return -ENXIO;

	idxd->state = IDXD_DEV_ENABLED;

/* in idxd_device_disable() */
	struct device *dev = &idxd->pdev->dev;
	...
		return -ENXIO;	/* disable command failed */

/* in idxd_device_reset() */
	spin_lock(&idxd->dev_lock);
	...
	spin_unlock(&idxd->dev_lock);

/* in idxd_device_drain_pasid() */
	struct device *dev = &idxd->pdev->dev;

/* in idxd_device_request_int_handle() */
	struct device *dev = &idxd->pdev->dev;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)))
		return -EOPNOTSUPP;
	...
		return -ENXIO;	/* request int handle command failed */

/* in idxd_device_release_int_handle() */
	struct device *dev = &idxd->pdev->dev;

	if (!(idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)))
		return -EOPNOTSUPP;
	...
	spin_lock(&idxd->cmd_lock);
	iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET);

	while (ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET) & IDXD_CMDSTS_ACTIVE)
		cpu_relax();
	status = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
	spin_unlock(&idxd->cmd_lock);
	...
		return -ENXIO;	/* release int handle command failed */
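
/*
 * Unlike most commands, releasing an interrupt handle bypasses
 * idxd_cmd_exec() and is polled to completion via IDXD_CMDSTS_ACTIVE
 * under cmd_lock -- presumably so it can run while the command-done
 * interrupt itself is being torn down.
 */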

/* Device configuration bits */

/* in idxd_engines_clear_state() */
	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_engines; i++) {
		engine = idxd->engines[i];
		engine->group = NULL;
	}

/* in idxd_groups_clear_state() */
	lockdep_assert_held(&idxd->dev_lock);
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		memset(&group->grpcfg, 0, sizeof(group->grpcfg));
		group->num_engines = 0;
		group->num_wqs = 0;
		group->use_rdbuf_limit = false;
		...
		group->rdbufs_allowed = idxd->max_rdbufs;
		group->rdbufs_reserved = 0;
		if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
			group->tc_a = 1;
			group->tc_b = 1;
		} else {
			group->tc_a = -1;
			group->tc_b = -1;
		}
		group->desc_progress_limit = 0;
		group->batch_progress_limit = 0;
	}
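
/*
 * Traffic class defaults: hardware up to DEVICE_VERSION_2 wants TC-A
 * and TC-B programmed to 1 unless the tc_override module parameter is
 * set; otherwise -1 means "unset" and is resolved to 0/1 later in
 * idxd_group_flags_setup().
 */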

/* in idxd_device_wqs_clear_state() */
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		mutex_lock(&wq->wq_lock);
		idxd_wq_disable_cleanup(wq);
		idxd_wq_device_reset_cleanup(wq);
		mutex_unlock(&wq->wq_lock);
	}

/* in idxd_device_clear_state() */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		idxd_device_wqs_clear_state(idxd);
		spin_lock(&idxd->dev_lock);
		...
	} else {
		spin_lock(&idxd->dev_lock);
	}
	idxd->state = IDXD_DEV_DISABLED;
	spin_unlock(&idxd->dev_lock);

/* in idxd_device_evl_setup() */
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;
	...
	bmap = bitmap_zalloc(size, GFP_KERNEL);
	if (!bmap) {
		rc = -ENOMEM;
		...
	}

	addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
	if (!addr) {
		rc = -ENOMEM;
		...
	}

	mutex_lock(&evl->lock);
	evl->log = addr;
	evl->dma = dma_addr;
	evl->log_size = size;
	evl->bmap = bmap;

	evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
	evlcfg.size = evl->size;

	iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 1;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	gencfg.evl_en = 1;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	mutex_unlock(&evl->lock);
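
/*
 * Event log setup in short: a coherent log buffer (dma_alloc_coherent()
 * already returns page-aligned memory, hence the GENMASK(63, 12)
 * address field) plus a tracking bitmap, programmed through the two
 * EVLCFG words, then armed via GENCTRL (event-log interrupt enable) and
 * GENCFG (event-log enable).
 */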

/* in idxd_device_evl_free() */
	struct device *dev = &idxd->pdev->dev;
	struct idxd_evl *evl = idxd->evl;

	gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	if (!gencfg.evl_en)
		return;

	mutex_lock(&evl->lock);
	gencfg.evl_en = 0;
	iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);

	genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
	genctrl.evl_int_en = 0;
	iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);

	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
	iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);

	bitmap_free(evl->bmap);
	evl_log = evl->log;
	evl_log_size = evl->log_size;
	evl_dma = evl->dma;
	evl->log = NULL;
	evl->size = IDXD_EVL_SIZE_MIN;
	mutex_unlock(&evl->lock);

/* in idxd_group_config_write() */
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;

	dev_dbg(dev, "Writing group %d cfg registers\n", group->id);

	/* setup GRPWQCFG */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset,
			ioread64(idxd->reg_base + grpcfg_offset));
	}

	/* setup GRPENGCFG */
	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));

	/* setup GRPFLAGS */
	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	iowrite64(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset,
		ioread64(idxd->reg_base + grpcfg_offset));

/* in idxd_groups_config_write() */
	struct device *dev = &idxd->pdev->dev;

	/* set up the bandwidth read-buffer limit */
	if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
		reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
		reg.rdbuf_limit = idxd->rdbuf_limit;
		iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
	}

	dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET,
		ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET));

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_config_write(group);
	}

/* in idxd_device_pasid_priv_enabled() */
	struct pci_dev *pdev = idxd->pdev;

	if (pdev->pasid_enabled && (pdev->pasid_features & PCI_PASID_CAP_PRIV))
		return true;
	return false;

/* in idxd_wq_config_write() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	u32 wq_offset;
	int i, n;

	if (!wq->group)
		return 0;

	/* refresh the shadow copy from the hardware after WQ reset */
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		wq->wqcfg->bits[i] |= ioread32(idxd->reg_base + wq_offset);
	}

	if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
		wq->size = WQ_DEFAULT_QUEUE_DEPTH;

	/* bytes 0-3 */
	wq->wqcfg->wq_size = wq->size;

	/* bytes 4-7 */
	wq->wqcfg->wq_thresh = wq->threshold;

	/* bytes 8-11 */
	if (wq_dedicated(wq))
		wq->wqcfg->mode = 1;
	...
	if (wq_dedicated(wq) && wq->wqcfg->pasid_en &&
	    !idxd_device_pasid_priv_enabled(idxd) &&
	    wq->type == IDXD_WQT_KERNEL) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_PRIV;
		return -EOPNOTSUPP;
	}

	wq->wqcfg->priority = wq->priority;

	if (idxd->hw.gen_cap.block_on_fault &&
	    test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
	    !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
		wq->wqcfg->bof = 1;

	if (idxd->hw.wq_cap.wq_ats_support)
		wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);

	if (idxd->hw.wq_cap.wq_prs_support)
		wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);

	/* bytes 12-15 */
	wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
	idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));

	/* bytes 32-63 */
	if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
		memset(wq->wqcfg->op_config, 0, IDXD_MAX_OPCAP_BITS / 8);
		for_each_set_bit(n, wq->opcap_bmap, IDXD_MAX_OPCAP_BITS) {
			...
			wq->wqcfg->op_config[idx] |= BIT(pos);
		}
	}

	dev_dbg(dev, "WQ %d CFGs\n", wq->id);
	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wq_offset = WQCFG_OFFSET(idxd, wq->id, i);
		iowrite32(wq->wqcfg->bits[i], idxd->reg_base + wq_offset);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
			wq->id, i, wq_offset,
			ioread32(idxd->reg_base + wq_offset));
	}
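
/*
 * WQCFG is written 32 bits at a time: bytes 0-3 hold the queue size,
 * 4-7 the shared-WQ threshold, 8-11 the mode/PASID/priority/
 * fault-handling flags, 12-15 the max transfer and batch shifts, and
 * bytes 32-63 the per-WQ operation capability mask (one bit per opcode;
 * the elided idx/pos lines split bit n across the op_config words).
 */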

/* in idxd_wqs_config_write() */
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_config_write(wq);
		if (rc < 0)
			return rc;
	}

/* in idxd_group_flags_setup() */
	/* TC-A 0 and TC-B 1 should be defaults */
	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		if (group->tc_a == -1)
			group->tc_a = group->grpcfg.flags.tc_a = 0;
		else
			group->grpcfg.flags.tc_a = group->tc_a;
		if (group->tc_b == -1)
			group->tc_b = group->grpcfg.flags.tc_b = 1;
		else
			group->grpcfg.flags.tc_b = group->tc_b;
		group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
		group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
		group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
		group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
		group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
	}

/* in idxd_engines_setup() */
	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		group->grpcfg.engines = 0;
	}

	for (i = 0; i < idxd->max_engines; i++) {
		eng = idxd->engines[i];
		group = eng->group;
		...
		group->grpcfg.engines |= BIT(eng->id);
	}

	if (!engines)
		return -EINVAL;

/* in idxd_wqs_setup() */
	struct device *dev = &idxd->pdev->dev;

	for (i = 0; i < idxd->max_groups; i++) {
		group = idxd->groups[i];
		for (j = 0; j < 4; j++)
			group->grpcfg.wqs[j] = 0;
	}

	for (i = 0; i < idxd->max_wqs; i++) {
		wq = idxd->wqs[i];
		group = wq->group;

		if (!wq->group)
			continue;
		if (wq_shared(wq) && !wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
			return -EINVAL;
		}

		group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64);
		configured++;
	}

	if (configured == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NONE_CONFIGURED;
		return -EINVAL;
	}
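
/*
 * grpcfg.wqs[] is a 256-bit bitmap spread over four u64 words, hence
 * the wq->id / 64 word index and BIT(wq->id % 64) mask: e.g. wq 70
 * sets bit 6 of word 1. A group's WQ membership is just the set bits
 * across those words.
 */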

/* in idxd_device_config() */
	lockdep_assert_held(&idxd->dev_lock);

/* in idxd_wq_load_config() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, 0);
	memcpy_fromio(wq->wqcfg, idxd->reg_base + wqcfg_offset, idxd->wqcfg_size);

	wq->size = wq->wqcfg->wq_size;
	wq->threshold = wq->wqcfg->wq_thresh;

	/* The driver does not support shared WQ mode in read-only config yet */
	if (wq->wqcfg->mode == 0 || wq->wqcfg->pasid_en)
		return -EOPNOTSUPP;

	set_bit(WQ_FLAG_DEDICATED, &wq->flags);

	wq->priority = wq->wqcfg->priority;

	wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
	idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);

	for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
		wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
		dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", wq->id, i, wqcfg_offset, wq->wqcfg->bits[i]);
	}

/* in idxd_group_load_config() */
	struct idxd_device *idxd = group->idxd;
	struct device *dev = &idxd->pdev->dev;

	/*
	 * Load the WQ bit fields,
	 * iterating through all 256 bits 64 bits at a time.
	 */
	for (i = 0; i < GRPWQCFG_STRIDES; i++) {
		grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
		group->grpcfg.wqs[i] = ioread64(idxd->reg_base + grpcfg_offset);
		dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
			group->id, i, grpcfg_offset, group->grpcfg.wqs[i]);

		if (i * 64 >= idxd->max_wqs)
			break;

		/* Iterate through all 64 bits and check for wq set */
		for (j = 0; j < 64; j++) {
			int id = i * 64 + j;

			if (id >= idxd->max_wqs)
				break;
			/* set group assignment for the wq if its bit is set */
			if (group->grpcfg.wqs[i] & BIT(j)) {
				wq = idxd->wqs[id];
				wq->group = group;
			}
		}
	}

	grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
	group->grpcfg.engines = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
		grpcfg_offset, group->grpcfg.engines);

	/* Iterate through all 64 bits to check engines set */
	for (i = 0; i < 64; i++) {
		if (i >= idxd->max_engines)
			break;
		if (group->grpcfg.engines & BIT(i)) {
			struct idxd_engine *engine = idxd->engines[i];

			engine->group = group;
		}
	}

	grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
	group->grpcfg.flags.bits = ioread64(idxd->reg_base + grpcfg_offset);
	dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#llx\n",
		group->id, grpcfg_offset, group->grpcfg.flags.bits);

/* in idxd_device_load_config() */
	reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
	idxd->rdbuf_limit = reg.rdbuf_limit;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = idxd->groups[i];

		idxd_group_load_config(group);
	}
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		rc = idxd_wq_load_config(wq);
		if (rc < 0)
			return rc;
	}

/* in idxd_flush_pending_descs() */
	spin_lock(&ie->list_lock);
	head = llist_del_all(&ie->pending_llist);
	if (head) {
		llist_for_each_entry_safe(desc, itr, head, llnode)
			list_add_tail(&desc->list, &ie->work_list);
	}

	list_for_each_entry_safe(desc, itr, &ie->work_list, list)
		list_move_tail(&desc->list, &flist);
	spin_unlock(&ie->list_lock);

	list_for_each_entry_safe(desc, itr, &flist, list) {
		list_del(&desc->list);
		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
		/*
		 * The wq is being disabled; the callback may point at
		 * code that is no longer valid, so drop it first.
		 */
		tx = &desc->txd;
		tx->callback = NULL;
		tx->callback_result = NULL;
		...
	}
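
/*
 * Flushing runs in two stages: pending descriptors migrate from the
 * lockless pending_llist onto work_list under list_lock, then the whole
 * work_list is drained. A descriptor whose completion record already
 * has a nonzero status finished and completes normally; the rest are
 * completed as IDXD_COMPLETE_ABORT.
 */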

/* in idxd_device_set_perm_entry() */
	if (ie->pasid == IOMMU_PASID_INVALID)
		return;

	mperm.bits = 0;
	mperm.pasid = ie->pasid;
	mperm.pasid_en = 1;
	iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);

/* in idxd_device_clear_perm_entry() */
	iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);

/* in idxd_wq_free_irq() */
	struct idxd_device *idxd = wq->idxd;
	struct idxd_irq_entry *ie = &wq->ie;

	if (wq->type != IDXD_WQT_KERNEL)
		return;

	free_irq(ie->vector, ie);
	...
	if (idxd->request_int_handles)
		idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
	...
	ie->vector = -1;
	ie->int_handle = INVALID_INT_HANDLE;
	ie->pasid = IOMMU_PASID_INVALID;

/* in idxd_wq_request_irq() */
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	struct device *dev = &pdev->dev;

	if (wq->type != IDXD_WQT_KERNEL)
		return 0;

	ie = &wq->ie;
	ie->vector = pci_irq_vector(pdev, ie->id);
	ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : IOMMU_PASID_INVALID;
	...
	rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
	if (rc < 0) {
		dev_err(dev, "Failed to request irq %d.\n", ie->vector);
		...
	}

	if (idxd->request_int_handles) {
		rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
						    IDXD_IRQ_MSIX);
		...
	} else {
		ie->int_handle = ie->id;
	}
	...
	/* error unwind */
	ie->int_handle = INVALID_INT_HANDLE;
	free_irq(ie->vector, ie);
	...
	ie->pasid = IOMMU_PASID_INVALID;
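
/*
 * Devices that expose the REQUEST_INT_HANDLE command
 * (idxd->request_int_handles) hand out interrupt handles via the
 * device; otherwise the MSI-X entry index doubles as the handle
 * (ie->int_handle = ie->id). The error path unwinds in reverse:
 * invalidate the handle, free the IRQ, clear the permission entry.
 */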

/* in idxd_drv_enable_wq() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	int rc = -ENXIO;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd->state != IDXD_DEV_ENABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_NOT_ENABLED;
		goto err;
	}
	if (wq->state != IDXD_WQ_DISABLED) {
		dev_dbg(dev, "wq %d already enabled.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_ENABLED;
		rc = -EBUSY;
		goto err;
	}
	if (!wq->group) {
		dev_dbg(dev, "wq %d not attached to group.\n", wq->id);
		idxd->cmd_status = IDXD_SCMD_WQ_NO_GRP;
		goto err;
	}
	if (strlen(wq->name) == 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_NAME;
		dev_dbg(dev, "wq %d name not set.\n", wq->id);
		goto err;
	}
	/* shared WQs need PASID support and a nonzero threshold */
	if (wq_shared(wq)) {
		if (!wq_shared_supported(wq)) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_SVM;
			goto err;
		}
		if (wq->threshold == 0) {
			idxd->cmd_status = IDXD_SCMD_WQ_NO_THRESH;
			goto err;
		}
	}

	/*
	 * If the WQ is configurable for pasid, set up pasid and pasid_en;
	 * there is no need to set up the priv bit in that in-kernel DMA
	 * will also do user privileged requests.
	 */
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
		...
		u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;

		__idxd_wq_set_pasid_locked(wq, pasid);
	}

	rc = 0;
	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0) {
		dev_dbg(dev, "Writing wq %d config failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_enable(wq);
	if (rc < 0) {
		dev_dbg(dev, "wq %d enabling failed: %d\n", wq->id, rc);
		goto err;
	}

	rc = idxd_wq_map_portal(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_PORTAL_ERR;
		dev_dbg(dev, "wq %d portal mapping failed: %d\n", wq->id, rc);
		...
	}

	wq->client_count = 0;

	rc = idxd_wq_request_irq(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
		dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
		...
	}

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_WQ_RES_ALLOC_ERR;
		...
	}

	rc = idxd_wq_init_percpu_ref(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_PERCPU_ERR;
		...
	}
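
/*
 * Enable order for a kernel WQ: validate state/group/name (shared WQs
 * additionally need PASID support and a nonzero threshold), write the
 * device config if the device is configurable, then ENABLE_WQ, map the
 * portal, request the completion IRQ, allocate descriptors, and arm
 * the percpu reference. idxd->cmd_status records which step failed.
 */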

/* in idxd_drv_disable_wq() */
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	lockdep_assert_held(&wq->wq_lock);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, ...,	/* warns that the wq is force-disabled with active clients */
			 wq->id, idxd_wq_refcount(wq));
	...
	percpu_ref_exit(&wq->wq_active);
	wq->type = IDXD_WQT_NONE;
	wq->client_count = 0;

/* in idxd_device_drv_probe() */
	if (idxd->state != IDXD_DEV_DISABLED) {
		idxd->cmd_status = IDXD_SCMD_DEV_ENABLED;
		return -ENXIO;
	}

	spin_lock(&idxd->dev_lock);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		rc = idxd_device_config(idxd);
	spin_unlock(&idxd->dev_lock);
	if (rc < 0)
		return -ENXIO;

	/*
	 * The system PASID survives a disable/enable cycle, but GENCFG is
	 * cleared by device reset, so user interrupts need to be re-enabled
	 * for the kernel work queue completion IRQ to function.
	 */
	if (idxd->pasid != IOMMU_PASID_INVALID)
		idxd_set_user_intr(idxd, true);

	rc = idxd_device_evl_setup(idxd);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
		return rc;
	}
	...
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
		...
	}

	idxd->cmd_status = 0;

/* in idxd_device_drv_remove() */
	struct device *dev = &idxd_dev->conf_dev;
	...
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = idxd->wqs[i];

		if (wq->state == IDXD_WQ_DISABLED)
			continue;
		...
	}
	...
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);