Matching lines for "cm", "poll", "mode" in the Thunderbolt NHI driver (drivers/thunderbolt/nhi.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Thunderbolt driver - NHI driver
16 #include <linux/dma-mapping.h>
28 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
37 * Minimal number of vectors when we use MSI-X. Two for control channel
55 int bit = ring->hop; in ring_interrupt_index()
56 if (!ring->is_tx) in ring_interrupt_index()
57 bit += ring->nhi->hop_count; in ring_interrupt_index()
63 if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) { in nhi_mask_interrupt()
66 val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring); in nhi_mask_interrupt()
67 iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring); in nhi_mask_interrupt()
69 iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring); in nhi_mask_interrupt()
75 if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) in nhi_clear_interrupt()
76 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring); in nhi_clear_interrupt()
78 iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring); in nhi_clear_interrupt()
82 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
84 * ring->nhi->lock must be held.
94 if (ring->irq > 0) { in ring_interrupt_active()
100 if (ring->is_tx) in ring_interrupt_active()
101 index = ring->hop; in ring_interrupt_active()
103 index = ring->hop + ring->nhi->hop_count; in ring_interrupt_active()
111 * Other routers explicitly disable auto-clear in ring_interrupt_active()
116 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
117 if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) in ring_interrupt_active()
123 ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
125 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; in ring_interrupt_active()
131 ivr |= ring->vector << shift; in ring_interrupt_active()
135 old = ioread32(ring->nhi->iobase + reg); in ring_interrupt_active()
141 dev_dbg(&ring->nhi->pdev->dev, in ring_interrupt_active()
142 "%s interrupt at register %#x bit %d (%#x -> %#x)\n", in ring_interrupt_active()
146 dev_WARN(&ring->nhi->pdev->dev, in ring_interrupt_active()
148 RING_TYPE(ring), ring->hop, in ring_interrupt_active()
152 iowrite32(new, ring->nhi->iobase + reg); in ring_interrupt_active()
154 nhi_mask_interrupt(ring->nhi, mask, index); in ring_interrupt_active()
158 * nhi_disable_interrupts() - disable interrupts for all rings
178 void __iomem *io = ring->nhi->iobase; in ring_desc_base()
179 io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; in ring_desc_base()
180 io += ring->hop * 16; in ring_desc_base()
186 void __iomem *io = ring->nhi->iobase; in ring_options_base()
187 io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; in ring_options_base()
188 io += ring->hop * 32; in ring_options_base()
195 * The other 16 bits in the register are read-only and writes to them in ring_iowrite_cons()
197 * filling the read-only bits with zeroes. in ring_iowrite_cons()
226 return ((ring->head + 1) % ring->size) == ring->tail; in ring_full()
231 return ring->head == ring->tail; in ring_empty()
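A brief worked example of the index arithmetic above (editorial note, not part of nhi.c): with ring->size == 8, head == 7 and tail == 0, ring_full() reports true because ((7 + 1) % 8) == 0 == tail; one descriptor slot is deliberately left unused so that head == tail can unambiguously mean empty.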
235 * ring_write_descriptors() - post frames from ring->queue to the controller
237 * ring->lock is held.
243 list_for_each_entry_safe(frame, n, &ring->queue, list) { in ring_write_descriptors()
246 list_move_tail(&frame->list, &ring->in_flight); in ring_write_descriptors()
247 descriptor = &ring->descriptors[ring->head]; in ring_write_descriptors()
248 descriptor->phys = frame->buffer_phy; in ring_write_descriptors()
249 descriptor->time = 0; in ring_write_descriptors()
250 descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; in ring_write_descriptors()
251 if (ring->is_tx) { in ring_write_descriptors()
252 descriptor->length = frame->size; in ring_write_descriptors()
253 descriptor->eof = frame->eof; in ring_write_descriptors()
254 descriptor->sof = frame->sof; in ring_write_descriptors()
256 ring->head = (ring->head + 1) % ring->size; in ring_write_descriptors()
257 if (ring->is_tx) in ring_write_descriptors()
258 ring_iowrite_prod(ring, ring->head); in ring_write_descriptors()
260 ring_iowrite_cons(ring, ring->head); in ring_write_descriptors()
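Illustrative only, not part of nhi.c: the frames that ring_write_descriptors() posts arrive on ring->queue through __tb_ring_enqueue(), normally via the tb_ring_tx()/tb_ring_rx() wrappers. A minimal TX submission as a client driver might write it, assuming the struct ring_frame layout and the tb_ring_tx() helper from include/linux/thunderbolt.h; the function names, the already-mapped DMA handle and the PDF value 0xf are made up for the sketch.

#include <linux/slab.h>
#include <linux/thunderbolt.h>

/* Hypothetical completion callback; invoked later from ring_work(). */
static void example_tx_done(struct tb_ring *ring, struct ring_frame *frame,
			    bool canceled)
{
	/* Buffer ownership returns to the client here, even when canceled. */
	kfree(frame);
}

/* @dma must already be mapped for the NHI; 0xf is just an example PDF. */
static int example_submit_tx(struct tb_ring *tx_ring, dma_addr_t dma,
			     size_t len)
{
	struct ring_frame *frame = kzalloc(sizeof(*frame), GFP_KERNEL);

	if (!frame)
		return -ENOMEM;

	frame->buffer_phy = dma;
	frame->callback = example_tx_done;
	frame->size = len;
	frame->sof = 0xf;
	frame->eof = 0xf;

	/* Appends to ring->queue; returns -ESHUTDOWN if the ring is stopped. */
	return tb_ring_tx(tx_ring, frame);
}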
265 * ring_work() - progress completed frames
281 spin_lock_irqsave(&ring->lock, flags); in ring_work()
283 if (!ring->running) { in ring_work()
285 list_splice_tail_init(&ring->in_flight, &done); in ring_work()
286 list_splice_tail_init(&ring->queue, &done); in ring_work()
292 if (!(ring->descriptors[ring->tail].flags in ring_work()
295 frame = list_first_entry(&ring->in_flight, typeof(*frame), in ring_work()
297 list_move_tail(&frame->list, &done); in ring_work()
298 if (!ring->is_tx) { in ring_work()
299 frame->size = ring->descriptors[ring->tail].length; in ring_work()
300 frame->eof = ring->descriptors[ring->tail].eof; in ring_work()
301 frame->sof = ring->descriptors[ring->tail].sof; in ring_work()
302 frame->flags = ring->descriptors[ring->tail].flags; in ring_work()
304 ring->tail = (ring->tail + 1) % ring->size; in ring_work()
310 spin_unlock_irqrestore(&ring->lock, flags); in ring_work()
317 list_del_init(&frame->list); in ring_work()
318 if (frame->callback) in ring_work()
319 frame->callback(ring, frame, canceled); in ring_work()
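For an RX ring, ring_work() has just copied length/eof/sof/flags from the completed descriptor back into the frame, and it passes canceled == true when the frames were flushed by tb_ring_stop(). A hedged sketch of a matching receive callback (editorial example, not from this file):

static void example_rx_done(struct tb_ring *ring, struct ring_frame *frame,
			    bool canceled)
{
	if (canceled) {
		/* The ring was stopped; just release buffers here. */
		return;
	}

	/*
	 * frame->size, frame->eof, frame->sof and frame->flags now reflect
	 * the completed RX descriptor; consume the received data, then
	 * typically re-post the same frame to keep the RX ring full.
	 */
	tb_ring_rx(ring, frame);
}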
328 spin_lock_irqsave(&ring->lock, flags); in __tb_ring_enqueue()
329 if (ring->running) { in __tb_ring_enqueue()
330 list_add_tail(&frame->list, &ring->queue); in __tb_ring_enqueue()
333 ret = -ESHUTDOWN; in __tb_ring_enqueue()
335 spin_unlock_irqrestore(&ring->lock, flags); in __tb_ring_enqueue()
341 * tb_ring_poll() - Poll one completed frame from the ring
342 * @ring: Ring to poll
354 spin_lock_irqsave(&ring->lock, flags); in tb_ring_poll()
355 if (!ring->running) in tb_ring_poll()
360 if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) { in tb_ring_poll()
361 frame = list_first_entry(&ring->in_flight, typeof(*frame), in tb_ring_poll()
363 list_del_init(&frame->list); in tb_ring_poll()
365 if (!ring->is_tx) { in tb_ring_poll()
366 frame->size = ring->descriptors[ring->tail].length; in tb_ring_poll()
367 frame->eof = ring->descriptors[ring->tail].eof; in tb_ring_poll()
368 frame->sof = ring->descriptors[ring->tail].sof; in tb_ring_poll()
369 frame->flags = ring->descriptors[ring->tail].flags; in tb_ring_poll()
372 ring->tail = (ring->tail + 1) % ring->size; in tb_ring_poll()
376 spin_unlock_irqrestore(&ring->lock, flags); in tb_ring_poll()
388 val = ioread32(ring->nhi->iobase + reg); in __ring_interrupt_mask()
393 iowrite32(val, ring->nhi->iobase + reg); in __ring_interrupt_mask()
396 /* Both @nhi->lock and @ring->lock should be held */
399 if (!ring->running) in __ring_interrupt()
402 if (ring->start_poll) { in __ring_interrupt()
404 ring->start_poll(ring->poll_data); in __ring_interrupt()
406 schedule_work(&ring->work); in __ring_interrupt()
411 * tb_ring_poll_complete() - Re-start interrupt for the ring
412 * @ring: Ring to re-start the interrupt for
414 * This will re-start (unmask) the ring interrupt once the user is done
421 spin_lock_irqsave(&ring->nhi->lock, flags); in tb_ring_poll_complete()
422 spin_lock(&ring->lock); in tb_ring_poll_complete()
423 if (ring->start_poll) in tb_ring_poll_complete()
425 spin_unlock(&ring->lock); in tb_ring_poll_complete()
426 spin_unlock_irqrestore(&ring->nhi->lock, flags); in tb_ring_poll_complete()
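start_poll, tb_ring_poll() and tb_ring_poll_complete() together give a NAPI-style flow: the interrupt handler masks the ring interrupt and only schedules the consumer, which drains completed frames and then unmasks. A sketch of that pattern as a networking client might use it; struct example_priv and the function names are invented for the example, the Thunderbolt calls are the ones documented above:

#include <linux/netdevice.h>
#include <linux/thunderbolt.h>

struct example_priv {
	struct napi_struct napi;
	struct tb_ring *rx_ring;
};

/* Passed as @start_poll to tb_ring_alloc_rx(); runs from the MSI-X handler
 * with the ring interrupt already masked, so it only schedules the poller.
 */
static void example_start_poll(void *data)
{
	struct example_priv *priv = data;

	napi_schedule(&priv->napi);
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	struct ring_frame *frame;
	int done = 0;

	while (done < budget && (frame = tb_ring_poll(priv->rx_ring))) {
		/* Consume frame->size bytes behind frame->buffer_phy,
		 * then re-post the frame to the ring.
		 */
		tb_ring_rx(priv->rx_ring, frame);
		done++;
	}

	if (done < budget && napi_complete_done(napi, done))
		tb_ring_poll_complete(priv->rx_ring);	/* unmask the interrupt */

	return done;
}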
434 if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) in ring_clear_msix()
438 if (ring->is_tx) in ring_clear_msix()
439 iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR); in ring_clear_msix()
441 iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR + in ring_clear_msix()
442 4 * (ring->nhi->hop_count / 32)); in ring_clear_msix()
449 spin_lock(&ring->nhi->lock); in ring_msix()
451 spin_lock(&ring->lock); in ring_msix()
453 spin_unlock(&ring->lock); in ring_msix()
454 spin_unlock(&ring->nhi->lock); in ring_msix()
461 struct tb_nhi *nhi = ring->nhi; in ring_request_msix()
465 if (!nhi->pdev->msix_enabled) in ring_request_msix()
468 ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL); in ring_request_msix()
472 ring->vector = ret; in ring_request_msix()
474 ret = pci_irq_vector(ring->nhi->pdev, ring->vector); in ring_request_msix()
478 ring->irq = ret; in ring_request_msix()
481 ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); in ring_request_msix()
488 ida_free(&nhi->msix_ida, ring->vector); in ring_request_msix()
495 if (ring->irq <= 0) in ring_release_msix()
498 free_irq(ring->irq, ring); in ring_release_msix()
499 ida_free(&ring->nhi->msix_ida, ring->vector); in ring_release_msix()
500 ring->vector = 0; in ring_release_msix()
501 ring->irq = 0; in ring_release_msix()
509 if (nhi->quirks & QUIRK_E2E) { in nhi_alloc_hop()
511 if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { in nhi_alloc_hop()
512 dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n", in nhi_alloc_hop()
513 ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID); in nhi_alloc_hop()
514 ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID; in nhi_alloc_hop()
518 spin_lock_irq(&nhi->lock); in nhi_alloc_hop()
520 if (ring->hop < 0) { in nhi_alloc_hop()
524 * Automatically allocate HopID from the non-reserved in nhi_alloc_hop()
525 * range 1 .. hop_count - 1. in nhi_alloc_hop()
527 for (i = start_hop; i < nhi->hop_count; i++) { in nhi_alloc_hop()
528 if (ring->is_tx) { in nhi_alloc_hop()
529 if (!nhi->tx_rings[i]) { in nhi_alloc_hop()
530 ring->hop = i; in nhi_alloc_hop()
534 if (!nhi->rx_rings[i]) { in nhi_alloc_hop()
535 ring->hop = i; in nhi_alloc_hop()
542 if (ring->hop > 0 && ring->hop < start_hop) { in nhi_alloc_hop()
543 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); in nhi_alloc_hop()
544 ret = -EINVAL; in nhi_alloc_hop()
547 if (ring->hop < 0 || ring->hop >= nhi->hop_count) { in nhi_alloc_hop()
548 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); in nhi_alloc_hop()
549 ret = -EINVAL; in nhi_alloc_hop()
552 if (ring->is_tx && nhi->tx_rings[ring->hop]) { in nhi_alloc_hop()
553 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", in nhi_alloc_hop()
554 ring->hop); in nhi_alloc_hop()
555 ret = -EBUSY; in nhi_alloc_hop()
558 if (!ring->is_tx && nhi->rx_rings[ring->hop]) { in nhi_alloc_hop()
559 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", in nhi_alloc_hop()
560 ring->hop); in nhi_alloc_hop()
561 ret = -EBUSY; in nhi_alloc_hop()
565 if (ring->is_tx) in nhi_alloc_hop()
566 nhi->tx_rings[ring->hop] = ring; in nhi_alloc_hop()
568 nhi->rx_rings[ring->hop] = ring; in nhi_alloc_hop()
571 spin_unlock_irq(&nhi->lock); in nhi_alloc_hop()
584 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", in tb_ring_alloc()
591 spin_lock_init(&ring->lock); in tb_ring_alloc()
592 INIT_LIST_HEAD(&ring->queue); in tb_ring_alloc()
593 INIT_LIST_HEAD(&ring->in_flight); in tb_ring_alloc()
594 INIT_WORK(&ring->work, ring_work); in tb_ring_alloc()
596 ring->nhi = nhi; in tb_ring_alloc()
597 ring->hop = hop; in tb_ring_alloc()
598 ring->is_tx = transmit; in tb_ring_alloc()
599 ring->size = size; in tb_ring_alloc()
600 ring->flags = flags; in tb_ring_alloc()
601 ring->e2e_tx_hop = e2e_tx_hop; in tb_ring_alloc()
602 ring->sof_mask = sof_mask; in tb_ring_alloc()
603 ring->eof_mask = eof_mask; in tb_ring_alloc()
604 ring->head = 0; in tb_ring_alloc()
605 ring->tail = 0; in tb_ring_alloc()
606 ring->running = false; in tb_ring_alloc()
607 ring->start_poll = start_poll; in tb_ring_alloc()
608 ring->poll_data = poll_data; in tb_ring_alloc()
610 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
611 size * sizeof(*ring->descriptors), in tb_ring_alloc()
612 &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); in tb_ring_alloc()
613 if (!ring->descriptors) in tb_ring_alloc()
627 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
628 ring->size * sizeof(*ring->descriptors), in tb_ring_alloc()
629 ring->descriptors, ring->descriptors_dma); in tb_ring_alloc()
637 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
651 * tb_ring_alloc_rx() - Allocate DMA ring for receive
653 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
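For orientation, a hedged sketch of how a client driver allocates and starts a TX/RX ring pair in callback mode (start_poll == NULL); the parameter lists follow the tb_ring_alloc_tx()/tb_ring_alloc_rx() prototypes as I recall them from include/linux/thunderbolt.h, and the ring size, masks and error handling are arbitrary choices for the example:

static int example_rings_init(struct tb_nhi *nhi, struct tb_ring **tx,
			      struct tb_ring **rx)
{
	/* hop == -1: let nhi_alloc_hop() pick a free HopID; 16 descriptors. */
	*tx = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_FRAME);
	if (!*tx)
		return -ENOMEM;

	/* No E2E flow control, accept all SOF/EOF PDFs, no start_poll. */
	*rx = tb_ring_alloc_rx(nhi, -1, 16, RING_FLAG_FRAME, 0,
			       0xffff, 0xffff, NULL, NULL);
	if (!*rx) {
		tb_ring_free(*tx);
		return -ENOMEM;
	}

	tb_ring_start(*tx);
	tb_ring_start(*rx);
	return 0;
}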
675 * tb_ring_start() - enable a ring
685 spin_lock_irq(&ring->nhi->lock); in tb_ring_start()
686 spin_lock(&ring->lock); in tb_ring_start()
687 if (ring->nhi->going_away) in tb_ring_start()
689 if (ring->running) { in tb_ring_start()
690 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); in tb_ring_start()
693 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n", in tb_ring_start()
694 RING_TYPE(ring), ring->hop); in tb_ring_start()
696 if (ring->flags & RING_FLAG_FRAME) { in tb_ring_start()
705 ring_iowrite64desc(ring, ring->descriptors_dma, 0); in tb_ring_start()
706 if (ring->is_tx) { in tb_ring_start()
707 ring_iowrite32desc(ring, ring->size, 12); in tb_ring_start()
711 u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask; in tb_ring_start()
713 ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12); in tb_ring_start()
722 if (ring->flags & RING_FLAG_E2E) { in tb_ring_start()
723 if (!ring->is_tx) { in tb_ring_start()
726 hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT; in tb_ring_start()
730 dev_dbg(&ring->nhi->pdev->dev, in tb_ring_start()
732 RING_TYPE(ring), ring->hop, ring->e2e_tx_hop); in tb_ring_start()
734 dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n", in tb_ring_start()
735 RING_TYPE(ring), ring->hop); in tb_ring_start()
743 ring->running = true; in tb_ring_start()
745 spin_unlock(&ring->lock); in tb_ring_start()
746 spin_unlock_irq(&ring->nhi->lock); in tb_ring_start()
751 * tb_ring_stop() - shutdown a ring
757 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is started again.
761 * with frame->canceled set to true (on the callback thread). This method
766 spin_lock_irq(&ring->nhi->lock); in tb_ring_stop()
767 spin_lock(&ring->lock); in tb_ring_stop()
768 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n", in tb_ring_stop()
769 RING_TYPE(ring), ring->hop); in tb_ring_stop()
770 if (ring->nhi->going_away) in tb_ring_stop()
772 if (!ring->running) { in tb_ring_stop()
773 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", in tb_ring_stop()
774 RING_TYPE(ring), ring->hop); in tb_ring_stop()
783 ring->head = 0; in tb_ring_stop()
784 ring->tail = 0; in tb_ring_stop()
785 ring->running = false; in tb_ring_stop()
788 spin_unlock(&ring->lock); in tb_ring_stop()
789 spin_unlock_irq(&ring->nhi->lock); in tb_ring_stop()
792 * schedule ring->work to invoke callbacks on all remaining frames. in tb_ring_stop()
794 schedule_work(&ring->work); in tb_ring_stop()
795 flush_work(&ring->work); in tb_ring_stop()
800 * tb_ring_free() - free ring
802 * When this method returns all invocations of frame->callback will have
807 * Must NOT be called from ring_frame->callback!
811 spin_lock_irq(&ring->nhi->lock); in tb_ring_free()
814 * nhi_interrupt_work cannot reschedule ring->work. in tb_ring_free()
816 if (ring->is_tx) in tb_ring_free()
817 ring->nhi->tx_rings[ring->hop] = NULL; in tb_ring_free()
819 ring->nhi->rx_rings[ring->hop] = NULL; in tb_ring_free()
821 if (ring->running) { in tb_ring_free()
822 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", in tb_ring_free()
823 RING_TYPE(ring), ring->hop); in tb_ring_free()
825 spin_unlock_irq(&ring->nhi->lock); in tb_ring_free()
829 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_free()
830 ring->size * sizeof(*ring->descriptors), in tb_ring_free()
831 ring->descriptors, ring->descriptors_dma); in tb_ring_free()
833 ring->descriptors = NULL; in tb_ring_free()
834 ring->descriptors_dma = 0; in tb_ring_free()
837 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), in tb_ring_free()
838 ring->hop); in tb_ring_free()
841 * ring->work can no longer be scheduled (it is scheduled only in tb_ring_free()
845 flush_work(&ring->work); in tb_ring_free()
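The teardown mirror of the allocation sketch above, again purely illustrative: stop first (which flushes queued frames and runs their callbacks with canceled set), then free, and never from inside a frame callback:

static void example_rings_exit(struct tb_ring *tx, struct tb_ring *rx)
{
	tb_ring_stop(rx);	/* remaining callbacks run with canceled == true */
	tb_ring_stop(tx);
	tb_ring_free(rx);
	tb_ring_free(tx);
}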
851 * nhi_mailbox_cmd() - Send a command through NHI mailbox
864 iowrite32(data, nhi->iobase + REG_INMAIL_DATA); in nhi_mailbox_cmd()
866 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
869 iowrite32(val, nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
873 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
880 return -ETIMEDOUT; in nhi_mailbox_cmd()
882 return -EIO; in nhi_mailbox_cmd()
888 * nhi_mailbox_mode() - Return current firmware operation mode
891 * The function reads the current firmware operation mode using the NHI mailbox
898 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); in nhi_mailbox_mode()
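A hedged example of how the firmware connection-manager code drives these two helpers; the enum names NHI_FW_CM_MODE and NHI_MAILBOX_DRV_UNLOADS are from drivers/thunderbolt/nhi.h as I recall them, and the surrounding logic only approximates what icm.c does:

/* Tell the firmware CM that the driver is unloading, but only if the
 * firmware is actually running in CM mode.
 */
static void example_driver_unloads(struct tb_nhi *nhi)
{
	int ret;

	if (nhi_mailbox_mode(nhi) != NHI_FW_CM_MODE)
		return;

	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
	if (ret)
		dev_warn(&nhi->pdev->dev, "mailbox command failed: %d\n", ret);
}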
910 int hop = -1; in nhi_interrupt_work()
914 spin_lock_irq(&nhi->lock); in nhi_interrupt_work()
921 for (bit = 0; bit < 3 * nhi->hop_count; bit++) { in nhi_interrupt_work()
923 value = ioread32(nhi->iobase in nhi_interrupt_work()
926 if (++hop == nhi->hop_count) { in nhi_interrupt_work()
933 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
939 ring = nhi->tx_rings[hop]; in nhi_interrupt_work()
941 ring = nhi->rx_rings[hop]; in nhi_interrupt_work()
943 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
950 spin_lock(&ring->lock); in nhi_interrupt_work()
952 spin_unlock(&ring->lock); in nhi_interrupt_work()
954 spin_unlock_irq(&nhi->lock); in nhi_interrupt_work()
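The loop above walks 3 * hop_count status bits read from REG_RING_NOTIFY_BASE. A small helper written only to make the bit layout explicit (it is not part of the driver):

/* Bits [0, hop_count) signal TX rings, [hop_count, 2*hop_count) RX rings,
 * and [2*hop_count, 3*hop_count) report RX overflow ("frame dropped").
 */
static void example_decode_notify_bit(const struct tb_nhi *nhi,
				      unsigned int bit,
				      unsigned int *type, unsigned int *hop)
{
	*type = bit / nhi->hop_count;	/* 0 = TX, 1 = RX, 2 = RX overflow */
	*hop = bit % nhi->hop_count;
}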
960 schedule_work(&nhi->interrupt_work); in nhi_msi()
968 struct tb_nhi *nhi = tb->nhi; in __nhi_suspend_noirq()
975 if (nhi->ops && nhi->ops->suspend_noirq) { in __nhi_suspend_noirq()
976 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); in __nhi_suspend_noirq()
1013 if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val)) in nhi_wake_supported()
1040 iowrite32(throttle, nhi->iobase + reg); in nhi_enable_int_throttling()
1048 struct tb_nhi *nhi = tb->nhi; in nhi_resume_noirq()
1057 nhi->going_away = true; in nhi_resume_noirq()
1059 if (nhi->ops && nhi->ops->resume_noirq) { in nhi_resume_noirq()
1060 ret = nhi->ops->resume_noirq(nhi); in nhi_resume_noirq()
1064 nhi_enable_int_throttling(tb->nhi); in nhi_resume_noirq()
1088 if (pm_runtime_suspended(&pdev->dev)) in nhi_complete()
1089 pm_runtime_resume(&pdev->dev); in nhi_complete()
1098 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_suspend()
1105 if (nhi->ops && nhi->ops->runtime_suspend) { in nhi_runtime_suspend()
1106 ret = nhi->ops->runtime_suspend(tb->nhi); in nhi_runtime_suspend()
1117 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_resume()
1120 if (nhi->ops && nhi->ops->runtime_resume) { in nhi_runtime_resume()
1121 ret = nhi->ops->runtime_resume(nhi); in nhi_runtime_resume()
1134 dev_dbg(&nhi->pdev->dev, "shutdown\n"); in nhi_shutdown()
1136 for (i = 0; i < nhi->hop_count; i++) { in nhi_shutdown()
1137 if (nhi->tx_rings[i]) in nhi_shutdown()
1138 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1140 if (nhi->rx_rings[i]) in nhi_shutdown()
1141 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1149 if (!nhi->pdev->msix_enabled) { in nhi_shutdown()
1150 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); in nhi_shutdown()
1151 flush_work(&nhi->interrupt_work); in nhi_shutdown()
1153 ida_destroy(&nhi->msix_ida); in nhi_shutdown()
1155 if (nhi->ops && nhi->ops->shutdown) in nhi_shutdown()
1156 nhi->ops->shutdown(nhi); in nhi_shutdown()
1161 if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) { in nhi_check_quirks()
1167 nhi->quirks |= QUIRK_AUTO_CLEAR_INT; in nhi_check_quirks()
1169 switch (nhi->pdev->device) { in nhi_check_quirks()
1173 * Falcon Ridge controller needs the end-to-end in nhi_check_quirks()
1177 nhi->quirks |= QUIRK_E2E; in nhi_check_quirks()
1185 if (!pdev->external_facing || in nhi_check_iommu_pdev()
1186 !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION)) in nhi_check_iommu_pdev()
1194 struct pci_bus *bus = nhi->pdev->bus; in nhi_check_iommu()
1212 * to have been subverted by a pre-boot DMA attack. in nhi_check_iommu()
1214 while (bus->parent) in nhi_check_iommu()
1215 bus = bus->parent; in nhi_check_iommu()
1219 nhi->iommu_dma_protection = port_ok; in nhi_check_iommu()
1220 dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n", in nhi_check_iommu()
1229 val = ioread32(nhi->iobase + REG_CAPS); in nhi_reset()
1235 dev_dbg(&nhi->pdev->dev, "skipping host router reset\n"); in nhi_reset()
1239 iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET); in nhi_reset()
1244 val = ioread32(nhi->iobase + REG_RESET); in nhi_reset()
1246 dev_warn(&nhi->pdev->dev, "host router reset successful\n"); in nhi_reset()
1252 dev_warn(&nhi->pdev->dev, "timeout resetting host router\n"); in nhi_reset()
1257 struct pci_dev *pdev = nhi->pdev; in nhi_init_msi()
1258 struct device *dev = &pdev->dev; in nhi_init_msi()
1266 ida_init(&nhi->msix_ida); in nhi_init_msi()
1269 * The NHI has 16 MSI-X vectors or a single MSI. We first try to in nhi_init_msi()
1270 * get all MSI-X vectors and if we succeed, each ring will have in nhi_init_msi()
1271 * one MSI-X. If for some reason that does not work out, we in nhi_init_msi()
1281 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); in nhi_init_msi()
1283 irq = pci_irq_vector(nhi->pdev, 0); in nhi_init_msi()
1287 res = devm_request_irq(&pdev->dev, irq, nhi_msi, in nhi_init_msi()
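The MSI-X-then-MSI fallback described in the comment above boils down to two pci_alloc_irq_vectors() calls bounded by the MSIX_MIN_VECS/MSIX_MAX_VECS constants this file defines; a condensed sketch of that pattern, not a copy of nhi_init_msi():

/* Try the full MSI-X range first, then fall back to a single MSI. */
static int example_alloc_vectors(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0)
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);

	return nvec;	/* number of vectors, or a negative errno */
}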
1300 if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val)) in nhi_imr_valid()
1312 * capabilities, we use software CM. in nhi_select_cm()
1318 * Either a firmware-based CM is running (we did not get control in nhi_select_cm()
1319 * from the firmware) or this is a pre-USB4 PC, so try the firmware in nhi_select_cm()
1320 * CM first and then fall back to the software CM. in nhi_select_cm()
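In code the selection is a straightforward probe-and-fallback; a sketch from memory of nhi_select_cm(), using the icm_probe()/tb_probe() helpers declared in the driver, so treat the details as approximate:

static struct tb *example_select_cm(struct tb_nhi *nhi)
{
	struct tb *tb;

	/* Firmware CM first; icm_probe() returns NULL when the ICM is not
	 * running, in which case the software CM (tb_probe()) takes over.
	 */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);

	return tb;
}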
1331 struct device *dev = &pdev->dev; in nhi_probe()
1337 return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n"); in nhi_probe()
1343 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); in nhi_probe()
1345 return -ENOMEM; in nhi_probe()
1347 nhi->pdev = pdev; in nhi_probe()
1348 nhi->ops = (const struct tb_nhi_ops *)id->driver_data; in nhi_probe()
1350 nhi->iobase = pcim_iomap_region(pdev, 0, "thunderbolt"); in nhi_probe()
1351 res = PTR_ERR_OR_ZERO(nhi->iobase); in nhi_probe()
1355 nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff; in nhi_probe()
1356 dev_dbg(dev, "total paths: %d\n", nhi->hop_count); in nhi_probe()
1358 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1359 sizeof(*nhi->tx_rings), GFP_KERNEL); in nhi_probe()
1360 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1361 sizeof(*nhi->rx_rings), GFP_KERNEL); in nhi_probe()
1362 if (!nhi->tx_rings || !nhi->rx_rings) in nhi_probe()
1363 return -ENOMEM; in nhi_probe()
1373 spin_lock_init(&nhi->lock); in nhi_probe()
1375 res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in nhi_probe()
1381 if (nhi->ops && nhi->ops->init) { in nhi_probe()
1382 res = nhi->ops->init(nhi); in nhi_probe()
1389 return dev_err_probe(dev, -ENODEV, in nhi_probe()
1406 device_wakeup_enable(&pdev->dev); in nhi_probe()
1408 pm_runtime_allow(&pdev->dev); in nhi_probe()
1409 pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY); in nhi_probe()
1410 pm_runtime_use_autosuspend(&pdev->dev); in nhi_probe()
1411 pm_runtime_put_autosuspend(&pdev->dev); in nhi_probe()
1419 struct tb_nhi *nhi = tb->nhi; in nhi_remove()
1421 pm_runtime_get_sync(&pdev->dev); in nhi_remove()
1422 pm_runtime_dont_use_autosuspend(&pdev->dev); in nhi_remove()
1423 pm_runtime_forbid(&pdev->dev); in nhi_remove()
1439 * pci-tunnels stay alive.