Lines Matching +full:dma +full:- +full:router

1 // SPDX-License-Identifier: GPL-2.0
3 * Thunderbolt driver - Tunneling support
54 * Number of credits we try to allocate for each DMA path if not limited
55 * by the host router baMaxHI.
58 /* Minimum number of credits for DMA path */
88 "DPRX capability read timeout in ms, -1 waits forever (default: "
93 MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
101 static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
108 return port->total_credits - port->ctl_credits; in tb_usable_credits()
112 * tb_available_credits() - Available credits for PCIe and DMA
114 * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
120 const struct tb_switch *sw = port->sw; in tb_available_credits()
124 usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; in tb_available_credits()
125 pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; in tb_available_credits()
128 spare = min_not_zero(sw->max_dma_credits, dma_credits); in tb_available_credits()
129 /* Add some credits for potential second DMA tunnel */ in tb_available_credits()
141 if (sw->min_dp_aux_credits + sw->min_dp_main_credits) in tb_available_credits()
142 ndp = (credits - (usb3 + pcie + spare)) / in tb_available_credits()
143 (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
149 credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); in tb_available_credits()
150 credits -= usb3; in tb_available_credits()
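
The credit budgeting matched above from tb_usable_credits() and tb_available_credits() can be followed with concrete numbers. The sketch below is not kernel code; every numeric value is an illustrative assumption, and it only mirrors the arithmetic on the matched lines (reserve USB3/PCIe/DMA credits first, size the number of DP streams, then leave the rest for PCIe and DMA paths).

/* Illustrative reconstruction of the arithmetic in tb_available_credits();
 * all values below are assumptions, not taken from the source.
 */
static unsigned int sketch_available_credits(void)
{
	unsigned int total = 60, ctl = 2;		/* port buffer allocation */
	unsigned int usb3 = 14, pcie = 6, spare = 15;	/* protocol reservations */
	unsigned int dp_aux = 1, dp_main = 18;		/* min credits per DP stream */
	unsigned int credits = total - ctl;		/* tb_usable_credits() */
	unsigned int ndp = 0;

	/* How many DP streams still fit after the USB3/PCIe/DMA reservations */
	if (dp_aux + dp_main)
		ndp = (credits - (usb3 + pcie + spare)) / (dp_aux + dp_main);

	/* Whatever remains is what PCIe and DMA paths can use */
	credits -= ndp * (dp_aux + dp_main);
	credits -= usb3;

	return credits;	/* 25 with the example values above */
}
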
160 struct tb_port *out_port = hop->out_port; in tb_init_pm_support()
161 struct tb_port *in_port = hop->in_port; in tb_init_pm_support()
164 usb4_switch_version(in_port->sw) >= 2) in tb_init_pm_support()
165 hop->pm_support = true; in tb_init_pm_support()
177 tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL); in tb_tunnel_alloc()
178 if (!tunnel->paths) { in tb_tunnel_alloc()
183 INIT_LIST_HEAD(&tunnel->list); in tb_tunnel_alloc()
184 tunnel->tb = tb; in tb_tunnel_alloc()
185 tunnel->npaths = npaths; in tb_tunnel_alloc()
186 tunnel->type = type; in tb_tunnel_alloc()
187 kref_init(&tunnel->kref); in tb_tunnel_alloc()
195 kref_get(&tunnel->kref); in tb_tunnel_get()
204 if (tunnel->destroy) in tb_tunnel_destroy()
205 tunnel->destroy(tunnel); in tb_tunnel_destroy()
207 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_destroy()
208 if (tunnel->paths[i]) in tb_tunnel_destroy()
209 tb_path_free(tunnel->paths[i]); in tb_tunnel_destroy()
212 kfree(tunnel->paths); in tb_tunnel_destroy()
219 kref_put(&tunnel->kref, tb_tunnel_destroy); in tb_tunnel_put()
225 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); in tb_pci_set_ext_encapsulation()
229 if ((usb4_switch_version(tunnel->src_port->sw) < 2) || in tb_pci_set_ext_encapsulation()
230 (usb4_switch_version(tunnel->dst_port->sw) < 2)) in tb_pci_set_ext_encapsulation()
236 ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable); in tb_pci_set_ext_encapsulation()
241 * Downstream router could be unplugged so disable of encapsulation in tb_pci_set_ext_encapsulation()
242 * in upstream router is still possible. in tb_pci_set_ext_encapsulation()
244 ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable); in tb_pci_set_ext_encapsulation()
248 if (ret != -ENODEV) in tb_pci_set_ext_encapsulation()
268 res = tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
270 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
276 res = tb_pci_port_enable(tunnel->src_port, activate); in tb_pci_activate()
280 /* Downstream router could be unplugged */ in tb_pci_activate()
281 tb_pci_port_enable(tunnel->dst_port, activate); in tb_pci_activate()
289 struct tb_port *port = hop->in_port; in tb_pci_init_credits()
290 struct tb_switch *sw = port->sw; in tb_pci_init_credits()
297 credits = min(sw->max_pcie_credits, available); in tb_pci_init_credits()
300 return -ENOSPC; in tb_pci_init_credits()
305 credits = port->bonded ? 32 : 16; in tb_pci_init_credits()
310 hop->initial_credits = credits; in tb_pci_init_credits()
318 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_pci_init_path()
319 path->egress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
320 path->ingress_fc_enable = TB_PATH_ALL; in tb_pci_init_path()
321 path->ingress_shared_buffer = TB_PATH_NONE; in tb_pci_init_path()
322 path->priority = TB_PCI_PRIORITY; in tb_pci_init_path()
323 path->weight = TB_PCI_WEIGHT; in tb_pci_init_path()
324 path->drop_packages = 0; in tb_pci_init_path()
338 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
360 tunnel->activate = tb_pci_activate; in tb_tunnel_discover_pci()
361 tunnel->src_port = down; in tb_tunnel_discover_pci()
368 path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, in tb_tunnel_discover_pci()
369 &tunnel->dst_port, "PCIe Up", alloc_hopid); in tb_tunnel_discover_pci()
375 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_discover_pci()
376 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) in tb_tunnel_discover_pci()
379 path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, in tb_tunnel_discover_pci()
383 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_discover_pci()
384 if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) in tb_tunnel_discover_pci()
388 if (!tb_port_is_pcie_up(tunnel->dst_port)) { in tb_tunnel_discover_pci()
389 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_pci()
394 if (down != tunnel->src_port) { in tb_tunnel_discover_pci()
399 if (!tb_pci_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_pci()
417 * tb_tunnel_alloc_pci() - allocate a pci tunnel
437 tunnel->activate = tb_pci_activate; in tb_tunnel_alloc_pci()
438 tunnel->src_port = down; in tb_tunnel_alloc_pci()
439 tunnel->dst_port = up; in tb_tunnel_alloc_pci()
445 tunnel->paths[TB_PCI_PATH_DOWN] = path; in tb_tunnel_alloc_pci()
453 tunnel->paths[TB_PCI_PATH_UP] = path; in tb_tunnel_alloc_pci()
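
The discover/alloc helpers matched above are consumed by the software connection manager. Below is a minimal caller sketch, assuming the prototypes declared in drivers/thunderbolt/tunnel.h (tb_tunnel_alloc_pci(), tb_tunnel_activate(), tb_tunnel_put()); the surrounding function and error values are hypothetical.

/* Hedged sketch of a connection-manager caller: @up is the PCIe upstream
 * adapter of the new router, @down the PCIe downstream adapter on the
 * host-side router.
 */
static int sketch_tunnel_pci(struct tb *tb, struct tb_port *up,
			     struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_pci(tb, up, down);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		/* Paths are freed together with the last tunnel reference */
		tb_tunnel_put(tunnel);
		return -EIO;
	}

	/* A real connection manager would now add it to its tunnel list */
	return 0;
}
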
465 * tb_tunnel_reserved_pci() - Amount of bandwidth to reserve for PCIe
479 if (WARN_ON_ONCE(!port->remote)) in tb_tunnel_reserved_pci()
490 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_UP)) in tb_tunnel_reserved_pci()
492 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_DOWN)) in tb_tunnel_reserved_pci()
495 if (!tb_switch_find_port(port->sw, TB_TYPE_PCIE_DOWN)) in tb_tunnel_reserved_pci()
497 if (!tb_switch_find_port(port->remote->sw, TB_TYPE_PCIE_UP)) in tb_tunnel_reserved_pci()
523 if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) in tb_dp_cm_handshake()
527 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
534 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
540 out->cap_adap + DP_STATUS_CTRL, 1); in tb_dp_cm_handshake()
548 return -ETIMEDOUT; in tb_dp_cm_handshake()
703 return -ENOSR; in tb_dp_reduce_bandwidth()
709 struct tb_port *out = tunnel->dst_port; in tb_dp_xchg_caps()
710 struct tb_port *in = tunnel->src_port; in tb_dp_xchg_caps()
717 if (in->sw->generation < 2 || out->sw->generation < 2) in tb_dp_xchg_caps()
730 in->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
735 out->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_xchg_caps()
741 out->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
763 max_bw = tunnel->max_down; in tb_dp_xchg_caps()
765 max_bw = tunnel->max_up; in tb_dp_xchg_caps()
796 if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { in tb_dp_xchg_caps()
802 in->cap_adap + DP_REMOTE_CAP, 1); in tb_dp_xchg_caps()
808 struct tb_port *out = tunnel->dst_port; in tb_dp_bandwidth_alloc_mode_enable()
809 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_alloc_mode_enable()
821 ret = usb4_dp_port_set_group_id(in, in->group->index); in tb_dp_bandwidth_alloc_mode_enable()
826 * Get the non-reduced rate and lanes based on the lowest in tb_dp_bandwidth_alloc_mode_enable()
830 in->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_bandwidth_alloc_mode_enable()
835 out->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_bandwidth_alloc_mode_enable()
848 tb_tunnel_dbg(tunnel, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", in tb_dp_bandwidth_alloc_mode_enable()
875 * Returns -EINVAL if granularity above is outside of the in tb_dp_bandwidth_alloc_mode_enable()
888 estimated_bw = tunnel->max_down; in tb_dp_bandwidth_alloc_mode_enable()
890 estimated_bw = tunnel->max_up; in tb_dp_bandwidth_alloc_mode_enable()
909 struct tb_port *in = tunnel->src_port; in tb_dp_pre_activate()
910 struct tb_switch *sw = in->sw; in tb_dp_pre_activate()
911 struct tb *tb = in->sw->tb; in tb_dp_pre_activate()
926 ret = usb4_dp_port_set_cm_id(in, tb->index); in tb_dp_pre_activate()
935 struct tb_port *in = tunnel->src_port; in tb_dp_post_deactivate()
954 struct tb_port *in = tunnel->src_port; in tb_dp_wait_dprx()
965 in->cap_adap + DP_COMMON_CAP, 1); in tb_dp_wait_dprx()
976 return -ETIMEDOUT; in tb_dp_wait_dprx()
982 struct tb *tb = tunnel->tb; in tb_dp_dprx_work()
984 if (!tunnel->dprx_canceled) { in tb_dp_dprx_work()
985 mutex_lock(&tb->lock); in tb_dp_dprx_work()
986 if (tb_dp_is_usb4(tunnel->src_port->sw) && in tb_dp_dprx_work()
988 if (ktime_before(ktime_get(), tunnel->dprx_timeout)) { in tb_dp_dprx_work()
989 queue_delayed_work(tb->wq, &tunnel->dprx_work, in tb_dp_dprx_work()
991 mutex_unlock(&tb->lock); in tb_dp_dprx_work()
995 tunnel->state = TB_TUNNEL_ACTIVE; in tb_dp_dprx_work()
997 mutex_unlock(&tb->lock); in tb_dp_dprx_work()
1000 if (tunnel->callback) in tb_dp_dprx_work()
1001 tunnel->callback(tunnel, tunnel->callback_data); in tb_dp_dprx_work()
1012 tunnel->dprx_started = true; in tb_dp_dprx_start()
1014 if (tunnel->callback) { in tb_dp_dprx_start()
1015 tunnel->dprx_timeout = dprx_timeout_to_ktime(dprx_timeout); in tb_dp_dprx_start()
1016 queue_delayed_work(tunnel->tb->wq, &tunnel->dprx_work, 0); in tb_dp_dprx_start()
1017 return -EINPROGRESS; in tb_dp_dprx_start()
1020 return tb_dp_is_usb4(tunnel->src_port->sw) ? in tb_dp_dprx_start()
1026 if (tunnel->dprx_started) { in tb_dp_dprx_stop()
1027 tunnel->dprx_started = false; in tb_dp_dprx_stop()
1028 tunnel->dprx_canceled = true; in tb_dp_dprx_stop()
1029 cancel_delayed_work(&tunnel->dprx_work); in tb_dp_dprx_stop()
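
The dprx helpers matched above implement a poll-until-deadline loop on a delayed work item. A generic, self-contained sketch of that pattern is shown below; struct poller, check_ready() and the 50 ms interval are hypothetical names, only the workqueue/ktime calls are real kernel APIs.

#include <linux/ktime.h>
#include <linux/workqueue.h>

#define SKETCH_POLL_INTERVAL_MS	50	/* assumed polling interval */

struct poller {
	struct delayed_work work;
	struct workqueue_struct *wq;
	ktime_t deadline;
};

static bool check_ready(struct poller *p)
{
	return false;	/* placeholder for e.g. the DPRX capability check */
}

static void poll_work(struct work_struct *work)
{
	struct poller *p = container_of(to_delayed_work(work), struct poller, work);

	if (!check_ready(p)) {
		/* Not ready yet: re-arm unless the deadline has passed */
		if (ktime_before(ktime_get(), p->deadline)) {
			queue_delayed_work(p->wq, &p->work,
					   msecs_to_jiffies(SKETCH_POLL_INTERVAL_MS));
			return;
		}
	}
	/* Ready or timed out: notify the waiter here */
}
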
1042 paths = tunnel->paths; in tb_dp_activate()
1043 last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1; in tb_dp_activate()
1045 tb_dp_port_set_hops(tunnel->src_port, in tb_dp_activate()
1046 paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
1047 paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index, in tb_dp_activate()
1048 paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index); in tb_dp_activate()
1050 tb_dp_port_set_hops(tunnel->dst_port, in tb_dp_activate()
1051 paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index, in tb_dp_activate()
1052 paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index, in tb_dp_activate()
1053 paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index); in tb_dp_activate()
1056 tb_dp_port_hpd_clear(tunnel->src_port); in tb_dp_activate()
1057 tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0); in tb_dp_activate()
1058 if (tb_port_is_dpout(tunnel->dst_port)) in tb_dp_activate()
1059 tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0); in tb_dp_activate()
1062 ret = tb_dp_port_enable(tunnel->src_port, active); in tb_dp_activate()
1066 if (tb_port_is_dpout(tunnel->dst_port)) { in tb_dp_activate()
1067 ret = tb_dp_port_enable(tunnel->dst_port, active); in tb_dp_activate()
1076 * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
1085 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_mode_maximum_bandwidth()
1097 in->cap_adap + DP_LOCAL_CAP, 1); in tb_dp_bandwidth_mode_maximum_bandwidth()
1120 struct tb_port *in = tunnel->src_port; in tb_dp_bandwidth_mode_consumed_bandwidth()
1124 return -EOPNOTSUPP; in tb_dp_bandwidth_mode_consumed_bandwidth()
1126 if (!tunnel->bw_mode) in tb_dp_bandwidth_mode_consumed_bandwidth()
1127 return -EOPNOTSUPP; in tb_dp_bandwidth_mode_consumed_bandwidth()
1155 struct tb_port *in = tunnel->src_port; in tb_dp_allocated_bandwidth()
1161 if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) { in tb_dp_allocated_bandwidth()
1186 return tunnel->consumed_bandwidth(tunnel, allocated_up, in tb_dp_allocated_bandwidth()
1193 struct tb_port *in = tunnel->src_port; in tb_dp_alloc_bandwidth()
1197 return -EOPNOTSUPP; in tb_dp_alloc_bandwidth()
1221 tunnel->bw_mode = true; in tb_dp_alloc_bandwidth()
1229 struct tb_port *in = tunnel->src_port; in tb_dp_read_cap()
1241 return -EINVAL; in tb_dp_read_cap()
1248 ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1); in tb_dp_read_cap()
1262 if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port)) in tb_dp_maximum_bandwidth()
1263 return -EOPNOTSUPP; in tb_dp_maximum_bandwidth()
1283 const struct tb_switch *sw = tunnel->src_port->sw; in tb_dp_consumed_bandwidth()
1290 if (ret == -ETIMEDOUT) { in tb_dp_consumed_bandwidth()
1312 if (ret != -EOPNOTSUPP) in tb_dp_consumed_bandwidth()
1321 } else if (sw->generation >= 2) { in tb_dp_consumed_bandwidth()
1345 struct tb_port *port = hop->in_port; in tb_dp_init_aux_credits()
1346 struct tb_switch *sw = port->sw; in tb_dp_init_aux_credits()
1349 hop->initial_credits = sw->min_dp_aux_credits; in tb_dp_init_aux_credits()
1351 hop->initial_credits = 1; in tb_dp_init_aux_credits()
1358 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dp_init_aux_path()
1359 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
1360 path->ingress_fc_enable = TB_PATH_ALL; in tb_dp_init_aux_path()
1361 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_aux_path()
1362 path->priority = TB_DP_AUX_PRIORITY; in tb_dp_init_aux_path()
1363 path->weight = TB_DP_AUX_WEIGHT; in tb_dp_init_aux_path()
1374 struct tb_port *port = hop->in_port; in tb_dp_init_video_credits()
1375 struct tb_switch *sw = port->sw; in tb_dp_init_video_credits()
1388 nfc_credits = port->config.nfc_credits & in tb_dp_init_video_credits()
1390 if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) in tb_dp_init_video_credits()
1391 return -ENOSPC; in tb_dp_init_video_credits()
1393 hop->nfc_credits = sw->min_dp_main_credits; in tb_dp_init_video_credits()
1395 hop->nfc_credits = min(port->total_credits - 2, 12U); in tb_dp_init_video_credits()
1405 path->egress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
1406 path->egress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
1407 path->ingress_fc_enable = TB_PATH_NONE; in tb_dp_init_video_path()
1408 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dp_init_video_path()
1409 path->priority = TB_DP_VIDEO_PRIORITY; in tb_dp_init_video_path()
1410 path->weight = TB_DP_VIDEO_WEIGHT; in tb_dp_init_video_path()
1430 in = tunnel->src_port; in tb_dp_dump()
1431 out = tunnel->dst_port; in tb_dp_dump()
1434 in->cap_adap + DP_LOCAL_CAP, 1)) in tb_dp_dump()
1445 out->cap_adap + DP_LOCAL_CAP, 1)) in tb_dp_dump()
1456 in->cap_adap + DP_REMOTE_CAP, 1)) in tb_dp_dump()
1467 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
1492 tunnel->pre_activate = tb_dp_pre_activate; in tb_tunnel_discover_dp()
1493 tunnel->activate = tb_dp_activate; in tb_tunnel_discover_dp()
1494 tunnel->post_deactivate = tb_dp_post_deactivate; in tb_tunnel_discover_dp()
1495 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; in tb_tunnel_discover_dp()
1496 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; in tb_tunnel_discover_dp()
1497 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; in tb_tunnel_discover_dp()
1498 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_discover_dp()
1499 tunnel->src_port = in; in tb_tunnel_discover_dp()
1501 path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, in tb_tunnel_discover_dp()
1502 &tunnel->dst_port, "Video", alloc_hopid); in tb_tunnel_discover_dp()
1508 tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; in tb_tunnel_discover_dp()
1509 if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], false)) in tb_tunnel_discover_dp()
1512 path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", in tb_tunnel_discover_dp()
1516 tunnel->paths[TB_DP_AUX_PATH_OUT] = path; in tb_tunnel_discover_dp()
1517 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT], false); in tb_tunnel_discover_dp()
1519 path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, in tb_tunnel_discover_dp()
1523 tunnel->paths[TB_DP_AUX_PATH_IN] = path; in tb_tunnel_discover_dp()
1524 tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN], false); in tb_tunnel_discover_dp()
1527 if (!tb_port_is_dpout(tunnel->dst_port)) { in tb_tunnel_discover_dp()
1532 if (!tb_dp_port_is_enabled(tunnel->dst_port)) in tb_tunnel_discover_dp()
1535 if (!tb_dp_port_hpd_is_active(tunnel->dst_port)) in tb_tunnel_discover_dp()
1538 if (port != tunnel->src_port) { in tb_tunnel_discover_dp()
1557 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
1575 * The @callback is called without @tb->lock held.
1590 if (WARN_ON(!in->cap_adap || !out->cap_adap)) in tb_tunnel_alloc_dp()
1597 tunnel->pre_activate = tb_dp_pre_activate; in tb_tunnel_alloc_dp()
1598 tunnel->activate = tb_dp_activate; in tb_tunnel_alloc_dp()
1599 tunnel->post_deactivate = tb_dp_post_deactivate; in tb_tunnel_alloc_dp()
1600 tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth; in tb_tunnel_alloc_dp()
1601 tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth; in tb_tunnel_alloc_dp()
1602 tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth; in tb_tunnel_alloc_dp()
1603 tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; in tb_tunnel_alloc_dp()
1604 tunnel->src_port = in; in tb_tunnel_alloc_dp()
1605 tunnel->dst_port = out; in tb_tunnel_alloc_dp()
1606 tunnel->max_up = max_up; in tb_tunnel_alloc_dp()
1607 tunnel->max_down = max_down; in tb_tunnel_alloc_dp()
1608 tunnel->callback = callback; in tb_tunnel_alloc_dp()
1609 tunnel->callback_data = callback_data; in tb_tunnel_alloc_dp()
1610 INIT_DELAYED_WORK(&tunnel->dprx_work, tb_dp_dprx_work); in tb_tunnel_alloc_dp()
1612 paths = tunnel->paths; in tb_tunnel_alloc_dp()
1613 pm_support = usb4_switch_version(in->sw) >= 2; in tb_tunnel_alloc_dp()
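
The @callback noted above (line 1575) runs without @tb->lock held, and tb_dp_dprx_work() invokes it as tunnel->callback(tunnel, tunnel->callback_data) once DPRX polling finishes or times out. A hypothetical callback might therefore look like the sketch below; the locking and the tb_tunnel_is_active()/tb_tunnel_deactivate() reaction are assumptions about how a connection manager would respond, not code from this file.

/* Hypothetical DP tunnel completion callback (prototype matches the
 * tunnel->callback(tunnel, tunnel->callback_data) call site above).
 */
static void sketch_dp_tunnel_done(struct tb_tunnel *tunnel, void *data)
{
	struct tb *tb = data;

	mutex_lock(&tb->lock);	/* callback runs without @tb->lock held */
	if (tb_tunnel_is_active(tunnel)) {
		/* DPRX came up in time: update bandwidth bookkeeping here */
	} else {
		/* DPRX timed out: tear the tunnel down again */
		tb_tunnel_deactivate(tunnel);
	}
	mutex_unlock(&tb->lock);
}
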
1645 const struct tb_switch *sw = port->sw; in tb_dma_available_credits()
1650 credits -= sw->max_pcie_credits; in tb_dma_available_credits()
1651 credits -= port->dma_credits; in tb_dma_available_credits()
1658 struct tb_port *port = hop->in_port; in tb_dma_reserve_credits()
1665 * DMA path cannot be established. in tb_dma_reserve_credits()
1668 return -ENOSPC; in tb_dma_reserve_credits()
1671 credits--; in tb_dma_reserve_credits()
1673 tb_port_dbg(port, "reserving %u credits for DMA path\n", in tb_dma_reserve_credits()
1676 port->dma_credits += credits; in tb_dma_reserve_credits()
1679 credits = port->bonded ? 14 : 6; in tb_dma_reserve_credits()
1681 credits = min(port->total_credits, credits); in tb_dma_reserve_credits()
1684 hop->initial_credits = credits; in tb_dma_reserve_credits()
1694 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_dma_init_rx_path()
1695 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_rx_path()
1696 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
1697 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_rx_path()
1698 path->priority = TB_DMA_PRIORITY; in tb_dma_init_rx_path()
1699 path->weight = TB_DMA_WEIGHT; in tb_dma_init_rx_path()
1700 path->clear_fc = true; in tb_dma_init_rx_path()
1707 hop = &path->hops[0]; in tb_dma_init_rx_path()
1708 tmp = min(tb_usable_credits(hop->in_port), credits); in tb_dma_init_rx_path()
1709 hop->initial_credits = tmp; in tb_dma_init_rx_path()
1710 hop->in_port->dma_credits += tmp; in tb_dma_init_rx_path()
1712 for (i = 1; i < path->path_length; i++) { in tb_dma_init_rx_path()
1715 ret = tb_dma_reserve_credits(&path->hops[i], credits); in tb_dma_init_rx_path()
1728 path->egress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
1729 path->ingress_fc_enable = TB_PATH_ALL; in tb_dma_init_tx_path()
1730 path->egress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
1731 path->ingress_shared_buffer = TB_PATH_NONE; in tb_dma_init_tx_path()
1732 path->priority = TB_DMA_PRIORITY; in tb_dma_init_tx_path()
1733 path->weight = TB_DMA_WEIGHT; in tb_dma_init_tx_path()
1734 path->clear_fc = true; in tb_dma_init_tx_path()
1749 struct tb_port *port = hop->in_port; in tb_dma_release_credits()
1752 port->dma_credits -= hop->initial_credits; in tb_dma_release_credits()
1754 tb_port_dbg(port, "released %u DMA path credits\n", in tb_dma_release_credits()
1755 hop->initial_credits); in tb_dma_release_credits()
1771 for (i = 0; i < tunnel->npaths; i++) { in tb_dma_destroy()
1772 if (!tunnel->paths[i]) in tb_dma_destroy()
1774 tb_dma_destroy_path(tunnel->paths[i]); in tb_dma_destroy()
1779 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
1785 * other domain. Set to %-1 if TX path is not needed.
1788 * other domain. Set to %-1 if RX path is not needed.
1818 tunnel->src_port = nhi; in tb_tunnel_alloc_dma()
1819 tunnel->dst_port = dst; in tb_tunnel_alloc_dma()
1820 tunnel->destroy = tb_dma_destroy; in tb_tunnel_alloc_dma()
1822 credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits); in tb_tunnel_alloc_dma()
1826 "DMA RX"); in tb_tunnel_alloc_dma()
1829 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1838 "DMA TX"); in tb_tunnel_alloc_dma()
1841 tunnel->paths[i++] = path; in tb_tunnel_alloc_dma()
1856 * tb_tunnel_match_dma() - Match DMA tunnel
1858 * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
1860 * other domain. Pass %-1 to ignore.
1861 * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
1863 * other domain. Pass %-1 to ignore.
1865 * This function can be used to match specific DMA tunnel, if there are
1866 * multiple DMA tunnels going through the same XDomain connection.
1878 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_match_dma()
1879 const struct tb_path *path = tunnel->paths[i]; in tb_tunnel_match_dma()
1884 if (tb_port_is_nhi(path->hops[0].in_port)) in tb_tunnel_match_dma()
1886 else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port)) in tb_tunnel_match_dma()
1894 (tx_path->hops[0].in_hop_index != transmit_ring)) in tb_tunnel_match_dma()
1897 (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path)) in tb_tunnel_match_dma()
1905 (rx_path->hops[0].in_hop_index != receive_path)) in tb_tunnel_match_dma()
1908 (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring)) in tb_tunnel_match_dma()
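
For XDomain connections the DMA tunnel API above is used roughly as in the sketch below; the prototypes are assumed from drivers/thunderbolt/tunnel.h, and every HopID/ring number is a made-up example (in practice they come from the XDomain negotiation).

/* Hypothetical XDomain caller: @nhi is the host NHI port, @dst the null
 * port facing the other domain.
 */
static int sketch_xdomain_dma(struct tb *tb, struct tb_port *nhi,
			      struct tb_port *dst)
{
	struct tb_tunnel *tunnel;

	/* transmit_path, transmit_ring, receive_path, receive_ring */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, 8, 1, 8, 1);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_put(tunnel);
		return -EIO;
	}
	return 0;
}

/* Later, when several DMA tunnels share one XDomain link, match only the
 * TX side by passing -1 for the RX arguments.
 */
static bool sketch_is_my_dma_tunnel(const struct tb_tunnel *tunnel)
{
	return tb_tunnel_match_dma(tunnel, 8, 1, -1, -1);
}
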
1935 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_pre_activate()
1937 return usb4_usb3_port_allocate_bandwidth(tunnel->src_port, in tb_usb3_pre_activate()
1938 &tunnel->allocated_up, in tb_usb3_pre_activate()
1939 &tunnel->allocated_down); in tb_usb3_pre_activate()
1946 res = tb_usb3_port_enable(tunnel->src_port, activate); in tb_usb3_activate()
1950 if (tb_port_is_usb3_up(tunnel->dst_port)) in tb_usb3_activate()
1951 return tb_usb3_port_enable(tunnel->dst_port, activate); in tb_usb3_activate()
1959 struct tb_port *port = tb_upstream_port(tunnel->dst_port->sw); in tb_usb3_consumed_bandwidth()
1966 *consumed_up = tunnel->allocated_up * in tb_usb3_consumed_bandwidth()
1968 *consumed_down = tunnel->allocated_down * in tb_usb3_consumed_bandwidth()
1983 ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, in tb_usb3_release_unused_bandwidth()
1984 &tunnel->allocated_up, in tb_usb3_release_unused_bandwidth()
1985 &tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
1990 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_release_unused_bandwidth()
2000 ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port); in tb_usb3_reclaim_available_bandwidth()
2013 if (tunnel->allocated_up >= max_rate && in tb_usb3_reclaim_available_bandwidth()
2014 tunnel->allocated_down >= max_rate) in tb_usb3_reclaim_available_bandwidth()
2019 if (allocate_up < tunnel->allocated_up) in tb_usb3_reclaim_available_bandwidth()
2020 allocate_up = tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
2023 if (allocate_down < tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
2024 allocate_down = tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
2027 if (allocate_up == tunnel->allocated_up && in tb_usb3_reclaim_available_bandwidth()
2028 allocate_down == tunnel->allocated_down) in tb_usb3_reclaim_available_bandwidth()
2031 ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, in tb_usb3_reclaim_available_bandwidth()
2038 tunnel->allocated_up = allocate_up; in tb_usb3_reclaim_available_bandwidth()
2039 *available_up -= tunnel->allocated_up; in tb_usb3_reclaim_available_bandwidth()
2041 tunnel->allocated_down = allocate_down; in tb_usb3_reclaim_available_bandwidth()
2042 *available_down -= tunnel->allocated_down; in tb_usb3_reclaim_available_bandwidth()
2045 tunnel->allocated_up, tunnel->allocated_down); in tb_usb3_reclaim_available_bandwidth()
2050 struct tb_port *port = hop->in_port; in tb_usb3_init_credits()
2051 struct tb_switch *sw = port->sw; in tb_usb3_init_credits()
2055 credits = sw->max_usb3_credits; in tb_usb3_init_credits()
2058 credits = port->bonded ? 32 : 16; in tb_usb3_init_credits()
2063 hop->initial_credits = credits; in tb_usb3_init_credits()
2070 path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; in tb_usb3_init_path()
2071 path->egress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
2072 path->ingress_fc_enable = TB_PATH_ALL; in tb_usb3_init_path()
2073 path->ingress_shared_buffer = TB_PATH_NONE; in tb_usb3_init_path()
2074 path->priority = TB_USB3_PRIORITY; in tb_usb3_init_path()
2075 path->weight = TB_USB3_WEIGHT; in tb_usb3_init_path()
2076 path->drop_packages = 0; in tb_usb3_init_path()
2083 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
2105 tunnel->activate = tb_usb3_activate; in tb_tunnel_discover_usb3()
2106 tunnel->src_port = down; in tb_tunnel_discover_usb3()
2113 path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, in tb_tunnel_discover_usb3()
2114 &tunnel->dst_port, "USB3 Down", alloc_hopid); in tb_tunnel_discover_usb3()
2120 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_discover_usb3()
2121 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); in tb_tunnel_discover_usb3()
2123 path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, in tb_tunnel_discover_usb3()
2127 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_discover_usb3()
2128 tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); in tb_tunnel_discover_usb3()
2131 if (!tb_port_is_usb3_up(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
2132 tb_port_warn(tunnel->dst_port, in tb_tunnel_discover_usb3()
2137 if (down != tunnel->src_port) { in tb_tunnel_discover_usb3()
2142 if (!tb_usb3_port_is_enabled(tunnel->dst_port)) { in tb_tunnel_discover_usb3()
2148 if (!tb_route(down->sw)) { in tb_tunnel_discover_usb3()
2156 &tunnel->allocated_up, &tunnel->allocated_down); in tb_tunnel_discover_usb3()
2161 tunnel->allocated_up, tunnel->allocated_down); in tb_tunnel_discover_usb3()
2163 tunnel->pre_activate = tb_usb3_pre_activate; in tb_tunnel_discover_usb3()
2164 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_discover_usb3()
2165 tunnel->release_unused_bandwidth = in tb_tunnel_discover_usb3()
2167 tunnel->reclaim_available_bandwidth = in tb_tunnel_discover_usb3()
2183 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
2205 if (!tb_route(down->sw) && (max_up > 0 || max_down > 0)) { in tb_tunnel_alloc_usb3()
2224 tunnel->activate = tb_usb3_activate; in tb_tunnel_alloc_usb3()
2225 tunnel->src_port = down; in tb_tunnel_alloc_usb3()
2226 tunnel->dst_port = up; in tb_tunnel_alloc_usb3()
2227 tunnel->max_up = max_up; in tb_tunnel_alloc_usb3()
2228 tunnel->max_down = max_down; in tb_tunnel_alloc_usb3()
2237 tunnel->paths[TB_USB3_PATH_DOWN] = path; in tb_tunnel_alloc_usb3()
2246 tunnel->paths[TB_USB3_PATH_UP] = path; in tb_tunnel_alloc_usb3()
2248 if (!tb_route(down->sw)) { in tb_tunnel_alloc_usb3()
2249 tunnel->allocated_up = min(max_rate, max_up); in tb_tunnel_alloc_usb3()
2250 tunnel->allocated_down = min(max_rate, max_down); in tb_tunnel_alloc_usb3()
2252 tunnel->pre_activate = tb_usb3_pre_activate; in tb_tunnel_alloc_usb3()
2253 tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; in tb_tunnel_alloc_usb3()
2254 tunnel->release_unused_bandwidth = in tb_tunnel_alloc_usb3()
2256 tunnel->reclaim_available_bandwidth = in tb_tunnel_alloc_usb3()
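
A USB3 tunnel is set up the same way, with optional bandwidth limits passed at allocation time; the sketch below assumes tb_tunnel_alloc_usb3(tb, up, down, max_up, max_down) as declared in tunnel.h, and the 5000 Mb/s limits are illustrative only.

/* Hypothetical USB3 tunnel bring-up with illustrative bandwidth limits */
static int sketch_tunnel_usb3(struct tb *tb, struct tb_port *up,
			      struct tb_port *down)
{
	struct tb_tunnel *tunnel;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, 5000, 5000);
	if (!tunnel)
		return -ENOMEM;

	if (tb_tunnel_activate(tunnel)) {
		tb_tunnel_put(tunnel);
		return -EIO;
	}
	return 0;
}
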
2264 * tb_tunnel_is_invalid - check whether an activated path is still valid
2271 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_is_invalid()
2272 WARN_ON(!tunnel->paths[i]->activated); in tb_tunnel_is_invalid()
2273 if (tb_path_is_invalid(tunnel->paths[i])) in tb_tunnel_is_invalid()
2281 * tb_tunnel_activate() - activate a tunnel
2285 * Specifically returns %-EINPROGRESS if the tunnel activation is still
2299 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
2300 if (tunnel->paths[i]->activated) { in tb_tunnel_activate()
2301 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_activate()
2302 tunnel->paths[i]->activated = false; in tb_tunnel_activate()
2306 tunnel->state = TB_TUNNEL_ACTIVATING; in tb_tunnel_activate()
2308 if (tunnel->pre_activate) { in tb_tunnel_activate()
2309 res = tunnel->pre_activate(tunnel); in tb_tunnel_activate()
2314 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_activate()
2315 res = tb_path_activate(tunnel->paths[i]); in tb_tunnel_activate()
2320 if (tunnel->activate) { in tb_tunnel_activate()
2321 res = tunnel->activate(tunnel, true); in tb_tunnel_activate()
2323 if (res == -EINPROGRESS) in tb_tunnel_activate()
2329 tunnel->state = TB_TUNNEL_ACTIVE; in tb_tunnel_activate()
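
As documented above, tb_tunnel_activate() can return -EINPROGRESS when a DP tunnel is still waiting for DPRX; completion is then reported through the tunnel callback. A caller sketch (names hypothetical):

static int sketch_start_tunnel(struct tb_tunnel *tunnel)
{
	int ret;

	ret = tb_tunnel_activate(tunnel);
	if (ret == -EINPROGRESS)
		return 0;	/* DPRX still polling; the DP callback reports the result */
	if (ret) {
		tb_tunnel_put(tunnel);	/* activation failed, drop the tunnel */
		return ret;
	}
	return 0;	/* fully active */
}
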
2339 * tb_tunnel_deactivate() - deactivate a tunnel
2348 if (tunnel->activate) in tb_tunnel_deactivate()
2349 tunnel->activate(tunnel, false); in tb_tunnel_deactivate()
2351 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_deactivate()
2352 if (tunnel->paths[i] && tunnel->paths[i]->activated) in tb_tunnel_deactivate()
2353 tb_path_deactivate(tunnel->paths[i]); in tb_tunnel_deactivate()
2356 if (tunnel->post_deactivate) in tb_tunnel_deactivate()
2357 tunnel->post_deactivate(tunnel); in tb_tunnel_deactivate()
2359 tunnel->state = TB_TUNNEL_INACTIVE; in tb_tunnel_deactivate()
2363 * tb_tunnel_port_on_path() - Does the tunnel go through port
2375 for (i = 0; i < tunnel->npaths; i++) { in tb_tunnel_port_on_path()
2376 if (!tunnel->paths[i]) in tb_tunnel_port_on_path()
2379 if (tb_path_port_on_path(tunnel->paths[i], port)) in tb_tunnel_port_on_path()
2389 return tunnel->state == TB_TUNNEL_ACTIVATING || tb_tunnel_is_active(tunnel); in tb_tunnel_is_activated()
2393 * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
2400 * returns %-EOPNOTSUPP.
2406 return -ENOTCONN; in tb_tunnel_maximum_bandwidth()
2408 if (tunnel->maximum_bandwidth) in tb_tunnel_maximum_bandwidth()
2409 return tunnel->maximum_bandwidth(tunnel, max_up, max_down); in tb_tunnel_maximum_bandwidth()
2410 return -EOPNOTSUPP; in tb_tunnel_maximum_bandwidth()
2414 * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
2427 return -ENOTCONN; in tb_tunnel_allocated_bandwidth()
2429 if (tunnel->allocated_bandwidth) in tb_tunnel_allocated_bandwidth()
2430 return tunnel->allocated_bandwidth(tunnel, allocated_up, in tb_tunnel_allocated_bandwidth()
2432 return -EOPNOTSUPP; in tb_tunnel_allocated_bandwidth()
2436 * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
2450 return -ENOTCONN; in tb_tunnel_alloc_bandwidth()
2452 if (tunnel->alloc_bandwidth) in tb_tunnel_alloc_bandwidth()
2453 return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down); in tb_tunnel_alloc_bandwidth()
2455 return -EOPNOTSUPP; in tb_tunnel_alloc_bandwidth()
2459 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
2482 if (tb_tunnel_is_activated(tunnel) && tunnel->consumed_bandwidth) { in tb_tunnel_consumed_bandwidth()
2485 ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw); in tb_tunnel_consumed_bandwidth()
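
The wrappers above give the connection manager a uniform way to query per-tunnel bandwidth; only tunnel types that implement the corresponding callback support them, the rest return -EOPNOTSUPP (or -ENOTCONN when the tunnel is not active). A small hedged sketch:

/* Hypothetical helper logging bandwidth figures for an active tunnel */
static void sketch_log_tunnel_bandwidth(struct tb_tunnel *tunnel)
{
	int max_up, max_down, used_up, used_down;

	if (!tb_tunnel_consumed_bandwidth(tunnel, &used_up, &used_down))
		tb_tunnel_dbg(tunnel, "consumed %d/%d Mb/s\n", used_up, used_down);

	/* Typically only DP tunnels in bandwidth allocation mode support this */
	if (tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down) == -EOPNOTSUPP)
		tb_tunnel_dbg(tunnel, "maximum bandwidth not reported\n");
}
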
2500 * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
2511 return -ENOTCONN; in tb_tunnel_release_unused_bandwidth()
2513 if (tunnel->release_unused_bandwidth) { in tb_tunnel_release_unused_bandwidth()
2516 ret = tunnel->release_unused_bandwidth(tunnel); in tb_tunnel_release_unused_bandwidth()
2525 * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
2542 if (tunnel->reclaim_available_bandwidth) in tb_tunnel_reclaim_available_bandwidth()
2543 tunnel->reclaim_available_bandwidth(tunnel, available_up, in tb_tunnel_reclaim_available_bandwidth()
2549 return tb_tunnel_names[tunnel->type]; in tb_tunnel_type_name()