Lines Matching refs:sw

111 static void tb_add_dp_resources(struct tb_switch *sw)  in tb_add_dp_resources()  argument
113 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
116 tb_switch_for_each_port(sw, port) { in tb_add_dp_resources()
120 if (!tb_switch_query_dp_resource(sw, port)) in tb_add_dp_resources()
129 if (tb_route(sw)) in tb_add_dp_resources()
138 static void tb_remove_dp_resources(struct tb_switch *sw) in tb_remove_dp_resources() argument
140 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
144 tb_switch_for_each_port(sw, port) { in tb_remove_dp_resources()
146 tb_remove_dp_resources(port->remote->sw); in tb_remove_dp_resources()
150 if (port->sw == sw) { in tb_remove_dp_resources()
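
Note: the references above, from tb_add_dp_resources() and tb_remove_dp_resources(), show the recurring idiom of walking a switch's ports with tb_switch_for_each_port() and recursing into each connected child via port->remote->sw before handling the current switch. A minimal standalone model of that children-first walk follows; the types and names (model_switch, remove_resources) are simplified stand-ins for illustration only, not the kernel structures or API.

	/*
	 * Children-first walk modeled after tb_remove_dp_resources():
	 * recurse into every connected child switch before releasing
	 * resources on the current one.
	 */
	#include <stdio.h>

	#define MAX_PORTS 4

	struct model_switch {
		const char *name;
		struct model_switch *child[MAX_PORTS];	/* stands in for port->remote->sw */
	};

	static void remove_resources(struct model_switch *sw)
	{
		/* Clear children's resources first, as the listed code does */
		for (int i = 0; i < MAX_PORTS; i++) {
			if (sw->child[i])
				remove_resources(sw->child[i]);
		}
		printf("releasing DP resources of %s\n", sw->name);
	}

	int main(void)
	{
		struct model_switch leaf = { .name = "leaf" };
		struct model_switch dev  = { .name = "device", .child = { &leaf } };
		struct model_switch root = { .name = "root",   .child = { &dev } };

		remove_resources(&root);	/* prints leaf, device, root */
		return 0;
	}
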
184 static int tb_enable_clx(struct tb_switch *sw) in tb_enable_clx() argument
186 struct tb_cm *tcm = tb_priv(sw->tb); in tb_enable_clx()
198 while (sw && tb_switch_depth(sw) > 1) in tb_enable_clx()
199 sw = tb_switch_parent(sw); in tb_enable_clx()
201 if (!sw) in tb_enable_clx()
204 if (tb_switch_depth(sw) != 1) in tb_enable_clx()
213 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) in tb_enable_clx()
222 ret = tb_switch_clx_enable(sw, clx | TB_CL2); in tb_enable_clx()
224 ret = tb_switch_clx_enable(sw, clx); in tb_enable_clx()
237 static bool tb_disable_clx(struct tb_switch *sw) in tb_disable_clx() argument
244 ret = tb_switch_clx_disable(sw); in tb_disable_clx()
248 tb_sw_warn(sw, "failed to disable CL states\n"); in tb_disable_clx()
250 sw = tb_switch_parent(sw); in tb_disable_clx()
251 } while (sw); in tb_disable_clx()
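
Note: the tb_enable_clx()/tb_disable_clx() references show the upward parent-walk idiom: tb_enable_clx() climbs with tb_switch_parent() until it reaches the switch at depth 1 (the first hop below the host router), while tb_disable_clx() walks all the way to the root in a do/while loop. A standalone sketch of both walks is below; model_switch, first_hop and walk_to_root are hypothetical names used only to illustrate the traversal.

	/*
	 * Upward walks modeled after tb_enable_clx()/tb_disable_clx():
	 * climb via the parent pointer to the first-hop switch, or all
	 * the way past the root.
	 */
	#include <stdio.h>

	struct model_switch {
		const char *name;
		struct model_switch *parent;	/* NULL above the host router */
		int depth;			/* 0 = host, 1 = first hop, ... */
	};

	/* Mirrors the while loop and depth check in tb_enable_clx() */
	static struct model_switch *first_hop(struct model_switch *sw)
	{
		while (sw && sw->depth > 1)
			sw = sw->parent;
		if (!sw || sw->depth != 1)
			return NULL;
		return sw;
	}

	/* Mirrors the do/while toward the root in tb_disable_clx() */
	static void walk_to_root(struct model_switch *sw)
	{
		do {
			printf("disable CL states on %s\n", sw->name);
			sw = sw->parent;
		} while (sw);
	}

	int main(void)
	{
		struct model_switch host = { "host", NULL, 0 };
		struct model_switch hop1 = { "hop1", &host, 1 };
		struct model_switch hop2 = { "hop2", &hop1, 2 };

		printf("first hop of hop2: %s\n", first_hop(&hop2)->name);
		walk_to_root(&hop2);
		return 0;
	}
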
258 struct tb_switch *sw; in tb_increase_switch_tmu_accuracy() local
260 sw = tb_to_switch(dev); in tb_increase_switch_tmu_accuracy()
261 if (!sw) in tb_increase_switch_tmu_accuracy()
264 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { in tb_increase_switch_tmu_accuracy()
268 if (tb_switch_clx_is_enabled(sw, TB_CL1)) in tb_increase_switch_tmu_accuracy()
273 ret = tb_switch_tmu_configure(sw, mode); in tb_increase_switch_tmu_accuracy()
277 return tb_switch_tmu_enable(sw); in tb_increase_switch_tmu_accuracy()
285 struct tb_switch *sw; in tb_increase_tmu_accuracy() local
299 sw = tunnel->tb->root_switch; in tb_increase_tmu_accuracy()
300 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); in tb_increase_tmu_accuracy()
305 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_tmu_hifi_uni_required() local
307 if (sw && tb_switch_tmu_is_enabled(sw) && in tb_switch_tmu_hifi_uni_required()
308 tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) in tb_switch_tmu_hifi_uni_required()
321 static int tb_enable_tmu(struct tb_switch *sw) in tb_enable_tmu() argument
332 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
335 if (tb_switch_clx_is_enabled(sw, TB_CL1)) { in tb_enable_tmu()
346 if (tb_tmu_hifi_uni_required(sw->tb)) in tb_enable_tmu()
347 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
350 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
353 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
358 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
364 if (tb_switch_tmu_is_enabled(sw)) in tb_enable_tmu()
367 ret = tb_switch_tmu_disable(sw); in tb_enable_tmu()
371 ret = tb_switch_tmu_post_time(sw); in tb_enable_tmu()
375 return tb_switch_tmu_enable(sw); in tb_enable_tmu()
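
Note: the tb_enable_tmu() references show a TMU mode being chosen based on whether CL1 is enabled (lowres or HiFi-uni versus HiFi-bi; the configure calls are truncated here because they span source lines), followed by an idempotent enable sequence: return early if the TMU is already enabled, otherwise disable, post time, and enable. The sketch below models only that control flow under those assumptions; model_tmu and tmu_enable are made-up stand-ins, not the kernel tb_switch_tmu_* API.

	/*
	 * Control-flow model of tb_enable_tmu(): pick a mode, then make
	 * the enable path idempotent.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum tmu_mode { TMU_LOWRES, TMU_HIFI_UNI, TMU_HIFI_BI };

	struct model_tmu {
		bool enabled;
		enum tmu_mode mode;
	};

	static int tmu_enable(struct model_tmu *tmu, bool cl1_enabled, bool hifi_uni_required)
	{
		/* Mode selection, mirroring the branches visible above */
		if (cl1_enabled)
			tmu->mode = hifi_uni_required ? TMU_HIFI_UNI : TMU_LOWRES;
		else
			tmu->mode = TMU_HIFI_BI;

		if (tmu->enabled)		/* idempotent: already running */
			return 0;

		/* disable -> post local time -> enable, in that order */
		tmu->enabled = false;
		printf("posting time, enabling TMU in mode %d\n", tmu->mode);
		tmu->enabled = true;
		return 0;
	}

	int main(void)
	{
		struct model_tmu tmu = { 0 };

		tmu_enable(&tmu, true, false);	/* CL1 on, HiFi-uni not required */
		tmu_enable(&tmu, true, false);	/* second call is a no-op */
		return 0;
	}
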
378 static void tb_switch_discover_tunnels(struct tb_switch *sw, in tb_switch_discover_tunnels() argument
382 struct tb *tb = sw->tb; in tb_switch_discover_tunnels()
385 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
410 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
412 tb_switch_discover_tunnels(port->remote->sw, list, in tb_switch_discover_tunnels()
420 if (tb_switch_is_usb4(port->sw)) in tb_port_configure_xdomain()
427 if (tb_switch_is_usb4(port->sw)) in tb_port_unconfigure_xdomain()
435 struct tb_switch *sw = port->sw; in tb_scan_xdomain() local
436 struct tb *tb = sw->tb; in tb_scan_xdomain()
450 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
453 tb_port_at(route, sw)->xdomain = xd; in tb_scan_xdomain()
464 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, in tb_find_unused_port() argument
469 tb_switch_for_each_port(sw, port) { in tb_find_unused_port()
483 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, in tb_find_usb3_down() argument
488 down = usb4_switch_map_usb3_down(sw, port); in tb_find_usb3_down()
517 struct tb_switch *sw; in tb_find_first_usb3_tunnel() local
521 sw = dst_port->sw; in tb_find_first_usb3_tunnel()
523 sw = src_port->sw; in tb_find_first_usb3_tunnel()
526 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
530 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
720 link_speed = port->sw->link_speed; in tb_maximum_bandwidth()
725 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { in tb_maximum_bandwidth()
728 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { in tb_maximum_bandwidth()
746 up_bw = link_speed * port->sw->link_width * 1000; in tb_maximum_bandwidth()
901 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
903 struct tb_switch *parent = tb_switch_parent(sw); in tb_tunnel_usb3()
914 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); in tb_tunnel_usb3()
918 if (!sw->link_usb4) in tb_tunnel_usb3()
925 port = tb_switch_downstream_port(sw); in tb_tunnel_usb3()
984 static int tb_create_usb3_tunnels(struct tb_switch *sw) in tb_create_usb3_tunnels() argument
992 if (tb_route(sw)) { in tb_create_usb3_tunnels()
993 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
998 tb_switch_for_each_port(sw, port) { in tb_create_usb3_tunnels()
1001 ret = tb_create_usb3_tunnels(port->remote->sw); in tb_create_usb3_tunnels()
1029 struct tb_switch *sw; in tb_configure_asym() local
1039 sw = dst_port->sw; in tb_configure_asym()
1041 sw = src_port->sw; in tb_configure_asym()
1044 struct tb_port *down = tb_switch_downstream_port(up->sw); in tb_configure_asym()
1082 if (up->sw->link_width == width_up) in tb_configure_asym()
1095 clx = tb_disable_clx(sw); in tb_configure_asym()
1099 tb_sw_dbg(up->sw, "configuring asymmetric link\n"); in tb_configure_asym()
1105 ret = tb_switch_set_link_width(up->sw, width_up); in tb_configure_asym()
1107 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_asym()
1114 tb_enable_clx(sw); in tb_configure_asym()
1134 struct tb_switch *sw; in tb_configure_sym() local
1144 sw = dst_port->sw; in tb_configure_sym()
1146 sw = src_port->sw; in tb_configure_sym()
1152 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1155 if (up->sw->is_unplugged) in tb_configure_sym()
1177 if (up->sw->link_width == TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1188 up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { in tb_configure_sym()
1189 tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); in tb_configure_sym()
1195 clx = tb_disable_clx(sw); in tb_configure_sym()
1199 tb_sw_dbg(up->sw, "configuring symmetric link\n"); in tb_configure_sym()
1201 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); in tb_configure_sym()
1203 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_sym()
1210 tb_enable_clx(sw); in tb_configure_sym()
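
Note: the tb_configure_asym()/tb_configure_sym() references show a CLx bracket around the link-width change: CL states are disabled before tb_switch_set_link_width() and re-enabled afterwards only if they were actually disabled here (clx = tb_disable_clx(sw); ... ; tb_enable_clx(sw)). A minimal model of that bracket is below; model_link, disable_clx and set_link_width are illustrative names, not the kernel helpers.

	/*
	 * CLx bracket modeled after tb_configure_asym()/tb_configure_sym():
	 * turn CL states off while reconfiguring the link, restore only
	 * what was turned off.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct model_link {
		bool clx_enabled;
		int width;		/* 2 = symmetric dual, 3/4 = asymmetric */
	};

	static bool disable_clx(struct model_link *link)
	{
		bool was_enabled = link->clx_enabled;

		link->clx_enabled = false;
		return was_enabled;
	}

	static void set_link_width(struct model_link *link, int width)
	{
		bool clx = disable_clx(link);	/* CL states off during the change */

		link->width = width;
		printf("link width set to %d\n", width);

		if (clx)			/* re-enable only if we disabled it */
			link->clx_enabled = true;
	}

	int main(void)
	{
		struct model_link link = { .clx_enabled = true, .width = 2 };

		set_link_width(&link, 3);	/* e.g. go asymmetric */
		printf("clx restored: %d\n", link.clx_enabled);
		return 0;
	}
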
1216 struct tb_switch *sw) in tb_configure_link() argument
1218 struct tb *tb = sw->tb; in tb_configure_link()
1232 if (sw->link_width < TB_LINK_WIDTH_DUAL) in tb_configure_link()
1233 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); in tb_configure_link()
1240 if (tb_switch_depth(sw) > 1 && in tb_configure_link()
1242 up->sw->link_width == TB_LINK_WIDTH_DUAL) { in tb_configure_link()
1245 host_port = tb_port_at(tb_route(sw), tb->root_switch); in tb_configure_link()
1250 tb_switch_configure_link(sw); in tb_configure_link()
1256 static void tb_scan_switch(struct tb_switch *sw) in tb_scan_switch() argument
1260 pm_runtime_get_sync(&sw->dev); in tb_scan_switch()
1262 tb_switch_for_each_port(sw, port) in tb_scan_switch()
1265 pm_runtime_mark_last_busy(&sw->dev); in tb_scan_switch()
1266 pm_runtime_put_autosuspend(&sw->dev); in tb_scan_switch()
1274 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
1277 struct tb_switch *sw; in tb_scan_port() local
1285 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
1310 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
1312 if (IS_ERR(sw)) { in tb_scan_port()
1318 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) in tb_scan_port()
1323 if (tb_switch_configure(sw)) { in tb_scan_port()
1324 tb_switch_put(sw); in tb_scan_port()
1344 dev_set_uevent_suppress(&sw->dev, true); in tb_scan_port()
1352 sw->rpm = sw->generation > 1; in tb_scan_port()
1354 if (tb_switch_add(sw)) { in tb_scan_port()
1355 tb_switch_put(sw); in tb_scan_port()
1359 upstream_port = tb_upstream_port(sw); in tb_scan_port()
1360 tb_configure_link(port, upstream_port, sw); in tb_scan_port()
1367 tb_sw_dbg(sw, "discovery, not touching CL states\n"); in tb_scan_port()
1368 else if (tb_enable_clx(sw)) in tb_scan_port()
1369 tb_sw_warn(sw, "failed to enable CL states\n"); in tb_scan_port()
1371 if (tb_enable_tmu(sw)) in tb_scan_port()
1372 tb_sw_warn(sw, "failed to enable TMU\n"); in tb_scan_port()
1378 tb_switch_configuration_valid(sw); in tb_scan_port()
1389 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
1390 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); in tb_scan_port()
1392 tb_add_dp_resources(sw); in tb_scan_port()
1393 tb_scan_switch(sw); in tb_scan_port()
1609 if (tunnel->src_port->sw == in->sw && in tb_attach_bandwidth_group()
1610 tunnel->dst_port->sw == out->sw) { in tb_attach_bandwidth_group()
1674 struct tb_switch *parent = tunnel->dst_port->sw; in tb_discover_tunnels()
1676 while (parent != tunnel->src_port->sw) { in tb_discover_tunnels()
1685 pm_runtime_get_sync(&in->sw->dev); in tb_discover_tunnels()
1686 pm_runtime_get_sync(&out->sw->dev); in tb_discover_tunnels()
1715 tb_switch_dealloc_dp_resource(src_port->sw, src_port); in tb_deactivate_and_free_tunnel()
1722 pm_runtime_mark_last_busy(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1723 pm_runtime_put_autosuspend(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1724 pm_runtime_mark_last_busy(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1725 pm_runtime_put_autosuspend(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1761 static void tb_free_unplugged_children(struct tb_switch *sw) in tb_free_unplugged_children() argument
1765 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_children()
1769 if (port->remote->sw->is_unplugged) { in tb_free_unplugged_children()
1771 tb_remove_dp_resources(port->remote->sw); in tb_free_unplugged_children()
1772 tb_switch_unconfigure_link(port->remote->sw); in tb_free_unplugged_children()
1773 tb_switch_set_link_width(port->remote->sw, in tb_free_unplugged_children()
1775 tb_switch_remove(port->remote->sw); in tb_free_unplugged_children()
1780 tb_free_unplugged_children(port->remote->sw); in tb_free_unplugged_children()
1785 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, in tb_find_pcie_down() argument
1794 if (tb_switch_is_usb4(sw)) { in tb_find_pcie_down()
1795 down = usb4_switch_map_pcie_down(sw, port); in tb_find_pcie_down()
1796 } else if (!tb_route(sw)) { in tb_find_pcie_down()
1804 if (tb_switch_is_cactus_ridge(sw) || in tb_find_pcie_down()
1805 tb_switch_is_alpine_ridge(sw)) in tb_find_pcie_down()
1807 else if (tb_switch_is_falcon_ridge(sw)) in tb_find_pcie_down()
1809 else if (tb_switch_is_titan_ridge(sw)) in tb_find_pcie_down()
1815 if (WARN_ON(index > sw->config.max_port_number)) in tb_find_pcie_down()
1818 down = &sw->ports[index]; in tb_find_pcie_down()
1831 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); in tb_find_pcie_down()
1839 host_port = tb_route(in->sw) ? in tb_find_dp_out()
1840 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
1852 if (in->sw == port->sw) { in tb_find_dp_out()
1863 if (host_port && tb_route(port->sw)) { in tb_find_dp_out()
1866 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
1971 pm_runtime_get_sync(&in->sw->dev); in tb_tunnel_one_dp()
1972 pm_runtime_get_sync(&out->sw->dev); in tb_tunnel_one_dp()
1974 if (tb_switch_alloc_dp_resource(in->sw, in)) { in tb_tunnel_one_dp()
2024 tb_switch_dealloc_dp_resource(in->sw, in); in tb_tunnel_one_dp()
2026 pm_runtime_mark_last_busy(&out->sw->dev); in tb_tunnel_one_dp()
2027 pm_runtime_put_autosuspend(&out->sw->dev); in tb_tunnel_one_dp()
2028 pm_runtime_mark_last_busy(&in->sw->dev); in tb_tunnel_one_dp()
2029 pm_runtime_put_autosuspend(&in->sw->dev); in tb_tunnel_one_dp()
2075 struct tb_switch *sw = port->sw; in tb_enter_redrive() local
2077 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_enter_redrive()
2089 if (tb_route(sw)) in tb_enter_redrive()
2091 if (!tb_switch_query_dp_resource(sw, port)) { in tb_enter_redrive()
2093 pm_runtime_get(&sw->dev); in tb_enter_redrive()
2100 struct tb_switch *sw = port->sw; in tb_exit_redrive() local
2102 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_exit_redrive()
2107 if (tb_route(sw)) in tb_exit_redrive()
2109 if (port->redrive && tb_switch_query_dp_resource(sw, port)) { in tb_exit_redrive()
2111 pm_runtime_put(&sw->dev); in tb_exit_redrive()
2116 static void tb_switch_enter_redrive(struct tb_switch *sw) in tb_switch_enter_redrive() argument
2120 tb_switch_for_each_port(sw, port) in tb_switch_enter_redrive()
2128 static void tb_switch_exit_redrive(struct tb_switch *sw) in tb_switch_exit_redrive() argument
2132 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_switch_exit_redrive()
2135 tb_switch_for_each_port(sw, port) { in tb_switch_exit_redrive()
2141 pm_runtime_put(&sw->dev); in tb_switch_exit_redrive()
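
Note: the redrive references (tb_enter_redrive() through tb_switch_exit_redrive()) show balanced reference counting guarded by a flag: when the DP resource is not available, port->redrive is set and a runtime-PM reference is taken with pm_runtime_get(); the reference is dropped only if the flag was set and the resource is available again. The sketch below models that bookkeeping; model_port and the usage counter are simplified stand-ins for the kernel runtime-PM calls.

	/*
	 * Redrive bookkeeping modeled after tb_enter_redrive()/
	 * tb_exit_redrive(): take a reference once, drop it only if this
	 * code took it.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct model_port {
		bool redrive;
		int *usage_count;	/* stands in for pm_runtime_get()/put() */
	};

	static void enter_redrive(struct model_port *port, bool dp_resource_free)
	{
		/* enter redrive when the DP resource is *not* free */
		if (!dp_resource_free && !port->redrive) {
			port->redrive = true;
			(*port->usage_count)++;
		}
	}

	static void exit_redrive(struct model_port *port, bool dp_resource_free)
	{
		if (port->redrive && dp_resource_free) {
			port->redrive = false;
			(*port->usage_count)--;
		}
	}

	int main(void)
	{
		int usage = 0;
		struct model_port port = { .usage_count = &usage };

		enter_redrive(&port, false);	/* resource in use -> keep powered */
		printf("usage after enter: %d\n", usage);
		exit_redrive(&port, true);	/* resource free again -> drop ref */
		printf("usage after exit: %d\n", usage);
		return 0;
	}
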
2223 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) in tb_disconnect_pci() argument
2228 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_disconnect_pci()
2236 tb_switch_xhci_disconnect(sw); in tb_disconnect_pci()
2244 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
2250 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_tunnel_pci()
2258 port = tb_switch_downstream_port(sw); in tb_tunnel_pci()
2259 down = tb_find_pcie_down(tb_switch_parent(sw), port); in tb_tunnel_pci()
2278 if (tb_switch_pcie_l1_enable(sw)) in tb_tunnel_pci()
2279 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); in tb_tunnel_pci()
2281 if (tb_switch_xhci_connect(sw)) in tb_tunnel_pci()
2282 tb_sw_warn(sw, "failed to connect xHCI\n"); in tb_tunnel_pci()
2295 struct tb_switch *sw; in tb_approve_xdomain_paths() local
2298 sw = tb_to_switch(xd->dev.parent); in tb_approve_xdomain_paths()
2299 dst_port = tb_port_at(xd->route, sw); in tb_approve_xdomain_paths()
2308 tb_disable_clx(sw); in tb_approve_xdomain_paths()
2331 tb_enable_clx(sw); in tb_approve_xdomain_paths()
2344 struct tb_switch *sw; in __tb_disconnect_xdomain_paths() local
2346 sw = tb_to_switch(xd->dev.parent); in __tb_disconnect_xdomain_paths()
2347 dst_port = tb_port_at(xd->route, sw); in __tb_disconnect_xdomain_paths()
2366 tb_enable_clx(sw); in __tb_disconnect_xdomain_paths()
2395 struct tb_switch *sw; in tb_handle_hotplug() local
2405 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
2406 if (!sw) { in tb_handle_hotplug()
2412 if (ev->port > sw->config.max_port_number) { in tb_handle_hotplug()
2418 port = &sw->ports[ev->port]; in tb_handle_hotplug()
2425 pm_runtime_get_sync(&sw->dev); in tb_handle_hotplug()
2432 tb_sw_set_unplugged(port->remote->sw); in tb_handle_hotplug()
2434 tb_remove_dp_resources(port->remote->sw); in tb_handle_hotplug()
2435 tb_switch_tmu_disable(port->remote->sw); in tb_handle_hotplug()
2436 tb_switch_unconfigure_link(port->remote->sw); in tb_handle_hotplug()
2437 tb_switch_set_link_width(port->remote->sw, in tb_handle_hotplug()
2439 tb_switch_remove(port->remote->sw); in tb_handle_hotplug()
2466 tb_sw_dbg(sw, "xHCI disconnect request\n"); in tb_handle_hotplug()
2467 tb_switch_xhci_disconnect(sw); in tb_handle_hotplug()
2474 } else if (!port->port && sw->authorized) { in tb_handle_hotplug()
2475 tb_sw_dbg(sw, "xHCI connect request\n"); in tb_handle_hotplug()
2476 tb_switch_xhci_connect(sw); in tb_handle_hotplug()
2488 pm_runtime_mark_last_busy(&sw->dev); in tb_handle_hotplug()
2489 pm_runtime_put_autosuspend(&sw->dev); in tb_handle_hotplug()
2492 tb_switch_put(sw); in tb_handle_hotplug()
2702 struct tb_switch *sw; in tb_handle_dp_bandwidth_request() local
2711 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_dp_bandwidth_request()
2712 if (!sw) { in tb_handle_dp_bandwidth_request()
2718 in = &sw->ports[ev->port]; in tb_handle_dp_bandwidth_request()
2814 tb_switch_put(sw); in tb_handle_dp_bandwidth_request()
2932 struct tb_switch *sw = tb_to_switch(dev); in tb_scan_finalize_switch() local
2939 if (sw->boot) in tb_scan_finalize_switch()
2940 sw->authorized = 1; in tb_scan_finalize_switch()
3045 static void tb_restore_children(struct tb_switch *sw) in tb_restore_children() argument
3050 if (sw->is_unplugged) in tb_restore_children()
3053 if (tb_enable_clx(sw)) in tb_restore_children()
3054 tb_sw_warn(sw, "failed to re-enable CL states\n"); in tb_restore_children()
3056 if (tb_enable_tmu(sw)) in tb_restore_children()
3057 tb_sw_warn(sw, "failed to restore TMU configuration\n"); in tb_restore_children()
3059 tb_switch_configuration_valid(sw); in tb_restore_children()
3061 tb_switch_for_each_port(sw, port) { in tb_restore_children()
3066 tb_switch_set_link_width(port->remote->sw, in tb_restore_children()
3067 port->remote->sw->link_width); in tb_restore_children()
3068 tb_switch_configure_link(port->remote->sw); in tb_restore_children()
3070 tb_restore_children(port->remote->sw); in tb_restore_children()
3138 static int tb_free_unplugged_xdomains(struct tb_switch *sw) in tb_free_unplugged_xdomains() argument
3143 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_xdomains()
3153 ret += tb_free_unplugged_xdomains(port->remote->sw); in tb_free_unplugged_xdomains()