Lines Matching full:sw

111 static void tb_add_dp_resources(struct tb_switch *sw)  in tb_add_dp_resources()  argument
113 struct tb_cm *tcm = tb_priv(sw->tb); in tb_add_dp_resources()
116 tb_switch_for_each_port(sw, port) { in tb_add_dp_resources()
120 if (!tb_switch_query_dp_resource(sw, port)) in tb_add_dp_resources()
129 if (tb_route(sw)) in tb_add_dp_resources()
138 static void tb_remove_dp_resources(struct tb_switch *sw) in tb_remove_dp_resources() argument
140 struct tb_cm *tcm = tb_priv(sw->tb); in tb_remove_dp_resources()
144 tb_switch_for_each_port(sw, port) { in tb_remove_dp_resources()
146 tb_remove_dp_resources(port->remote->sw); in tb_remove_dp_resources()
150 if (port->sw == sw) { in tb_remove_dp_resources()
184 static int tb_enable_clx(struct tb_switch *sw) in tb_enable_clx() argument
186 struct tb_cm *tcm = tb_priv(sw->tb); in tb_enable_clx()
198 while (sw && tb_switch_depth(sw) > 1) in tb_enable_clx()
199 sw = tb_switch_parent(sw); in tb_enable_clx()
201 if (!sw) in tb_enable_clx()
204 if (tb_switch_depth(sw) != 1) in tb_enable_clx()
213 if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw))) in tb_enable_clx()
222 ret = tb_switch_clx_enable(sw, clx | TB_CL2); in tb_enable_clx()
224 ret = tb_switch_clx_enable(sw, clx); in tb_enable_clx()
229 * Disables CL states from @sw up to the host router.
235 static bool tb_disable_clx(struct tb_switch *sw) in tb_disable_clx() argument
242 ret = tb_switch_clx_disable(sw); in tb_disable_clx()
246 tb_sw_warn(sw, "failed to disable CL states\n"); in tb_disable_clx()
248 sw = tb_switch_parent(sw); in tb_disable_clx()
249 } while (sw); in tb_disable_clx()
256 struct tb_switch *sw; in tb_increase_switch_tmu_accuracy() local
258 sw = tb_to_switch(dev); in tb_increase_switch_tmu_accuracy()
259 if (!sw) in tb_increase_switch_tmu_accuracy()
262 if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) { in tb_increase_switch_tmu_accuracy()
266 if (tb_switch_clx_is_enabled(sw, TB_CL1)) in tb_increase_switch_tmu_accuracy()
271 ret = tb_switch_tmu_configure(sw, mode); in tb_increase_switch_tmu_accuracy()
275 return tb_switch_tmu_enable(sw); in tb_increase_switch_tmu_accuracy()
283 struct tb_switch *sw; in tb_increase_tmu_accuracy() local
297 sw = tunnel->tb->root_switch; in tb_increase_tmu_accuracy()
298 device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy); in tb_increase_tmu_accuracy()
303 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_tmu_hifi_uni_required() local
305 if (sw && tb_switch_tmu_is_enabled(sw) && in tb_switch_tmu_hifi_uni_required()
306 tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_HIFI_UNI)) in tb_switch_tmu_hifi_uni_required()
319 static int tb_enable_tmu(struct tb_switch *sw) in tb_enable_tmu() argument
330 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
333 if (tb_switch_clx_is_enabled(sw, TB_CL1)) { in tb_enable_tmu()
344 if (tb_tmu_hifi_uni_required(sw->tb)) in tb_enable_tmu()
345 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
348 ret = tb_switch_tmu_configure(sw, in tb_enable_tmu()
351 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
356 ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI); in tb_enable_tmu()
362 if (tb_switch_tmu_is_enabled(sw)) in tb_enable_tmu()
365 ret = tb_switch_tmu_disable(sw); in tb_enable_tmu()
369 ret = tb_switch_tmu_post_time(sw); in tb_enable_tmu()
373 return tb_switch_tmu_enable(sw); in tb_enable_tmu()
376 static void tb_switch_discover_tunnels(struct tb_switch *sw, in tb_switch_discover_tunnels() argument
380 struct tb *tb = sw->tb; in tb_switch_discover_tunnels()
383 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
408 tb_switch_for_each_port(sw, port) { in tb_switch_discover_tunnels()
410 tb_switch_discover_tunnels(port->remote->sw, list, in tb_switch_discover_tunnels()
418 if (tb_switch_is_usb4(port->sw)) in tb_port_configure_xdomain()
425 if (tb_switch_is_usb4(port->sw)) in tb_port_unconfigure_xdomain()
433 struct tb_switch *sw = port->sw; in tb_scan_xdomain() local
434 struct tb *tb = sw->tb; in tb_scan_xdomain()
448 xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, in tb_scan_xdomain()
451 tb_port_at(route, sw)->xdomain = xd; in tb_scan_xdomain()
458 * Returns the first inactive port on @sw.
460 static struct tb_port *tb_find_unused_port(struct tb_switch *sw, in tb_find_unused_port() argument
465 tb_switch_for_each_port(sw, port) { in tb_find_unused_port()
479 static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, in tb_find_usb3_down() argument
484 down = usb4_switch_map_usb3_down(sw, port); in tb_find_usb3_down()
513 struct tb_switch *sw; in tb_find_first_usb3_tunnel() local
517 sw = dst_port->sw; in tb_find_first_usb3_tunnel()
519 sw = src_port->sw; in tb_find_first_usb3_tunnel()
522 if (sw == tb->root_switch) in tb_find_first_usb3_tunnel()
526 port = tb_port_at(tb_route(sw), tb->root_switch); in tb_find_first_usb3_tunnel()
722 link_speed = port->sw->link_speed; in tb_maximum_bandwidth()
724 * sw->link_width is from upstream perspective so we use in tb_maximum_bandwidth()
727 if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) { in tb_maximum_bandwidth()
730 } else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) { in tb_maximum_bandwidth()
748 up_bw = link_speed * port->sw->link_width * 1000; in tb_maximum_bandwidth()
905 static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) in tb_tunnel_usb3() argument
907 struct tb_switch *parent = tb_switch_parent(sw); in tb_tunnel_usb3()
918 up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); in tb_tunnel_usb3()
922 if (!sw->link_usb4) in tb_tunnel_usb3()
929 port = tb_switch_downstream_port(sw); in tb_tunnel_usb3()
997 static int tb_create_usb3_tunnels(struct tb_switch *sw) in tb_create_usb3_tunnels() argument
1005 if (tb_route(sw)) { in tb_create_usb3_tunnels()
1006 ret = tb_tunnel_usb3(sw->tb, sw); in tb_create_usb3_tunnels()
1011 tb_switch_for_each_port(sw, port) { in tb_create_usb3_tunnels()
1014 ret = tb_create_usb3_tunnels(port->remote->sw); in tb_create_usb3_tunnels()
1044 struct tb_switch *sw; in tb_configure_asym() local
1054 sw = dst_port->sw; in tb_configure_asym()
1056 sw = src_port->sw; in tb_configure_asym()
1059 struct tb_port *down = tb_switch_downstream_port(up->sw); in tb_configure_asym()
1097 if (up->sw->link_width == width_up) in tb_configure_asym()
1110 clx = tb_disable_clx(sw); in tb_configure_asym()
1114 tb_sw_dbg(up->sw, "configuring asymmetric link\n"); in tb_configure_asym()
1120 ret = tb_switch_set_link_width(up->sw, width_up); in tb_configure_asym()
1122 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_asym()
1129 tb_enable_clx(sw); in tb_configure_asym()
1151 struct tb_switch *sw; in tb_configure_sym() local
1161 sw = dst_port->sw; in tb_configure_sym()
1163 sw = src_port->sw; in tb_configure_sym()
1169 if (up->sw->link_width <= TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1172 if (up->sw->is_unplugged) in tb_configure_sym()
1194 if (up->sw->link_width == TB_LINK_WIDTH_DUAL) in tb_configure_sym()
1205 up->sw->preferred_link_width > TB_LINK_WIDTH_DUAL) { in tb_configure_sym()
1206 tb_sw_dbg(up->sw, "keeping preferred asymmetric link\n"); in tb_configure_sym()
1212 clx = tb_disable_clx(sw); in tb_configure_sym()
1216 tb_sw_dbg(up->sw, "configuring symmetric link\n"); in tb_configure_sym()
1218 ret = tb_switch_set_link_width(up->sw, TB_LINK_WIDTH_DUAL); in tb_configure_sym()
1220 tb_sw_warn(up->sw, "failed to set link width\n"); in tb_configure_sym()
1227 tb_enable_clx(sw); in tb_configure_sym()
1233 struct tb_switch *sw) in tb_configure_link() argument
1235 struct tb *tb = sw->tb; in tb_configure_link()
1249 if (sw->link_width < TB_LINK_WIDTH_DUAL) in tb_configure_link()
1250 tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL); in tb_configure_link()
1257 if (tb_switch_depth(sw) > 1 && in tb_configure_link()
1259 up->sw->link_width == TB_LINK_WIDTH_DUAL) { in tb_configure_link()
1262 host_port = tb_port_at(tb_route(sw), tb->root_switch); in tb_configure_link()
1267 tb_switch_configure_link(sw); in tb_configure_link()
1273 static void tb_scan_switch(struct tb_switch *sw) in tb_scan_switch() argument
1277 pm_runtime_get_sync(&sw->dev); in tb_scan_switch()
1279 tb_switch_for_each_port(sw, port) in tb_scan_switch()
1282 pm_runtime_mark_last_busy(&sw->dev); in tb_scan_switch()
1283 pm_runtime_put_autosuspend(&sw->dev); in tb_scan_switch()
1291 struct tb_cm *tcm = tb_priv(port->sw->tb); in tb_scan_port()
1294 struct tb_switch *sw; in tb_scan_port() local
1302 tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, in tb_scan_port()
1325 sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, in tb_scan_port()
1327 if (IS_ERR(sw)) { in tb_scan_port()
1339 if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) in tb_scan_port()
1344 if (tb_switch_configure(sw)) { in tb_scan_port()
1345 tb_switch_put(sw); in tb_scan_port()
1365 dev_set_uevent_suppress(&sw->dev, true); in tb_scan_port()
1373 sw->rpm = sw->generation > 1; in tb_scan_port()
1375 if (tb_switch_add(sw)) { in tb_scan_port()
1376 tb_switch_put(sw); in tb_scan_port()
1380 upstream_port = tb_upstream_port(sw); in tb_scan_port()
1381 tb_configure_link(port, upstream_port, sw); in tb_scan_port()
1396 tb_sw_dbg(sw, "discovery, not touching CL states\n"); in tb_scan_port()
1397 else if (tb_enable_clx(sw)) in tb_scan_port()
1398 tb_sw_warn(sw, "failed to enable CL states\n"); in tb_scan_port()
1400 if (tb_enable_tmu(sw)) in tb_scan_port()
1401 tb_sw_warn(sw, "failed to enable TMU\n"); in tb_scan_port()
1407 tb_switch_configuration_valid(sw); in tb_scan_port()
1418 if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) in tb_scan_port()
1419 tb_sw_warn(sw, "USB3 tunnel creation failed\n"); in tb_scan_port()
1421 tb_add_dp_resources(sw); in tb_scan_port()
1422 tb_scan_switch(sw); in tb_scan_port()
1638 if (tunnel->src_port->sw == in->sw && in tb_attach_bandwidth_group()
1639 tunnel->dst_port->sw == out->sw) { in tb_attach_bandwidth_group()
1703 struct tb_switch *parent = tunnel->dst_port->sw; in tb_discover_tunnels()
1705 while (parent != tunnel->src_port->sw) { in tb_discover_tunnels()
1714 pm_runtime_get_sync(&in->sw->dev); in tb_discover_tunnels()
1715 pm_runtime_get_sync(&out->sw->dev); in tb_discover_tunnels()
1744 tb_switch_dealloc_dp_resource(src_port->sw, src_port); in tb_deactivate_and_free_tunnel()
1751 pm_runtime_mark_last_busy(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1752 pm_runtime_put_autosuspend(&dst_port->sw->dev); in tb_deactivate_and_free_tunnel()
1753 pm_runtime_mark_last_busy(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1754 pm_runtime_put_autosuspend(&src_port->sw->dev); in tb_deactivate_and_free_tunnel()
1790 static void tb_free_unplugged_children(struct tb_switch *sw) in tb_free_unplugged_children() argument
1794 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_children()
1798 if (port->remote->sw->is_unplugged) { in tb_free_unplugged_children()
1800 tb_remove_dp_resources(port->remote->sw); in tb_free_unplugged_children()
1801 tb_switch_unconfigure_link(port->remote->sw); in tb_free_unplugged_children()
1802 tb_switch_set_link_width(port->remote->sw, in tb_free_unplugged_children()
1804 tb_switch_remove(port->remote->sw); in tb_free_unplugged_children()
1809 tb_free_unplugged_children(port->remote->sw); in tb_free_unplugged_children()
1814 static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, in tb_find_pcie_down() argument
1823 if (tb_switch_is_usb4(sw)) { in tb_find_pcie_down()
1824 down = usb4_switch_map_pcie_down(sw, port); in tb_find_pcie_down()
1825 } else if (!tb_route(sw)) { in tb_find_pcie_down()
1833 if (tb_switch_is_cactus_ridge(sw) || in tb_find_pcie_down()
1834 tb_switch_is_alpine_ridge(sw)) in tb_find_pcie_down()
1836 else if (tb_switch_is_falcon_ridge(sw)) in tb_find_pcie_down()
1838 else if (tb_switch_is_titan_ridge(sw)) in tb_find_pcie_down()
1844 if (WARN_ON(index > sw->config.max_port_number)) in tb_find_pcie_down()
1847 down = &sw->ports[index]; in tb_find_pcie_down()
1860 return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); in tb_find_pcie_down()
1868 host_port = tb_route(in->sw) ? in tb_find_dp_out()
1869 tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; in tb_find_dp_out()
1881 if (in->sw == port->sw) { in tb_find_dp_out()
1892 if (host_port && tb_route(port->sw)) { in tb_find_dp_out()
1895 p = tb_port_at(tb_route(port->sw), tb->root_switch); in tb_find_dp_out()
2000 pm_runtime_get_sync(&in->sw->dev); in tb_tunnel_one_dp()
2001 pm_runtime_get_sync(&out->sw->dev); in tb_tunnel_one_dp()
2003 if (tb_switch_alloc_dp_resource(in->sw, in)) { in tb_tunnel_one_dp()
2055 tb_switch_dealloc_dp_resource(in->sw, in); in tb_tunnel_one_dp()
2057 pm_runtime_mark_last_busy(&out->sw->dev); in tb_tunnel_one_dp()
2058 pm_runtime_put_autosuspend(&out->sw->dev); in tb_tunnel_one_dp()
2059 pm_runtime_mark_last_busy(&in->sw->dev); in tb_tunnel_one_dp()
2060 pm_runtime_put_autosuspend(&in->sw->dev); in tb_tunnel_one_dp()
2106 struct tb_switch *sw = port->sw; in tb_enter_redrive() local
2108 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_enter_redrive()
2120 if (tb_route(sw)) in tb_enter_redrive()
2122 if (!tb_switch_query_dp_resource(sw, port)) { in tb_enter_redrive()
2124 pm_runtime_get(&sw->dev); in tb_enter_redrive()
2131 struct tb_switch *sw = port->sw; in tb_exit_redrive() local
2133 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_exit_redrive()
2138 if (tb_route(sw)) in tb_exit_redrive()
2140 if (port->redrive && tb_switch_query_dp_resource(sw, port)) { in tb_exit_redrive()
2142 pm_runtime_put(&sw->dev); in tb_exit_redrive()
2147 static void tb_switch_enter_redrive(struct tb_switch *sw) in tb_switch_enter_redrive() argument
2151 tb_switch_for_each_port(sw, port) in tb_switch_enter_redrive()
2159 static void tb_switch_exit_redrive(struct tb_switch *sw) in tb_switch_exit_redrive() argument
2163 if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) in tb_switch_exit_redrive()
2166 tb_switch_for_each_port(sw, port) { in tb_switch_exit_redrive()
2172 pm_runtime_put(&sw->dev); in tb_switch_exit_redrive()
2254 static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) in tb_disconnect_pci() argument
2259 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_disconnect_pci()
2267 tb_switch_xhci_disconnect(sw); in tb_disconnect_pci()
2275 static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) in tb_tunnel_pci() argument
2281 up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); in tb_tunnel_pci()
2289 port = tb_switch_downstream_port(sw); in tb_tunnel_pci()
2290 down = tb_find_pcie_down(tb_switch_parent(sw), port); in tb_tunnel_pci()
2309 if (tb_switch_pcie_l1_enable(sw)) in tb_tunnel_pci()
2310 tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); in tb_tunnel_pci()
2312 if (tb_switch_xhci_connect(sw)) in tb_tunnel_pci()
2313 tb_sw_warn(sw, "failed to connect xHCI\n"); in tb_tunnel_pci()
2326 struct tb_switch *sw; in tb_approve_xdomain_paths() local
2329 sw = tb_to_switch(xd->dev.parent); in tb_approve_xdomain_paths()
2330 dst_port = tb_port_at(xd->route, sw); in tb_approve_xdomain_paths()
2339 tb_disable_clx(sw); in tb_approve_xdomain_paths()
2362 tb_enable_clx(sw); in tb_approve_xdomain_paths()
2375 struct tb_switch *sw; in __tb_disconnect_xdomain_paths() local
2377 sw = tb_to_switch(xd->dev.parent); in __tb_disconnect_xdomain_paths()
2378 dst_port = tb_port_at(xd->route, sw); in __tb_disconnect_xdomain_paths()
2397 tb_enable_clx(sw); in __tb_disconnect_xdomain_paths()
2426 struct tb_switch *sw; in tb_handle_hotplug() local
2436 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_hotplug()
2437 if (!sw) { in tb_handle_hotplug()
2443 if (ev->port > sw->config.max_port_number) { in tb_handle_hotplug()
2449 port = &sw->ports[ev->port]; in tb_handle_hotplug()
2456 pm_runtime_get_sync(&sw->dev); in tb_handle_hotplug()
2463 tb_sw_set_unplugged(port->remote->sw); in tb_handle_hotplug()
2465 tb_remove_dp_resources(port->remote->sw); in tb_handle_hotplug()
2466 tb_switch_tmu_disable(port->remote->sw); in tb_handle_hotplug()
2467 tb_switch_unconfigure_link(port->remote->sw); in tb_handle_hotplug()
2468 tb_switch_set_link_width(port->remote->sw, in tb_handle_hotplug()
2470 tb_switch_remove(port->remote->sw); in tb_handle_hotplug()
2497 tb_sw_dbg(sw, "xHCI disconnect request\n"); in tb_handle_hotplug()
2498 tb_switch_xhci_disconnect(sw); in tb_handle_hotplug()
2505 } else if (!port->port && sw->authorized) { in tb_handle_hotplug()
2506 tb_sw_dbg(sw, "xHCI connect request\n"); in tb_handle_hotplug()
2507 tb_switch_xhci_connect(sw); in tb_handle_hotplug()
2519 pm_runtime_mark_last_busy(&sw->dev); in tb_handle_hotplug()
2520 pm_runtime_put_autosuspend(&sw->dev); in tb_handle_hotplug()
2523 tb_switch_put(sw); in tb_handle_hotplug()
2738 struct tb_switch *sw; in tb_handle_dp_bandwidth_request() local
2747 sw = tb_switch_find_by_route(tb, ev->route); in tb_handle_dp_bandwidth_request()
2748 if (!sw) { in tb_handle_dp_bandwidth_request()
2754 in = &sw->ports[ev->port]; in tb_handle_dp_bandwidth_request()
2850 tb_switch_put(sw); in tb_handle_dp_bandwidth_request()
2968 struct tb_switch *sw = tb_to_switch(dev); in tb_scan_finalize_switch() local
2975 if (sw->boot) in tb_scan_finalize_switch()
2976 sw->authorized = 1; in tb_scan_finalize_switch()
3081 static void tb_restore_children(struct tb_switch *sw) in tb_restore_children() argument
3086 if (sw->is_unplugged) in tb_restore_children()
3089 if (tb_enable_clx(sw)) in tb_restore_children()
3090 tb_sw_warn(sw, "failed to re-enable CL states\n"); in tb_restore_children()
3092 if (tb_enable_tmu(sw)) in tb_restore_children()
3093 tb_sw_warn(sw, "failed to restore TMU configuration\n"); in tb_restore_children()
3095 tb_switch_configuration_valid(sw); in tb_restore_children()
3097 tb_switch_for_each_port(sw, port) { in tb_restore_children()
3102 tb_switch_set_link_width(port->remote->sw, in tb_restore_children()
3103 port->remote->sw->link_width); in tb_restore_children()
3104 tb_switch_configure_link(port->remote->sw); in tb_restore_children()
3106 tb_restore_children(port->remote->sw); in tb_restore_children()
3174 static int tb_free_unplugged_xdomains(struct tb_switch *sw) in tb_free_unplugged_xdomains() argument
3179 tb_switch_for_each_port(sw, port) { in tb_free_unplugged_xdomains()
3189 ret += tb_free_unplugged_xdomains(port->remote->sw); in tb_free_unplugged_xdomains()
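The matches above keep showing the same traversal idiom: iterate a switch's ports with tb_switch_for_each_port() and recurse into port->remote->sw on the far end of each link (see tb_scan_switch, tb_remove_dp_resources, tb_free_unplugged_children, tb_free_unplugged_xdomains). Below is a minimal, self-contained sketch of that pattern. The types, the for_each_port_sketch() macro, and the explicit parent argument are simplified stand-ins invented for illustration; they are not the kernel's struct tb_switch / struct tb_port, and the driver itself avoids walking back up the tree by skipping the upstream port rather than tracking a parent pointer.

/*
 * Standalone sketch of the recursive switch/port walk visible in the
 * listing. Stand-in types only; not buildable against the kernel.
 */
#include <stdio.h>
#include <stddef.h>

struct tb_port_sketch;

struct tb_switch_sketch {
	const char *name;
	struct tb_port_sketch *ports;	/* array of this switch's ports */
	size_t num_ports;
};

struct tb_port_sketch {
	struct tb_switch_sketch *sw;	/* switch this port belongs to */
	struct tb_port_sketch *remote;	/* port on the other end of the link, or NULL */
};

/* Analogue of the tb_switch_for_each_port() iteration used throughout the listing. */
#define for_each_port_sketch(sw, port) \
	for ((port) = (sw)->ports; (port) < (sw)->ports + (sw)->num_ports; (port)++)

/*
 * Analogue of the tb_scan_switch()/tb_free_unplugged_children() style walk:
 * visit a switch, then recurse into every remote switch except the one we
 * came from (the real code skips the upstream port instead of passing parent).
 */
static void scan_switch_sketch(struct tb_switch_sketch *sw,
			       struct tb_switch_sketch *parent)
{
	struct tb_port_sketch *port;

	printf("visiting %s\n", sw->name);
	for_each_port_sketch(sw, port) {
		if (port->remote && port->remote->sw != parent)
			scan_switch_sketch(port->remote->sw, sw);
	}
}

int main(void)
{
	/* Two-switch topology: root's downstream port linked to child's upstream port. */
	struct tb_switch_sketch child = { .name = "child" };
	struct tb_port_sketch child_up = { .sw = &child };
	struct tb_port_sketch root_down = { .remote = &child_up };
	struct tb_switch_sketch root = { .name = "root", .ports = &root_down, .num_ports = 1 };

	root_down.sw = &root;
	child_up.remote = &root_down;
	child.ports = &child_up;
	child.num_ports = 1;

	scan_switch_sketch(&root, NULL);	/* prints: visiting root, visiting child */
	return 0;
}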