Lines Matching full:sw (identifier search over the Linux Thunderbolt driver, drivers/thunderbolt/switch.c). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" indicate how sw is bound in that function.

37 static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)  in __nvm_get_auth_status()  argument
42 if (uuid_equal(&st->uuid, sw->uuid)) in __nvm_get_auth_status()
49 static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) in nvm_get_auth_status() argument
54 st = __nvm_get_auth_status(sw); in nvm_get_auth_status()
60 static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) in nvm_set_auth_status() argument
64 if (WARN_ON(!sw->uuid)) in nvm_set_auth_status()
68 st = __nvm_get_auth_status(sw); in nvm_set_auth_status()
75 memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); in nvm_set_auth_status()
85 static void nvm_clear_auth_status(const struct tb_switch *sw) in nvm_clear_auth_status() argument
90 st = __nvm_get_auth_status(sw); in nvm_clear_auth_status()
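
The __nvm_get_auth_status()/nvm_set_auth_status()/nvm_clear_auth_status() matches above implement a small UUID-keyed cache of NVM authentication results, so a failed flash authentication can still be reported after the router power cycles. A condensed sketch of the pattern, consistent with the matched lines (the struct and list names follow the driver; the bare-uuid lookup helper is a simplification of __nvm_get_auth_status()):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/uuid.h>

/* One cached NVM authentication result, keyed by router UUID. */
struct nvm_auth_status {
	struct list_head list;
	uuid_t uuid;
	u32 status;
};

static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);	/* writers serialize on this */

/* Return the cache entry for @uuid, or NULL when nothing is cached. */
static struct nvm_auth_status *nvm_status_lookup(const uuid_t *uuid)
{
	struct nvm_auth_status *st;

	list_for_each_entry(st, &nvm_auth_status_cache, list) {
		if (uuid_equal(&st->uuid, uuid))
			return st;
	}
	return NULL;
}
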
98 static int nvm_validate_and_write(struct tb_switch *sw) in nvm_validate_and_write() argument
104 ret = tb_nvm_validate(sw->nvm); in nvm_validate_and_write()
108 ret = tb_nvm_write_headers(sw->nvm); in nvm_validate_and_write()
112 buf = sw->nvm->buf_data_start; in nvm_validate_and_write()
113 image_size = sw->nvm->buf_data_size; in nvm_validate_and_write()
115 if (tb_switch_is_usb4(sw)) in nvm_validate_and_write()
116 ret = usb4_switch_nvm_write(sw, 0, buf, image_size); in nvm_validate_and_write()
118 ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size); in nvm_validate_and_write()
122 sw->nvm->flushed = true; in nvm_validate_and_write()
126 static int nvm_authenticate_host_dma_port(struct tb_switch *sw) in nvm_authenticate_host_dma_port() argument
135 if (!sw->safe_mode) { in nvm_authenticate_host_dma_port()
138 ret = tb_domain_disconnect_all_paths(sw->tb); in nvm_authenticate_host_dma_port()
145 ret = dma_port_flash_update_auth(sw->dma_port); in nvm_authenticate_host_dma_port()
153 tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); in nvm_authenticate_host_dma_port()
154 if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) in nvm_authenticate_host_dma_port()
155 nvm_set_auth_status(sw, status); in nvm_authenticate_host_dma_port()
162 dma_port_power_cycle(sw->dma_port); in nvm_authenticate_host_dma_port()
166 static int nvm_authenticate_device_dma_port(struct tb_switch *sw) in nvm_authenticate_device_dma_port() argument
170 ret = dma_port_flash_update_auth(sw->dma_port); in nvm_authenticate_device_dma_port()
191 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); in nvm_authenticate_device_dma_port()
196 tb_sw_warn(sw, "failed to authenticate NVM\n"); in nvm_authenticate_device_dma_port()
197 nvm_set_auth_status(sw, status); in nvm_authenticate_device_dma_port()
200 tb_sw_info(sw, "power cycling the switch now\n"); in nvm_authenticate_device_dma_port()
201 dma_port_power_cycle(sw->dma_port); in nvm_authenticate_device_dma_port()
211 static void nvm_authenticate_start_dma_port(struct tb_switch *sw) in nvm_authenticate_start_dma_port() argument
221 root_port = pcie_find_root_port(sw->tb->nhi->pdev); in nvm_authenticate_start_dma_port()
226 static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) in nvm_authenticate_complete_dma_port() argument
230 root_port = pcie_find_root_port(sw->tb->nhi->pdev); in nvm_authenticate_complete_dma_port()
235 static inline bool nvm_readable(struct tb_switch *sw) in nvm_readable() argument
237 if (tb_switch_is_usb4(sw)) { in nvm_readable()
244 return usb4_switch_nvm_sector_size(sw) > 0; in nvm_readable()
248 return !!sw->dma_port; in nvm_readable()
251 static inline bool nvm_upgradeable(struct tb_switch *sw) in nvm_upgradeable() argument
253 if (sw->no_nvm_upgrade) in nvm_upgradeable()
255 return nvm_readable(sw); in nvm_upgradeable()
258 static int nvm_authenticate(struct tb_switch *sw, bool auth_only) in nvm_authenticate() argument
262 if (tb_switch_is_usb4(sw)) { in nvm_authenticate()
264 ret = usb4_switch_nvm_set_offset(sw, 0); in nvm_authenticate()
268 sw->nvm->authenticating = true; in nvm_authenticate()
269 return usb4_switch_nvm_authenticate(sw); in nvm_authenticate()
274 sw->nvm->authenticating = true; in nvm_authenticate()
275 if (!tb_route(sw)) { in nvm_authenticate()
276 nvm_authenticate_start_dma_port(sw); in nvm_authenticate()
277 ret = nvm_authenticate_host_dma_port(sw); in nvm_authenticate()
279 ret = nvm_authenticate_device_dma_port(sw); in nvm_authenticate()
287 * @sw: Router whose NVM to read
297 int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, in tb_switch_nvm_read() argument
300 if (tb_switch_is_usb4(sw)) in tb_switch_nvm_read()
301 return usb4_switch_nvm_read(sw, address, buf, size); in tb_switch_nvm_read()
302 return dma_port_flash_read(sw->dma_port, address, buf, size); in tb_switch_nvm_read()
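
tb_switch_nvm_read() (kdoc at 287 above) is the common read entry point: USB4 routers use the router operation, older ones go through the DMA port. A hedged usage sketch; example_read_nvm_header() is hypothetical, and per the nvm_read() wrapper in the following matches the caller is expected to hold the domain lock and keep the router runtime-resumed:

/* Assumes in-tree build: #include "tb.h" (the driver's private header). */
static int example_read_nvm_header(struct tb_switch *sw)
{
	u8 header[64];
	int ret;

	/* Read the first 64 bytes of the active NVM. */
	ret = tb_switch_nvm_read(sw, 0, header, sizeof(header));
	if (ret)
		tb_sw_warn(sw, "reading NVM header failed: %d\n", ret);
	return ret;
}
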
308 struct tb_switch *sw = tb_to_switch(nvm->dev); in nvm_read() local
311 pm_runtime_get_sync(&sw->dev); in nvm_read()
313 if (!mutex_trylock(&sw->tb->lock)) { in nvm_read()
318 ret = tb_switch_nvm_read(sw, offset, val, bytes); in nvm_read()
319 mutex_unlock(&sw->tb->lock); in nvm_read()
322 pm_runtime_mark_last_busy(&sw->dev); in nvm_read()
323 pm_runtime_put_autosuspend(&sw->dev); in nvm_read()
331 struct tb_switch *sw = tb_to_switch(nvm->dev); in nvm_write() local
334 if (!mutex_trylock(&sw->tb->lock)) in nvm_write()
344 mutex_unlock(&sw->tb->lock); in nvm_write()
349 static int tb_switch_nvm_add(struct tb_switch *sw) in tb_switch_nvm_add() argument
354 if (!nvm_readable(sw)) in tb_switch_nvm_add()
357 nvm = tb_nvm_alloc(&sw->dev); in tb_switch_nvm_add()
372 if (!sw->safe_mode) { in tb_switch_nvm_add()
376 tb_sw_dbg(sw, "NVM version %x.%x\n", nvm->major, nvm->minor); in tb_switch_nvm_add()
379 if (!sw->no_nvm_upgrade) { in tb_switch_nvm_add()
385 sw->nvm = nvm; in tb_switch_nvm_add()
389 tb_sw_dbg(sw, "NVM upgrade disabled\n"); in tb_switch_nvm_add()
390 sw->no_nvm_upgrade = true; in tb_switch_nvm_add()
397 static void tb_switch_nvm_remove(struct tb_switch *sw) in tb_switch_nvm_remove() argument
401 nvm = sw->nvm; in tb_switch_nvm_remove()
402 sw->nvm = NULL; in tb_switch_nvm_remove()
409 nvm_clear_auth_status(sw); in tb_switch_nvm_remove()
574 if (credits == 0 || port->sw->is_unplugged) in tb_port_add_nfc_credits()
581 if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) in tb_port_add_nfc_credits()
625 if (tb_switch_is_icm(port->sw)) in tb_port_unlock()
629 if (tb_switch_is_usb4(port->sw)) in tb_port_unlock()
690 if (tb_switch_is_usb4(port->sw)) in tb_port_reset()
717 tb_dbg(port->sw->tb, " Port %d: not implemented\n", in tb_init_port()
762 tb_dump_port(port->sw->tb, port); in tb_init_port()
842 const struct tb_switch *sw) in tb_switch_is_reachable() argument
845 return (tb_route(parent) & mask) == (tb_route(sw) & mask); in tb_switch_is_reachable()
871 if (prev->sw == end->sw) { in tb_next_port_on_path()
877 if (tb_switch_is_reachable(prev->sw, end->sw)) { in tb_next_port_on_path()
878 next = tb_port_at(tb_route(end->sw), prev->sw); in tb_next_port_on_path()
887 next = tb_upstream_port(prev->sw); in tb_next_port_on_path()
1289 if (tb_switch_is_usb4(port->sw)) in tb_port_start_lane_initialization()
1480 if (tb_switch_is_usb4(port->sw)) in tb_dp_port_set_hops()
1550 static const char *tb_switch_generation_name(const struct tb_switch *sw) in tb_switch_generation_name() argument
1552 switch (sw->generation) { in tb_switch_generation_name()
1566 static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) in tb_dump_switch() argument
1568 const struct tb_regs_switch_header *regs = &sw->config; in tb_dump_switch()
1571 tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, in tb_dump_switch()
1584 static int tb_switch_reset_host(struct tb_switch *sw) in tb_switch_reset_host() argument
1586 if (sw->generation > 1) { in tb_switch_reset_host()
1589 tb_switch_for_each_port(sw, port) { in tb_switch_reset_host()
1629 res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, in tb_switch_reset_host()
1633 res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); in tb_switch_reset_host()
1643 static int tb_switch_reset_device(struct tb_switch *sw) in tb_switch_reset_device() argument
1645 return tb_port_reset(tb_switch_downstream_port(sw)); in tb_switch_reset_device()
1648 static bool tb_switch_enumerated(struct tb_switch *sw) in tb_switch_enumerated() argument
1655 * during system sleep where sw->config.enabled is already set in tb_switch_enumerated()
1658 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1); in tb_switch_enumerated()
1667 * @sw: Router to reset
1669 * Issues a reset to the router @sw. Can be used for any router. For host in tb_switch_reset()
1679 int tb_switch_reset(struct tb_switch *sw) in tb_switch_reset() argument
1688 if (!tb_switch_enumerated(sw)) in tb_switch_reset()
1691 tb_sw_dbg(sw, "resetting\n"); in tb_switch_reset()
1693 if (tb_route(sw)) in tb_switch_reset()
1694 ret = tb_switch_reset_device(sw); in tb_switch_reset()
1696 ret = tb_switch_reset_host(sw); in tb_switch_reset()
1699 tb_sw_warn(sw, "failed to reset\n"); in tb_switch_reset()
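
Per the kdoc at 1667 above, tb_switch_reset() handles both host and device routers, dispatching on tb_route(), and returns without doing anything for routers that were never enumerated. A minimal hedged caller (example_reset() is hypothetical):

/* Assumes in-tree build with "tb.h" included. */
static int example_reset(struct tb_switch *sw)
{
	int ret;

	ret = tb_switch_reset(sw);	/* safe for host and device routers */
	if (ret)
		tb_sw_warn(sw, "reset failed: %d\n", ret);
	return ret;
}
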
1706 * @sw: Router to read the offset value from
1720 int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, in tb_switch_wait_for_bit() argument
1729 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); in tb_switch_wait_for_bit()
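
Only one line of tb_switch_wait_for_bit() matched above; the function polls a router config-space register until a bit reaches the wanted value or the timeout expires (see its use at 3899 below). A sketch of the loop shape consistent with the match, assuming the kernel's usual ktime-based poll idiom (sleep bounds are a guess):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>
/* Plus the driver's "tb.h" for tb_sw_read() and TB_CFG_SWITCH. */

static int wait_for_bit_sketch(struct tb_switch *sw, u32 offset, u32 bit,
			       u32 value, int timeout_msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		u32 val;
		int ret;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			return ret;

		/* Done once the selected bit has the requested value. */
		if ((val & bit) == value)
			return 0;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
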
1749 static int tb_plug_events_active(struct tb_switch *sw, bool active) in tb_plug_events_active() argument
1754 if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) in tb_plug_events_active()
1757 sw->config.plug_events_delay = 0xff; in tb_plug_events_active()
1758 res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); in tb_plug_events_active()
1762 res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); in tb_plug_events_active()
1768 switch (sw->config.device_id) { in tb_plug_events_active()
1779 if (!tb_switch_is_alpine_ridge(sw)) in tb_plug_events_active()
1785 return tb_sw_write(sw, &data, TB_CFG_SWITCH, in tb_plug_events_active()
1786 sw->cap_plug_events + 1, 1); in tb_plug_events_active()
1793 struct tb_switch *sw = tb_to_switch(dev); in authorized_show() local
1795 return sysfs_emit(buf, "%u\n", sw->authorized); in authorized_show()
1801 struct tb_switch *sw; in disapprove_switch() local
1803 sw = tb_to_switch(dev); in disapprove_switch()
1804 if (sw && sw->authorized) { in disapprove_switch()
1808 ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch); in disapprove_switch()
1812 ret = tb_domain_disapprove_switch(sw->tb, sw); in disapprove_switch()
1816 sw->authorized = 0; in disapprove_switch()
1817 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); in disapprove_switch()
1823 static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) in tb_switch_set_authorized() argument
1829 if (!mutex_trylock(&sw->tb->lock)) in tb_switch_set_authorized()
1832 if (!!sw->authorized == !!val) in tb_switch_set_authorized()
1838 if (tb_route(sw)) { in tb_switch_set_authorized()
1839 ret = disapprove_switch(&sw->dev, NULL); in tb_switch_set_authorized()
1846 if (sw->key) in tb_switch_set_authorized()
1847 ret = tb_domain_approve_switch_key(sw->tb, sw); in tb_switch_set_authorized()
1849 ret = tb_domain_approve_switch(sw->tb, sw); in tb_switch_set_authorized()
1854 if (sw->key) in tb_switch_set_authorized()
1855 ret = tb_domain_challenge_switch_key(sw->tb, sw); in tb_switch_set_authorized()
1863 sw->authorized = val; in tb_switch_set_authorized()
1868 sprintf(envp_string, "AUTHORIZED=%u", sw->authorized); in tb_switch_set_authorized()
1869 kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); in tb_switch_set_authorized()
1873 mutex_unlock(&sw->tb->lock); in tb_switch_set_authorized()
1881 struct tb_switch *sw = tb_to_switch(dev); in authorized_store() local
1891 pm_runtime_get_sync(&sw->dev); in authorized_store()
1892 ret = tb_switch_set_authorized(sw, val); in authorized_store()
1893 pm_runtime_mark_last_busy(&sw->dev); in authorized_store()
1894 pm_runtime_put_autosuspend(&sw->dev); in authorized_store()
1903 struct tb_switch *sw = tb_to_switch(dev); in boot_show() local
1905 return sysfs_emit(buf, "%u\n", sw->boot); in boot_show()
1912 struct tb_switch *sw = tb_to_switch(dev); in device_show() local
1914 return sysfs_emit(buf, "%#x\n", sw->device); in device_show()
1921 struct tb_switch *sw = tb_to_switch(dev); in device_name_show() local
1923 return sysfs_emit(buf, "%s\n", sw->device_name ?: ""); in device_name_show()
1930 struct tb_switch *sw = tb_to_switch(dev); in generation_show() local
1932 return sysfs_emit(buf, "%u\n", sw->generation); in generation_show()
1939 struct tb_switch *sw = tb_to_switch(dev); in key_show() local
1942 if (!mutex_trylock(&sw->tb->lock)) in key_show()
1945 if (sw->key) in key_show()
1946 ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); in key_show()
1950 mutex_unlock(&sw->tb->lock); in key_show()
1957 struct tb_switch *sw = tb_to_switch(dev); in key_store() local
1967 if (!mutex_trylock(&sw->tb->lock)) in key_store()
1970 if (sw->authorized) { in key_store()
1973 kfree(sw->key); in key_store()
1975 sw->key = NULL; in key_store()
1977 sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); in key_store()
1978 if (!sw->key) in key_store()
1983 mutex_unlock(&sw->tb->lock); in key_store()
1991 struct tb_switch *sw = tb_to_switch(dev); in speed_show() local
1993 return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed); in speed_show()
2006 struct tb_switch *sw = tb_to_switch(dev); in rx_lanes_show() local
2009 switch (sw->link_width) { in rx_lanes_show()
2032 struct tb_switch *sw = tb_to_switch(dev); in tx_lanes_show() local
2035 switch (sw->link_width) { in tx_lanes_show()
2058 struct tb_switch *sw = tb_to_switch(dev); in nvm_authenticate_show() local
2061 nvm_get_auth_status(sw, &status); in nvm_authenticate_show()
2068 struct tb_switch *sw = tb_to_switch(dev); in nvm_authenticate_sysfs() local
2071 pm_runtime_get_sync(&sw->dev); in nvm_authenticate_sysfs()
2073 if (!mutex_trylock(&sw->tb->lock)) { in nvm_authenticate_sysfs()
2078 if (sw->no_nvm_upgrade) { in nvm_authenticate_sysfs()
2084 if (!sw->nvm) { in nvm_authenticate_sysfs()
2094 nvm_clear_auth_status(sw); in nvm_authenticate_sysfs()
2101 ret = nvm_authenticate(sw, true); in nvm_authenticate_sysfs()
2103 if (!sw->nvm->flushed) { in nvm_authenticate_sysfs()
2104 if (!sw->nvm->buf) { in nvm_authenticate_sysfs()
2109 ret = nvm_validate_and_write(sw); in nvm_authenticate_sysfs()
2115 ret = tb_lc_force_power(sw); in nvm_authenticate_sysfs()
2117 ret = nvm_authenticate(sw, false); in nvm_authenticate_sysfs()
2123 mutex_unlock(&sw->tb->lock); in nvm_authenticate_sysfs()
2125 pm_runtime_mark_last_busy(&sw->dev); in nvm_authenticate_sysfs()
2126 pm_runtime_put_autosuspend(&sw->dev); in nvm_authenticate_sysfs()
2160 struct tb_switch *sw = tb_to_switch(dev); in nvm_version_show() local
2163 if (!mutex_trylock(&sw->tb->lock)) in nvm_version_show()
2166 if (sw->safe_mode) in nvm_version_show()
2168 else if (!sw->nvm) in nvm_version_show()
2171 ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); in nvm_version_show()
2173 mutex_unlock(&sw->tb->lock); in nvm_version_show()
2182 struct tb_switch *sw = tb_to_switch(dev); in vendor_show() local
2184 return sysfs_emit(buf, "%#x\n", sw->vendor); in vendor_show()
2191 struct tb_switch *sw = tb_to_switch(dev); in vendor_name_show() local
2193 return sysfs_emit(buf, "%s\n", sw->vendor_name ?: ""); in vendor_name_show()
2200 struct tb_switch *sw = tb_to_switch(dev); in unique_id_show() local
2202 return sysfs_emit(buf, "%pUb\n", sw->uuid); in unique_id_show()
2230 struct tb_switch *sw = tb_to_switch(dev); in switch_attr_is_visible() local
2233 if (sw->tb->security_level == TB_SECURITY_NOPCIE || in switch_attr_is_visible()
2234 sw->tb->security_level == TB_SECURITY_DPONLY) in switch_attr_is_visible()
2237 if (!sw->device) in switch_attr_is_visible()
2240 if (!sw->device_name) in switch_attr_is_visible()
2243 if (!sw->vendor) in switch_attr_is_visible()
2246 if (!sw->vendor_name) in switch_attr_is_visible()
2249 if (tb_route(sw) && in switch_attr_is_visible()
2250 sw->tb->security_level == TB_SECURITY_SECURE && in switch_attr_is_visible()
2251 sw->security_level == TB_SECURITY_SECURE) in switch_attr_is_visible()
2258 if (tb_route(sw)) in switch_attr_is_visible()
2262 if (nvm_upgradeable(sw)) in switch_attr_is_visible()
2266 if (nvm_readable(sw)) in switch_attr_is_visible()
2270 if (tb_route(sw)) in switch_attr_is_visible()
2274 if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) in switch_attr_is_visible()
2279 return sw->safe_mode ? 0 : attr->mode; in switch_attr_is_visible()
2294 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_release() local
2297 dma_port_free(sw->dma_port); in tb_switch_release()
2299 tb_switch_for_each_port(sw, port) { in tb_switch_release()
2304 kfree(sw->uuid); in tb_switch_release()
2305 kfree(sw->device_name); in tb_switch_release()
2306 kfree(sw->vendor_name); in tb_switch_release()
2307 kfree(sw->ports); in tb_switch_release()
2308 kfree(sw->drom); in tb_switch_release()
2309 kfree(sw->key); in tb_switch_release()
2310 kfree(sw); in tb_switch_release()
2315 const struct tb_switch *sw = tb_to_switch(dev); in tb_switch_uevent() local
2318 if (tb_switch_is_usb4(sw)) { in tb_switch_uevent()
2320 usb4_switch_version(sw))) in tb_switch_uevent()
2324 if (!tb_route(sw)) { in tb_switch_uevent()
2331 tb_switch_for_each_port(sw, port) { in tb_switch_uevent()
2353 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_runtime_suspend() local
2354 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; in tb_switch_runtime_suspend()
2357 return cm_ops->runtime_suspend_switch(sw); in tb_switch_runtime_suspend()
2364 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_runtime_resume() local
2365 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; in tb_switch_runtime_resume()
2368 return cm_ops->runtime_resume_switch(sw); in tb_switch_runtime_resume()
2384 static int tb_switch_get_generation(struct tb_switch *sw) in tb_switch_get_generation() argument
2386 if (tb_switch_is_usb4(sw)) in tb_switch_get_generation()
2389 if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { in tb_switch_get_generation()
2390 switch (sw->config.device_id) { in tb_switch_get_generation()
2424 tb_sw_warn(sw, "unsupported switch device id %#x\n", in tb_switch_get_generation()
2425 sw->config.device_id); in tb_switch_get_generation()
2429 static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) in tb_switch_exceeds_max_depth() argument
2433 if (tb_switch_is_usb4(sw) || in tb_switch_exceeds_max_depth()
2434 (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) in tb_switch_exceeds_max_depth()
2458 struct tb_switch *sw; in tb_switch_alloc() local
2477 sw = kzalloc(sizeof(*sw), GFP_KERNEL); in tb_switch_alloc()
2478 if (!sw) in tb_switch_alloc()
2481 sw->tb = tb; in tb_switch_alloc()
2482 ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); in tb_switch_alloc()
2486 sw->generation = tb_switch_get_generation(sw); in tb_switch_alloc()
2489 tb_dump_switch(tb, sw); in tb_switch_alloc()
2492 sw->config.upstream_port_number = upstream_port; in tb_switch_alloc()
2493 sw->config.depth = depth; in tb_switch_alloc()
2494 sw->config.route_hi = upper_32_bits(route); in tb_switch_alloc()
2495 sw->config.route_lo = lower_32_bits(route); in tb_switch_alloc()
2496 sw->config.enabled = 0; in tb_switch_alloc()
2499 if (tb_switch_exceeds_max_depth(sw, depth)) { in tb_switch_alloc()
2505 sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), in tb_switch_alloc()
2507 if (!sw->ports) { in tb_switch_alloc()
2512 for (i = 0; i <= sw->config.max_port_number; i++) { in tb_switch_alloc()
2514 sw->ports[i].sw = sw; in tb_switch_alloc()
2515 sw->ports[i].port = i; in tb_switch_alloc()
2519 ida_init(&sw->ports[i].in_hopids); in tb_switch_alloc()
2520 ida_init(&sw->ports[i].out_hopids); in tb_switch_alloc()
2524 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); in tb_switch_alloc()
2526 sw->cap_plug_events = ret; in tb_switch_alloc()
2528 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); in tb_switch_alloc()
2530 sw->cap_vsec_tmu = ret; in tb_switch_alloc()
2532 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); in tb_switch_alloc()
2534 sw->cap_lc = ret; in tb_switch_alloc()
2536 ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); in tb_switch_alloc()
2538 sw->cap_lp = ret; in tb_switch_alloc()
2542 sw->authorized = true; in tb_switch_alloc()
2544 device_initialize(&sw->dev); in tb_switch_alloc()
2545 sw->dev.parent = parent; in tb_switch_alloc()
2546 sw->dev.bus = &tb_bus_type; in tb_switch_alloc()
2547 sw->dev.type = &tb_switch_type; in tb_switch_alloc()
2548 sw->dev.groups = switch_groups; in tb_switch_alloc()
2549 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); in tb_switch_alloc()
2551 return sw; in tb_switch_alloc()
2554 kfree(sw->ports); in tb_switch_alloc()
2555 kfree(sw); in tb_switch_alloc()
2577 struct tb_switch *sw; in tb_switch_alloc_safe_mode() local
2579 sw = kzalloc(sizeof(*sw), GFP_KERNEL); in tb_switch_alloc_safe_mode()
2580 if (!sw) in tb_switch_alloc_safe_mode()
2583 sw->tb = tb; in tb_switch_alloc_safe_mode()
2584 sw->config.depth = tb_route_length(route); in tb_switch_alloc_safe_mode()
2585 sw->config.route_hi = upper_32_bits(route); in tb_switch_alloc_safe_mode()
2586 sw->config.route_lo = lower_32_bits(route); in tb_switch_alloc_safe_mode()
2587 sw->safe_mode = true; in tb_switch_alloc_safe_mode()
2589 device_initialize(&sw->dev); in tb_switch_alloc_safe_mode()
2590 sw->dev.parent = parent; in tb_switch_alloc_safe_mode()
2591 sw->dev.bus = &tb_bus_type; in tb_switch_alloc_safe_mode()
2592 sw->dev.type = &tb_switch_type; in tb_switch_alloc_safe_mode()
2593 sw->dev.groups = switch_groups; in tb_switch_alloc_safe_mode()
2594 dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); in tb_switch_alloc_safe_mode()
2596 return sw; in tb_switch_alloc_safe_mode()
2601 * @sw: Switch to configure
2610 int tb_switch_configure(struct tb_switch *sw) in tb_switch_configure() argument
2612 struct tb *tb = sw->tb; in tb_switch_configure()
2616 route = tb_route(sw); in tb_switch_configure()
2619 sw->config.enabled ? "restoring" : "initializing", route, in tb_switch_configure()
2620 tb_route_length(route), sw->config.upstream_port_number); in tb_switch_configure()
2622 sw->config.enabled = 1; in tb_switch_configure()
2624 if (tb_switch_is_usb4(sw)) { in tb_switch_configure()
2631 if (usb4_switch_version(sw) < 2) in tb_switch_configure()
2632 sw->config.cmuv = ROUTER_CS_4_CMUV_V1; in tb_switch_configure()
2634 sw->config.cmuv = ROUTER_CS_4_CMUV_V2; in tb_switch_configure()
2635 sw->config.plug_events_delay = 0xa; in tb_switch_configure()
2638 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, in tb_switch_configure()
2643 ret = usb4_switch_setup(sw); in tb_switch_configure()
2645 if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) in tb_switch_configure()
2646 tb_sw_warn(sw, "unknown switch vendor id %#x\n", in tb_switch_configure()
2647 sw->config.vendor_id); in tb_switch_configure()
2649 if (!sw->cap_plug_events) { in tb_switch_configure()
2650 tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS, aborting\n"); in tb_switch_configure()
2655 ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, in tb_switch_configure()
2661 return tb_plug_events_active(sw, true); in tb_switch_configure()
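
tb_switch_configure() (kdoc at 2601 above) sits between allocating a router and registering it. A hedged sketch of that lifecycle; example_enumerate() is hypothetical, tb_switch_alloc() and tb_switch_add() appear later in this listing, and tb_switch_put() is the driver's reference-drop helper:

#include <linux/err.h>
/* Plus the driver's "tb.h". */

static int example_enumerate(struct tb *tb, struct device *parent, u64 route)
{
	struct tb_switch *sw;
	int ret;

	sw = tb_switch_alloc(tb, parent, route);
	if (IS_ERR(sw))
		return PTR_ERR(sw);

	ret = tb_switch_configure(sw);	/* write config space, enable plug events */
	if (!ret)
		ret = tb_switch_add(sw);	/* registers the device, adds NVM etc. */
	if (ret)
		tb_switch_put(sw);		/* drop the initial reference */
	return ret;
}
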
2666 * @sw: Router to configure
2673 int tb_switch_configuration_valid(struct tb_switch *sw) in tb_switch_configuration_valid() argument
2675 if (tb_switch_is_usb4(sw)) in tb_switch_configuration_valid()
2676 return usb4_switch_configuration_valid(sw); in tb_switch_configuration_valid()
2680 static int tb_switch_set_uuid(struct tb_switch *sw) in tb_switch_set_uuid() argument
2686 if (sw->uuid) in tb_switch_set_uuid()
2689 if (tb_switch_is_usb4(sw)) { in tb_switch_set_uuid()
2690 ret = usb4_switch_read_uid(sw, &sw->uid); in tb_switch_set_uuid()
2699 ret = tb_lc_read_uuid(sw, uuid); in tb_switch_set_uuid()
2714 uuid[0] = sw->uid & 0xffffffff; in tb_switch_set_uuid()
2715 uuid[1] = (sw->uid >> 32) & 0xffffffff; in tb_switch_set_uuid()
2720 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); in tb_switch_set_uuid()
2721 if (!sw->uuid) in tb_switch_set_uuid()
2726 static int tb_switch_add_dma_port(struct tb_switch *sw) in tb_switch_add_dma_port() argument
2731 switch (sw->generation) { in tb_switch_add_dma_port()
2734 if (tb_route(sw)) in tb_switch_add_dma_port()
2740 ret = tb_switch_set_uuid(sw); in tb_switch_add_dma_port()
2750 if (!sw->safe_mode) in tb_switch_add_dma_port()
2755 if (sw->no_nvm_upgrade) in tb_switch_add_dma_port()
2758 if (tb_switch_is_usb4(sw)) { in tb_switch_add_dma_port()
2759 ret = usb4_switch_nvm_authenticate_status(sw, &status); in tb_switch_add_dma_port()
2764 tb_sw_info(sw, "switch flash authentication failed\n"); in tb_switch_add_dma_port()
2765 nvm_set_auth_status(sw, status); in tb_switch_add_dma_port()
2772 if (!tb_route(sw) && !tb_switch_is_icm(sw)) in tb_switch_add_dma_port()
2775 sw->dma_port = dma_port_alloc(sw); in tb_switch_add_dma_port()
2776 if (!sw->dma_port) in tb_switch_add_dma_port()
2785 nvm_get_auth_status(sw, &status); in tb_switch_add_dma_port()
2787 if (!tb_route(sw)) in tb_switch_add_dma_port()
2788 nvm_authenticate_complete_dma_port(sw); in tb_switch_add_dma_port()
2797 ret = dma_port_flash_update_auth_status(sw->dma_port, &status); in tb_switch_add_dma_port()
2802 if (!tb_route(sw)) in tb_switch_add_dma_port()
2803 nvm_authenticate_complete_dma_port(sw); in tb_switch_add_dma_port()
2806 tb_sw_info(sw, "switch flash authentication failed\n"); in tb_switch_add_dma_port()
2807 nvm_set_auth_status(sw, status); in tb_switch_add_dma_port()
2810 tb_sw_info(sw, "power cycling the switch now\n"); in tb_switch_add_dma_port()
2811 dma_port_power_cycle(sw->dma_port); in tb_switch_add_dma_port()
2820 static void tb_switch_default_link_ports(struct tb_switch *sw) in tb_switch_default_link_ports() argument
2824 for (i = 1; i <= sw->config.max_port_number; i++) { in tb_switch_default_link_ports()
2825 struct tb_port *port = &sw->ports[i]; in tb_switch_default_link_ports()
2832 if (i == sw->config.max_port_number || in tb_switch_default_link_ports()
2833 !tb_port_is_null(&sw->ports[i + 1])) in tb_switch_default_link_ports()
2837 subordinate = &sw->ports[i + 1]; in tb_switch_default_link_ports()
2844 tb_sw_dbg(sw, "linked ports %d <-> %d\n", in tb_switch_default_link_ports()
2850 static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) in tb_switch_lane_bonding_possible() argument
2852 const struct tb_port *up = tb_upstream_port(sw); in tb_switch_lane_bonding_possible()
2857 if (tb_switch_is_usb4(sw)) in tb_switch_lane_bonding_possible()
2858 return usb4_switch_lane_bonding_possible(sw); in tb_switch_lane_bonding_possible()
2859 return tb_lc_lane_bonding_possible(sw); in tb_switch_lane_bonding_possible()
2862 static int tb_switch_update_link_attributes(struct tb_switch *sw) in tb_switch_update_link_attributes() argument
2868 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_update_link_attributes()
2871 up = tb_upstream_port(sw); in tb_switch_update_link_attributes()
2876 if (sw->link_speed != ret) in tb_switch_update_link_attributes()
2878 sw->link_speed = ret; in tb_switch_update_link_attributes()
2883 if (sw->link_width != ret) in tb_switch_update_link_attributes()
2885 sw->link_width = ret; in tb_switch_update_link_attributes()
2888 if (device_is_registered(&sw->dev) && change) in tb_switch_update_link_attributes()
2889 kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); in tb_switch_update_link_attributes()
2895 static void tb_switch_link_init(struct tb_switch *sw) in tb_switch_link_init() argument
2900 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_link_init()
2903 tb_sw_dbg(sw, "current link speed %u.0 Gb/s\n", sw->link_speed); in tb_switch_link_init()
2904 tb_sw_dbg(sw, "current link width %s\n", tb_width_name(sw->link_width)); in tb_switch_link_init()
2906 bonded = sw->link_width >= TB_LINK_WIDTH_DUAL; in tb_switch_link_init()
2912 up = tb_upstream_port(sw); in tb_switch_link_init()
2913 down = tb_switch_downstream_port(sw); in tb_switch_link_init()
2934 sw->preferred_link_width = sw->link_width; in tb_switch_link_init()
2935 tb_sw_dbg(sw, "preferred link width %s\n", in tb_switch_link_init()
2936 tb_width_name(sw->preferred_link_width)); in tb_switch_link_init()
2941 * @sw: Switch to enable lane bonding
2949 static int tb_switch_lane_bonding_enable(struct tb_switch *sw) in tb_switch_lane_bonding_enable() argument
2955 if (!tb_switch_lane_bonding_possible(sw)) in tb_switch_lane_bonding_enable()
2958 up = tb_upstream_port(sw); in tb_switch_lane_bonding_enable()
2959 down = tb_switch_downstream_port(sw); in tb_switch_lane_bonding_enable()
2994 * @sw: Switch whose lane bonding to disable
2996 * Disables lane bonding between @sw and parent. This can be called even
3001 static int tb_switch_lane_bonding_disable(struct tb_switch *sw) in tb_switch_lane_bonding_disable() argument
3006 up = tb_upstream_port(sw); in tb_switch_lane_bonding_disable()
3013 * if the link is not up anymore (sw is unplugged). in tb_switch_lane_bonding_disable()
3021 down = tb_switch_downstream_port(sw); in tb_switch_lane_bonding_disable()
3032 /* Note: sw->link_width is updated in tb_switch_update_link_attributes() */
3033 static int tb_switch_asym_enable(struct tb_switch *sw, enum tb_link_width width) in tb_switch_asym_enable() argument
3039 up = tb_upstream_port(sw); in tb_switch_asym_enable()
3040 down = tb_switch_downstream_port(sw); in tb_switch_asym_enable()
3062 if (sw->link_width != width) { in tb_switch_asym_enable()
3075 /* Note: sw->link_width is updated in tb_switch_update_link_attributes() */
3076 static int tb_switch_asym_disable(struct tb_switch *sw) in tb_switch_asym_disable() argument
3081 up = tb_upstream_port(sw); in tb_switch_asym_disable()
3082 down = tb_switch_downstream_port(sw); in tb_switch_asym_disable()
3097 if (sw->link_width > TB_LINK_WIDTH_DUAL) { in tb_switch_asym_disable()
3098 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX) in tb_switch_asym_disable()
3115 * @sw: Router to configure
3126 int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width) in tb_switch_set_link_width() argument
3131 if (!tb_route(sw)) in tb_switch_set_link_width()
3134 up = tb_upstream_port(sw); in tb_switch_set_link_width()
3135 down = tb_switch_downstream_port(sw); in tb_switch_set_link_width()
3139 ret = tb_switch_lane_bonding_disable(sw); in tb_switch_set_link_width()
3143 if (sw->link_width == TB_LINK_WIDTH_ASYM_TX || in tb_switch_set_link_width()
3144 sw->link_width == TB_LINK_WIDTH_ASYM_RX) { in tb_switch_set_link_width()
3145 ret = tb_switch_asym_disable(sw); in tb_switch_set_link_width()
3149 ret = tb_switch_lane_bonding_enable(sw); in tb_switch_set_link_width()
3154 ret = tb_switch_asym_enable(sw, width); in tb_switch_set_link_width()
3163 tb_sw_warn(sw, "timeout changing link width\n"); in tb_switch_set_link_width()
3172 tb_sw_dbg(sw, "failed to change link width: %d\n", ret); in tb_switch_set_link_width()
3179 tb_switch_update_link_attributes(sw); in tb_switch_set_link_width()
3181 tb_sw_dbg(sw, "link width set to %s\n", tb_width_name(width)); in tb_switch_set_link_width()
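
tb_switch_set_link_width() (kdoc at 3115 above) funnels unbonding, bonding and the asymmetric widths through one entry point. A hedged usage sketch; example_bond() is hypothetical and TB_LINK_WIDTH_DUAL is taken from the switch cases matched earlier:

/* Assumes in-tree build with "tb.h" included. */
static void example_bond(struct tb_switch *sw)
{
	/* Ask for a symmetric two-lane link; on failure the link keeps
	 * running at its current width, so just log it.
	 */
	if (tb_switch_set_link_width(sw, TB_LINK_WIDTH_DUAL))
		tb_sw_dbg(sw, "link stays at %s\n",
			  tb_width_name(sw->link_width));
}
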
3187 * @sw: Switch whose link is configured
3189 * Sets the link upstream from @sw configured (from both ends) so that
3197 int tb_switch_configure_link(struct tb_switch *sw) in tb_switch_configure_link() argument
3202 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_configure_link()
3205 up = tb_upstream_port(sw); in tb_switch_configure_link()
3206 if (tb_switch_is_usb4(up->sw)) in tb_switch_configure_link()
3214 if (tb_switch_is_usb4(down->sw)) in tb_switch_configure_link()
3221 * @sw: Switch whose link is unconfigured
3223 * Sets the link unconfigured so the @sw will be disconnected if the
3226 void tb_switch_unconfigure_link(struct tb_switch *sw) in tb_switch_unconfigure_link() argument
3230 if (!tb_route(sw) || tb_switch_is_icm(sw)) in tb_switch_unconfigure_link()
3238 up = tb_upstream_port(sw); in tb_switch_unconfigure_link()
3240 if (tb_switch_is_usb4(down->sw)) in tb_switch_unconfigure_link()
3245 if (sw->is_unplugged) in tb_switch_unconfigure_link()
3248 up = tb_upstream_port(sw); in tb_switch_unconfigure_link()
3249 if (tb_switch_is_usb4(up->sw)) in tb_switch_unconfigure_link()
3255 static void tb_switch_credits_init(struct tb_switch *sw) in tb_switch_credits_init() argument
3257 if (tb_switch_is_icm(sw)) in tb_switch_credits_init()
3259 if (!tb_switch_is_usb4(sw)) in tb_switch_credits_init()
3261 if (usb4_switch_credits_init(sw)) in tb_switch_credits_init()
3262 tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); in tb_switch_credits_init()
3265 static int tb_switch_port_hotplug_enable(struct tb_switch *sw) in tb_switch_port_hotplug_enable() argument
3269 if (tb_switch_is_icm(sw)) in tb_switch_port_hotplug_enable()
3272 tb_switch_for_each_port(sw, port) { in tb_switch_port_hotplug_enable()
3287 * @sw: Switch to add
3297 int tb_switch_add(struct tb_switch *sw) in tb_switch_add() argument
3308 ret = tb_switch_add_dma_port(sw); in tb_switch_add()
3310 dev_err(&sw->dev, "failed to add DMA port\n"); in tb_switch_add()
3314 if (!sw->safe_mode) { in tb_switch_add()
3315 tb_switch_credits_init(sw); in tb_switch_add()
3318 ret = tb_drom_read(sw); in tb_switch_add()
3320 dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); in tb_switch_add()
3321 tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); in tb_switch_add()
3323 ret = tb_switch_set_uuid(sw); in tb_switch_add()
3325 dev_err(&sw->dev, "failed to set UUID\n"); in tb_switch_add()
3329 for (i = 0; i <= sw->config.max_port_number; i++) { in tb_switch_add()
3330 if (sw->ports[i].disabled) { in tb_switch_add()
3331 tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); in tb_switch_add()
3334 ret = tb_init_port(&sw->ports[i]); in tb_switch_add()
3336 dev_err(&sw->dev, "failed to initialize port %d\n", i); in tb_switch_add()
3341 tb_check_quirks(sw); in tb_switch_add()
3343 tb_switch_default_link_ports(sw); in tb_switch_add()
3345 ret = tb_switch_update_link_attributes(sw); in tb_switch_add()
3349 tb_switch_link_init(sw); in tb_switch_add()
3351 ret = tb_switch_clx_init(sw); in tb_switch_add()
3355 ret = tb_switch_tmu_init(sw); in tb_switch_add()
3360 ret = tb_switch_port_hotplug_enable(sw); in tb_switch_add()
3364 ret = device_add(&sw->dev); in tb_switch_add()
3366 dev_err(&sw->dev, "failed to add device: %d\n", ret); in tb_switch_add()
3370 if (tb_route(sw)) { in tb_switch_add()
3371 dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", in tb_switch_add()
3372 sw->vendor, sw->device); in tb_switch_add()
3373 if (sw->vendor_name && sw->device_name) in tb_switch_add()
3374 dev_info(&sw->dev, "%s %s\n", sw->vendor_name, in tb_switch_add()
3375 sw->device_name); in tb_switch_add()
3378 ret = usb4_switch_add_ports(sw); in tb_switch_add()
3380 dev_err(&sw->dev, "failed to add USB4 ports\n"); in tb_switch_add()
3384 ret = tb_switch_nvm_add(sw); in tb_switch_add()
3386 dev_err(&sw->dev, "failed to add NVM devices\n"); in tb_switch_add()
3395 device_init_wakeup(&sw->dev, true); in tb_switch_add()
3397 pm_runtime_set_active(&sw->dev); in tb_switch_add()
3398 if (sw->rpm) { in tb_switch_add()
3399 pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); in tb_switch_add()
3400 pm_runtime_use_autosuspend(&sw->dev); in tb_switch_add()
3401 pm_runtime_mark_last_busy(&sw->dev); in tb_switch_add()
3402 pm_runtime_enable(&sw->dev); in tb_switch_add()
3403 pm_request_autosuspend(&sw->dev); in tb_switch_add()
3406 tb_switch_debugfs_init(sw); in tb_switch_add()
3410 usb4_switch_remove_ports(sw); in tb_switch_add()
3412 device_del(&sw->dev); in tb_switch_add()
3419 * @sw: Switch to remove
3425 void tb_switch_remove(struct tb_switch *sw) in tb_switch_remove() argument
3429 tb_switch_debugfs_remove(sw); in tb_switch_remove()
3431 if (sw->rpm) { in tb_switch_remove()
3432 pm_runtime_get_sync(&sw->dev); in tb_switch_remove()
3433 pm_runtime_disable(&sw->dev); in tb_switch_remove()
3437 tb_switch_for_each_port(sw, port) { in tb_switch_remove()
3439 tb_switch_remove(port->remote->sw); in tb_switch_remove()
3451 if (!sw->is_unplugged) in tb_switch_remove()
3452 tb_plug_events_active(sw, false); in tb_switch_remove()
3454 tb_switch_nvm_remove(sw); in tb_switch_remove()
3455 usb4_switch_remove_ports(sw); in tb_switch_remove()
3457 if (tb_route(sw)) in tb_switch_remove()
3458 dev_info(&sw->dev, "device disconnected\n"); in tb_switch_remove()
3459 device_unregister(&sw->dev); in tb_switch_remove()
3464 * @sw: Router to mark unplugged
3466 void tb_sw_set_unplugged(struct tb_switch *sw) in tb_sw_set_unplugged() argument
3470 if (sw == sw->tb->root_switch) { in tb_sw_set_unplugged()
3471 tb_sw_WARN(sw, "cannot unplug root switch\n"); in tb_sw_set_unplugged()
3474 if (sw->is_unplugged) { in tb_sw_set_unplugged()
3475 tb_sw_WARN(sw, "is_unplugged already set\n"); in tb_sw_set_unplugged()
3478 sw->is_unplugged = true; in tb_sw_set_unplugged()
3479 tb_switch_for_each_port(sw, port) { in tb_sw_set_unplugged()
3481 tb_sw_set_unplugged(port->remote->sw); in tb_sw_set_unplugged()
3487 static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) in tb_switch_set_wake() argument
3490 tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); in tb_switch_set_wake()
3492 tb_sw_dbg(sw, "disabling wakeup\n"); in tb_switch_set_wake()
3494 if (tb_switch_is_usb4(sw)) in tb_switch_set_wake()
3495 return usb4_switch_set_wake(sw, flags, runtime); in tb_switch_set_wake()
3496 return tb_lc_set_wake(sw, flags); in tb_switch_set_wake()
3499 static void tb_switch_check_wakes(struct tb_switch *sw) in tb_switch_check_wakes() argument
3501 if (device_may_wakeup(&sw->dev)) { in tb_switch_check_wakes()
3502 if (tb_switch_is_usb4(sw)) in tb_switch_check_wakes()
3503 usb4_switch_check_wakes(sw); in tb_switch_check_wakes()
3509 * @sw: Switch to resume
3520 int tb_switch_resume(struct tb_switch *sw, bool runtime) in tb_switch_resume() argument
3525 tb_sw_dbg(sw, "resuming switch\n"); in tb_switch_resume()
3531 if (tb_route(sw)) { in tb_switch_resume()
3539 err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); in tb_switch_resume()
3541 tb_sw_info(sw, "switch not present anymore\n"); in tb_switch_resume()
3546 if (!sw->uid) in tb_switch_resume()
3549 if (tb_switch_is_usb4(sw)) in tb_switch_resume()
3550 err = usb4_switch_read_uid(sw, &uid); in tb_switch_resume()
3552 err = tb_drom_read_uid_only(sw, &uid); in tb_switch_resume()
3554 tb_sw_warn(sw, "uid read failed\n"); in tb_switch_resume()
3557 if (sw->uid != uid) { in tb_switch_resume()
3558 tb_sw_info(sw, in tb_switch_resume()
3560 sw->uid, uid); in tb_switch_resume()
3565 err = tb_switch_configure(sw); in tb_switch_resume()
3570 tb_switch_check_wakes(sw); in tb_switch_resume()
3573 tb_switch_set_wake(sw, 0, true); in tb_switch_resume()
3575 err = tb_switch_tmu_init(sw); in tb_switch_resume()
3580 tb_switch_for_each_port(sw, port) { in tb_switch_resume()
3591 tb_sw_set_unplugged(port->remote->sw); in tb_switch_resume()
3602 tb_switch_resume(port->remote->sw, runtime)) { in tb_switch_resume()
3605 tb_sw_set_unplugged(port->remote->sw); in tb_switch_resume()
3614 * @sw: Switch to suspend
3618 * value of @runtime and then sets sleep bit for the router. If @sw is
3622 void tb_switch_suspend(struct tb_switch *sw, bool runtime) in tb_switch_suspend() argument
3628 tb_sw_dbg(sw, "suspending switch\n"); in tb_switch_suspend()
3634 tb_switch_clx_disable(sw); in tb_switch_suspend()
3636 err = tb_plug_events_active(sw, false); in tb_switch_suspend()
3640 tb_switch_for_each_port(sw, port) { in tb_switch_suspend()
3642 tb_switch_suspend(port->remote->sw, runtime); in tb_switch_suspend()
3650 } else if (device_may_wakeup(&sw->dev)) { in tb_switch_suspend()
3655 tb_switch_set_wake(sw, flags, runtime); in tb_switch_suspend()
3657 if (tb_switch_is_usb4(sw)) in tb_switch_suspend()
3658 usb4_switch_set_sleep(sw); in tb_switch_suspend()
3660 tb_lc_set_sleep(sw); in tb_switch_suspend()
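
tb_switch_suspend() and tb_switch_resume() both recurse into child routers themselves (see the tb_switch_for_each_port() loops above), so a connection manager only touches the root router. A hedged sketch of the system-sleep pairing; the function names are hypothetical and this mirrors, rather than quotes, how the driver's sleep paths are expected to use them:

/* Assumes in-tree build with "tb.h" included. */
static void example_domain_suspend(struct tb *tb)
{
	/* Walks every child router, arms wakes, sets the sleep bit. */
	tb_switch_suspend(tb->root_switch, false);
}

static int example_domain_resume(struct tb *tb)
{
	/* Re-checks UIDs and marks vanished routers unplugged. */
	return tb_switch_resume(tb->root_switch, false);
}
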
3665 * @sw: Switch whose DP resource is queried
3673 bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_query_dp_resource() argument
3675 if (tb_switch_is_usb4(sw)) in tb_switch_query_dp_resource()
3676 return usb4_switch_query_dp_resource(sw, in); in tb_switch_query_dp_resource()
3677 return tb_lc_dp_sink_query(sw, in); in tb_switch_query_dp_resource()
3682 * @sw: Switch whose DP resource is allocated
3690 int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_alloc_dp_resource() argument
3694 if (tb_switch_is_usb4(sw)) in tb_switch_alloc_dp_resource()
3695 ret = usb4_switch_alloc_dp_resource(sw, in); in tb_switch_alloc_dp_resource()
3697 ret = tb_lc_dp_sink_alloc(sw, in); in tb_switch_alloc_dp_resource()
3700 tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", in tb_switch_alloc_dp_resource()
3703 tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); in tb_switch_alloc_dp_resource()
3710 * @sw: Switch whose DP resource is de-allocated
3716 void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) in tb_switch_dealloc_dp_resource() argument
3720 if (tb_switch_is_usb4(sw)) in tb_switch_dealloc_dp_resource()
3721 ret = usb4_switch_dealloc_dp_resource(sw, in); in tb_switch_dealloc_dp_resource()
3723 ret = tb_lc_dp_sink_dealloc(sw, in); in tb_switch_dealloc_dp_resource()
3726 tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", in tb_switch_dealloc_dp_resource()
3729 tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); in tb_switch_dealloc_dp_resource()
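
The three DP resource helpers above (kdocs at 3665, 3682 and 3710) form a query/allocate/release handshake around a DP IN adapter. A hedged sketch of the expected calling pattern (example_claim_dp() is hypothetical):

#include <linux/errno.h>
/* Plus the driver's "tb.h". */

static int example_claim_dp(struct tb_switch *sw, struct tb_port *in)
{
	/* Do not allocate a sink that is already in use. */
	if (!tb_switch_query_dp_resource(sw, in))
		return -EBUSY;

	return tb_switch_alloc_dp_resource(sw, in);
	/* On tunnel teardown: tb_switch_dealloc_dp_resource(sw, in); */
}
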
3742 struct tb_switch *sw = tb_to_switch(dev); in tb_switch_match() local
3745 if (!sw) in tb_switch_match()
3747 if (sw->tb != lookup->tb) in tb_switch_match()
3751 return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); in tb_switch_match()
3754 return sw->config.route_lo == lower_32_bits(lookup->route) && in tb_switch_match()
3755 sw->config.route_hi == upper_32_bits(lookup->route); in tb_switch_match()
3760 return !sw->depth; in tb_switch_match()
3762 return sw->link == lookup->link && sw->depth == lookup->depth; in tb_switch_match()
3849 * tb_switch_find_port() - return the first port of @type on @sw or NULL
3850 * @sw: Switch to find the port from
3855 struct tb_port *tb_switch_find_port(struct tb_switch *sw, in tb_switch_find_port() argument
3860 tb_switch_for_each_port(sw, port) { in tb_switch_find_port()
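
tb_switch_find_port() (kdoc at 3849 above) linearly scans the router's adapters for the first one of the requested type. A hedged usage sketch; example_find_pcie_down() is hypothetical and TB_TYPE_PCIE_DOWN comes from the driver's register definitions:

/* Assumes in-tree build with "tb.h" included. */
static struct tb_port *example_find_pcie_down(struct tb_switch *sw)
{
	/* NULL when the router has no PCIe downstream adapter. */
	return tb_switch_find_port(sw, TB_TYPE_PCIE_DOWN);
}
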
3872 static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, in tb_switch_pcie_bridge_write() argument
3878 if (sw->generation != 3) in tb_switch_pcie_bridge_write()
3881 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; in tb_switch_pcie_bridge_write()
3882 ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3893 offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; in tb_switch_pcie_bridge_write()
3895 ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3899 ret = tb_switch_wait_for_bit(sw, offset, in tb_switch_pcie_bridge_write()
3904 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); in tb_switch_pcie_bridge_write()
3916 * @sw: Router to enable PCIe L1
3925 int tb_switch_pcie_l1_enable(struct tb_switch *sw) in tb_switch_pcie_l1_enable() argument
3927 struct tb_switch *parent = tb_switch_parent(sw); in tb_switch_pcie_l1_enable()
3930 if (!tb_route(sw)) in tb_switch_pcie_l1_enable()
3933 if (!tb_switch_is_titan_ridge(sw)) in tb_switch_pcie_l1_enable()
3941 ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); in tb_switch_pcie_l1_enable()
3946 return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); in tb_switch_pcie_l1_enable()
3951 * @sw: Router whose xHCI to connect
3961 int tb_switch_xhci_connect(struct tb_switch *sw) in tb_switch_xhci_connect() argument
3966 if (sw->generation != 3) in tb_switch_xhci_connect()
3969 port1 = &sw->ports[1]; in tb_switch_xhci_connect()
3970 port3 = &sw->ports[3]; in tb_switch_xhci_connect()
3972 if (tb_switch_is_alpine_ridge(sw)) { in tb_switch_xhci_connect()
3988 } else if (tb_switch_is_titan_ridge(sw)) { in tb_switch_xhci_connect()
4000 * @sw: Router whose xHCI to disconnect
4005 void tb_switch_xhci_disconnect(struct tb_switch *sw) in tb_switch_xhci_disconnect() argument
4007 if (sw->generation == 3) { in tb_switch_xhci_disconnect()
4008 struct tb_port *port1 = &sw->ports[1]; in tb_switch_xhci_disconnect()
4009 struct tb_port *port3 = &sw->ports[3]; in tb_switch_xhci_disconnect()
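
tb_switch_xhci_connect()/tb_switch_xhci_disconnect() (kdocs at 3951 and 4000 above) gate the internal xHCI controller on generation 3 routers and do nothing elsewhere, so callers need no generation checks of their own. A hedged sketch of a hotplug-path caller (example_set_xhci() is hypothetical):

/* Assumes in-tree build with "tb.h" included. */
static void example_set_xhci(struct tb_switch *sw, bool connected)
{
	if (connected) {
		if (tb_switch_xhci_connect(sw))
			tb_sw_warn(sw, "failed to connect xHCI\n");
	} else {
		tb_switch_xhci_disconnect(sw);
	}
}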