/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#ifndef TB_TUNNEL_H_
#define TB_TUNNEL_H_

#include "tb.h"

enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};

/**
 * enum tb_tunnel_state - State of a tunnel
 * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() has not been called for the tunnel
 * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the tunnel
 * @TB_TUNNEL_ACTIVE: The tunnel is fully active
 */
enum tb_tunnel_state {
	TB_TUNNEL_INACTIVE,
	TB_TUNNEL_ACTIVATING,
	TB_TUNNEL_ACTIVE,
};

/**
 * struct tb_tunnel - Tunnel between two ports
 * @kref: Reference count
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered incomplete
 *	      tunnels this may be %NULL or a null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @pre_activate: Optional tunnel specific initialization called before
 *		  activation. Can touch hardware.
 * @activate: Optional tunnel specific activation/deactivation
 * @post_deactivate: Optional tunnel specific de-initialization called
 *		     after deactivation. Can touch hardware.
 * @destroy: Optional tunnel specific callback called when the tunnel
 *	     memory is being released. Should not touch hardware.
 * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
 * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
 * @alloc_bandwidth: Change tunnel bandwidth allocation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @state: Current state of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 * @bw_mode: DP bandwidth allocation mode registers can be used to
 *	     determine consumed and allocated bandwidth
 * @dprx_canceled: Was DPRX capabilities read poll canceled
 * @dprx_timeout: If set, the DPRX capabilities read poll work will time
 *		  out after this passes
 * @dprx_work: Worker that is scheduled to poll completion of DPRX
 *	       capabilities read
 * @callback: Optional callback called when DP tunnel is fully activated
 * @callback_data: Optional data for @callback
 */
struct tb_tunnel {
	struct kref kref;
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*pre_activate)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	void (*post_deactivate)(struct tb_tunnel *tunnel);
	void (*destroy)(struct tb_tunnel *tunnel);
	int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
				 int *max_down);
	int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
				   int *allocated_down);
	int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
			       int *alloc_down);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	enum tb_tunnel_state state;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
	bool bw_mode;
	bool dprx_canceled;
	ktime_t dprx_timeout;
	struct delayed_work dprx_work;
	void (*callback)(struct tb_tunnel *tunnel, void *data);
	void *callback_data;
};

struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down,
				     void (*callback)(struct tb_tunnel *, void *),
				     void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
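
/*
 * Illustrative sketch (not part of this header's API): a caller such as
 * the software connection manager would typically drive a tunnel through
 * its lifecycle roughly as below. The @tcm tunnel list and the chosen
 * error codes are assumptions used only for this example.
 *
 *	tunnel = tb_tunnel_alloc_pci(tb, up, down);
 *	if (!tunnel)
 *		return -ENOMEM;
 *
 *	if (tb_tunnel_activate(tunnel)) {
 *		tb_tunnel_put(tunnel);
 *		return -EIO;
 *	}
 *	list_add_tail(&tunnel->list, &tcm->tunnel_list);
 *
 *	...
 *
 *	tb_tunnel_deactivate(tunnel);
 *	list_del(&tunnel->list);
 *	tb_tunnel_put(tunnel);
 */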

/**
 * tb_tunnel_is_active() - Is tunnel fully activated
 * @tunnel: Tunnel to check
 *
 * Returns %true if @tunnel is fully activated. For tunnels other than DP
 * this is pretty much once tb_tunnel_activate() returns successfully.
 * However, for DP tunnels this returns %true only once the DPRX
 * capabilities read has been completed successfully.
 */
static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	return tunnel->state == TB_TUNNEL_ACTIVE;
}

bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);
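
/*
 * Illustrative sketch (not part of this header's API): one way a caller
 * might total consumed bandwidth over its tunnels. The @tcm tunnel list
 * and the decision to skip tunnels that are not yet fully active are
 * assumptions made only for this example.
 *
 *	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
 *		int up, down;
 *
 *		if (!tb_tunnel_is_active(tunnel))
 *			continue;
 *
 *		if (tb_tunnel_consumed_bandwidth(tunnel, &up, &down))
 *			continue;
 *
 *		consumed_up += up;
 *		consumed_down += down;
 *	}
 */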

static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_PCI;
}

static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DP;
}

static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_DMA;
}

static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
{
	return tunnel->type == TB_TUNNEL_USB3;
}

static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
{
	return tb_port_path_direction_downstream(tunnel->src_port,
						 tunnel->dst_port);
}

const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)			\
	do {								\
		struct tb_tunnel *__tunnel = (tunnel);			\
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,	\
		      tb_route(__tunnel->src_port->sw),			\
		      __tunnel->src_port->port,				\
		      tb_route(__tunnel->dst_port->sw),			\
		      __tunnel->dst_port->port,				\
		      tb_tunnel_type_name(__tunnel),			\
		      ## arg);						\
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

#endif