xref: /linux/drivers/thunderbolt/tunnel.h (revision 68a052239fc4b351e961f698b824f7654a346091)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Thunderbolt driver - Tunneling support
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #ifndef TB_TUNNEL_H_
10 #define TB_TUNNEL_H_
11 
12 #include "tb.h"
13 
/**
 * enum tb_tunnel_type - Type of a tunnel
 * @TB_TUNNEL_PCI: PCIe tunnel
 * @TB_TUNNEL_DP: DisplayPort tunnel
 * @TB_TUNNEL_DMA: DMA tunnel (uses host NHI rings and paths)
 * @TB_TUNNEL_USB3: USB3 tunnel
 */
enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};
20 
/**
 * enum tb_tunnel_state - State of a tunnel
 * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() has not been called for the tunnel
 * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the
 *			  tunnel but it is not yet fully active (e.g. for DP
 *			  tunnels the DPRX capabilities read may still be in
 *			  progress)
 * @TB_TUNNEL_ACTIVE: The tunnel is fully active
 */
enum tb_tunnel_state {
	TB_TUNNEL_INACTIVE,
	TB_TUNNEL_ACTIVATING,
	TB_TUNNEL_ACTIVE,
};
32 
/**
 * struct tb_tunnel - Tunnel between two ports
 * @kref: Reference count
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered, incomplete
 *	      tunnels this may be %NULL or a null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @pre_activate: Optional tunnel specific initialization called before
 *		  activation. Can touch hardware.
 * @activate: Optional tunnel specific activation/deactivation
 * @post_deactivate: Optional tunnel specific de-initialization called
 *		     after deactivation. Can touch hardware.
 * @destroy: Optional tunnel specific callback called when the tunnel
 *	     memory is being released. Should not touch hardware.
 * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
 * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
 * @alloc_bandwidth: Change tunnel bandwidth allocation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @state: Current state of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 * @bw_mode: If set, the DP bandwidth allocation mode registers can be
 *	     used to determine consumed and allocated bandwidth
 * @dprx_started: DPRX negotiation was started (tb_dp_dprx_start() was called
 *		  for it)
 * @dprx_canceled: Was DPRX capabilities read poll canceled
 * @dprx_timeout: If set DPRX capabilities read poll work will timeout after
 *		  this passes
 * @dprx_work: Worker that is scheduled to poll completion of DPRX
 *	       capabilities read
 * @callback: Optional callback called when DP tunnel is fully activated
 * @callback_data: Optional data for @callback
 */
struct tb_tunnel {
	struct kref kref;
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*pre_activate)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	void (*post_deactivate)(struct tb_tunnel *tunnel);
	void (*destroy)(struct tb_tunnel *tunnel);
	int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
				 int *max_down);
	int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
				   int *allocated_down);
	int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
			       int *alloc_down);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	enum tb_tunnel_state state;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
	bool bw_mode;
	bool dprx_started;
	bool dprx_canceled;
	ktime_t dprx_timeout;
	struct delayed_work dprx_work;
	void (*callback)(struct tb_tunnel *tunnel, void *data);
	void *callback_data;
};
111 
/* PCIe tunnels */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down);
/* DisplayPort tunnels */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down,
				     void (*callback)(struct tb_tunnel *, void *),
				     void *callback_data);
/* DMA tunnels */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);
/* USB3 tunnels */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

/* Generic tunnel lifecycle */
void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
140 
141 /**
142  * tb_tunnel_is_active() - Is tunnel fully activated
143  * @tunnel: Tunnel to check
144  *
145  * Return: %true if @tunnel is fully activated.
146  *
147  * Note for DP tunnels this returns %true only once the DPRX capabilities
148  * read has been issued successfully. For other tunnels, this function
149  * returns %true pretty much once tb_tunnel_activate() returns successfully.
150  */
151 static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
152 {
153 	return tunnel->state == TB_TUNNEL_ACTIVE;
154 }
155 
/* Tunnel queries */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);
/* Bandwidth management */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);
171 
172 static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
173 {
174 	return tunnel->type == TB_TUNNEL_PCI;
175 }
176 
177 static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
178 {
179 	return tunnel->type == TB_TUNNEL_DP;
180 }
181 
182 static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
183 {
184 	return tunnel->type == TB_TUNNEL_DMA;
185 }
186 
187 static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
188 {
189 	return tunnel->type == TB_TUNNEL_USB3;
190 }
191 
192 static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
193 {
194 	return tb_port_path_direction_downstream(tunnel->src_port,
195 						 tunnel->dst_port);
196 }
197 
/**
 * enum tb_tunnel_event - Tunnel related events
 * @TB_TUNNEL_ACTIVATED: A tunnel was activated
 * @TB_TUNNEL_CHANGED: There is a tunneling change in the domain. Includes
 *		       full %TUNNEL_DETAILS if the tunnel in question is known
 *		       (ICM does not provide that information).
 * @TB_TUNNEL_DEACTIVATED: A tunnel was torn down
 * @TB_TUNNEL_LOW_BANDWIDTH: Tunnel bandwidth is not optimal
 * @TB_TUNNEL_NO_BANDWIDTH: There is not enough bandwidth for a tunnel
 */
enum tb_tunnel_event {
	TB_TUNNEL_ACTIVATED,
	TB_TUNNEL_CHANGED,
	TB_TUNNEL_DEACTIVATED,
	TB_TUNNEL_LOW_BANDWIDTH,
	TB_TUNNEL_NO_BANDWIDTH,
};
215 
/* Sends a tunnel event notification for the given endpoints */
void tb_tunnel_event(struct tb *tb, enum tb_tunnel_event event,
		     enum tb_tunnel_type type,
		     const struct tb_port *src_port,
		     const struct tb_port *dst_port);

/* Human readable name of the tunnel type (used by the logging macros below) */
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
222 
/*
 * Helper for the tb_tunnel_*() logging macros below. Prefixes the
 * caller-supplied message with the tunnel endpoints (route and adapter
 * number of both ports) and the tunnel type. @tunnel is evaluated only
 * once via the local __tunnel variable.
 */
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_type_name(__tunnel),			\
		      ## arg);                                          \
	} while (0)
234 
/* Tunnel logging macros at the various severity levels */
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
243 
244 #endif
245