// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>

#include "tunnel.h"
#include "tb.h"

/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID			8

#define TB_PCI_PATH_DOWN		0
#define TB_PCI_PATH_UP			1

/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID			8

#define TB_USB3_PATH_DOWN		0
#define TB_USB3_PATH_UP			1

/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID		8
#define TB_DP_AUX_RX_HOPID		8
#define TB_DP_VIDEO_HOPID		9

#define TB_DP_VIDEO_PATH_OUT		0
#define TB_DP_AUX_PATH_OUT		1
#define TB_DP_AUX_PATH_IN		2

#define TB_DMA_PATH_OUT			0
#define TB_DMA_PATH_IN			1

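/* Indexed by enum tb_tunnel_type, so the order must match tunnel.h */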
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };

#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_names[__tunnel->type],			\
		      ## arg);                                          \
	} while (0)

#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

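/*
 * Allocate a tunnel structure with room for @npaths paths. The caller
 * fills in src_port, dst_port and the per-type callbacks, and releases
 * everything with tb_tunnel_free().
 */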
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
					 enum tb_tunnel_type type)
{
	struct tb_tunnel *tunnel;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel)
		return NULL;

	tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
	if (!tunnel->paths) {
		tb_tunnel_free(tunnel);
		return NULL;
	}

	INIT_LIST_HEAD(&tunnel->list);
	tunnel->tb = tb;
	tunnel->npaths = npaths;
	tunnel->type = type;

	return tunnel;
}

static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_pci_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_pcie_up(tunnel->dst_port))
		return tb_pci_port_enable(tunnel->dst_port, activate);

	return 0;
}

static int tb_initial_credits(const struct tb_switch *sw)
{
	/* If the path is complete, sw is not NULL */
	if (sw) {
		/* More credits for faster link */
		switch (sw->link_speed * sw->link_width) {
		case 40:
			return 32;
		case 20:
			return 24;
		}
	}

	return 16;
}

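/*
 * PCIe and USB3 tunnels always span a single link (each router has its
 * own adapter pair), so the path init helpers below only need to set
 * initial credits for the two hops of the path.
 */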
static void tb_pci_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 1;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_pci() - Discover existing PCIe tunnels
 * @tb: Pointer to the domain structure
 * @down: PCIe downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the PCIe upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_pci_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
				&tunnel->dst_port, "PCIe Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_pci_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_PCI_PATH_UP] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
				"PCIe Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_PCI_PATH_DOWN] = path;
	tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_pcie_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a PCIe adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_pci() - allocate a pci tunnel
 * @tb: Pointer to the domain structure
 * @up: PCIe upstream adapter port
 * @down: PCIe downstream adapter port
 *
 * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
 * TB_TYPE_PCIE_DOWN.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_pci_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
			     "PCIe Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
			     "PCIe Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_pci_init_path(path);
	tunnel->paths[TB_PCI_PATH_UP] = path;

	return tunnel;
}

static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
	/* Titan Ridge DP adapters need the same treatment as USB4 */
	return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}

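/*
 * Set the CM handshake bit in the DP OUT adapter and poll until the
 * adapter has cleared it again. Only needed when both ends are USB4
 * (or Titan Ridge); other adapters skip the handshake entirely.
 */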
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
{
	int timeout = 10;
	u32 val;
	int ret;

	/* Both ends need to support this */
	if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
		return 0;

	ret = tb_port_read(out, &val, TB_CFG_PORT,
			   out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;

	ret = tb_port_write(out, &val, TB_CFG_PORT,
			    out->cap_adap + DP_STATUS_CTRL, 1);
	if (ret)
		return ret;

	do {
		ret = tb_port_read(out, &val, TB_CFG_PORT,
				   out->cap_adap + DP_STATUS_CTRL, 1);
		if (ret)
			return ret;
		if (!(val & DP_STATUS_CTRL_CMHS))
			return 0;
		usleep_range(10, 100);
	} while (timeout--);

	return -ETIMEDOUT;
}

static inline u32 tb_dp_cap_get_rate(u32 val)
{
	u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;

	switch (rate) {
	case DP_COMMON_CAP_RATE_RBR:
		return 1620;
	case DP_COMMON_CAP_RATE_HBR:
		return 2700;
	case DP_COMMON_CAP_RATE_HBR2:
		return 5400;
	case DP_COMMON_CAP_RATE_HBR3:
		return 8100;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
	val &= ~DP_COMMON_CAP_RATE_MASK;
	switch (rate) {
	default:
		WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
		/* Fallthrough */
	case 1620:
		val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 2700:
		val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 5400:
		val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	case 8100:
		val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
		break;
	}
	return val;
}

static inline u32 tb_dp_cap_get_lanes(u32 val)
{
	u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;

	switch (lanes) {
	case DP_COMMON_CAP_1_LANE:
		return 1;
	case DP_COMMON_CAP_2_LANES:
		return 2;
	case DP_COMMON_CAP_4_LANES:
		return 4;
	default:
		return 0;
	}
}

static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
	val &= ~DP_COMMON_CAP_LANES_MASK;
	switch (lanes) {
	default:
		WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
		     lanes);
		/* Fallthrough */
	case 1:
		val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
		break;
	}
	return val;
}

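/*
 * Returns the bandwidth available for DP payload in Mb/s: the raw link
 * rate times the lane count minus the 8b/10b line coding overhead.
 * For example HBR2 x4 gives 5400 * 4 * 8 / 10 = 17280 Mb/s, matching
 * the table in tb_dp_reduce_bandwidth() below.
 */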
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b encoding */
	return rate * lanes * 8 / 10;
}

static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
				  u32 out_rate, u32 out_lanes, u32 *new_rate,
				  u32 *new_lanes)
{
	static const u32 dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, /* 25920 Mb/s */
		{ 5400, 4 }, /* 17280 Mb/s */
		{ 8100, 2 }, /* 12960 Mb/s */
		{ 2700, 4 }, /* 8640 Mb/s */
		{ 5400, 2 }, /* 8640 Mb/s */
		{ 8100, 1 }, /* 6480 Mb/s */
		{ 1620, 4 }, /* 5184 Mb/s */
		{ 5400, 1 }, /* 4320 Mb/s */
		{ 2700, 2 }, /* 4320 Mb/s */
		{ 1620, 2 }, /* 2592 Mb/s */
		{ 2700, 1 }, /* 2160 Mb/s */
		{ 1620, 1 }, /* 1296 Mb/s */
	};
	unsigned int i;

	/*
	 * Find a combination that can fit into max_bw and does not
	 * exceed the maximum rate and lanes supported by the DP OUT and
	 * DP IN adapters.
	 */
	for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
		if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
			continue;

		if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
			continue;

		if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}

	return -ENOSR;
}

static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
	u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
	struct tb_port *out = tunnel->dst_port;
	struct tb_port *in = tunnel->src_port;
	int ret;

	/*
	 * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
	 * newer generation hardware.
	 */
	if (in->sw->generation < 2 || out->sw->generation < 2)
		return 0;

	/*
	 * Perform connection manager handshake between IN and OUT ports
	 * before capabilities exchange can take place.
	 */
	ret = tb_dp_cm_handshake(in, out);
	if (ret)
		return ret;

	/* Read both DP_LOCAL_CAP registers */
	ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
			   in->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
			   out->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return ret;

	/* Write IN local caps to OUT remote caps */
	ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
			    out->cap_adap + DP_REMOTE_CAP, 1);
	if (ret)
		return ret;

	in_rate = tb_dp_cap_get_rate(in_dp_cap);
	in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
	tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));

	/*
	 * If the tunnel bandwidth is limited (max_bw is set) then see
	 * if we need to reduce bandwidth to fit there.
	 */
	out_rate = tb_dp_cap_get_rate(out_dp_cap);
	out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
	bw = tb_dp_bandwidth(out_rate, out_lanes);
	tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
		    out_rate, out_lanes, bw);

	if (tunnel->max_bw && bw > tunnel->max_bw) {
		u32 new_rate, new_lanes, new_bw;

		ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes,
					     out_rate, out_lanes, &new_rate,
					     &new_lanes);
		if (ret) {
			tb_port_info(out, "not enough bandwidth for DP tunnel\n");
			return ret;
		}

		new_bw = tb_dp_bandwidth(new_rate, new_lanes);
		tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
			    new_rate, new_lanes, new_bw);

		/*
		 * Set new rate and number of lanes before writing it to
		 * the IN port remote caps.
		 */
		out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
		out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
	}

	return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
			     in->cap_adap + DP_REMOTE_CAP, 1);
}

static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
	int ret;

	if (active) {
		struct tb_path **paths;
		int last;

		paths = tunnel->paths;
		last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;

		tb_dp_port_set_hops(tunnel->src_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);

		tb_dp_port_set_hops(tunnel->dst_port,
			paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
			paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
			paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
	} else {
		tb_dp_port_hpd_clear(tunnel->src_port);
		tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
		if (tb_port_is_dpout(tunnel->dst_port))
			tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
	}

	ret = tb_dp_port_enable(tunnel->src_port, active);
	if (ret)
		return ret;

	if (tb_port_is_dpout(tunnel->dst_port))
		return tb_dp_port_enable(tunnel->dst_port, active);

	return 0;
}

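/*
 * Estimate of the bandwidth consumed by the DP tunnel in Mb/s. On USB4
 * (and Titan Ridge) hardware we wait for the DPRX capabilities read to
 * finish and use the negotiated rate and lane count from DP_COMMON_CAP.
 * On other generation 2+ hardware the (possibly reduced) DP_REMOTE_CAP
 * copy is used instead, and legacy devices are reported as consuming 0.
 */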
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	struct tb_port *in = tunnel->src_port;
	const struct tb_switch *sw = in->sw;
	u32 val, rate = 0, lanes = 0;
	int ret;

	if (tb_dp_is_usb4(sw)) {
		int timeout = 10;

		/*
		 * Wait for DPRX done. Normally it should be already set
		 * for an active tunnel.
		 */
		do {
			ret = tb_port_read(in, &val, TB_CFG_PORT,
					   in->cap_adap + DP_COMMON_CAP, 1);
			if (ret)
				return ret;

			if (val & DP_COMMON_CAP_DPRX_DONE) {
				rate = tb_dp_cap_get_rate(val);
				lanes = tb_dp_cap_get_lanes(val);
				break;
			}
			msleep(250);
		} while (timeout--);

		if (timeout < 0)
			return -ETIMEDOUT;
	} else if (sw->generation >= 2) {
		/*
		 * Read from the copied remote cap so that we take into
		 * account if capabilities were reduced during exchange.
		 */
		ret = tb_port_read(in, &val, TB_CFG_PORT,
				   in->cap_adap + DP_REMOTE_CAP, 1);
		if (ret)
			return ret;

		rate = tb_dp_cap_get_rate(val);
		lanes = tb_dp_cap_get_lanes(val);
	} else {
		/* No bandwidth management for legacy devices */
		return 0;
	}

	return tb_dp_bandwidth(rate, lanes);
}

static void tb_dp_init_aux_path(struct tb_path *path)
{
	int i;

	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 2;
	path->weight = 1;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = 1;
}

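/*
 * For a discovered tunnel, keep the non-flow-controlled buffer
 * allocation that is already programmed in the adapter. For a new
 * tunnel, take at most 12 buffers for the video path and always leave
 * at least two for the AUX paths.
 */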
static void tb_dp_init_video_path(struct tb_path *path, bool discover)
{
	u32 nfc_credits = path->hops[0].in_port->config.nfc_credits;

	path->egress_fc_enable = TB_PATH_NONE;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 1;
	path->weight = 1;

	if (discover) {
		path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
	} else {
		u32 max_credits;

		max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
			ADP_CS_4_TOTAL_BUFFERS_SHIFT;
		/* Leave some credits for AUX path */
		path->nfc_credits = min(max_credits - 2, 12U);
	}
}

/**
 * tb_tunnel_discover_dp() - Discover existing Display Port tunnels
 * @tb: Pointer to the domain structure
 * @in: DP in adapter
 *
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back. Returns the discovered tunnel or %NULL if there was no
 * tunnel.
 *
 * Return: DP tunnel or %NULL if no tunnel found.
 */
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;
	struct tb_port *port;
	struct tb_path *path;

	if (!tb_dp_port_is_enabled(in))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;

	path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
				&tunnel->dst_port, "Video");
	if (!path) {
		/* Just disable the DP IN port */
		tb_dp_port_enable(in, false);
		goto err_free;
	}
	tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
	tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true);

	path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);

	path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
				&port, "AUX RX");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_DP_AUX_PATH_IN] = path;
	tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_dpout(tunnel->dst_port)) {
		tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_dp_port_is_enabled(tunnel->dst_port))
		goto err_deactivate;

	if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
		goto err_deactivate;

	if (port != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_dp() - allocate a Display Port tunnel
 * @tb: Pointer to the domain structure
 * @in: DP in adapter port
 * @out: DP out adapter port
 * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited)
 *
 * Allocates a tunnel between @in and @out that is capable of tunneling
 * Display Port traffic.
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int max_bw)
{
	struct tb_tunnel *tunnel;
	struct tb_path **paths;
	struct tb_path *path;

	if (WARN_ON(!in->cap_adap || !out->cap_adap))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
	if (!tunnel)
		return NULL;

	tunnel->init = tb_dp_xchg_caps;
	tunnel->activate = tb_dp_activate;
	tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
	tunnel->src_port = in;
	tunnel->dst_port = out;
	tunnel->max_bw = max_bw;

	paths = tunnel->paths;

	path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
			     1, "Video");
	if (!path)
		goto err_free;
	tb_dp_init_video_path(path, false);
	paths[TB_DP_VIDEO_PATH_OUT] = path;

	path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
			     TB_DP_AUX_TX_HOPID, 1, "AUX TX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_OUT] = path;

	path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
			     TB_DP_AUX_RX_HOPID, 1, "AUX RX");
	if (!path)
		goto err_free;
	tb_dp_init_aux_path(path);
	paths[TB_DP_AUX_PATH_IN] = path;

	return tunnel;

err_free:
	tb_tunnel_free(tunnel);
	return NULL;
}

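/*
 * XDomain DMA paths take their buffers from the NHI adapter's total
 * buffer pool, capped at 13 credits per hop.
 */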
static u32 tb_dma_credits(struct tb_port *nhi)
{
	u32 max_credits;

	max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
	return min(max_credits, 13U);
}

static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
{
	struct tb_port *nhi = tunnel->src_port;
	u32 credits;

	credits = active ? tb_dma_credits(nhi) : 0;
	return tb_port_set_initial_credits(nhi, credits);
}

static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
			     unsigned int efc, u32 credits)
{
	int i;

	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = isb;
	path->priority = 5;
	path->weight = 1;
	path->clear_fc = true;

	for (i = 0; i < path->path_length; i++)
		path->hops[i].initial_credits = credits;
}

/**
 * tb_tunnel_alloc_dma() - allocate a DMA tunnel
 * @tb: Pointer to the domain structure
 * @nhi: Host controller port
 * @dst: Destination null port which the other domain is connected to
 * @transmit_ring: NHI ring number used to send packets towards the
 *		   other domain
 * @transmit_path: HopID used for transmitting packets
 * @receive_ring: NHI ring number used to receive packets from the
 *		  other domain
 * @receive_path: HopID used for receiving packets
 *
 * Return: Returns a tb_tunnel on success or NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_ring,
				      int transmit_path, int receive_ring,
				      int receive_path)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;
	u32 credits;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;

	credits = tb_dma_credits(nhi);

	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
			 credits);
	tunnel->paths[TB_DMA_PATH_IN] = path;

	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
	tunnel->paths[TB_DMA_PATH_OUT] = path;

	return tunnel;
}

static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
	int res;

	res = tb_usb3_port_enable(tunnel->src_port, activate);
	if (res)
		return res;

	if (tb_port_is_usb3_up(tunnel->dst_port))
		return tb_usb3_port_enable(tunnel->dst_port, activate);

	return 0;
}

static void tb_usb3_init_path(struct tb_path *path)
{
	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_fc_enable = TB_PATH_ALL;
	path->ingress_shared_buffer = TB_PATH_NONE;
	path->priority = 3;
	path->weight = 3;
	path->drop_packages = 0;
	path->nfc_credits = 0;
	path->hops[0].initial_credits = 7;
	path->hops[1].initial_credits =
		tb_initial_credits(path->hops[1].in_port->sw);
}

/**
 * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
 * @tb: Pointer to the domain structure
 * @down: USB3 downstream adapter
 *
 * If @down adapter is active, follows the tunnel to the USB3 upstream
 * adapter and back. Returns the discovered tunnel or %NULL if there was
 * no tunnel.
 */
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	if (!tb_usb3_port_is_enabled(down))
		return NULL;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;

	/*
	 * Discover both paths even if they are not complete. We will
	 * clean them up by calling tb_tunnel_deactivate() below in that
	 * case.
	 */
	path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
				&tunnel->dst_port, "USB3 Up");
	if (!path) {
		/* Just disable the downstream port */
		tb_usb3_port_enable(down, false);
		goto err_free;
	}
	tunnel->paths[TB_USB3_PATH_UP] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);

	path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
				"USB3 Down");
	if (!path)
		goto err_deactivate;
	tunnel->paths[TB_USB3_PATH_DOWN] = path;
	tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);

	/* Validate that the tunnel is complete */
	if (!tb_port_is_usb3_up(tunnel->dst_port)) {
		tb_port_warn(tunnel->dst_port,
			     "path does not end on a USB3 adapter, cleaning up\n");
		goto err_deactivate;
	}

	if (down != tunnel->src_port) {
		tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
		goto err_deactivate;
	}

	if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
		tb_tunnel_warn(tunnel,
			       "tunnel is not fully activated, cleaning up\n");
		goto err_deactivate;
	}

	tb_tunnel_dbg(tunnel, "discovered\n");
	return tunnel;

err_deactivate:
	tb_tunnel_deactivate(tunnel);
err_free:
	tb_tunnel_free(tunnel);

	return NULL;
}

/**
 * tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
 * @tb: Pointer to the domain structure
 * @up: USB3 upstream adapter port
 * @down: USB3 downstream adapter port
 *
 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP and
 * %TB_TYPE_USB3_DOWN.
 *
 * Return: Returns a tb_tunnel on success or %NULL on failure.
 */
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down)
{
	struct tb_tunnel *tunnel;
	struct tb_path *path;

	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
	if (!tunnel)
		return NULL;

	tunnel->activate = tb_usb3_activate;
	tunnel->src_port = down;
	tunnel->dst_port = up;

	path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
			     "USB3 Down");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_DOWN] = path;

	path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
			     "USB3 Up");
	if (!path) {
		tb_tunnel_free(tunnel);
		return NULL;
	}
	tb_usb3_init_path(path);
	tunnel->paths[TB_USB3_PATH_UP] = path;

	return tunnel;
}

/**
 * tb_tunnel_free() - free a tunnel
 * @tunnel: Tunnel to be freed
 *
 * Frees a tunnel. The tunnel does not need to be deactivated.
 */
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
	int i;

	if (!tunnel)
		return;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i])
			tb_path_free(tunnel->paths[i]);
	}

	kfree(tunnel->paths);
	kfree(tunnel);
}

/**
 * tb_tunnel_is_invalid() - check whether an activated path is still valid
 * @tunnel: Tunnel to check
 */
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		WARN_ON(!tunnel->paths[i]->activated);
		if (tb_path_is_invalid(tunnel->paths[i]))
			return true;
	}

	return false;
}

/**
 * tb_tunnel_restart() - activate a tunnel after a hardware reset
 * @tunnel: Tunnel to restart
 *
 * Return: 0 on success and negative errno in case of failure
 */
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
	int res, i;

	tb_tunnel_dbg(tunnel, "activating\n");

	/*
	 * Make sure all paths are properly disabled before enabling
	 * them again.
	 */
	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_path_deactivate(tunnel->paths[i]);
			tunnel->paths[i]->activated = false;
		}
	}

	if (tunnel->init) {
		res = tunnel->init(tunnel);
		if (res)
			return res;
	}

	for (i = 0; i < tunnel->npaths; i++) {
		res = tb_path_activate(tunnel->paths[i]);
		if (res)
			goto err;
	}

	if (tunnel->activate) {
		res = tunnel->activate(tunnel, true);
		if (res)
			goto err;
	}

	return 0;

err:
	tb_tunnel_warn(tunnel, "activation failed\n");
	tb_tunnel_deactivate(tunnel);
	return res;
}

/**
 * tb_tunnel_activate() - activate a tunnel
 * @tunnel: Tunnel to activate
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i]->activated) {
			tb_tunnel_WARN(tunnel,
				       "trying to activate an already activated tunnel\n");
			return -EINVAL;
		}
	}

	return tb_tunnel_restart(tunnel);
}

/**
 * tb_tunnel_deactivate() - deactivate a tunnel
 * @tunnel: Tunnel to deactivate
 */
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
	int i;

	tb_tunnel_dbg(tunnel, "deactivating\n");

	if (tunnel->activate)
		tunnel->activate(tunnel, false);

	for (i = 0; i < tunnel->npaths; i++) {
		if (tunnel->paths[i] && tunnel->paths[i]->activated)
			tb_path_deactivate(tunnel->paths[i]);
	}
}

/**
 * tb_tunnel_switch_on_path() - Does the tunnel go through the switch
 * @tunnel: Tunnel to check
 * @sw: Switch to check
 *
 * Returns true if @tunnel goes through @sw (direction does not matter),
 * false otherwise.
 */
bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel,
			      const struct tb_switch *sw)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			continue;
		if (tb_path_switch_on_path(tunnel->paths[i], sw))
			return true;
	}

	return false;
}

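/*
 * A tunnel is considered active only when all of its paths have been
 * allocated and activated.
 */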
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	int i;

	for (i = 0; i < tunnel->npaths; i++) {
		if (!tunnel->paths[i])
			return false;
		if (!tunnel->paths[i]->activated)
			return false;
	}

	return true;
}

/**
 * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
 * @tunnel: Tunnel to check
 *
 * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel
 * is not active or does not consume bandwidth.
 */
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
{
	if (!tb_tunnel_is_active(tunnel))
		return 0;

	if (tunnel->consumed_bandwidth) {
		int ret = tunnel->consumed_bandwidth(tunnel);

		tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
		return ret;
	}

	return 0;
}
1193