1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt driver - path/tunnel functionality
4  *
5  * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
6  * Copyright (C) 2019, Intel Corporation
7  */
8 
9 #include <linux/slab.h>
10 #include <linux/errno.h>
11 #include <linux/delay.h>
12 #include <linux/ktime.h>
13 
14 #include "tb.h"
15 
16 static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
17 {
18 	const struct tb_port *port = hop->in_port;
19 
20 	tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
21 		    hop->in_hop_index, regs->out_port, regs->next_hop);
22 	tb_port_dbg(port, "  Weight: %d Priority: %d Credits: %d Drop: %d PM: %d\n",
23 		    regs->weight, regs->priority, regs->initial_credits,
24 		    regs->drop_packages, regs->pmps);
25 	tb_port_dbg(port, "   Counter enabled: %d Counter index: %d\n",
26 		    regs->counter_enable, regs->counter);
27 	tb_port_dbg(port, "  Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
28 		    regs->ingress_fc, regs->egress_fc,
29 		    regs->ingress_shared_buffer, regs->egress_shared_buffer);
30 	tb_port_dbg(port, "  Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
31 		    regs->unknown1, regs->unknown2, regs->unknown3);
32 }
33 
34 static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
35 					     int dst_hopid)
36 {
37 	struct tb_port *port, *out_port = NULL;
38 	struct tb_regs_hop hop;
39 	struct tb_switch *sw;
40 	int i, ret, hopid;
41 
42 	hopid = src_hopid;
43 	port = src;
44 
45 	for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
46 		sw = port->sw;
47 
48 		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
49 		if (ret) {
50 			tb_port_warn(port, "failed to read path at %d\n", hopid);
51 			return NULL;
52 		}
53 
54 		if (!hop.enable)
55 			return NULL;
56 
57 		out_port = &sw->ports[hop.out_port];
58 		hopid = hop.next_hop;
59 		port = out_port->remote;
60 	}
61 
62 	return out_port && hopid == dst_hopid ? out_port : NULL;
63 }
64 
65 static int tb_path_find_src_hopid(struct tb_port *src,
66 	const struct tb_port *dst, int dst_hopid)
67 {
68 	struct tb_port *out;
69 	int i;
70 
71 	for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
72 		out = tb_path_find_dst_port(src, i, dst_hopid);
73 		if (out == dst)
74 			return i;
75 	}
76 
77 	return 0;
78 }
79 
80 /**
81  * tb_path_discover() - Discover a path
82  * @src: First input port of a path
83  * @src_hopid: Starting HopID of a path (%-1 if don't care)
84  * @dst: Expected destination port of the path (%NULL if don't care)
85  * @dst_hopid: HopID to the @dst (%-1 if don't care)
86  * @last: Last port is filled here if not %NULL
87  * @name: Name of the path
88  * @alloc_hopid: Allocate HopIDs for the ports
89  *
90  * Follows a path starting from @src and @src_hopid to the last output
91  * port of the path. Allocates HopIDs for the visited ports (if
92  * @alloc_hopid is true). Call tb_path_free() to release the path and
93  * allocated HopIDs when the path is not needed anymore.
94  *
95  * Note that incomplete paths are also discovered, so the caller should
96  * check that the @dst port is the expected one. If it is not, the path
97  * can be cleaned up by calling tb_path_deactivate() before tb_path_free().
98  *
99  * Return: Discovered path on success, %NULL in case of failure
100  */
101 struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
102 				 struct tb_port *dst, int dst_hopid,
103 				 struct tb_port **last, const char *name,
104 				 bool alloc_hopid)
105 {
106 	struct tb_port *out_port;
107 	struct tb_regs_hop hop;
108 	struct tb_path *path;
109 	struct tb_switch *sw;
110 	struct tb_port *p;
111 	size_t num_hops;
112 	int ret, i, h;
113 
114 	if (src_hopid < 0 && dst) {
115 		/*
116 		 * For incomplete paths the intermediate HopID can be
117 		 * different from the one used by the protocol adapter
118 		 * so in that case find a path that ends on @dst with
119 		 * matching @dst_hopid. That should give us the correct
120 		 * HopID for the @src.
121 		 */
122 		src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
123 		if (!src_hopid)
124 			return NULL;
125 	}
126 
127 	p = src;
128 	h = src_hopid;
129 	num_hops = 0;
130 
131 	for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
132 		sw = p->sw;
133 
134 		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
135 		if (ret) {
136 			tb_port_warn(p, "failed to read path at %d\n", h);
137 			return NULL;
138 		}
139 
140 		/* If the hop is not enabled we got an incomplete path */
141 		if (!hop.enable)
142 			break;
143 
144 		out_port = &sw->ports[hop.out_port];
145 		if (last)
146 			*last = out_port;
147 
148 		h = hop.next_hop;
149 		p = out_port->remote;
150 		num_hops++;
151 	}
152 
153 	path = kzalloc(sizeof(*path), GFP_KERNEL);
154 	if (!path)
155 		return NULL;
156 
157 	path->name = name;
158 	path->tb = src->sw->tb;
159 	path->path_length = num_hops;
160 	path->activated = true;
161 	path->alloc_hopid = alloc_hopid;
162 
163 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
164 	if (!path->hops) {
165 		kfree(path);
166 		return NULL;
167 	}
168 
169 	tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
170 	       path->name, tb_route(src->sw), src->port);
171 
172 	p = src;
173 	h = src_hopid;
174 
175 	for (i = 0; i < num_hops; i++) {
176 		int next_hop;
177 
178 		sw = p->sw;
179 
180 		ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
181 		if (ret) {
182 			tb_port_warn(p, "failed to read path at %d\n", h);
183 			goto err;
184 		}
185 
186 		if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
187 			goto err;
188 
189 		out_port = &sw->ports[hop.out_port];
190 		next_hop = hop.next_hop;
191 
192 		if (alloc_hopid &&
193 		    tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
194 			tb_port_release_in_hopid(p, h);
195 			goto err;
196 		}
197 
198 		path->hops[i].in_port = p;
199 		path->hops[i].in_hop_index = h;
200 		path->hops[i].in_counter_index = -1;
201 		path->hops[i].out_port = out_port;
202 		path->hops[i].next_hop_index = next_hop;
203 
204 		tb_dump_hop(&path->hops[i], &hop);
205 
206 		h = next_hop;
207 		p = out_port->remote;
208 	}
209 
210 	tb_dbg(path->tb, "path discovery complete\n");
211 	return path;
212 
213 err:
214 	tb_port_warn(src, "failed to discover path starting at HopID %d\n",
215 		     src_hopid);
216 	tb_path_free(path);
217 	return NULL;
218 }
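
/*
 * Example (illustrative sketch, not part of the driver): walking an
 * already programmed path from a protocol adapter. The starting HopID
 * below is a hypothetical value; real callers such as the tunnel code
 * either pass the HopID used by the protocol adapter or -1 together
 * with the expected destination port.
 */
static __maybe_unused struct tb_path *example_discover(struct tb_port *in)
{
	struct tb_port *last = NULL;
	struct tb_path *path;

	/* Follow whatever is programmed at HopID 8 of @in */
	path = tb_path_discover(in, 8, NULL, -1, &last, "Example", true);
	if (!path)
		return NULL;

	/* @last now points to the final output port of the discovered path */
	if (last)
		tb_port_dbg(last, "path %s ends here\n", path->name);
	return path;
}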
219 
220 /**
221  * tb_path_alloc() - allocate a thunderbolt path between two ports
222  * @tb: Domain pointer
223  * @src: Source port of the path
224  * @src_hopid: HopID used for the first ingress port in the path
225  * @dst: Destination port of the path
226  * @dst_hopid: HopID used for the last egress port in the path
227  * @link_nr: Preferred link if there are dual links on the path
228  * @name: Name of the path
229  *
230  * Creates a path between two ports starting with the given @src_hopid.
231  * Reserves HopIDs for each port (these can differ from @src_hopid depending
232  * on how many HopIDs each port has already reserved). If there are dual
233  * links on the path, @link_nr is preferred but lane bonding is taken
234  * into account.
235  *
236  * Return: A tb_path on success, %NULL on failure.
237  */
238 struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
239 			      struct tb_port *dst, int dst_hopid, int link_nr,
240 			      const char *name)
241 {
242 	struct tb_port *in_port, *out_port, *first_port, *last_port;
243 	int in_hopid, out_hopid;
244 	struct tb_path *path;
245 	size_t num_hops;
246 	int i, ret;
247 
248 	path = kzalloc(sizeof(*path), GFP_KERNEL);
249 	if (!path)
250 		return NULL;
251 
252 	first_port = last_port = NULL;
253 	i = 0;
254 	tb_for_each_port_on_path(src, dst, in_port) {
255 		if (!first_port)
256 			first_port = in_port;
257 		last_port = in_port;
258 		i++;
259 	}
260 
261 	/* Check that src and dst are reachable */
262 	if (first_port != src || last_port != dst) {
263 		kfree(path);
264 		return NULL;
265 	}
266 
267 	/* Each hop takes two ports */
268 	num_hops = i / 2;
269 
270 	path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
271 	if (!path->hops) {
272 		kfree(path);
273 		return NULL;
274 	}
275 
276 	path->alloc_hopid = true;
277 
278 	in_hopid = src_hopid;
279 	out_port = NULL;
280 
281 	for (i = 0; i < num_hops; i++) {
282 		in_port = tb_next_port_on_path(src, dst, out_port);
283 		if (!in_port)
284 			goto err;
285 
286 		/* When lanes are bonded the primary link must be used */
287 		if (!in_port->bonded && in_port->dual_link_port &&
288 		    in_port->link_nr != link_nr)
289 			in_port = in_port->dual_link_port;
290 
291 		ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
292 		if (ret < 0)
293 			goto err;
294 		in_hopid = ret;
295 
296 		out_port = tb_next_port_on_path(src, dst, in_port);
297 		if (!out_port)
298 			goto err;
299 
300 		/*
301 		 * Pick the right port when going from non-bonded to
302 		 * bonded or from bonded to non-bonded.
303 		 */
304 		if (out_port->dual_link_port) {
305 			if (!in_port->bonded && out_port->bonded &&
306 			    out_port->link_nr) {
307 				/*
308 				 * Use primary link when going from
309 				 * non-bonded to bonded.
310 				 */
311 				out_port = out_port->dual_link_port;
312 			} else if (!out_port->bonded &&
313 				   out_port->link_nr != link_nr) {
314 				/*
315 				 * If out port is not bonded follow
316 				 * link_nr.
317 				 */
318 				out_port = out_port->dual_link_port;
319 			}
320 		}
321 
322 		if (i == num_hops - 1)
323 			ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
324 						      dst_hopid);
325 		else
326 			ret = tb_port_alloc_out_hopid(out_port, -1, -1);
327 
328 		if (ret < 0)
329 			goto err;
330 		out_hopid = ret;
331 
332 		path->hops[i].in_hop_index = in_hopid;
333 		path->hops[i].in_port = in_port;
334 		path->hops[i].in_counter_index = -1;
335 		path->hops[i].out_port = out_port;
336 		path->hops[i].next_hop_index = out_hopid;
337 
338 		in_hopid = out_hopid;
339 	}
340 
341 	path->tb = tb;
342 	path->path_length = num_hops;
343 	path->name = name;
344 
345 	return path;
346 
347 err:
348 	tb_path_free(path);
349 	return NULL;
350 }
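
/*
 * Example (illustrative sketch, not part of the driver): allocating a
 * path between two protocol adapters and filling in the path-wide
 * parameters that tb_path_activate() later programs into each hop
 * entry. The HopIDs, link number, priority and weight below are
 * hypothetical; real values are chosen by the tunnel code according to
 * the protocol being tunneled.
 */
static __maybe_unused struct tb_path *
example_alloc(struct tb *tb, struct tb_port *src, struct tb_port *dst)
{
	struct tb_path *path;

	path = tb_path_alloc(tb, src, 8, dst, 8, 0, "Example");
	if (!path)
		return NULL;

	/* Enable flow control on every hop of the path */
	path->ingress_fc_enable = TB_PATH_ALL;
	path->egress_fc_enable = TB_PATH_ALL;
	path->priority = 3;
	path->weight = 2;
	path->drop_packages = 0;

	return path;
}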
351 
352 /**
353  * tb_path_free() - free a path
354  * @path: Path to free
355  *
356  * Frees a path. The path does not need to be deactivated.
357  */
358 void tb_path_free(struct tb_path *path)
359 {
360 	if (path->alloc_hopid) {
361 		int i;
362 
363 		for (i = 0; i < path->path_length; i++) {
364 			const struct tb_path_hop *hop = &path->hops[i];
365 
366 			if (hop->in_port)
367 				tb_port_release_in_hopid(hop->in_port,
368 							 hop->in_hop_index);
369 			if (hop->out_port)
370 				tb_port_release_out_hopid(hop->out_port,
371 							  hop->next_hop_index);
372 		}
373 	}
374 
375 	kfree(path->hops);
376 	kfree(path);
377 }
378 
379 static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
380 {
381 	int i, res;
382 	for (i = first_hop; i < path->path_length; i++) {
383 		res = tb_port_add_nfc_credits(path->hops[i].in_port,
384 					      -path->hops[i].nfc_credits);
385 		if (res)
386 			tb_port_warn(path->hops[i].in_port,
387 				     "nfc credits deallocation failed for hop %d\n",
388 				     i);
389 	}
390 }
391 
392 static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
393 				    bool clear_fc)
394 {
395 	struct tb_regs_hop hop;
396 	ktime_t timeout;
397 	int ret;
398 
399 	/* Disable the path */
400 	ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
401 	if (ret)
402 		return ret;
403 
404 	/* Already disabled */
405 	if (!hop.enable)
406 		return 0;
407 
408 	hop.enable = 0;
409 
410 	ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
411 	if (ret)
412 		return ret;
413 
414 	/* Wait until it is drained */
415 	timeout = ktime_add_ms(ktime_get(), 500);
416 	do {
417 		ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
418 		if (ret)
419 			return ret;
420 
421 		if (!hop.pending) {
422 			if (clear_fc) {
423 				/*
424 				 * Clear flow control. Protocol adapters
425 				 * IFC and ISE bits are vendor defined
426 				 * in the USB4 spec so we clear them
427 				 * only for pre-USB4 adapters.
428 				 */
429 				if (!tb_switch_is_usb4(port->sw)) {
430 					hop.ingress_fc = 0;
431 					hop.ingress_shared_buffer = 0;
432 				}
433 				hop.egress_fc = 0;
434 				hop.egress_shared_buffer = 0;
435 
436 				return tb_port_write(port, &hop, TB_CFG_HOPS,
437 						     2 * hop_index, 2);
438 			}
439 
440 			return 0;
441 		}
442 
443 		usleep_range(10, 20);
444 	} while (ktime_before(ktime_get(), timeout));
445 
446 	return -ETIMEDOUT;
447 }
448 
449 /**
450  * tb_path_deactivate_hop() - Deactivate one path in path config space
451  * @port: Lane or protocol adapter
452  * @hop_index: HopID of the path to be cleared
453  *
454  * This deactivates or clears a single path config space entry at
455  * @hop_index. Returns %0 on success and a negative errno otherwise.
456  */
457 int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
458 {
459 	return __tb_path_deactivate_hop(port, hop_index, true);
460 }
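
/*
 * Example (illustrative sketch, not part of the driver): clearing a
 * possibly stale path config space entry behind an adapter. The HopID
 * value is hypothetical.
 */
static void __maybe_unused example_clear_hop(struct tb_port *port)
{
	int ret;

	ret = tb_path_deactivate_hop(port, 8);
	if (ret)
		tb_port_warn(port, "failed to clear HopID %d: %d\n", 8, ret);
}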
461 
462 static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
463 {
464 	int i, res;
465 
466 	for (i = first_hop; i < path->path_length; i++) {
467 		res = __tb_path_deactivate_hop(path->hops[i].in_port,
468 					       path->hops[i].in_hop_index,
469 					       path->clear_fc);
470 		if (res && res != -ENODEV)
471 			tb_port_warn(path->hops[i].in_port,
472 				     "hop deactivation failed for hop %d, index %d\n",
473 				     i, path->hops[i].in_hop_index);
474 	}
475 }
476 
477 void tb_path_deactivate(struct tb_path *path)
478 {
479 	if (!path->activated) {
480 		tb_WARN(path->tb, "trying to deactivate an inactive path\n");
481 		return;
482 	}
483 	tb_dbg(path->tb,
484 	       "deactivating %s path from %llx:%u to %llx:%u\n",
485 	       path->name, tb_route(path->hops[0].in_port->sw),
486 	       path->hops[0].in_port->port,
487 	       tb_route(path->hops[path->path_length - 1].out_port->sw),
488 	       path->hops[path->path_length - 1].out_port->port);
489 	__tb_path_deactivate_hops(path, 0);
490 	__tb_path_deallocate_nfc(path, 0);
491 	path->activated = false;
492 }
493 
494 /**
495  * tb_path_activate() - activate a path
496  * @path: Path to activate
497  *
498  * Activate a path starting with the last hop and iterating backwards. The
499  * caller must fill path->hops before calling tb_path_activate().
500  *
501  * Return: 0 on success or a negative error code on failure.
502  */
503 int tb_path_activate(struct tb_path *path)
504 {
505 	int i, res;
506 	enum tb_path_port out_mask, in_mask;
507 	if (path->activated) {
508 		tb_WARN(path->tb, "trying to activate already activated path\n");
509 		return -EINVAL;
510 	}
511 
512 	tb_dbg(path->tb,
513 	       "activating %s path from %llx:%u to %llx:%u\n",
514 	       path->name, tb_route(path->hops[0].in_port->sw),
515 	       path->hops[0].in_port->port,
516 	       tb_route(path->hops[path->path_length - 1].out_port->sw),
517 	       path->hops[path->path_length - 1].out_port->port);
518 
519 	/* Clear counters. */
520 	for (i = path->path_length - 1; i >= 0; i--) {
521 		if (path->hops[i].in_counter_index == -1)
522 			continue;
523 		res = tb_port_clear_counter(path->hops[i].in_port,
524 					    path->hops[i].in_counter_index);
525 		if (res)
526 			goto err;
527 	}
528 
529 	/* Add non flow controlled credits. */
530 	for (i = path->path_length - 1; i >= 0; i--) {
531 		res = tb_port_add_nfc_credits(path->hops[i].in_port,
532 					      path->hops[i].nfc_credits);
533 		if (res) {
534 			__tb_path_deallocate_nfc(path, i);
535 			goto err;
536 		}
537 	}
538 
539 	/* Activate hops. */
540 	for (i = path->path_length - 1; i >= 0; i--) {
541 		struct tb_regs_hop hop = { 0 };
542 
543 		/* If it is left active deactivate it first */
544 		__tb_path_deactivate_hop(path->hops[i].in_port,
545 				path->hops[i].in_hop_index, path->clear_fc);
546 
547 		/* dword 0 */
548 		hop.next_hop = path->hops[i].next_hop_index;
549 		hop.out_port = path->hops[i].out_port->port;
550 		hop.initial_credits = path->hops[i].initial_credits;
551 		hop.pmps = path->hops[i].pm_support;
552 		hop.unknown1 = 0;
553 		hop.enable = 1;
554 
555 		/* dword 1 */
556 		out_mask = (i == path->path_length - 1) ?
557 				TB_PATH_DESTINATION : TB_PATH_INTERNAL;
558 		in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
559 		hop.weight = path->weight;
560 		hop.unknown2 = 0;
561 		hop.priority = path->priority;
562 		hop.drop_packages = path->drop_packages;
563 		hop.counter = path->hops[i].in_counter_index;
564 		hop.counter_enable = path->hops[i].in_counter_index != -1;
565 		hop.ingress_fc = path->ingress_fc_enable & in_mask;
566 		hop.egress_fc = path->egress_fc_enable & out_mask;
567 		hop.ingress_shared_buffer = path->ingress_shared_buffer
568 					    & in_mask;
569 		hop.egress_shared_buffer = path->egress_shared_buffer
570 					    & out_mask;
571 		hop.unknown3 = 0;
572 
573 		tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
574 		tb_dump_hop(&path->hops[i], &hop);
575 		res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
576 				    2 * path->hops[i].in_hop_index, 2);
577 		if (res) {
578 			__tb_path_deactivate_hops(path, i);
579 			__tb_path_deallocate_nfc(path, 0);
580 			goto err;
581 		}
582 	}
583 	path->activated = true;
584 	tb_dbg(path->tb, "path activation complete\n");
585 	return 0;
586 err:
587 	tb_WARN(path->tb, "path activation failed\n");
588 	return res;
589 }
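
/*
 * Example (illustrative sketch, not part of the driver): typical
 * lifecycle of a path once it has been allocated (or discovered) and
 * its hop and path parameters have been filled in.
 */
static int __maybe_unused example_run(struct tb_path *path)
{
	int ret;

	ret = tb_path_activate(path);
	if (ret) {
		tb_path_free(path);
		return ret;
	}

	/* ... the tunnel carries traffic here ... */

	tb_path_deactivate(path);
	tb_path_free(path);
	return 0;
}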
590 
591 /**
592  * tb_path_is_invalid() - check whether any ports on the path are invalid
593  * @path: Path to check
594  *
595  * Return: %true if the path is invalid, %false otherwise.
596  */
597 bool tb_path_is_invalid(struct tb_path *path)
598 {
599 	int i = 0;
600 	for (i = 0; i < path->path_length; i++) {
601 		if (path->hops[i].in_port->sw->is_unplugged)
602 			return true;
603 		if (path->hops[i].out_port->sw->is_unplugged)
604 			return true;
605 	}
606 	return false;
607 }
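
/*
 * Example (illustrative sketch, not part of the driver): tearing a path
 * down on hotplug once one of the switches it runs through has been
 * unplugged.
 */
static void __maybe_unused example_handle_unplug(struct tb_path *path)
{
	if (tb_path_is_invalid(path)) {
		tb_path_deactivate(path);
		tb_path_free(path);
	}
}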
608 
609 /**
610  * tb_path_port_on_path() - Does the path go through certain port
611  * @path: Path to check
612  * @port: Switch to check
613  *
614  * Goes over all hops on path and checks if @port is any of them.
615  * Direction does not matter.
616  */
617 bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
618 {
619 	int i;
620 
621 	for (i = 0; i < path->path_length; i++) {
622 		if (path->hops[i].in_port == port ||
623 		    path->hops[i].out_port == port)
624 			return true;
625 	}
626 
627 	return false;
628 }
629