xref: /linux/drivers/thunderbolt/retimer.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Thunderbolt/USB4 retimer support.
4  *
5  * Copyright (C) 2020, Intel Corporation
6  * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
7  *	    Mika Westerberg <mika.westerberg@linux.intel.com>
8  */
9 
10 #include <linux/delay.h>
11 #include <linux/pm_runtime.h>
12 #include <linux/sched/signal.h>
13 
14 #include "sb_regs.h"
15 #include "tb.h"
16 
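/*
 * Highest retimer index probed below. When receiver lane margining
 * support is built in, retimers beyond the on-board ones (such as the
 * ones on the other side of the link) are probed as well; otherwise
 * only the on-board retimers are of interest.
 */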
17 #if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
18 #define TB_MAX_RETIMER_INDEX	6
19 #else
20 #define TB_MAX_RETIMER_INDEX	2
21 #endif
22 
23 /**
24  * tb_retimer_nvm_read() - Read contents of retimer NVM
25  * @rt: Retimer device
26  * @address: NVM address (in bytes) to start reading
27  * @buf: Data read from NVM is stored here
28  * @size: Number of bytes to read
29  *
30  * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
31  * read was successful and negative errno in case of failure.
32  */
33 int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
34 			size_t size)
35 {
36 	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
37 }
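/*
 * Illustrative sketch only (not part of the driver): a hypothetical
 * caller that, like the nvmem callbacks below, already holds the
 * domain lock and a runtime PM reference could read the beginning of
 * the retimer NVM like this:
 *
 *	u8 hdr[64];
 *	int ret;
 *
 *	ret = tb_retimer_nvm_read(rt, 0, hdr, sizeof(hdr));
 *	if (ret)
 *		return ret;
 */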
38 
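/*
 * nvm_read() and nvm_write() below are the read/write callbacks
 * registered for the active and non-active NVM devices through
 * tb_nvm_add_active() and tb_nvm_add_non_active(). They use
 * mutex_trylock() with restart_syscall() instead of blocking on the
 * domain lock, so a userspace access racing with a domain operation is
 * restarted rather than blocked; the read path also runtime resumes
 * the retimer before touching the hardware.
 */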
39 static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
40 {
41 	struct tb_nvm *nvm = priv;
42 	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
43 	int ret;
44 
45 	pm_runtime_get_sync(&rt->dev);
46 
47 	if (!mutex_trylock(&rt->tb->lock)) {
48 		ret = restart_syscall();
49 		goto out;
50 	}
51 
52 	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
53 	mutex_unlock(&rt->tb->lock);
54 
55 out:
56 	pm_runtime_mark_last_busy(&rt->dev);
57 	pm_runtime_put_autosuspend(&rt->dev);
58 
59 	return ret;
60 }
61 
62 static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
63 {
64 	struct tb_nvm *nvm = priv;
65 	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
66 	int ret = 0;
67 
68 	if (!mutex_trylock(&rt->tb->lock))
69 		return restart_syscall();
70 
71 	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
72 	mutex_unlock(&rt->tb->lock);
73 
74 	return ret;
75 }
76 
77 static int tb_retimer_nvm_add(struct tb_retimer *rt)
78 {
79 	struct tb_nvm *nvm;
80 	int ret;
81 
82 	nvm = tb_nvm_alloc(&rt->dev);
83 	if (IS_ERR(nvm)) {
84 		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
85 		goto err_nvm;
86 	}
87 
88 	ret = tb_nvm_read_version(nvm);
89 	if (ret)
90 		goto err_nvm;
91 
92 	ret = tb_nvm_add_active(nvm, nvm_read);
93 	if (ret)
94 		goto err_nvm;
95 
96 	ret = tb_nvm_add_non_active(nvm, nvm_write);
97 	if (ret)
98 		goto err_nvm;
99 
100 	rt->nvm = nvm;
101 	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
102 	return 0;
103 
104 err_nvm:
105 	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
106 	if (!IS_ERR(nvm))
107 		tb_nvm_free(nvm);
108 
109 	return ret;
110 }
111 
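/*
 * Validates the image userspace staged into the non-active NVM buffer
 * and, if it passes, writes it to the retimer NVM starting at offset
 * 0. The flushed flag prevents a later authenticate request from
 * writing the same image again.
 */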
112 static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
113 {
114 	unsigned int image_size;
115 	const u8 *buf;
116 	int ret;
117 
118 	ret = tb_nvm_validate(rt->nvm);
119 	if (ret)
120 		return ret;
121 
122 	buf = rt->nvm->buf_data_start;
123 	image_size = rt->nvm->buf_data_size;
124 
125 	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
126 					 image_size);
127 	if (ret)
128 		return ret;
129 
130 	rt->nvm->flushed = true;
131 	return 0;
132 }
133 
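/*
 * Starts NVM authentication in the retimer. With @auth_only the NVM
 * write offset is first reset to 0 and only the authentication command
 * is sent (no new image is written in this flow). Once authentication
 * starts the retimer usually drops off the sideband, which is why a
 * failing status read right after the command is treated as success.
 */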
134 static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
135 {
136 	u32 status;
137 	int ret;
138 
139 	if (auth_only) {
140 		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
141 		if (ret)
142 			return ret;
143 	}
144 
145 	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
146 	if (ret)
147 		return ret;
148 
149 	usleep_range(100, 150);
150 
151 	/*
152 	 * Check the status now if we can still access the retimer. It
153 	 * is expected that the call below fails.
154 	 */
155 	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
156 							&status);
157 	if (!ret) {
158 		rt->auth_status = status;
159 		return status ? -EINVAL : 0;
160 	}
161 
162 	return 0;
163 }
164 
165 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
166 			   char *buf)
167 {
168 	struct tb_retimer *rt = tb_to_retimer(dev);
169 
170 	return sysfs_emit(buf, "%#x\n", rt->device);
171 }
172 static DEVICE_ATTR_RO(device);
173 
174 static ssize_t nvm_authenticate_show(struct device *dev,
175 	struct device_attribute *attr, char *buf)
176 {
177 	struct tb_retimer *rt = tb_to_retimer(dev);
178 	int ret;
179 
180 	if (!mutex_trylock(&rt->tb->lock))
181 		return restart_syscall();
182 
183 	if (!rt->nvm)
184 		ret = -EAGAIN;
185 	else if (rt->no_nvm_upgrade)
186 		ret = -EOPNOTSUPP;
187 	else
188 		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
189 
190 	mutex_unlock(&rt->tb->lock);
191 
192 	return ret;
193 }
194 
195 static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
196 {
197 	int i;
198 
199 	tb_port_dbg(port, "reading NVM authentication status of retimers\n");
200 
201 	/*
202 	 * Before doing anything else, read the authentication status.
203 	 * If the retimer has it set, store it for the new retimer
204 	 * device instance.
205 	 */
206 	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
207 		if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
208 			break;
209 	}
210 }
211 
212 static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
213 {
214 	int i;
215 
216 	/*
217 	 * When the USB4 port is online, sideband communications
218 	 * are already up.
219 	 */
220 	if (!usb4_port_device_is_offline(port->usb4))
221 		return;
222 
223 	tb_port_dbg(port, "enabling sideband transactions\n");
224 
225 	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
226 		usb4_port_retimer_set_inbound_sbtx(port, i);
227 }
228 
229 static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
230 {
231 	int i;
232 
233 	/*
234 	 * When the USB4 port is offline, we need to keep the sideband
235 	 * communications up to make it possible to communicate with
236 	 * the connected retimers.
237 	 */
238 	if (usb4_port_device_is_offline(port->usb4))
239 		return;
240 
241 	tb_port_dbg(port, "disabling sideband transactions\n");
242 
243 	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
244 		if (usb4_port_retimer_unset_inbound_sbtx(port, i))
245 			break;
246 	}
247 }
248 
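/*
 * Writing this attribute triggers the NVM upgrade flow. The value is
 * matched against WRITE_AND_AUTHENTICATE, WRITE_ONLY and
 * AUTHENTICATE_ONLY: the first flushes the pending image and then
 * authenticates it, the second only flushes it, and the last
 * authenticates whatever is already in the retimer NVM. Writing 0
 * just clears the stored authentication status.
 */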
249 static ssize_t nvm_authenticate_store(struct device *dev,
250 	struct device_attribute *attr, const char *buf, size_t count)
251 {
252 	struct tb_retimer *rt = tb_to_retimer(dev);
253 	int val, ret;
254 
255 	pm_runtime_get_sync(&rt->dev);
256 
257 	if (!mutex_trylock(&rt->tb->lock)) {
258 		ret = restart_syscall();
259 		goto exit_rpm;
260 	}
261 
262 	if (!rt->nvm) {
263 		ret = -EAGAIN;
264 		goto exit_unlock;
265 	}
266 
267 	ret = kstrtoint(buf, 10, &val);
268 	if (ret)
269 		goto exit_unlock;
270 
271 	/* Always clear status */
272 	rt->auth_status = 0;
273 
274 	if (val) {
275 		/*
276 		 * When NVM authentication starts, the retimer is not
277 		 * accessible, so calling tb_retimer_unset_inbound_sbtx()
278 		 * would fail and therefore we do not call it. The
279 		 * exception is when the validation fails or when we only
280 		 * write the new NVM image without authentication.
281 		 */
282 		tb_retimer_set_inbound_sbtx(rt->port);
283 		if (val == AUTHENTICATE_ONLY) {
284 			ret = tb_retimer_nvm_authenticate(rt, true);
285 		} else {
286 			if (!rt->nvm->flushed) {
287 				if (!rt->nvm->buf) {
288 					ret = -EINVAL;
289 					goto exit_unlock;
290 				}
291 
292 				ret = tb_retimer_nvm_validate_and_write(rt);
293 				if (ret || val == WRITE_ONLY)
294 					goto exit_unlock;
295 			}
296 			if (val == WRITE_AND_AUTHENTICATE)
297 				ret = tb_retimer_nvm_authenticate(rt, false);
298 		}
299 	}
300 
301 exit_unlock:
302 	if (ret || val == WRITE_ONLY)
303 		tb_retimer_unset_inbound_sbtx(rt->port);
304 	mutex_unlock(&rt->tb->lock);
305 exit_rpm:
306 	pm_runtime_mark_last_busy(&rt->dev);
307 	pm_runtime_put_autosuspend(&rt->dev);
308 
309 	if (ret)
310 		return ret;
311 	return count;
312 }
313 static DEVICE_ATTR_RW(nvm_authenticate);
314 
315 static ssize_t nvm_version_show(struct device *dev,
316 				struct device_attribute *attr, char *buf)
317 {
318 	struct tb_retimer *rt = tb_to_retimer(dev);
319 	int ret;
320 
321 	if (!mutex_trylock(&rt->tb->lock))
322 		return restart_syscall();
323 
324 	if (!rt->nvm)
325 		ret = -EAGAIN;
326 	else if (rt->no_nvm_upgrade)
327 		ret = -EOPNOTSUPP;
328 	else
329 		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
330 
331 	mutex_unlock(&rt->tb->lock);
332 	return ret;
333 }
334 static DEVICE_ATTR_RO(nvm_version);
335 
336 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
337 			   char *buf)
338 {
339 	struct tb_retimer *rt = tb_to_retimer(dev);
340 
341 	return sysfs_emit(buf, "%#x\n", rt->vendor);
342 }
343 static DEVICE_ATTR_RO(vendor);
344 
345 static struct attribute *retimer_attrs[] = {
346 	&dev_attr_device.attr,
347 	&dev_attr_nvm_authenticate.attr,
348 	&dev_attr_nvm_version.attr,
349 	&dev_attr_vendor.attr,
350 	NULL
351 };
352 
353 static const struct attribute_group retimer_group = {
354 	.attrs = retimer_attrs,
355 };
356 
357 static const struct attribute_group *retimer_groups[] = {
358 	&retimer_group,
359 	NULL
360 };
361 
362 static void tb_retimer_release(struct device *dev)
363 {
364 	struct tb_retimer *rt = tb_to_retimer(dev);
365 
366 	kfree(rt);
367 }
368 
369 const struct device_type tb_retimer_type = {
370 	.name = "thunderbolt_retimer",
371 	.groups = retimer_groups,
372 	.release = tb_retimer_release,
373 };
374 
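/*
 * Probes the retimer at @index behind @port over the sideband and, if
 * it responds with valid vendor and product IDs, registers it as a
 * device under the USB4 port. @auth_status is the NVM authentication
 * status read earlier during the scan and @on_board tells whether this
 * is one of the on-board retimers (only those support NVM upgrade).
 */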
375 static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
376 			  bool on_board)
377 {
378 	struct tb_retimer *rt;
379 	u32 vendor, device;
380 	int ret;
381 
382 	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
383 				USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
384 	if (ret) {
385 		if (ret != -ENODEV)
386 			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
387 		return ret;
388 	}
389 
390 	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
391 				USB4_SB_PRODUCT_ID, &device, sizeof(device));
392 	if (ret) {
393 		if (ret != -ENODEV)
394 			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
395 		return ret;
396 	}
397 
398 
399 	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
400 	if (!rt)
401 		return -ENOMEM;
402 
403 	rt->index = index;
404 	rt->vendor = vendor;
405 	rt->device = device;
406 	rt->auth_status = auth_status;
407 	rt->port = port;
408 	rt->tb = port->sw->tb;
409 
410 	/*
411 	 * Only support NVM upgrade for on-board retimers. Retimers on
412 	 * the other side of the connection are not supported.
413 	 */
414 	if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
415 		rt->no_nvm_upgrade = true;
416 
417 	rt->dev.parent = &port->usb4->dev;
418 	rt->dev.bus = &tb_bus_type;
419 	rt->dev.type = &tb_retimer_type;
420 	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
421 		     port->port, index);
422 
423 	ret = device_register(&rt->dev);
424 	if (ret) {
425 		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
426 		put_device(&rt->dev);
427 		return ret;
428 	}
429 
430 	ret = tb_retimer_nvm_add(rt);
431 	if (ret) {
432 		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
433 		device_unregister(&rt->dev);
434 		return ret;
435 	}
436 
437 	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
438 		 rt->vendor, rt->device);
439 
440 	pm_runtime_no_callbacks(&rt->dev);
441 	pm_runtime_set_active(&rt->dev);
442 	pm_runtime_enable(&rt->dev);
443 	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
444 	pm_runtime_mark_last_busy(&rt->dev);
445 	pm_runtime_use_autosuspend(&rt->dev);
446 
447 	tb_retimer_debugfs_init(rt);
448 	return 0;
449 }
450 
451 static void tb_retimer_remove(struct tb_retimer *rt)
452 {
453 	dev_info(&rt->dev, "retimer disconnected\n");
454 	tb_retimer_debugfs_remove(rt);
455 	tb_nvm_free(rt->nvm);
456 	device_unregister(&rt->dev);
457 }
458 
459 struct tb_retimer_lookup {
460 	const struct tb_port *port;
461 	u8 index;
462 };
463 
464 static int retimer_match(struct device *dev, void *data)
465 {
466 	const struct tb_retimer_lookup *lookup = data;
467 	struct tb_retimer *rt = tb_to_retimer(dev);
468 
469 	return rt && rt->port == lookup->port && rt->index == lookup->index;
470 }
471 
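/*
 * Looks up an already registered retimer at @index behind @port.
 * device_find_child() takes a reference to the matching device so the
 * caller must drop it with put_device() when done, as tb_retimer_scan()
 * does for retimers that already exist.
 */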
472 static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
473 {
474 	struct tb_retimer_lookup lookup = { .port = port, .index = index };
475 	struct device *dev;
476 
477 	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
478 	if (dev)
479 		return tb_to_retimer(dev);
480 
481 	return NULL;
482 }
483 
484 /**
485  * tb_retimer_scan() - Scan for on-board retimers under port
486  * @port: USB4 port to scan
487  * @add: If true also registers found retimers
488  *
489  * Brings the sideband into a state where retimers can be accessed.
490  * Then tries to enumerate on-board retimers connected to @port. Found
491  * retimers are registered as children of @port if @add is set.  Does
492  * not scan for cable retimers for now.
493  */
494 int tb_retimer_scan(struct tb_port *port, bool add)
495 {
496 	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
497 	int ret, i, max, last_idx = 0;
498 
499 	/*
500 	 * Send broadcast RT to make sure retimer indices facing this
501 	 * port are set.
502 	 */
503 	ret = usb4_port_enumerate_retimers(port);
504 	if (ret)
505 		return ret;
506 
507 	/*
508 	 * Immediately after sending enumerate retimers read the
509 	 * Immediately after sending enumerate retimers, read the
510 	 */
511 	tb_retimer_nvm_authenticate_status(port, status);
512 
513 	/*
514 	 * Enable sideband channel for each retimer. We can do this
515 	 * regardless of whether there is a device connected or not.
516 	 */
517 	tb_retimer_set_inbound_sbtx(port);
518 
519 	for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
520 		/*
521 		 * Only the last on-board retimer (the one connected
522 		 * directly to the Type-C port) reports itself as the
523 		 * last retimer.
524 		 */
525 		ret = usb4_port_retimer_is_last(port, i);
526 		if (ret > 0)
527 			last_idx = i;
528 		else if (ret < 0)
529 			break;
530 
531 		max = i;
532 	}
533 
534 	ret = 0;
535 	if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
536 		max = min(last_idx, max);
537 
538 	/* Add retimers if they do not exist already */
539 	for (i = 1; i <= max; i++) {
540 		struct tb_retimer *rt;
541 
542 		/* Skip cable retimers */
543 		if (usb4_port_retimer_is_cable(port, i))
544 			continue;
545 
546 		rt = tb_port_find_retimer(port, i);
547 		if (rt) {
548 			put_device(&rt->dev);
549 		} else if (add) {
550 			ret = tb_retimer_add(port, i, status[i], i <= last_idx);
551 			if (ret && ret != -EOPNOTSUPP)
552 				break;
553 		}
554 	}
555 
556 	tb_retimer_unset_inbound_sbtx(port);
557 	return ret;
558 }
559 
560 static int remove_retimer(struct device *dev, void *data)
561 {
562 	struct tb_retimer *rt = tb_to_retimer(dev);
563 	struct tb_port *port = data;
564 
565 	if (rt && rt->port == port)
566 		tb_retimer_remove(rt);
567 	return 0;
568 }
569 
570 /**
571  * tb_retimer_remove_all() - Remove all retimers under port
572  * @port: USB4 port whose retimers to remove
573  *
574  * This removes all previously added retimers under @port.
575  */
576 void tb_retimer_remove_all(struct tb_port *port)
577 {
578 	struct usb4_port *usb4;
579 
580 	usb4 = port->usb4;
581 	if (usb4)
582 		device_for_each_child_reverse(&usb4->dev, port,
583 					      remove_retimer);
584 }
585