// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

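/*
 * Highest retimer index probed on a USB4 port. When receiver lane
 * margining support is built in we also want to reach retimers beyond
 * the on-board ones, so allow a larger index range (see also the
 * clamping done in tb_retimer_scan()).
 */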
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX	6
#else
#define TB_MAX_RETIMER_INDEX	2
#endif

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

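/*
 * NVMem read/write callbacks registered through tb_nvm_add_active() and
 * tb_nvm_add_non_active() below. Reads go to the retimer over the port
 * sideband; writes are only buffered here and flushed to the retimer
 * when NVM authentication is requested through sysfs.
 */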
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

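/*
 * Reads the NVM version and exposes the active and non-active NVM
 * areas for this retimer. If the NVM format is not supported
 * (-EOPNOTSUPP) the retimer is still added, just with NVM upgrade
 * disabled; other errors are propagated to the caller.
 */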
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	rt->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

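/*
 * Validates the buffered NVM image and writes it to the non-active
 * area of the retimer NVM. The image is written only once; the
 * flushed flag makes sure repeated authentication requests do not
 * write it again.
 */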
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					 image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

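/*
 * Triggers authentication of the new NVM image. With @auth_only the
 * image already present in the retimer NVM is authenticated without
 * writing a new one first (the NVM offset is reset to 0 for that).
 * Once authentication starts the retimer typically drops off the
 * sideband, so a failure to read the status back is expected and not
 * treated as an error.
 */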
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the read below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

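/*
 * Reads the NVM authentication status of all possible retimers behind
 * @port into @status (indexed by retimer index). Stops at the first
 * retimer that does not answer.
 */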
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
			break;
	}
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline we need to keep sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
		if (usb4_port_retimer_unset_inbound_sbtx(port, i))
			break;
	}
}

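/*
 * Writing to the nvm_authenticate attribute triggers the NVM upgrade
 * flow: WRITE_ONLY just flushes the buffered image to the retimer,
 * WRITE_AND_AUTHENTICATE flushes it and starts authentication, and
 * AUTHENTICATE_ONLY authenticates whatever image is already in the
 * retimer NVM. Writing 0 only clears the stored authentication status.
 */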
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts the retimer is no
		 * longer accessible, so calling
		 * tb_retimer_unset_inbound_sbtx() would fail and
		 * therefore we do not call it. The exceptions are when
		 * the validation fails or when we only write the new
		 * NVM image without authenticating it.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (attr == &dev_attr_nvm_authenticate.attr ||
	    attr == &dev_attr_nvm_version.attr)
		return rt->no_nvm_upgrade ? 0 : attr->mode;

	return attr->mode;
}

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.is_visible = retimer_is_visible,
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

const struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

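/*
 * Probes the retimer at @index behind @port by reading its vendor and
 * product IDs over the sideband, registers it on the Thunderbolt bus
 * and exposes its NVM. @auth_status is the previously read NVM
 * authentication status and @on_board tells whether this is an
 * on-board retimer (only those support NVM upgrade).
 */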
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
			  bool on_board)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_PRODUCT_ID, &device, sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	/*
	 * Only support NVM upgrade for on-board retimers, not for the
	 * retimers on the other side of the connection.
	 */
	if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
		rt->no_nvm_upgrade = true;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	tb_retimer_debugfs_init(rt);
	return 0;
}

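/* Tears down a single retimer: debugfs entries, NVM devices and the device itself. */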
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_retimer_debugfs_remove(rt);
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

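/*
 * Finds an already registered retimer at @index behind @port. Returns
 * the retimer with its device reference held (the caller is expected
 * to drop it with put_device()) or NULL if not found.
 */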
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set.  Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, max, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending the enumerate retimers request,
	 * read the authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable the sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * The "last retimer" indication is set only for the
		 * last on-board retimer (the one connected directly to
		 * the Type-C port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;

		max = i;
	}

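	/*
	 * Unless lane margining support is built in, only care about
	 * the on-board retimers, i.e. ignore anything beyond the last
	 * on-board one.
	 */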
	ret = 0;
	if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
		max = min(last_idx, max);

	/* Add retimers if they do not exist already */
	for (i = 1; i <= max; i++) {
		struct tb_retimer *rt;

		/* Skip cable retimers */
		if (usb4_port_retimer_is_cable(port, i))
			continue;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i], i <= last_idx);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	tb_retimer_unset_inbound_sbtx(port);
	return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}