// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
#define TB_MAX_RETIMER_INDEX	6
#else
#define TB_MAX_RETIMER_INDEX	2
#endif

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf.
 *
 * Return: %0 if the read was successful, negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}
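
/*
 * nvm_write() only buffers the new image into nvm->buf via
 * tb_nvm_write_buf(); nothing is sent to the retimer at this point. The
 * buffered image is validated and flushed to the hardware later from
 * tb_retimer_nvm_validate_and_write() when user space triggers the
 * nvm_authenticate attribute.
 */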
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	if (!rt->no_nvm_upgrade) {
		ret = tb_nvm_add_non_active(nvm, nvm_write);
		if (ret)
			goto err_nvm;
	}

	rt->nvm = nvm;
	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	rt->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}
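
/*
 * Triggers NVM authentication in the retimer. With @auth_only set, only
 * the authentication command is issued (preceded by resetting the NVM
 * offset to 0); the image itself is expected to have been flushed
 * already. Once authentication starts the retimer usually drops off the
 * sideband, so a failing status read right after the command is treated
 * as success.
 */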
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the read below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
			break;
	}
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline, we need to keep sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
		if (usb4_port_retimer_unset_inbound_sbtx(port, i))
			break;
	}
}
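
/*
 * Expected user space flow for the nvm_authenticate attribute below
 * (paths are illustrative; the accepted values come from enum
 * tb_nvm_write_ops in tb.h, typically 1 = write and authenticate,
 * 2 = write only and 3 = authenticate only):
 *
 *	# dd if=retimer.bin of=.../<retimer>/nvm_non_active<id>/nvmem
 *	# echo 1 > .../<retimer>/nvm_authenticate
 *	# cat .../<retimer>/nvm_authenticate	(0 means success)
 */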
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts the retimer is not
		 * accessible so calling tb_retimer_unset_inbound_sbtx()
		 * will fail and therefore we do not call it. Exception
		 * is when the validation fails or we only write the new
		 * NVM image without authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (attr == &dev_attr_nvm_authenticate.attr ||
	    attr == &dev_attr_nvm_version.attr)
		return rt->no_nvm_upgrade ? 0 : attr->mode;

	return attr->mode;
}

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.is_visible = retimer_is_visible,
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

const struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};
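
/*
 * Retimer devices are registered on the Thunderbolt bus as children of
 * the USB4 port device and named "<router>:<port>.<index>". As an
 * illustrative example, the first retimer on port 3 of the host router
 * would typically appear as /sys/bus/thunderbolt/devices/0-0:3.1.
 */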
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status,
			  bool on_board)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_VENDOR_ID, &vendor, sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_PRODUCT_ID, &device, sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	/*
	 * Only support NVM upgrade for on-board retimers. Retimers on
	 * the other side of the connection (in the cable or the attached
	 * device) are not upgraded through this port.
	 */
	if (!on_board || usb4_port_retimer_nvm_sector_size(port, index) <= 0)
		rt->no_nvm_upgrade = true;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	tb_retimer_debugfs_init(rt);
	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_retimer_debugfs_remove(rt);
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}
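
/*
 * device_find_child() takes a reference on the matching device, so
 * callers of tb_port_find_retimer() must drop it with put_device()
 * when they are done with the retimer (as tb_retimer_scan() does
 * below).
 */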
struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, const void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed and
 * then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, max, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (max = 1, i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;

		max = i;
	}

	ret = 0;
	if (!IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING))
		max = min(last_idx, max);

	/* Add retimers if they do not exist already */
	for (i = 1; i <= max; i++) {
		struct tb_retimer *rt;

		/* Skip cable retimers */
		if (usb4_port_retimer_is_cable(port, i))
			continue;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i], i <= last_idx);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	tb_retimer_unset_inbound_sbtx(port);
	return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}
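
/*
 * Rough sketch of how a connection manager is expected to drive these
 * entry points (the actual call sites live in the software connection
 * manager, tb.c, and may differ in detail):
 *
 *	// while scanning a newly plugged USB4 port
 *	tb_retimer_scan(port, true);
 *
 *	// when the port is unplugged
 *	tb_retimer_remove_all(port);
 */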