// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	dev_dbg(&rt->dev, "NVM version %x.%x\n", nvm->major, nvm->minor);
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. The
	 * access below is expected to fail once authentication has
	 * started.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

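/*
 * The sysfs attributes below follow the same NVM upgrade ABI as
 * routers: the non-active NVM is exposed through an nvmem device and
 * the upgrade is triggered through nvm_authenticate. A rough,
 * illustrative userspace flow (device and file names here are examples
 * only; see Documentation/admin-guide/thunderbolt.rst and
 * Documentation/ABI/testing/sysfs-bus-thunderbolt for the
 * authoritative description):
 *
 *	# dd if=retimer_nvm.bin of=/sys/bus/thunderbolt/devices/0-0:1.1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-0:1.1/nvm_authenticate
 *	# cat /sys/bus/thunderbolt/devices/0-0:1.1/nvm_authenticate
 */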
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else if (rt->no_nvm_upgrade)
		ret = -EOPNOTSUPP;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		if (usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]))
			break;
	}
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When USB4 port is online sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When USB4 port is offline we need to keep the sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--) {
		if (usb4_port_retimer_unset_inbound_sbtx(port, i))
			break;
	}
}

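/*
 * Writing to nvm_authenticate selects what to do with the image
 * buffered through the non-active NVM device: WRITE_AND_AUTHENTICATE
 * flushes the image to the retimer NVM and starts authentication,
 * WRITE_ONLY only flushes the image, and AUTHENTICATE_ONLY starts
 * authentication of a previously written image. Any write also clears
 * the stored authentication status first.
 */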
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts, the retimer is no
		 * longer accessible, so calling
		 * tb_retimer_unset_inbound_sbtx() would fail and
		 * therefore we do not call it. The exceptions are when
		 * the validation fails or when we only write the new
		 * NVM image without authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

const struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

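/*
 * Reads the retimer vendor and product IDs over sideband, verifies
 * that the retimer supports NVM operations, and if so registers a new
 * retimer device under the USB4 port and enables runtime PM for it.
 */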
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	/*
	 * Check that it supports NVM operations. If not, don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	ret = 0;
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}