1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * USB4 specific functionality 4 * 5 * Copyright (C) 2019, Intel Corporation 6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> 7 * Rajmohan Mani <rajmohan.mani@intel.com> 8 */ 9 10 #include <linux/delay.h> 11 #include <linux/ktime.h> 12 #include <linux/units.h> 13 14 #include "sb_regs.h" 15 #include "tb.h" 16 17 #define USB4_DATA_RETRIES 3 18 #define USB4_DATA_DWORDS 16 19 20 enum usb4_sb_target { 21 USB4_SB_TARGET_ROUTER, 22 USB4_SB_TARGET_PARTNER, 23 USB4_SB_TARGET_RETIMER, 24 }; 25 26 #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) 27 #define USB4_NVM_READ_OFFSET_SHIFT 2 28 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) 29 #define USB4_NVM_READ_LENGTH_SHIFT 24 30 31 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK 32 #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT 33 34 #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) 35 #define USB4_DROM_ADDRESS_SHIFT 2 36 #define USB4_DROM_SIZE_MASK GENMASK(19, 15) 37 #define USB4_DROM_SIZE_SHIFT 15 38 39 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) 40 41 #define USB4_BA_LENGTH_MASK GENMASK(7, 0) 42 #define USB4_BA_INDEX_MASK GENMASK(15, 0) 43 44 enum usb4_ba_index { 45 USB4_BA_MAX_USB3 = 0x1, 46 USB4_BA_MIN_DP_AUX = 0x2, 47 USB4_BA_MIN_DP_MAIN = 0x3, 48 USB4_BA_MAX_PCIE = 0x4, 49 USB4_BA_MAX_HI = 0x5, 50 }; 51 52 #define USB4_BA_VALUE_MASK GENMASK(31, 16) 53 #define USB4_BA_VALUE_SHIFT 16 54 55 static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode, 56 u32 *metadata, u8 *status, 57 const void *tx_data, size_t tx_dwords, 58 void *rx_data, size_t rx_dwords) 59 { 60 u32 val; 61 int ret; 62 63 if (metadata) { 64 ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); 65 if (ret) 66 return ret; 67 } 68 if (tx_dwords) { 69 ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9, 70 tx_dwords); 71 if (ret) 72 return ret; 73 } 74 75 val = opcode | ROUTER_CS_26_OV; 76 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); 77 if (ret) 78 return ret; 79 80 ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); 81 if (ret) 82 return ret; 83 84 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); 85 if (ret) 86 return ret; 87 88 if (val & ROUTER_CS_26_ONS) 89 return -EOPNOTSUPP; 90 91 if (status) 92 *status = (val & ROUTER_CS_26_STATUS_MASK) >> 93 ROUTER_CS_26_STATUS_SHIFT; 94 95 if (metadata) { 96 ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); 97 if (ret) 98 return ret; 99 } 100 if (rx_dwords) { 101 ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9, 102 rx_dwords); 103 if (ret) 104 return ret; 105 } 106 107 return 0; 108 } 109 110 static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, 111 u8 *status, const void *tx_data, size_t tx_dwords, 112 void *rx_data, size_t rx_dwords) 113 { 114 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 115 116 if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS) 117 return -EINVAL; 118 119 /* 120 * If the connection manager implementation provides USB4 router 121 * operation proxy callback, call it here instead of running the 122 * operation natively. 123 */ 124 if (cm_ops->usb4_switch_op) { 125 int ret; 126 127 ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status, 128 tx_data, tx_dwords, rx_data, 129 rx_dwords); 130 if (ret != -EOPNOTSUPP) 131 return ret; 132 133 /* 134 * If the proxy was not supported then run the native 135 * router operation instead. 136 */ 137 } 138 139 return usb4_native_switch_op(sw, opcode, metadata, status, tx_data, 140 tx_dwords, rx_data, rx_dwords); 141 } 142 143 static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode, 144 u32 *metadata, u8 *status) 145 { 146 return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0); 147 } 148 149 static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode, 150 u32 *metadata, u8 *status, 151 const void *tx_data, size_t tx_dwords, 152 void *rx_data, size_t rx_dwords) 153 { 154 return __usb4_switch_op(sw, opcode, metadata, status, tx_data, 155 tx_dwords, rx_data, rx_dwords); 156 } 157 158 static void usb4_switch_check_wakes(struct tb_switch *sw) 159 { 160 bool wakeup_usb4 = false; 161 struct usb4_port *usb4; 162 struct tb_port *port; 163 bool wakeup = false; 164 u32 val; 165 166 if (!device_may_wakeup(&sw->dev)) 167 return; 168 169 if (tb_route(sw)) { 170 if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1)) 171 return; 172 173 tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n", 174 (val & ROUTER_CS_6_WOPS) ? "yes" : "no", 175 (val & ROUTER_CS_6_WOUS) ? "yes" : "no"); 176 177 wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS); 178 } 179 180 /* 181 * Check for any downstream ports for USB4 wake, 182 * connection wake and disconnection wake. 183 */ 184 tb_switch_for_each_port(sw, port) { 185 if (!port->cap_usb4) 186 continue; 187 188 if (tb_port_read(port, &val, TB_CFG_PORT, 189 port->cap_usb4 + PORT_CS_18, 1)) 190 break; 191 192 tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n", 193 (val & PORT_CS_18_WOU4S) ? "yes" : "no", 194 (val & PORT_CS_18_WOCS) ? "yes" : "no", 195 (val & PORT_CS_18_WODS) ? "yes" : "no"); 196 197 wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS | 198 PORT_CS_18_WODS); 199 200 usb4 = port->usb4; 201 if (device_may_wakeup(&usb4->dev) && wakeup_usb4) 202 pm_wakeup_event(&usb4->dev, 0); 203 204 wakeup |= wakeup_usb4; 205 } 206 207 if (wakeup) 208 pm_wakeup_event(&sw->dev, 0); 209 } 210 211 static bool link_is_usb4(struct tb_port *port) 212 { 213 u32 val; 214 215 if (!port->cap_usb4) 216 return false; 217 218 if (tb_port_read(port, &val, TB_CFG_PORT, 219 port->cap_usb4 + PORT_CS_18, 1)) 220 return false; 221 222 return !(val & PORT_CS_18_TCM); 223 } 224 225 /** 226 * usb4_switch_setup() - Additional setup for USB4 device 227 * @sw: USB4 router to setup 228 * 229 * USB4 routers need additional settings in order to enable all the 230 * tunneling. This function enables USB and PCIe tunneling if it can be 231 * enabled (e.g the parent switch also supports them). If USB tunneling 232 * is not available for some reason (like that there is Thunderbolt 3 233 * switch upstream) then the internal xHCI controller is enabled 234 * instead. 235 * 236 * This does not set the configuration valid bit of the router. To do 237 * that call usb4_switch_configuration_valid(). 238 */ 239 int usb4_switch_setup(struct tb_switch *sw) 240 { 241 struct tb_switch *parent = tb_switch_parent(sw); 242 struct tb_port *down; 243 bool tbt3, xhci; 244 u32 val = 0; 245 int ret; 246 247 usb4_switch_check_wakes(sw); 248 249 if (!tb_route(sw)) 250 return 0; 251 252 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); 253 if (ret) 254 return ret; 255 256 down = tb_switch_downstream_port(sw); 257 sw->link_usb4 = link_is_usb4(down); 258 tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT"); 259 260 xhci = val & ROUTER_CS_6_HCI; 261 tbt3 = !(val & ROUTER_CS_6_TNS); 262 263 tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", 264 tbt3 ? "yes" : "no", xhci ? "yes" : "no"); 265 266 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 267 if (ret) 268 return ret; 269 270 if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 && 271 tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { 272 val |= ROUTER_CS_5_UTO; 273 xhci = false; 274 } 275 276 /* 277 * Only enable PCIe tunneling if the parent router supports it 278 * and it is not disabled. 279 */ 280 if (tb_acpi_may_tunnel_pcie() && 281 tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { 282 val |= ROUTER_CS_5_PTO; 283 /* 284 * xHCI can be enabled if PCIe tunneling is supported 285 * and the parent does not have any USB3 dowstream 286 * adapters (so we cannot do USB 3.x tunneling). 287 */ 288 if (xhci) 289 val |= ROUTER_CS_5_HCO; 290 } 291 292 /* TBT3 supported by the CM */ 293 val &= ~ROUTER_CS_5_CNS; 294 295 return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 296 } 297 298 /** 299 * usb4_switch_configuration_valid() - Set tunneling configuration to be valid 300 * @sw: USB4 router 301 * 302 * Sets configuration valid bit for the router. Must be called before 303 * any tunnels can be set through the router and after 304 * usb4_switch_setup() has been called. Can be called to host and device 305 * routers (does nothing for the latter). 306 * 307 * Returns %0 in success and negative errno otherwise. 308 */ 309 int usb4_switch_configuration_valid(struct tb_switch *sw) 310 { 311 u32 val; 312 int ret; 313 314 if (!tb_route(sw)) 315 return 0; 316 317 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 318 if (ret) 319 return ret; 320 321 val |= ROUTER_CS_5_CV; 322 323 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 324 if (ret) 325 return ret; 326 327 return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, 328 ROUTER_CS_6_CR, 50); 329 } 330 331 /** 332 * usb4_switch_read_uid() - Read UID from USB4 router 333 * @sw: USB4 router 334 * @uid: UID is stored here 335 * 336 * Reads 64-bit UID from USB4 router config space. 337 */ 338 int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) 339 { 340 return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); 341 } 342 343 static int usb4_switch_drom_read_block(void *data, 344 unsigned int dwaddress, void *buf, 345 size_t dwords) 346 { 347 struct tb_switch *sw = data; 348 u8 status = 0; 349 u32 metadata; 350 int ret; 351 352 metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; 353 metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & 354 USB4_DROM_ADDRESS_MASK; 355 356 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata, 357 &status, NULL, 0, buf, dwords); 358 if (ret) 359 return ret; 360 361 return status ? -EIO : 0; 362 } 363 364 /** 365 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM 366 * @sw: USB4 router 367 * @address: Byte address inside DROM to start reading 368 * @buf: Buffer where the DROM content is stored 369 * @size: Number of bytes to read from DROM 370 * 371 * Uses USB4 router operations to read router DROM. For devices this 372 * should always work but for hosts it may return %-EOPNOTSUPP in which 373 * case the host router does not have DROM. 374 */ 375 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, 376 size_t size) 377 { 378 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, 379 usb4_switch_drom_read_block, sw); 380 } 381 382 /** 383 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding 384 * @sw: USB4 router 385 * 386 * Checks whether conditions are met so that lane bonding can be 387 * established with the upstream router. Call only for device routers. 388 */ 389 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) 390 { 391 struct tb_port *up; 392 int ret; 393 u32 val; 394 395 up = tb_upstream_port(sw); 396 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); 397 if (ret) 398 return false; 399 400 return !!(val & PORT_CS_18_BE); 401 } 402 403 /** 404 * usb4_switch_set_wake() - Enabled/disable wake 405 * @sw: USB4 router 406 * @flags: Wakeup flags (%0 to disable) 407 * 408 * Enables/disables router to wake up from sleep. 409 */ 410 int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) 411 { 412 struct usb4_port *usb4; 413 struct tb_port *port; 414 u64 route = tb_route(sw); 415 u32 val; 416 int ret; 417 418 /* 419 * Enable wakes coming from all USB4 downstream ports (from 420 * child routers). For device routers do this also for the 421 * upstream USB4 port. 422 */ 423 tb_switch_for_each_port(sw, port) { 424 if (!tb_port_is_null(port)) 425 continue; 426 if (!route && tb_is_upstream_port(port)) 427 continue; 428 if (!port->cap_usb4) 429 continue; 430 431 ret = tb_port_read(port, &val, TB_CFG_PORT, 432 port->cap_usb4 + PORT_CS_19, 1); 433 if (ret) 434 return ret; 435 436 val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4); 437 438 if (tb_is_upstream_port(port)) { 439 val |= PORT_CS_19_WOU4; 440 } else { 441 bool configured = val & PORT_CS_19_PC; 442 usb4 = port->usb4; 443 444 if (((flags & TB_WAKE_ON_CONNECT) | 445 device_may_wakeup(&usb4->dev)) && !configured) 446 val |= PORT_CS_19_WOC; 447 if (((flags & TB_WAKE_ON_DISCONNECT) | 448 device_may_wakeup(&usb4->dev)) && configured) 449 val |= PORT_CS_19_WOD; 450 if ((flags & TB_WAKE_ON_USB4) && configured) 451 val |= PORT_CS_19_WOU4; 452 } 453 454 ret = tb_port_write(port, &val, TB_CFG_PORT, 455 port->cap_usb4 + PORT_CS_19, 1); 456 if (ret) 457 return ret; 458 } 459 460 /* 461 * Enable wakes from PCIe, USB 3.x and DP on this router. Only 462 * needed for device routers. 463 */ 464 if (route) { 465 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 466 if (ret) 467 return ret; 468 469 val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD); 470 if (flags & TB_WAKE_ON_USB3) 471 val |= ROUTER_CS_5_WOU; 472 if (flags & TB_WAKE_ON_PCIE) 473 val |= ROUTER_CS_5_WOP; 474 if (flags & TB_WAKE_ON_DP) 475 val |= ROUTER_CS_5_WOD; 476 477 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 478 if (ret) 479 return ret; 480 } 481 482 return 0; 483 } 484 485 /** 486 * usb4_switch_set_sleep() - Prepare the router to enter sleep 487 * @sw: USB4 router 488 * 489 * Sets sleep bit for the router. Returns when the router sleep ready 490 * bit has been asserted. 491 */ 492 int usb4_switch_set_sleep(struct tb_switch *sw) 493 { 494 int ret; 495 u32 val; 496 497 /* Set sleep bit and wait for sleep ready to be asserted */ 498 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 499 if (ret) 500 return ret; 501 502 val |= ROUTER_CS_5_SLP; 503 504 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); 505 if (ret) 506 return ret; 507 508 return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, 509 ROUTER_CS_6_SLPR, 500); 510 } 511 512 /** 513 * usb4_switch_nvm_sector_size() - Return router NVM sector size 514 * @sw: USB4 router 515 * 516 * If the router supports NVM operations this function returns the NVM 517 * sector size in bytes. If NVM operations are not supported returns 518 * %-EOPNOTSUPP. 519 */ 520 int usb4_switch_nvm_sector_size(struct tb_switch *sw) 521 { 522 u32 metadata; 523 u8 status; 524 int ret; 525 526 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata, 527 &status); 528 if (ret) 529 return ret; 530 531 if (status) 532 return status == 0x2 ? -EOPNOTSUPP : -EIO; 533 534 return metadata & USB4_NVM_SECTOR_SIZE_MASK; 535 } 536 537 static int usb4_switch_nvm_read_block(void *data, 538 unsigned int dwaddress, void *buf, size_t dwords) 539 { 540 struct tb_switch *sw = data; 541 u8 status = 0; 542 u32 metadata; 543 int ret; 544 545 metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & 546 USB4_NVM_READ_LENGTH_MASK; 547 metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & 548 USB4_NVM_READ_OFFSET_MASK; 549 550 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata, 551 &status, NULL, 0, buf, dwords); 552 if (ret) 553 return ret; 554 555 return status ? -EIO : 0; 556 } 557 558 /** 559 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM 560 * @sw: USB4 router 561 * @address: Starting address in bytes 562 * @buf: Read data is placed here 563 * @size: How many bytes to read 564 * 565 * Reads NVM contents of the router. If NVM is not supported returns 566 * %-EOPNOTSUPP. 567 */ 568 int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, 569 size_t size) 570 { 571 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, 572 usb4_switch_nvm_read_block, sw); 573 } 574 575 /** 576 * usb4_switch_nvm_set_offset() - Set NVM write offset 577 * @sw: USB4 router 578 * @address: Start offset 579 * 580 * Explicitly sets NVM write offset. Normally when writing to NVM this 581 * is done automatically by usb4_switch_nvm_write(). 582 * 583 * Returns %0 in success and negative errno if there was a failure. 584 */ 585 int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address) 586 { 587 u32 metadata, dwaddress; 588 u8 status = 0; 589 int ret; 590 591 dwaddress = address / 4; 592 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & 593 USB4_NVM_SET_OFFSET_MASK; 594 595 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata, 596 &status); 597 if (ret) 598 return ret; 599 600 return status ? -EIO : 0; 601 } 602 603 static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress, 604 const void *buf, size_t dwords) 605 { 606 struct tb_switch *sw = data; 607 u8 status; 608 int ret; 609 610 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status, 611 buf, dwords, NULL, 0); 612 if (ret) 613 return ret; 614 615 return status ? -EIO : 0; 616 } 617 618 /** 619 * usb4_switch_nvm_write() - Write to the router NVM 620 * @sw: USB4 router 621 * @address: Start address where to write in bytes 622 * @buf: Pointer to the data to write 623 * @size: Size of @buf in bytes 624 * 625 * Writes @buf to the router NVM using USB4 router operations. If NVM 626 * write is not supported returns %-EOPNOTSUPP. 627 */ 628 int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, 629 const void *buf, size_t size) 630 { 631 int ret; 632 633 ret = usb4_switch_nvm_set_offset(sw, address); 634 if (ret) 635 return ret; 636 637 return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, 638 usb4_switch_nvm_write_next_block, sw); 639 } 640 641 /** 642 * usb4_switch_nvm_authenticate() - Authenticate new NVM 643 * @sw: USB4 router 644 * 645 * After the new NVM has been written via usb4_switch_nvm_write(), this 646 * function triggers NVM authentication process. The router gets power 647 * cycled and if the authentication is successful the new NVM starts 648 * running. In case of failure returns negative errno. 649 * 650 * The caller should call usb4_switch_nvm_authenticate_status() to read 651 * the status of the authentication after power cycle. It should be the 652 * first router operation to avoid the status being lost. 653 */ 654 int usb4_switch_nvm_authenticate(struct tb_switch *sw) 655 { 656 int ret; 657 658 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL); 659 switch (ret) { 660 /* 661 * The router is power cycled once NVM_AUTH is started so it is 662 * expected to get any of the following errors back. 663 */ 664 case -EACCES: 665 case -ENOTCONN: 666 case -ETIMEDOUT: 667 return 0; 668 669 default: 670 return ret; 671 } 672 } 673 674 /** 675 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate 676 * @sw: USB4 router 677 * @status: Status code of the operation 678 * 679 * The function checks if there is status available from the last NVM 680 * authenticate router operation. If there is status then %0 is returned 681 * and the status code is placed in @status. Returns negative errno in case 682 * of failure. 683 * 684 * Must be called before any other router operation. 685 */ 686 int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status) 687 { 688 const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; 689 u16 opcode; 690 u32 val; 691 int ret; 692 693 if (cm_ops->usb4_switch_nvm_authenticate_status) { 694 ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status); 695 if (ret != -EOPNOTSUPP) 696 return ret; 697 } 698 699 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); 700 if (ret) 701 return ret; 702 703 /* Check that the opcode is correct */ 704 opcode = val & ROUTER_CS_26_OPCODE_MASK; 705 if (opcode == USB4_SWITCH_OP_NVM_AUTH) { 706 if (val & ROUTER_CS_26_OV) 707 return -EBUSY; 708 if (val & ROUTER_CS_26_ONS) 709 return -EOPNOTSUPP; 710 711 *status = (val & ROUTER_CS_26_STATUS_MASK) >> 712 ROUTER_CS_26_STATUS_SHIFT; 713 } else { 714 *status = 0; 715 } 716 717 return 0; 718 } 719 720 /** 721 * usb4_switch_credits_init() - Read buffer allocation parameters 722 * @sw: USB4 router 723 * 724 * Reads @sw buffer allocation parameters and initializes @sw buffer 725 * allocation fields accordingly. Specifically @sw->credits_allocation 726 * is set to %true if these parameters can be used in tunneling. 727 * 728 * Returns %0 on success and negative errno otherwise. 729 */ 730 int usb4_switch_credits_init(struct tb_switch *sw) 731 { 732 int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma; 733 int ret, length, i, nports; 734 const struct tb_port *port; 735 u32 data[USB4_DATA_DWORDS]; 736 u32 metadata = 0; 737 u8 status = 0; 738 739 memset(data, 0, sizeof(data)); 740 ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata, 741 &status, NULL, 0, data, ARRAY_SIZE(data)); 742 if (ret) 743 return ret; 744 if (status) 745 return -EIO; 746 747 length = metadata & USB4_BA_LENGTH_MASK; 748 if (WARN_ON(length > ARRAY_SIZE(data))) 749 return -EMSGSIZE; 750 751 max_usb3 = -1; 752 min_dp_aux = -1; 753 min_dp_main = -1; 754 max_pcie = -1; 755 max_dma = -1; 756 757 tb_sw_dbg(sw, "credit allocation parameters:\n"); 758 759 for (i = 0; i < length; i++) { 760 u16 index, value; 761 762 index = data[i] & USB4_BA_INDEX_MASK; 763 value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT; 764 765 switch (index) { 766 case USB4_BA_MAX_USB3: 767 tb_sw_dbg(sw, " USB3: %u\n", value); 768 max_usb3 = value; 769 break; 770 case USB4_BA_MIN_DP_AUX: 771 tb_sw_dbg(sw, " DP AUX: %u\n", value); 772 min_dp_aux = value; 773 break; 774 case USB4_BA_MIN_DP_MAIN: 775 tb_sw_dbg(sw, " DP main: %u\n", value); 776 min_dp_main = value; 777 break; 778 case USB4_BA_MAX_PCIE: 779 tb_sw_dbg(sw, " PCIe: %u\n", value); 780 max_pcie = value; 781 break; 782 case USB4_BA_MAX_HI: 783 tb_sw_dbg(sw, " DMA: %u\n", value); 784 max_dma = value; 785 break; 786 default: 787 tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n", 788 index); 789 break; 790 } 791 } 792 793 /* 794 * Validate the buffer allocation preferences. If we find 795 * issues, log a warning and fall back using the hard-coded 796 * values. 797 */ 798 799 /* Host router must report baMaxHI */ 800 if (!tb_route(sw) && max_dma < 0) { 801 tb_sw_warn(sw, "host router is missing baMaxHI\n"); 802 goto err_invalid; 803 } 804 805 nports = 0; 806 tb_switch_for_each_port(sw, port) { 807 if (tb_port_is_null(port)) 808 nports++; 809 } 810 811 /* Must have DP buffer allocation (multiple USB4 ports) */ 812 if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) { 813 tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n"); 814 goto err_invalid; 815 } 816 817 tb_switch_for_each_port(sw, port) { 818 if (tb_port_is_dpout(port) && min_dp_main < 0) { 819 tb_sw_warn(sw, "missing baMinDPmain"); 820 goto err_invalid; 821 } 822 if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) && 823 min_dp_aux < 0) { 824 tb_sw_warn(sw, "missing baMinDPaux"); 825 goto err_invalid; 826 } 827 if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) && 828 max_usb3 < 0) { 829 tb_sw_warn(sw, "missing baMaxUSB3"); 830 goto err_invalid; 831 } 832 if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) && 833 max_pcie < 0) { 834 tb_sw_warn(sw, "missing baMaxPCIe"); 835 goto err_invalid; 836 } 837 } 838 839 /* 840 * Buffer allocation passed the validation so we can use it in 841 * path creation. 842 */ 843 sw->credit_allocation = true; 844 if (max_usb3 > 0) 845 sw->max_usb3_credits = max_usb3; 846 if (min_dp_aux > 0) 847 sw->min_dp_aux_credits = min_dp_aux; 848 if (min_dp_main > 0) 849 sw->min_dp_main_credits = min_dp_main; 850 if (max_pcie > 0) 851 sw->max_pcie_credits = max_pcie; 852 if (max_dma > 0) 853 sw->max_dma_credits = max_dma; 854 855 return 0; 856 857 err_invalid: 858 return -EINVAL; 859 } 860 861 /** 862 * usb4_switch_query_dp_resource() - Query availability of DP IN resource 863 * @sw: USB4 router 864 * @in: DP IN adapter 865 * 866 * For DP tunneling this function can be used to query availability of 867 * DP IN resource. Returns true if the resource is available for DP 868 * tunneling, false otherwise. 869 */ 870 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) 871 { 872 u32 metadata = in->port; 873 u8 status; 874 int ret; 875 876 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata, 877 &status); 878 /* 879 * If DP resource allocation is not supported assume it is 880 * always available. 881 */ 882 if (ret == -EOPNOTSUPP) 883 return true; 884 if (ret) 885 return false; 886 887 return !status; 888 } 889 890 /** 891 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource 892 * @sw: USB4 router 893 * @in: DP IN adapter 894 * 895 * Allocates DP IN resource for DP tunneling using USB4 router 896 * operations. If the resource was allocated returns %0. Otherwise 897 * returns negative errno, in particular %-EBUSY if the resource is 898 * already allocated. 899 */ 900 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 901 { 902 u32 metadata = in->port; 903 u8 status; 904 int ret; 905 906 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata, 907 &status); 908 if (ret == -EOPNOTSUPP) 909 return 0; 910 if (ret) 911 return ret; 912 913 return status ? -EBUSY : 0; 914 } 915 916 /** 917 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource 918 * @sw: USB4 router 919 * @in: DP IN adapter 920 * 921 * Releases the previously allocated DP IN resource. 922 */ 923 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) 924 { 925 u32 metadata = in->port; 926 u8 status; 927 int ret; 928 929 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata, 930 &status); 931 if (ret == -EOPNOTSUPP) 932 return 0; 933 if (ret) 934 return ret; 935 936 return status ? -EIO : 0; 937 } 938 939 static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) 940 { 941 struct tb_port *p; 942 int usb4_idx = 0; 943 944 /* Assume port is primary */ 945 tb_switch_for_each_port(sw, p) { 946 if (!tb_port_is_null(p)) 947 continue; 948 if (tb_is_upstream_port(p)) 949 continue; 950 if (!p->link_nr) { 951 if (p == port) 952 break; 953 usb4_idx++; 954 } 955 } 956 957 return usb4_idx; 958 } 959 960 /** 961 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter 962 * @sw: USB4 router 963 * @port: USB4 port 964 * 965 * USB4 routers have direct mapping between USB4 ports and PCIe 966 * downstream adapters where the PCIe topology is extended. This 967 * function returns the corresponding downstream PCIe adapter or %NULL 968 * if no such mapping was possible. 969 */ 970 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, 971 const struct tb_port *port) 972 { 973 int usb4_idx = usb4_port_idx(sw, port); 974 struct tb_port *p; 975 int pcie_idx = 0; 976 977 /* Find PCIe down port matching usb4_port */ 978 tb_switch_for_each_port(sw, p) { 979 if (!tb_port_is_pcie_down(p)) 980 continue; 981 982 if (pcie_idx == usb4_idx) 983 return p; 984 985 pcie_idx++; 986 } 987 988 return NULL; 989 } 990 991 /** 992 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter 993 * @sw: USB4 router 994 * @port: USB4 port 995 * 996 * USB4 routers have direct mapping between USB4 ports and USB 3.x 997 * downstream adapters where the USB 3.x topology is extended. This 998 * function returns the corresponding downstream USB 3.x adapter or 999 * %NULL if no such mapping was possible. 1000 */ 1001 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, 1002 const struct tb_port *port) 1003 { 1004 int usb4_idx = usb4_port_idx(sw, port); 1005 struct tb_port *p; 1006 int usb_idx = 0; 1007 1008 /* Find USB3 down port matching usb4_port */ 1009 tb_switch_for_each_port(sw, p) { 1010 if (!tb_port_is_usb3_down(p)) 1011 continue; 1012 1013 if (usb_idx == usb4_idx) 1014 return p; 1015 1016 usb_idx++; 1017 } 1018 1019 return NULL; 1020 } 1021 1022 /** 1023 * usb4_switch_add_ports() - Add USB4 ports for this router 1024 * @sw: USB4 router 1025 * 1026 * For USB4 router finds all USB4 ports and registers devices for each. 1027 * Can be called to any router. 1028 * 1029 * Return %0 in case of success and negative errno in case of failure. 1030 */ 1031 int usb4_switch_add_ports(struct tb_switch *sw) 1032 { 1033 struct tb_port *port; 1034 1035 if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw)) 1036 return 0; 1037 1038 tb_switch_for_each_port(sw, port) { 1039 struct usb4_port *usb4; 1040 1041 if (!tb_port_is_null(port)) 1042 continue; 1043 if (!port->cap_usb4) 1044 continue; 1045 1046 usb4 = usb4_port_device_add(port); 1047 if (IS_ERR(usb4)) { 1048 usb4_switch_remove_ports(sw); 1049 return PTR_ERR(usb4); 1050 } 1051 1052 port->usb4 = usb4; 1053 } 1054 1055 return 0; 1056 } 1057 1058 /** 1059 * usb4_switch_remove_ports() - Removes USB4 ports from this router 1060 * @sw: USB4 router 1061 * 1062 * Unregisters previously registered USB4 ports. 1063 */ 1064 void usb4_switch_remove_ports(struct tb_switch *sw) 1065 { 1066 struct tb_port *port; 1067 1068 tb_switch_for_each_port(sw, port) { 1069 if (port->usb4) { 1070 usb4_port_device_remove(port->usb4); 1071 port->usb4 = NULL; 1072 } 1073 } 1074 } 1075 1076 /** 1077 * usb4_port_unlock() - Unlock USB4 downstream port 1078 * @port: USB4 port to unlock 1079 * 1080 * Unlocks USB4 downstream port so that the connection manager can 1081 * access the router below this port. 1082 */ 1083 int usb4_port_unlock(struct tb_port *port) 1084 { 1085 int ret; 1086 u32 val; 1087 1088 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); 1089 if (ret) 1090 return ret; 1091 1092 val &= ~ADP_CS_4_LCK; 1093 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); 1094 } 1095 1096 /** 1097 * usb4_port_hotplug_enable() - Enables hotplug for a port 1098 * @port: USB4 port to operate on 1099 * 1100 * Enables hot plug events on a given port. This is only intended 1101 * to be used on lane, DP-IN, and DP-OUT adapters. 1102 */ 1103 int usb4_port_hotplug_enable(struct tb_port *port) 1104 { 1105 int ret; 1106 u32 val; 1107 1108 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); 1109 if (ret) 1110 return ret; 1111 1112 val &= ~ADP_CS_5_DHP; 1113 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); 1114 } 1115 1116 /** 1117 * usb4_port_reset() - Issue downstream port reset 1118 * @port: USB4 port to reset 1119 * 1120 * Issues downstream port reset to @port. 1121 */ 1122 int usb4_port_reset(struct tb_port *port) 1123 { 1124 int ret; 1125 u32 val; 1126 1127 if (!port->cap_usb4) 1128 return -EINVAL; 1129 1130 ret = tb_port_read(port, &val, TB_CFG_PORT, 1131 port->cap_usb4 + PORT_CS_19, 1); 1132 if (ret) 1133 return ret; 1134 1135 val |= PORT_CS_19_DPR; 1136 1137 ret = tb_port_write(port, &val, TB_CFG_PORT, 1138 port->cap_usb4 + PORT_CS_19, 1); 1139 if (ret) 1140 return ret; 1141 1142 fsleep(10000); 1143 1144 ret = tb_port_read(port, &val, TB_CFG_PORT, 1145 port->cap_usb4 + PORT_CS_19, 1); 1146 if (ret) 1147 return ret; 1148 1149 val &= ~PORT_CS_19_DPR; 1150 1151 return tb_port_write(port, &val, TB_CFG_PORT, 1152 port->cap_usb4 + PORT_CS_19, 1); 1153 } 1154 1155 static int usb4_port_set_configured(struct tb_port *port, bool configured) 1156 { 1157 int ret; 1158 u32 val; 1159 1160 if (!port->cap_usb4) 1161 return -EINVAL; 1162 1163 ret = tb_port_read(port, &val, TB_CFG_PORT, 1164 port->cap_usb4 + PORT_CS_19, 1); 1165 if (ret) 1166 return ret; 1167 1168 if (configured) 1169 val |= PORT_CS_19_PC; 1170 else 1171 val &= ~PORT_CS_19_PC; 1172 1173 return tb_port_write(port, &val, TB_CFG_PORT, 1174 port->cap_usb4 + PORT_CS_19, 1); 1175 } 1176 1177 /** 1178 * usb4_port_configure() - Set USB4 port configured 1179 * @port: USB4 router 1180 * 1181 * Sets the USB4 link to be configured for power management purposes. 1182 */ 1183 int usb4_port_configure(struct tb_port *port) 1184 { 1185 return usb4_port_set_configured(port, true); 1186 } 1187 1188 /** 1189 * usb4_port_unconfigure() - Set USB4 port unconfigured 1190 * @port: USB4 router 1191 * 1192 * Sets the USB4 link to be unconfigured for power management purposes. 1193 */ 1194 void usb4_port_unconfigure(struct tb_port *port) 1195 { 1196 usb4_port_set_configured(port, false); 1197 } 1198 1199 static int usb4_set_xdomain_configured(struct tb_port *port, bool configured) 1200 { 1201 int ret; 1202 u32 val; 1203 1204 if (!port->cap_usb4) 1205 return -EINVAL; 1206 1207 ret = tb_port_read(port, &val, TB_CFG_PORT, 1208 port->cap_usb4 + PORT_CS_19, 1); 1209 if (ret) 1210 return ret; 1211 1212 if (configured) 1213 val |= PORT_CS_19_PID; 1214 else 1215 val &= ~PORT_CS_19_PID; 1216 1217 return tb_port_write(port, &val, TB_CFG_PORT, 1218 port->cap_usb4 + PORT_CS_19, 1); 1219 } 1220 1221 /** 1222 * usb4_port_configure_xdomain() - Configure port for XDomain 1223 * @port: USB4 port connected to another host 1224 * @xd: XDomain that is connected to the port 1225 * 1226 * Marks the USB4 port as being connected to another host and updates 1227 * the link type. Returns %0 in success and negative errno in failure. 1228 */ 1229 int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) 1230 { 1231 xd->link_usb4 = link_is_usb4(port); 1232 return usb4_set_xdomain_configured(port, true); 1233 } 1234 1235 /** 1236 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain 1237 * @port: USB4 port that was connected to another host 1238 * 1239 * Clears USB4 port from being marked as XDomain. 1240 */ 1241 void usb4_port_unconfigure_xdomain(struct tb_port *port) 1242 { 1243 usb4_set_xdomain_configured(port, false); 1244 } 1245 1246 static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit, 1247 u32 value, int timeout_msec) 1248 { 1249 ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); 1250 1251 do { 1252 u32 val; 1253 int ret; 1254 1255 ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1); 1256 if (ret) 1257 return ret; 1258 1259 if ((val & bit) == value) 1260 return 0; 1261 1262 usleep_range(50, 100); 1263 } while (ktime_before(ktime_get(), timeout)); 1264 1265 return -ETIMEDOUT; 1266 } 1267 1268 static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords) 1269 { 1270 if (dwords > USB4_DATA_DWORDS) 1271 return -EINVAL; 1272 1273 return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, 1274 dwords); 1275 } 1276 1277 static int usb4_port_write_data(struct tb_port *port, const void *data, 1278 size_t dwords) 1279 { 1280 if (dwords > USB4_DATA_DWORDS) 1281 return -EINVAL; 1282 1283 return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, 1284 dwords); 1285 } 1286 1287 static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, 1288 u8 index, u8 reg, void *buf, u8 size) 1289 { 1290 size_t dwords = DIV_ROUND_UP(size, 4); 1291 int ret; 1292 u32 val; 1293 1294 if (!port->cap_usb4) 1295 return -EINVAL; 1296 1297 val = reg; 1298 val |= size << PORT_CS_1_LENGTH_SHIFT; 1299 val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; 1300 if (target == USB4_SB_TARGET_RETIMER) 1301 val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); 1302 val |= PORT_CS_1_PND; 1303 1304 ret = tb_port_write(port, &val, TB_CFG_PORT, 1305 port->cap_usb4 + PORT_CS_1, 1); 1306 if (ret) 1307 return ret; 1308 1309 ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, 1310 PORT_CS_1_PND, 0, 500); 1311 if (ret) 1312 return ret; 1313 1314 ret = tb_port_read(port, &val, TB_CFG_PORT, 1315 port->cap_usb4 + PORT_CS_1, 1); 1316 if (ret) 1317 return ret; 1318 1319 if (val & PORT_CS_1_NR) 1320 return -ENODEV; 1321 if (val & PORT_CS_1_RC) 1322 return -EIO; 1323 1324 return buf ? usb4_port_read_data(port, buf, dwords) : 0; 1325 } 1326 1327 static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, 1328 u8 index, u8 reg, const void *buf, u8 size) 1329 { 1330 size_t dwords = DIV_ROUND_UP(size, 4); 1331 int ret; 1332 u32 val; 1333 1334 if (!port->cap_usb4) 1335 return -EINVAL; 1336 1337 if (buf) { 1338 ret = usb4_port_write_data(port, buf, dwords); 1339 if (ret) 1340 return ret; 1341 } 1342 1343 val = reg; 1344 val |= size << PORT_CS_1_LENGTH_SHIFT; 1345 val |= PORT_CS_1_WNR_WRITE; 1346 val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; 1347 if (target == USB4_SB_TARGET_RETIMER) 1348 val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); 1349 val |= PORT_CS_1_PND; 1350 1351 ret = tb_port_write(port, &val, TB_CFG_PORT, 1352 port->cap_usb4 + PORT_CS_1, 1); 1353 if (ret) 1354 return ret; 1355 1356 ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, 1357 PORT_CS_1_PND, 0, 500); 1358 if (ret) 1359 return ret; 1360 1361 ret = tb_port_read(port, &val, TB_CFG_PORT, 1362 port->cap_usb4 + PORT_CS_1, 1); 1363 if (ret) 1364 return ret; 1365 1366 if (val & PORT_CS_1_NR) 1367 return -ENODEV; 1368 if (val & PORT_CS_1_RC) 1369 return -EIO; 1370 1371 return 0; 1372 } 1373 1374 static int usb4_port_sb_opcode_err_to_errno(u32 val) 1375 { 1376 switch (val) { 1377 case 0: 1378 return 0; 1379 case USB4_SB_OPCODE_ERR: 1380 return -EAGAIN; 1381 case USB4_SB_OPCODE_ONS: 1382 return -EOPNOTSUPP; 1383 default: 1384 return -EIO; 1385 } 1386 } 1387 1388 static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target, 1389 u8 index, enum usb4_sb_opcode opcode, int timeout_msec) 1390 { 1391 ktime_t timeout; 1392 u32 val; 1393 int ret; 1394 1395 val = opcode; 1396 ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val, 1397 sizeof(val)); 1398 if (ret) 1399 return ret; 1400 1401 timeout = ktime_add_ms(ktime_get(), timeout_msec); 1402 1403 do { 1404 /* Check results */ 1405 ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE, 1406 &val, sizeof(val)); 1407 if (ret) 1408 return ret; 1409 1410 if (val != opcode) 1411 return usb4_port_sb_opcode_err_to_errno(val); 1412 } while (ktime_before(ktime_get(), timeout)); 1413 1414 return -ETIMEDOUT; 1415 } 1416 1417 static int usb4_port_set_router_offline(struct tb_port *port, bool offline) 1418 { 1419 u32 val = !offline; 1420 int ret; 1421 1422 ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, 1423 USB4_SB_METADATA, &val, sizeof(val)); 1424 if (ret) 1425 return ret; 1426 1427 val = USB4_SB_OPCODE_ROUTER_OFFLINE; 1428 return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, 1429 USB4_SB_OPCODE, &val, sizeof(val)); 1430 } 1431 1432 /** 1433 * usb4_port_router_offline() - Put the USB4 port to offline mode 1434 * @port: USB4 port 1435 * 1436 * This function puts the USB4 port into offline mode. In this mode the 1437 * port does not react on hotplug events anymore. This needs to be 1438 * called before retimer access is done when the USB4 links is not up. 1439 * 1440 * Returns %0 in case of success and negative errno if there was an 1441 * error. 1442 */ 1443 int usb4_port_router_offline(struct tb_port *port) 1444 { 1445 return usb4_port_set_router_offline(port, true); 1446 } 1447 1448 /** 1449 * usb4_port_router_online() - Put the USB4 port back to online 1450 * @port: USB4 port 1451 * 1452 * Makes the USB4 port functional again. 1453 */ 1454 int usb4_port_router_online(struct tb_port *port) 1455 { 1456 return usb4_port_set_router_offline(port, false); 1457 } 1458 1459 /** 1460 * usb4_port_enumerate_retimers() - Send RT broadcast transaction 1461 * @port: USB4 port 1462 * 1463 * This forces the USB4 port to send broadcast RT transaction which 1464 * makes the retimers on the link to assign index to themselves. Returns 1465 * %0 in case of success and negative errno if there was an error. 1466 */ 1467 int usb4_port_enumerate_retimers(struct tb_port *port) 1468 { 1469 u32 val; 1470 1471 val = USB4_SB_OPCODE_ENUMERATE_RETIMERS; 1472 return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, 1473 USB4_SB_OPCODE, &val, sizeof(val)); 1474 } 1475 1476 /** 1477 * usb4_port_clx_supported() - Check if CLx is supported by the link 1478 * @port: Port to check for CLx support for 1479 * 1480 * PORT_CS_18_CPS bit reflects if the link supports CLx including 1481 * active cables (if connected on the link). 1482 */ 1483 bool usb4_port_clx_supported(struct tb_port *port) 1484 { 1485 int ret; 1486 u32 val; 1487 1488 ret = tb_port_read(port, &val, TB_CFG_PORT, 1489 port->cap_usb4 + PORT_CS_18, 1); 1490 if (ret) 1491 return false; 1492 1493 return !!(val & PORT_CS_18_CPS); 1494 } 1495 1496 /** 1497 * usb4_port_asym_supported() - If the port supports asymmetric link 1498 * @port: USB4 port 1499 * 1500 * Checks if the port and the cable supports asymmetric link and returns 1501 * %true in that case. 1502 */ 1503 bool usb4_port_asym_supported(struct tb_port *port) 1504 { 1505 u32 val; 1506 1507 if (!port->cap_usb4) 1508 return false; 1509 1510 if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1)) 1511 return false; 1512 1513 return !!(val & PORT_CS_18_CSA); 1514 } 1515 1516 /** 1517 * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric 1518 * @port: USB4 port 1519 * @width: Asymmetric width to configure 1520 * 1521 * Sets USB4 port link width to @width. Can be called for widths where 1522 * usb4_port_asym_width_supported() returned @true. 1523 */ 1524 int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width) 1525 { 1526 u32 val; 1527 int ret; 1528 1529 if (!port->cap_phy) 1530 return -EINVAL; 1531 1532 ret = tb_port_read(port, &val, TB_CFG_PORT, 1533 port->cap_phy + LANE_ADP_CS_1, 1); 1534 if (ret) 1535 return ret; 1536 1537 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK; 1538 switch (width) { 1539 case TB_LINK_WIDTH_DUAL: 1540 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, 1541 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL); 1542 break; 1543 case TB_LINK_WIDTH_ASYM_TX: 1544 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, 1545 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX); 1546 break; 1547 case TB_LINK_WIDTH_ASYM_RX: 1548 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK, 1549 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX); 1550 break; 1551 default: 1552 return -EINVAL; 1553 } 1554 1555 return tb_port_write(port, &val, TB_CFG_PORT, 1556 port->cap_phy + LANE_ADP_CS_1, 1); 1557 } 1558 1559 /** 1560 * usb4_port_asym_start() - Start symmetry change and wait for completion 1561 * @port: USB4 port 1562 * 1563 * Start symmetry change of the link to asymmetric or symmetric 1564 * (according to what was previously set in tb_port_set_link_width(). 1565 * Wait for completion of the change. 1566 * 1567 * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or 1568 * a negative errno in case of a failure. 1569 */ 1570 int usb4_port_asym_start(struct tb_port *port) 1571 { 1572 int ret; 1573 u32 val; 1574 1575 ret = tb_port_read(port, &val, TB_CFG_PORT, 1576 port->cap_usb4 + PORT_CS_19, 1); 1577 if (ret) 1578 return ret; 1579 1580 val &= ~PORT_CS_19_START_ASYM; 1581 val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1); 1582 1583 ret = tb_port_write(port, &val, TB_CFG_PORT, 1584 port->cap_usb4 + PORT_CS_19, 1); 1585 if (ret) 1586 return ret; 1587 1588 /* 1589 * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4 1590 * port started the symmetry transition. 1591 */ 1592 ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19, 1593 PORT_CS_19_START_ASYM, 0, 1000); 1594 if (ret) 1595 return ret; 1596 1597 /* Then wait for the transtion to be completed */ 1598 return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18, 1599 PORT_CS_18_TIP, 0, 5000); 1600 } 1601 1602 /** 1603 * usb4_port_margining_caps() - Read USB4 port marginig capabilities 1604 * @port: USB4 port 1605 * @caps: Array with at least two elements to hold the results 1606 * 1607 * Reads the USB4 port lane margining capabilities into @caps. 1608 */ 1609 int usb4_port_margining_caps(struct tb_port *port, u32 *caps) 1610 { 1611 int ret; 1612 1613 ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, 1614 USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500); 1615 if (ret) 1616 return ret; 1617 1618 return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, 1619 USB4_SB_DATA, caps, sizeof(*caps) * 2); 1620 } 1621 1622 /** 1623 * usb4_port_hw_margin() - Run hardware lane margining on port 1624 * @port: USB4 port 1625 * @lanes: Which lanes to run (must match the port capabilities). Can be 1626 * %0, %1 or %7. 1627 * @ber_level: BER level contour value 1628 * @timing: Perform timing margining instead of voltage 1629 * @right_high: Use Right/high margin instead of left/low 1630 * @results: Array with at least two elements to hold the results 1631 * 1632 * Runs hardware lane margining on USB4 port and returns the result in 1633 * @results. 1634 */ 1635 int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, 1636 unsigned int ber_level, bool timing, bool right_high, 1637 u32 *results) 1638 { 1639 u32 val; 1640 int ret; 1641 1642 val = lanes; 1643 if (timing) 1644 val |= USB4_MARGIN_HW_TIME; 1645 if (right_high) 1646 val |= USB4_MARGIN_HW_RH; 1647 if (ber_level) 1648 val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) & 1649 USB4_MARGIN_HW_BER_MASK; 1650 1651 ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, 1652 USB4_SB_METADATA, &val, sizeof(val)); 1653 if (ret) 1654 return ret; 1655 1656 ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, 1657 USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500); 1658 if (ret) 1659 return ret; 1660 1661 return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, 1662 USB4_SB_DATA, results, sizeof(*results) * 2); 1663 } 1664 1665 /** 1666 * usb4_port_sw_margin() - Run software lane margining on port 1667 * @port: USB4 port 1668 * @lanes: Which lanes to run (must match the port capabilities). Can be 1669 * %0, %1 or %7. 1670 * @timing: Perform timing margining instead of voltage 1671 * @right_high: Use Right/high margin instead of left/low 1672 * @counter: What to do with the error counter 1673 * 1674 * Runs software lane margining on USB4 port. Read back the error 1675 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in 1676 * success and negative errno otherwise. 1677 */ 1678 int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, 1679 bool right_high, u32 counter) 1680 { 1681 u32 val; 1682 int ret; 1683 1684 val = lanes; 1685 if (timing) 1686 val |= USB4_MARGIN_SW_TIME; 1687 if (right_high) 1688 val |= USB4_MARGIN_SW_RH; 1689 val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) & 1690 USB4_MARGIN_SW_COUNTER_MASK; 1691 1692 ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, 1693 USB4_SB_METADATA, &val, sizeof(val)); 1694 if (ret) 1695 return ret; 1696 1697 return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, 1698 USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); 1699 } 1700 1701 /** 1702 * usb4_port_sw_margin_errors() - Read the software margining error counters 1703 * @port: USB4 port 1704 * @errors: Error metadata is copied here. 1705 * 1706 * This reads back the software margining error counters from the port. 1707 * Returns %0 in success and negative errno otherwise. 1708 */ 1709 int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors) 1710 { 1711 int ret; 1712 1713 ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, 1714 USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150); 1715 if (ret) 1716 return ret; 1717 1718 return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, 1719 USB4_SB_METADATA, errors, sizeof(*errors)); 1720 } 1721 1722 static inline int usb4_port_retimer_op(struct tb_port *port, u8 index, 1723 enum usb4_sb_opcode opcode, 1724 int timeout_msec) 1725 { 1726 return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode, 1727 timeout_msec); 1728 } 1729 1730 /** 1731 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions 1732 * @port: USB4 port 1733 * @index: Retimer index 1734 * 1735 * Enables sideband channel transations on SBTX. Can be used when USB4 1736 * link does not go up, for example if there is no device connected. 1737 */ 1738 int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index) 1739 { 1740 int ret; 1741 1742 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, 1743 500); 1744 1745 if (ret != -ENODEV) 1746 return ret; 1747 1748 /* 1749 * Per the USB4 retimer spec, the retimer is not required to 1750 * send an RT (Retimer Transaction) response for the first 1751 * SET_INBOUND_SBTX command 1752 */ 1753 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, 1754 500); 1755 } 1756 1757 /** 1758 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions 1759 * @port: USB4 port 1760 * @index: Retimer index 1761 * 1762 * Disables sideband channel transations on SBTX. The reverse of 1763 * usb4_port_retimer_set_inbound_sbtx(). 1764 */ 1765 int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index) 1766 { 1767 return usb4_port_retimer_op(port, index, 1768 USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500); 1769 } 1770 1771 /** 1772 * usb4_port_retimer_read() - Read from retimer sideband registers 1773 * @port: USB4 port 1774 * @index: Retimer index 1775 * @reg: Sideband register to read 1776 * @buf: Data from @reg is stored here 1777 * @size: Number of bytes to read 1778 * 1779 * Function reads retimer sideband registers starting from @reg. The 1780 * retimer is connected to @port at @index. Returns %0 in case of 1781 * success, and read data is copied to @buf. If there is no retimer 1782 * present at given @index returns %-ENODEV. In any other failure 1783 * returns negative errno. 1784 */ 1785 int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, 1786 u8 size) 1787 { 1788 return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf, 1789 size); 1790 } 1791 1792 /** 1793 * usb4_port_retimer_write() - Write to retimer sideband registers 1794 * @port: USB4 port 1795 * @index: Retimer index 1796 * @reg: Sideband register to write 1797 * @buf: Data that is written starting from @reg 1798 * @size: Number of bytes to write 1799 * 1800 * Writes retimer sideband registers starting from @reg. The retimer is 1801 * connected to @port at @index. Returns %0 in case of success. If there 1802 * is no retimer present at given @index returns %-ENODEV. In any other 1803 * failure returns negative errno. 1804 */ 1805 int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, 1806 const void *buf, u8 size) 1807 { 1808 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf, 1809 size); 1810 } 1811 1812 /** 1813 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer 1814 * @port: USB4 port 1815 * @index: Retimer index 1816 * 1817 * If the retimer at @index is last one (connected directly to the 1818 * Type-C port) this function returns %1. If it is not returns %0. If 1819 * the retimer is not present returns %-ENODEV. Otherwise returns 1820 * negative errno. 1821 */ 1822 int usb4_port_retimer_is_last(struct tb_port *port, u8 index) 1823 { 1824 u32 metadata; 1825 int ret; 1826 1827 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER, 1828 500); 1829 if (ret) 1830 return ret; 1831 1832 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, 1833 sizeof(metadata)); 1834 return ret ? ret : metadata & 1; 1835 } 1836 1837 /** 1838 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size 1839 * @port: USB4 port 1840 * @index: Retimer index 1841 * 1842 * Reads NVM sector size (in bytes) of a retimer at @index. This 1843 * operation can be used to determine whether the retimer supports NVM 1844 * upgrade for example. Returns sector size in bytes or negative errno 1845 * in case of error. Specifically returns %-ENODEV if there is no 1846 * retimer at @index. 1847 */ 1848 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index) 1849 { 1850 u32 metadata; 1851 int ret; 1852 1853 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE, 1854 500); 1855 if (ret) 1856 return ret; 1857 1858 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, 1859 sizeof(metadata)); 1860 return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK; 1861 } 1862 1863 /** 1864 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset 1865 * @port: USB4 port 1866 * @index: Retimer index 1867 * @address: Start offset 1868 * 1869 * Exlicitly sets NVM write offset. Normally when writing to NVM this is 1870 * done automatically by usb4_port_retimer_nvm_write(). 1871 * 1872 * Returns %0 in success and negative errno if there was a failure. 1873 */ 1874 int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, 1875 unsigned int address) 1876 { 1877 u32 metadata, dwaddress; 1878 int ret; 1879 1880 dwaddress = address / 4; 1881 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & 1882 USB4_NVM_SET_OFFSET_MASK; 1883 1884 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, 1885 sizeof(metadata)); 1886 if (ret) 1887 return ret; 1888 1889 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET, 1890 500); 1891 } 1892 1893 struct retimer_info { 1894 struct tb_port *port; 1895 u8 index; 1896 }; 1897 1898 static int usb4_port_retimer_nvm_write_next_block(void *data, 1899 unsigned int dwaddress, const void *buf, size_t dwords) 1900 1901 { 1902 const struct retimer_info *info = data; 1903 struct tb_port *port = info->port; 1904 u8 index = info->index; 1905 int ret; 1906 1907 ret = usb4_port_retimer_write(port, index, USB4_SB_DATA, 1908 buf, dwords * 4); 1909 if (ret) 1910 return ret; 1911 1912 return usb4_port_retimer_op(port, index, 1913 USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000); 1914 } 1915 1916 /** 1917 * usb4_port_retimer_nvm_write() - Write to retimer NVM 1918 * @port: USB4 port 1919 * @index: Retimer index 1920 * @address: Byte address where to start the write 1921 * @buf: Data to write 1922 * @size: Size in bytes how much to write 1923 * 1924 * Writes @size bytes from @buf to the retimer NVM. Used for NVM 1925 * upgrade. Returns %0 if the data was written successfully and negative 1926 * errno in case of failure. Specifically returns %-ENODEV if there is 1927 * no retimer at @index. 1928 */ 1929 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address, 1930 const void *buf, size_t size) 1931 { 1932 struct retimer_info info = { .port = port, .index = index }; 1933 int ret; 1934 1935 ret = usb4_port_retimer_nvm_set_offset(port, index, address); 1936 if (ret) 1937 return ret; 1938 1939 return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, 1940 usb4_port_retimer_nvm_write_next_block, &info); 1941 } 1942 1943 /** 1944 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade 1945 * @port: USB4 port 1946 * @index: Retimer index 1947 * 1948 * After the new NVM image has been written via usb4_port_retimer_nvm_write() 1949 * this function can be used to trigger the NVM upgrade process. If 1950 * successful the retimer restarts with the new NVM and may not have the 1951 * index set so one needs to call usb4_port_enumerate_retimers() to 1952 * force index to be assigned. 1953 */ 1954 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index) 1955 { 1956 u32 val; 1957 1958 /* 1959 * We need to use the raw operation here because once the 1960 * authentication completes the retimer index is not set anymore 1961 * so we do not get back the status now. 1962 */ 1963 val = USB4_SB_OPCODE_NVM_AUTH_WRITE; 1964 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, 1965 USB4_SB_OPCODE, &val, sizeof(val)); 1966 } 1967 1968 /** 1969 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade 1970 * @port: USB4 port 1971 * @index: Retimer index 1972 * @status: Raw status code read from metadata 1973 * 1974 * This can be called after usb4_port_retimer_nvm_authenticate() and 1975 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade. 1976 * 1977 * Returns %0 if the authentication status was successfully read. The 1978 * completion metadata (the result) is then stored into @status. If 1979 * reading the status fails, returns negative errno. 1980 */ 1981 int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, 1982 u32 *status) 1983 { 1984 u32 metadata, val; 1985 int ret; 1986 1987 ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val, 1988 sizeof(val)); 1989 if (ret) 1990 return ret; 1991 1992 ret = usb4_port_sb_opcode_err_to_errno(val); 1993 switch (ret) { 1994 case 0: 1995 *status = 0; 1996 return 0; 1997 1998 case -EAGAIN: 1999 ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, 2000 &metadata, sizeof(metadata)); 2001 if (ret) 2002 return ret; 2003 2004 *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK; 2005 return 0; 2006 2007 default: 2008 return ret; 2009 } 2010 } 2011 2012 static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress, 2013 void *buf, size_t dwords) 2014 { 2015 const struct retimer_info *info = data; 2016 struct tb_port *port = info->port; 2017 u8 index = info->index; 2018 u32 metadata; 2019 int ret; 2020 2021 metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT; 2022 if (dwords < USB4_DATA_DWORDS) 2023 metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT; 2024 2025 ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, 2026 sizeof(metadata)); 2027 if (ret) 2028 return ret; 2029 2030 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500); 2031 if (ret) 2032 return ret; 2033 2034 return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf, 2035 dwords * 4); 2036 } 2037 2038 /** 2039 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM 2040 * @port: USB4 port 2041 * @index: Retimer index 2042 * @address: NVM address (in bytes) to start reading 2043 * @buf: Data read from NVM is stored here 2044 * @size: Number of bytes to read 2045 * 2046 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the 2047 * read was successful and negative errno in case of failure. 2048 * Specifically returns %-ENODEV if there is no retimer at @index. 2049 */ 2050 int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, 2051 unsigned int address, void *buf, size_t size) 2052 { 2053 struct retimer_info info = { .port = port, .index = index }; 2054 2055 return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, 2056 usb4_port_retimer_nvm_read_block, &info); 2057 } 2058 2059 static inline unsigned int 2060 usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw) 2061 { 2062 /* Take the possible bandwidth limitation into account */ 2063 if (port->max_bw) 2064 return min(bw, port->max_bw); 2065 return bw; 2066 } 2067 2068 /** 2069 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate 2070 * @port: USB3 adapter port 2071 * 2072 * Return maximum supported link rate of a USB3 adapter in Mb/s. 2073 * Negative errno in case of error. 2074 */ 2075 int usb4_usb3_port_max_link_rate(struct tb_port *port) 2076 { 2077 int ret, lr; 2078 u32 val; 2079 2080 if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port)) 2081 return -EINVAL; 2082 2083 ret = tb_port_read(port, &val, TB_CFG_PORT, 2084 port->cap_adap + ADP_USB3_CS_4, 1); 2085 if (ret) 2086 return ret; 2087 2088 lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT; 2089 ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000; 2090 2091 return usb4_usb3_port_max_bandwidth(port, ret); 2092 } 2093 2094 static int usb4_usb3_port_cm_request(struct tb_port *port, bool request) 2095 { 2096 int ret; 2097 u32 val; 2098 2099 if (!tb_port_is_usb3_down(port)) 2100 return -EINVAL; 2101 if (tb_route(port->sw)) 2102 return -EINVAL; 2103 2104 ret = tb_port_read(port, &val, TB_CFG_PORT, 2105 port->cap_adap + ADP_USB3_CS_2, 1); 2106 if (ret) 2107 return ret; 2108 2109 if (request) 2110 val |= ADP_USB3_CS_2_CMR; 2111 else 2112 val &= ~ADP_USB3_CS_2_CMR; 2113 2114 ret = tb_port_write(port, &val, TB_CFG_PORT, 2115 port->cap_adap + ADP_USB3_CS_2, 1); 2116 if (ret) 2117 return ret; 2118 2119 /* 2120 * We can use val here directly as the CMR bit is in the same place 2121 * as HCA. Just mask out others. 2122 */ 2123 val &= ADP_USB3_CS_2_CMR; 2124 return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1, 2125 ADP_USB3_CS_1_HCA, val, 1500); 2126 } 2127 2128 static inline int usb4_usb3_port_set_cm_request(struct tb_port *port) 2129 { 2130 return usb4_usb3_port_cm_request(port, true); 2131 } 2132 2133 static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port) 2134 { 2135 return usb4_usb3_port_cm_request(port, false); 2136 } 2137 2138 static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale) 2139 { 2140 unsigned long uframes; 2141 2142 uframes = bw * 512UL << scale; 2143 return DIV_ROUND_CLOSEST(uframes * 8000, MEGA); 2144 } 2145 2146 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale) 2147 { 2148 unsigned long uframes; 2149 2150 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */ 2151 uframes = ((unsigned long)mbps * MEGA) / 8000; 2152 return DIV_ROUND_UP(uframes, 512UL << scale); 2153 } 2154 2155 static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port, 2156 int *upstream_bw, 2157 int *downstream_bw) 2158 { 2159 u32 val, bw, scale; 2160 int ret; 2161 2162 ret = tb_port_read(port, &val, TB_CFG_PORT, 2163 port->cap_adap + ADP_USB3_CS_2, 1); 2164 if (ret) 2165 return ret; 2166 2167 ret = tb_port_read(port, &scale, TB_CFG_PORT, 2168 port->cap_adap + ADP_USB3_CS_3, 1); 2169 if (ret) 2170 return ret; 2171 2172 scale &= ADP_USB3_CS_3_SCALE_MASK; 2173 2174 bw = val & ADP_USB3_CS_2_AUBW_MASK; 2175 *upstream_bw = usb3_bw_to_mbps(bw, scale); 2176 2177 bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT; 2178 *downstream_bw = usb3_bw_to_mbps(bw, scale); 2179 2180 return 0; 2181 } 2182 2183 /** 2184 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3 2185 * @port: USB3 adapter port 2186 * @upstream_bw: Allocated upstream bandwidth is stored here 2187 * @downstream_bw: Allocated downstream bandwidth is stored here 2188 * 2189 * Stores currently allocated USB3 bandwidth into @upstream_bw and 2190 * @downstream_bw in Mb/s. Returns %0 in case of success and negative 2191 * errno in failure. 2192 */ 2193 int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, 2194 int *downstream_bw) 2195 { 2196 int ret; 2197 2198 ret = usb4_usb3_port_set_cm_request(port); 2199 if (ret) 2200 return ret; 2201 2202 ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw, 2203 downstream_bw); 2204 usb4_usb3_port_clear_cm_request(port); 2205 2206 return ret; 2207 } 2208 2209 static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port, 2210 int *upstream_bw, 2211 int *downstream_bw) 2212 { 2213 u32 val, bw, scale; 2214 int ret; 2215 2216 ret = tb_port_read(port, &val, TB_CFG_PORT, 2217 port->cap_adap + ADP_USB3_CS_1, 1); 2218 if (ret) 2219 return ret; 2220 2221 ret = tb_port_read(port, &scale, TB_CFG_PORT, 2222 port->cap_adap + ADP_USB3_CS_3, 1); 2223 if (ret) 2224 return ret; 2225 2226 scale &= ADP_USB3_CS_3_SCALE_MASK; 2227 2228 bw = val & ADP_USB3_CS_1_CUBW_MASK; 2229 *upstream_bw = usb3_bw_to_mbps(bw, scale); 2230 2231 bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT; 2232 *downstream_bw = usb3_bw_to_mbps(bw, scale); 2233 2234 return 0; 2235 } 2236 2237 static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port, 2238 int upstream_bw, 2239 int downstream_bw) 2240 { 2241 u32 val, ubw, dbw, scale; 2242 int ret, max_bw; 2243 2244 /* Figure out suitable scale */ 2245 scale = 0; 2246 max_bw = max(upstream_bw, downstream_bw); 2247 while (scale < 64) { 2248 if (mbps_to_usb3_bw(max_bw, scale) < 4096) 2249 break; 2250 scale++; 2251 } 2252 2253 if (WARN_ON(scale >= 64)) 2254 return -EINVAL; 2255 2256 ret = tb_port_write(port, &scale, TB_CFG_PORT, 2257 port->cap_adap + ADP_USB3_CS_3, 1); 2258 if (ret) 2259 return ret; 2260 2261 ubw = mbps_to_usb3_bw(upstream_bw, scale); 2262 dbw = mbps_to_usb3_bw(downstream_bw, scale); 2263 2264 tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale); 2265 2266 ret = tb_port_read(port, &val, TB_CFG_PORT, 2267 port->cap_adap + ADP_USB3_CS_2, 1); 2268 if (ret) 2269 return ret; 2270 2271 val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK); 2272 val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT; 2273 val |= ubw; 2274 2275 return tb_port_write(port, &val, TB_CFG_PORT, 2276 port->cap_adap + ADP_USB3_CS_2, 1); 2277 } 2278 2279 /** 2280 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3 2281 * @port: USB3 adapter port 2282 * @upstream_bw: New upstream bandwidth 2283 * @downstream_bw: New downstream bandwidth 2284 * 2285 * This can be used to set how much bandwidth is allocated for the USB3 2286 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the 2287 * new values programmed to the USB3 adapter allocation registers. If 2288 * the values are lower than what is currently consumed the allocation 2289 * is set to what is currently consumed instead (consumed bandwidth 2290 * cannot be taken away by CM). The actual new values are returned in 2291 * @upstream_bw and @downstream_bw. 2292 * 2293 * Returns %0 in case of success and negative errno if there was a 2294 * failure. 2295 */ 2296 int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, 2297 int *downstream_bw) 2298 { 2299 int ret, consumed_up, consumed_down, allocate_up, allocate_down; 2300 2301 ret = usb4_usb3_port_set_cm_request(port); 2302 if (ret) 2303 return ret; 2304 2305 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, 2306 &consumed_down); 2307 if (ret) 2308 goto err_request; 2309 2310 /* Don't allow it go lower than what is consumed */ 2311 allocate_up = max(*upstream_bw, consumed_up); 2312 allocate_down = max(*downstream_bw, consumed_down); 2313 2314 ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up, 2315 allocate_down); 2316 if (ret) 2317 goto err_request; 2318 2319 *upstream_bw = allocate_up; 2320 *downstream_bw = allocate_down; 2321 2322 err_request: 2323 usb4_usb3_port_clear_cm_request(port); 2324 return ret; 2325 } 2326 2327 /** 2328 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth 2329 * @port: USB3 adapter port 2330 * @upstream_bw: New allocated upstream bandwidth 2331 * @downstream_bw: New allocated downstream bandwidth 2332 * 2333 * Releases USB3 allocated bandwidth down to what is actually consumed. 2334 * The new bandwidth is returned in @upstream_bw and @downstream_bw. 2335 * 2336 * Returns 0% in success and negative errno in case of failure. 2337 */ 2338 int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, 2339 int *downstream_bw) 2340 { 2341 int ret, consumed_up, consumed_down; 2342 2343 ret = usb4_usb3_port_set_cm_request(port); 2344 if (ret) 2345 return ret; 2346 2347 ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, 2348 &consumed_down); 2349 if (ret) 2350 goto err_request; 2351 2352 /* 2353 * Always keep 900 Mb/s to make sure xHCI has at least some 2354 * bandwidth available for isochronous traffic. 2355 */ 2356 if (consumed_up < 900) 2357 consumed_up = 900; 2358 if (consumed_down < 900) 2359 consumed_down = 900; 2360 2361 ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up, 2362 consumed_down); 2363 if (ret) 2364 goto err_request; 2365 2366 *upstream_bw = consumed_up; 2367 *downstream_bw = consumed_down; 2368 2369 err_request: 2370 usb4_usb3_port_clear_cm_request(port); 2371 return ret; 2372 } 2373 2374 static bool is_usb4_dpin(const struct tb_port *port) 2375 { 2376 if (!tb_port_is_dpin(port)) 2377 return false; 2378 if (!tb_switch_is_usb4(port->sw)) 2379 return false; 2380 return true; 2381 } 2382 2383 /** 2384 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter 2385 * @port: DP IN adapter 2386 * @cm_id: CM ID to assign 2387 * 2388 * Sets CM ID for the @port. Returns %0 on success and negative errno 2389 * otherwise. Speficially returns %-EOPNOTSUPP if the @port does not 2390 * support this. 2391 */ 2392 int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id) 2393 { 2394 u32 val; 2395 int ret; 2396 2397 if (!is_usb4_dpin(port)) 2398 return -EOPNOTSUPP; 2399 2400 ret = tb_port_read(port, &val, TB_CFG_PORT, 2401 port->cap_adap + ADP_DP_CS_2, 1); 2402 if (ret) 2403 return ret; 2404 2405 val &= ~ADP_DP_CS_2_CM_ID_MASK; 2406 val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT; 2407 2408 return tb_port_write(port, &val, TB_CFG_PORT, 2409 port->cap_adap + ADP_DP_CS_2, 1); 2410 } 2411 2412 /** 2413 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode 2414 * supported 2415 * @port: DP IN adapter to check 2416 * 2417 * Can be called to any DP IN adapter. Returns true if the adapter 2418 * supports USB4 bandwidth allocation mode, false otherwise. 2419 */ 2420 bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port) 2421 { 2422 int ret; 2423 u32 val; 2424 2425 if (!is_usb4_dpin(port)) 2426 return false; 2427 2428 ret = tb_port_read(port, &val, TB_CFG_PORT, 2429 port->cap_adap + DP_LOCAL_CAP, 1); 2430 if (ret) 2431 return false; 2432 2433 return !!(val & DP_COMMON_CAP_BW_MODE); 2434 } 2435 2436 /** 2437 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode 2438 * enabled 2439 * @port: DP IN adapter to check 2440 * 2441 * Can be called to any DP IN adapter. Returns true if the bandwidth 2442 * allocation mode has been enabled, false otherwise. 2443 */ 2444 bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port) 2445 { 2446 int ret; 2447 u32 val; 2448 2449 if (!is_usb4_dpin(port)) 2450 return false; 2451 2452 ret = tb_port_read(port, &val, TB_CFG_PORT, 2453 port->cap_adap + ADP_DP_CS_8, 1); 2454 if (ret) 2455 return false; 2456 2457 return !!(val & ADP_DP_CS_8_DPME); 2458 } 2459 2460 /** 2461 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for 2462 * bandwidth allocation mode 2463 * @port: DP IN adapter 2464 * @supported: Does the CM support bandwidth allocation mode 2465 * 2466 * Can be called to any DP IN adapter. Sets or clears the CM support bit 2467 * of the DP IN adapter. Returns %0 in success and negative errno 2468 * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter 2469 * does not support this. 2470 */ 2471 int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port, 2472 bool supported) 2473 { 2474 u32 val; 2475 int ret; 2476 2477 if (!is_usb4_dpin(port)) 2478 return -EOPNOTSUPP; 2479 2480 ret = tb_port_read(port, &val, TB_CFG_PORT, 2481 port->cap_adap + ADP_DP_CS_2, 1); 2482 if (ret) 2483 return ret; 2484 2485 if (supported) 2486 val |= ADP_DP_CS_2_CMMS; 2487 else 2488 val &= ~ADP_DP_CS_2_CMMS; 2489 2490 return tb_port_write(port, &val, TB_CFG_PORT, 2491 port->cap_adap + ADP_DP_CS_2, 1); 2492 } 2493 2494 /** 2495 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter 2496 * @port: DP IN adapter 2497 * 2498 * Reads bandwidth allocation Group ID from the DP IN adapter and 2499 * returns it. If the adapter does not support setting Group_ID 2500 * %-EOPNOTSUPP is returned. 2501 */ 2502 int usb4_dp_port_group_id(struct tb_port *port) 2503 { 2504 u32 val; 2505 int ret; 2506 2507 if (!is_usb4_dpin(port)) 2508 return -EOPNOTSUPP; 2509 2510 ret = tb_port_read(port, &val, TB_CFG_PORT, 2511 port->cap_adap + ADP_DP_CS_2, 1); 2512 if (ret) 2513 return ret; 2514 2515 return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT; 2516 } 2517 2518 /** 2519 * usb4_dp_port_set_group_id() - Set adapter Group ID 2520 * @port: DP IN adapter 2521 * @group_id: Group ID for the adapter 2522 * 2523 * Sets bandwidth allocation mode Group ID for the DP IN adapter. 2524 * Returns %0 in case of success and negative errno otherwise. 2525 * Specifically returns %-EOPNOTSUPP if the adapter does not support 2526 * this. 2527 */ 2528 int usb4_dp_port_set_group_id(struct tb_port *port, int group_id) 2529 { 2530 u32 val; 2531 int ret; 2532 2533 if (!is_usb4_dpin(port)) 2534 return -EOPNOTSUPP; 2535 2536 ret = tb_port_read(port, &val, TB_CFG_PORT, 2537 port->cap_adap + ADP_DP_CS_2, 1); 2538 if (ret) 2539 return ret; 2540 2541 val &= ~ADP_DP_CS_2_GROUP_ID_MASK; 2542 val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT; 2543 2544 return tb_port_write(port, &val, TB_CFG_PORT, 2545 port->cap_adap + ADP_DP_CS_2, 1); 2546 } 2547 2548 /** 2549 * usb4_dp_port_nrd() - Read non-reduced rate and lanes 2550 * @port: DP IN adapter 2551 * @rate: Non-reduced rate in Mb/s is placed here 2552 * @lanes: Non-reduced lanes are placed here 2553 * 2554 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns 2555 * %0 in success and negative errno otherwise. Specifically returns 2556 * %-EOPNOTSUPP if the adapter does not support this. 2557 */ 2558 int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes) 2559 { 2560 u32 val, tmp; 2561 int ret; 2562 2563 if (!is_usb4_dpin(port)) 2564 return -EOPNOTSUPP; 2565 2566 ret = tb_port_read(port, &val, TB_CFG_PORT, 2567 port->cap_adap + ADP_DP_CS_2, 1); 2568 if (ret) 2569 return ret; 2570 2571 tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT; 2572 switch (tmp) { 2573 case DP_COMMON_CAP_RATE_RBR: 2574 *rate = 1620; 2575 break; 2576 case DP_COMMON_CAP_RATE_HBR: 2577 *rate = 2700; 2578 break; 2579 case DP_COMMON_CAP_RATE_HBR2: 2580 *rate = 5400; 2581 break; 2582 case DP_COMMON_CAP_RATE_HBR3: 2583 *rate = 8100; 2584 break; 2585 } 2586 2587 tmp = val & ADP_DP_CS_2_NRD_MLC_MASK; 2588 switch (tmp) { 2589 case DP_COMMON_CAP_1_LANE: 2590 *lanes = 1; 2591 break; 2592 case DP_COMMON_CAP_2_LANES: 2593 *lanes = 2; 2594 break; 2595 case DP_COMMON_CAP_4_LANES: 2596 *lanes = 4; 2597 break; 2598 } 2599 2600 return 0; 2601 } 2602 2603 /** 2604 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes 2605 * @port: DP IN adapter 2606 * @rate: Non-reduced rate in Mb/s 2607 * @lanes: Non-reduced lanes 2608 * 2609 * Before the capabilities reduction this function can be used to set 2610 * the non-reduced values for the DP IN adapter. Returns %0 in success 2611 * and negative errno otherwise. If the adapter does not support this 2612 * %-EOPNOTSUPP is returned. 2613 */ 2614 int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes) 2615 { 2616 u32 val; 2617 int ret; 2618 2619 if (!is_usb4_dpin(port)) 2620 return -EOPNOTSUPP; 2621 2622 ret = tb_port_read(port, &val, TB_CFG_PORT, 2623 port->cap_adap + ADP_DP_CS_2, 1); 2624 if (ret) 2625 return ret; 2626 2627 val &= ~ADP_DP_CS_2_NRD_MLR_MASK; 2628 2629 switch (rate) { 2630 case 1620: 2631 break; 2632 case 2700: 2633 val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT) 2634 & ADP_DP_CS_2_NRD_MLR_MASK; 2635 break; 2636 case 5400: 2637 val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT) 2638 & ADP_DP_CS_2_NRD_MLR_MASK; 2639 break; 2640 case 8100: 2641 val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT) 2642 & ADP_DP_CS_2_NRD_MLR_MASK; 2643 break; 2644 default: 2645 return -EINVAL; 2646 } 2647 2648 val &= ~ADP_DP_CS_2_NRD_MLC_MASK; 2649 2650 switch (lanes) { 2651 case 1: 2652 break; 2653 case 2: 2654 val |= DP_COMMON_CAP_2_LANES; 2655 break; 2656 case 4: 2657 val |= DP_COMMON_CAP_4_LANES; 2658 break; 2659 default: 2660 return -EINVAL; 2661 } 2662 2663 return tb_port_write(port, &val, TB_CFG_PORT, 2664 port->cap_adap + ADP_DP_CS_2, 1); 2665 } 2666 2667 /** 2668 * usb4_dp_port_granularity() - Return granularity for the bandwidth values 2669 * @port: DP IN adapter 2670 * 2671 * Reads the programmed granularity from @port. If the DP IN adapter does 2672 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative 2673 * errno in other error cases. 2674 */ 2675 int usb4_dp_port_granularity(struct tb_port *port) 2676 { 2677 u32 val; 2678 int ret; 2679 2680 if (!is_usb4_dpin(port)) 2681 return -EOPNOTSUPP; 2682 2683 ret = tb_port_read(port, &val, TB_CFG_PORT, 2684 port->cap_adap + ADP_DP_CS_2, 1); 2685 if (ret) 2686 return ret; 2687 2688 val &= ADP_DP_CS_2_GR_MASK; 2689 val >>= ADP_DP_CS_2_GR_SHIFT; 2690 2691 switch (val) { 2692 case ADP_DP_CS_2_GR_0_25G: 2693 return 250; 2694 case ADP_DP_CS_2_GR_0_5G: 2695 return 500; 2696 case ADP_DP_CS_2_GR_1G: 2697 return 1000; 2698 } 2699 2700 return -EINVAL; 2701 } 2702 2703 /** 2704 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values 2705 * @port: DP IN adapter 2706 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250. 2707 * 2708 * Sets the granularity used with the estimated, allocated and requested 2709 * bandwidth. Returns %0 in success and negative errno otherwise. If the 2710 * adapter does not support this %-EOPNOTSUPP is returned. 2711 */ 2712 int usb4_dp_port_set_granularity(struct tb_port *port, int granularity) 2713 { 2714 u32 val; 2715 int ret; 2716 2717 if (!is_usb4_dpin(port)) 2718 return -EOPNOTSUPP; 2719 2720 ret = tb_port_read(port, &val, TB_CFG_PORT, 2721 port->cap_adap + ADP_DP_CS_2, 1); 2722 if (ret) 2723 return ret; 2724 2725 val &= ~ADP_DP_CS_2_GR_MASK; 2726 2727 switch (granularity) { 2728 case 250: 2729 val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT; 2730 break; 2731 case 500: 2732 val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT; 2733 break; 2734 case 1000: 2735 val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT; 2736 break; 2737 default: 2738 return -EINVAL; 2739 } 2740 2741 return tb_port_write(port, &val, TB_CFG_PORT, 2742 port->cap_adap + ADP_DP_CS_2, 1); 2743 } 2744 2745 /** 2746 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth 2747 * @port: DP IN adapter 2748 * @bw: Estimated bandwidth in Mb/s. 2749 * 2750 * Sets the estimated bandwidth to @bw. Set the granularity by calling 2751 * usb4_dp_port_set_granularity() before calling this. The @bw is round 2752 * down to the closest granularity multiplier. Returns %0 in success 2753 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if 2754 * the adapter does not support this. 2755 */ 2756 int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw) 2757 { 2758 u32 val, granularity; 2759 int ret; 2760 2761 if (!is_usb4_dpin(port)) 2762 return -EOPNOTSUPP; 2763 2764 ret = usb4_dp_port_granularity(port); 2765 if (ret < 0) 2766 return ret; 2767 granularity = ret; 2768 2769 ret = tb_port_read(port, &val, TB_CFG_PORT, 2770 port->cap_adap + ADP_DP_CS_2, 1); 2771 if (ret) 2772 return ret; 2773 2774 val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK; 2775 val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT; 2776 2777 return tb_port_write(port, &val, TB_CFG_PORT, 2778 port->cap_adap + ADP_DP_CS_2, 1); 2779 } 2780 2781 /** 2782 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth 2783 * @port: DP IN adapter 2784 * 2785 * Reads and returns allocated bandwidth for @port in Mb/s (taking into 2786 * account the programmed granularity). Returns negative errno in case 2787 * of error. 2788 */ 2789 int usb4_dp_port_allocated_bandwidth(struct tb_port *port) 2790 { 2791 u32 val, granularity; 2792 int ret; 2793 2794 if (!is_usb4_dpin(port)) 2795 return -EOPNOTSUPP; 2796 2797 ret = usb4_dp_port_granularity(port); 2798 if (ret < 0) 2799 return ret; 2800 granularity = ret; 2801 2802 ret = tb_port_read(port, &val, TB_CFG_PORT, 2803 port->cap_adap + DP_STATUS, 1); 2804 if (ret) 2805 return ret; 2806 2807 val &= DP_STATUS_ALLOCATED_BW_MASK; 2808 val >>= DP_STATUS_ALLOCATED_BW_SHIFT; 2809 2810 return val * granularity; 2811 } 2812 2813 static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack) 2814 { 2815 u32 val; 2816 int ret; 2817 2818 ret = tb_port_read(port, &val, TB_CFG_PORT, 2819 port->cap_adap + ADP_DP_CS_2, 1); 2820 if (ret) 2821 return ret; 2822 2823 if (ack) 2824 val |= ADP_DP_CS_2_CA; 2825 else 2826 val &= ~ADP_DP_CS_2_CA; 2827 2828 return tb_port_write(port, &val, TB_CFG_PORT, 2829 port->cap_adap + ADP_DP_CS_2, 1); 2830 } 2831 2832 static inline int usb4_dp_port_set_cm_ack(struct tb_port *port) 2833 { 2834 return __usb4_dp_port_set_cm_ack(port, true); 2835 } 2836 2837 static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port, 2838 int timeout_msec) 2839 { 2840 ktime_t end; 2841 u32 val; 2842 int ret; 2843 2844 ret = __usb4_dp_port_set_cm_ack(port, false); 2845 if (ret) 2846 return ret; 2847 2848 end = ktime_add_ms(ktime_get(), timeout_msec); 2849 do { 2850 ret = tb_port_read(port, &val, TB_CFG_PORT, 2851 port->cap_adap + ADP_DP_CS_8, 1); 2852 if (ret) 2853 return ret; 2854 2855 if (!(val & ADP_DP_CS_8_DR)) 2856 break; 2857 2858 usleep_range(50, 100); 2859 } while (ktime_before(ktime_get(), end)); 2860 2861 if (val & ADP_DP_CS_8_DR) { 2862 tb_port_warn(port, "timeout waiting for DPTX request to clear\n"); 2863 return -ETIMEDOUT; 2864 } 2865 2866 ret = tb_port_read(port, &val, TB_CFG_PORT, 2867 port->cap_adap + ADP_DP_CS_2, 1); 2868 if (ret) 2869 return ret; 2870 2871 val &= ~ADP_DP_CS_2_CA; 2872 return tb_port_write(port, &val, TB_CFG_PORT, 2873 port->cap_adap + ADP_DP_CS_2, 1); 2874 } 2875 2876 /** 2877 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth 2878 * @port: DP IN adapter 2879 * @bw: New allocated bandwidth in Mb/s 2880 * 2881 * Communicates the new allocated bandwidth with the DPCD (graphics 2882 * driver). Takes into account the programmed granularity. Returns %0 in 2883 * success and negative errno in case of error. 2884 */ 2885 int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw) 2886 { 2887 u32 val, granularity; 2888 int ret; 2889 2890 if (!is_usb4_dpin(port)) 2891 return -EOPNOTSUPP; 2892 2893 ret = usb4_dp_port_granularity(port); 2894 if (ret < 0) 2895 return ret; 2896 granularity = ret; 2897 2898 ret = tb_port_read(port, &val, TB_CFG_PORT, 2899 port->cap_adap + DP_STATUS, 1); 2900 if (ret) 2901 return ret; 2902 2903 val &= ~DP_STATUS_ALLOCATED_BW_MASK; 2904 val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT; 2905 2906 ret = tb_port_write(port, &val, TB_CFG_PORT, 2907 port->cap_adap + DP_STATUS, 1); 2908 if (ret) 2909 return ret; 2910 2911 ret = usb4_dp_port_set_cm_ack(port); 2912 if (ret) 2913 return ret; 2914 2915 return usb4_dp_port_wait_and_clear_cm_ack(port, 500); 2916 } 2917 2918 /** 2919 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth 2920 * @port: DP IN adapter 2921 * 2922 * Reads the DPCD (graphics driver) requested bandwidth and returns it 2923 * in Mb/s. Takes the programmed granularity into account. In case of 2924 * error returns negative errno. Specifically returns %-EOPNOTSUPP if 2925 * the adapter does not support bandwidth allocation mode, and %ENODATA 2926 * if there is no active bandwidth request from the graphics driver. 2927 */ 2928 int usb4_dp_port_requested_bandwidth(struct tb_port *port) 2929 { 2930 u32 val, granularity; 2931 int ret; 2932 2933 if (!is_usb4_dpin(port)) 2934 return -EOPNOTSUPP; 2935 2936 ret = usb4_dp_port_granularity(port); 2937 if (ret < 0) 2938 return ret; 2939 granularity = ret; 2940 2941 ret = tb_port_read(port, &val, TB_CFG_PORT, 2942 port->cap_adap + ADP_DP_CS_8, 1); 2943 if (ret) 2944 return ret; 2945 2946 if (!(val & ADP_DP_CS_8_DR)) 2947 return -ENODATA; 2948 2949 return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity; 2950 } 2951 2952 /** 2953 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation 2954 * @port: PCIe adapter 2955 * @enable: Enable/disable extended encapsulation 2956 * 2957 * Enables or disables extended encapsulation used in PCIe tunneling. Caller 2958 * needs to make sure both adapters support this before enabling. Returns %0 on 2959 * success and negative errno otherwise. 2960 */ 2961 int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable) 2962 { 2963 u32 val; 2964 int ret; 2965 2966 if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port)) 2967 return -EINVAL; 2968 2969 ret = tb_port_read(port, &val, TB_CFG_PORT, 2970 port->cap_adap + ADP_PCIE_CS_1, 1); 2971 if (ret) 2972 return ret; 2973 2974 if (enable) 2975 val |= ADP_PCIE_CS_1_EE; 2976 else 2977 val &= ~ADP_PCIE_CS_1_EE; 2978 2979 return tb_port_write(port, &val, TB_CFG_PORT, 2980 port->cap_adap + ADP_PCIE_CS_1, 1); 2981 } 2982