1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * USB4 specific functionality
4 *
5 * Copyright (C) 2019, Intel Corporation
6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7 * Rajmohan Mani <rajmohan.mani@intel.com>
8 */
9
10 #include <linux/delay.h>
11 #include <linux/ktime.h>
12 #include <linux/units.h>
13
14 #include "sb_regs.h"
15 #include "tb.h"
16
17 #define USB4_DATA_RETRIES 3
18 #define USB4_DATA_DWORDS 16
19
20 #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
21 #define USB4_NVM_READ_OFFSET_SHIFT 2
22 #define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
23 #define USB4_NVM_READ_LENGTH_SHIFT 24
24
25 #define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
26 #define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
27
28 #define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
29 #define USB4_DROM_ADDRESS_SHIFT 2
30 #define USB4_DROM_SIZE_MASK GENMASK(19, 15)
31 #define USB4_DROM_SIZE_SHIFT 15
32
33 #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
34
35 #define USB4_BA_LENGTH_MASK GENMASK(7, 0)
36 #define USB4_BA_INDEX_MASK GENMASK(15, 0)
37
38 enum usb4_ba_index {
39 USB4_BA_MAX_USB3 = 0x1,
40 USB4_BA_MIN_DP_AUX = 0x2,
41 USB4_BA_MIN_DP_MAIN = 0x3,
42 USB4_BA_MAX_PCIE = 0x4,
43 USB4_BA_MAX_HI = 0x5,
44 };
45
46 #define USB4_BA_VALUE_MASK GENMASK(31, 16)
47 #define USB4_BA_VALUE_SHIFT 16
48
49 /* Delays in us used with usb4_port_wait_for_bit() */
50 #define USB4_PORT_DELAY 50
51 #define USB4_PORT_SB_DELAY 1000
52
/*
 * Runs a single USB4 router operation natively through the router
 * config space: writes metadata (ROUTER_CS_25) and TX data (ROUTER_CS_9),
 * kicks the opcode with the OV bit in ROUTER_CS_26, waits for completion
 * and then reads back status, metadata and RX data.
 */
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status,
				 const void *tx_data, size_t tx_dwords,
				 void *rx_data, size_t rx_dwords)
{
	u32 val;
	int ret;

	/* Metadata (if any) must be in place before the operation starts */
	if (metadata) {
		ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (tx_dwords) {
		ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				  tx_dwords);
		if (ret)
			return ret;
	}

	/* Setting OV starts the operation */
	val = opcode | ROUTER_CS_26_OV;
	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Router clears OV when the operation completes (timeout 500, presumably ms — see tb_switch_wait_for_bit) */
	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* ONS set means the router does not support this opcode */
	if (val & ROUTER_CS_26_ONS)
		return -EOPNOTSUPP;

	if (status)
		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;

	/* Read back metadata and RX data produced by the operation */
	if (metadata) {
		ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
		if (ret)
			return ret;
	}
	if (rx_dwords) {
		ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
				 rx_dwords);
		if (ret)
			return ret;
	}

	return 0;
}
107
/*
 * Common entry point for USB4 router operations. Prefers the connection
 * manager proxy implementation when one is registered and falls back to
 * the native register-based implementation otherwise.
 */
static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
			    u8 *status, const void *tx_data, size_t tx_dwords,
			    void *rx_data, size_t rx_dwords)
{
	const struct tb_cm_ops *ops = sw->tb->cm_ops;
	int ret;

	/* Data payloads never exceed the router operation buffer */
	if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	/*
	 * Let the connection manager proxy the operation if it provides
	 * a callback for that. Only -EOPNOTSUPP from the proxy falls
	 * through to the native implementation.
	 */
	if (ops->usb4_switch_op) {
		ret = ops->usb4_switch_op(sw, opcode, metadata, status,
					  tx_data, tx_dwords, rx_data,
					  rx_dwords);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
				     tx_dwords, rx_data, rx_dwords);
}
140
/* Runs a router operation that carries no data payload, only metadata/status */
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
				 u32 *metadata, u8 *status)
{
	return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}
146
/* Runs a router operation that transfers TX and/or RX data dwords as well */
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
				      u32 *metadata, u8 *status,
				      const void *tx_data, size_t tx_dwords,
				      void *rx_data, size_t rx_dwords)
{
	return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
				tx_dwords, rx_data, rx_dwords);
}
155
/**
 * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
 * @sw: Router whose wakes to check
 *
 * Checks wakes occurred during suspend and notify the PM core about them.
 */
void usb4_switch_check_wakes(struct tb_switch *sw)
{
	bool wakeup_usb4 = false;
	struct usb4_port *usb4;
	struct tb_port *port;
	bool wakeup = false;
	u32 val;

	/* Device routers report PCIe/USB3 wake status in ROUTER_CS_6 */
	if (tb_route(sw)) {
		if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
			return;

		tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
			  (val & ROUTER_CS_6_WOPS) ? "yes" : "no",
			  (val & ROUTER_CS_6_WOUS) ? "yes" : "no");

		wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
	}

	/*
	 * Check for any downstream ports for USB4 wake,
	 * connection wake and disconnection wake.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!port->cap_usb4)
			continue;

		/* On a read failure stop scanning; wakes seen so far still count */
		if (tb_port_read(port, &val, TB_CFG_PORT,
				 port->cap_usb4 + PORT_CS_18, 1))
			break;

		tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
			    (val & PORT_CS_18_WOU4S) ? "yes" : "no",
			    (val & PORT_CS_18_WOCS) ? "yes" : "no",
			    (val & PORT_CS_18_WODS) ? "yes" : "no");

		wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
				     PORT_CS_18_WODS);

		/* Attribute the wake to the USB4 port device when allowed */
		usb4 = port->usb4;
		if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
			pm_wakeup_event(&usb4->dev, 0);

		wakeup |= wakeup_usb4;
	}

	/* Report the wake against the router device itself as well */
	if (wakeup)
		pm_wakeup_event(&sw->dev, 0);
}
211
link_is_usb4(struct tb_port * port)212 static bool link_is_usb4(struct tb_port *port)
213 {
214 u32 val;
215
216 if (!port->cap_usb4)
217 return false;
218
219 if (tb_port_read(port, &val, TB_CFG_PORT,
220 port->cap_usb4 + PORT_CS_18, 1))
221 return false;
222
223 return !(val & PORT_CS_18_TCM);
224 }
225
/**
 * usb4_switch_setup() - Additional setup for USB4 device
 * @sw: USB4 router to setup
 *
 * USB4 routers need additional settings in order to enable all the
 * tunneling. This function enables USB and PCIe tunneling if it can be
 * enabled (e.g the parent switch also supports them). If USB tunneling
 * is not available for some reason (like that there is Thunderbolt 3
 * switch upstream) then the internal xHCI controller is enabled
 * instead.
 *
 * This does not set the configuration valid bit of the router. To do
 * that call usb4_switch_configuration_valid().
 */
int usb4_switch_setup(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down;
	bool tbt3, xhci;
	u32 val = 0;
	int ret;

	/* Host router needs none of this setup */
	if (!tb_route(sw))
		return 0;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
	if (ret)
		return ret;

	down = tb_switch_downstream_port(sw);
	sw->link_usb4 = link_is_usb4(down);
	tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");

	/* ROUTER_CS_6 tells us about the internal xHCI and TBT3 support */
	xhci = val & ROUTER_CS_6_HCI;
	tbt3 = !(val & ROUTER_CS_6_TNS);

	tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
		  tbt3 ? "yes" : "no", xhci ? "yes" : "no");

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
	if (ret)
		return ret;

	/*
	 * USB 3.x tunneling requires a USB4 link and a USB3 downstream
	 * adapter on the parent; when enabled the internal xHCI is left
	 * disabled.
	 */
	if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
	    tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
		val |= ROUTER_CS_5_UTO;
		xhci = false;
	}

	/*
	 * Only enable PCIe tunneling if the parent router supports it
	 * and it is not disabled.
	 */
	if (tb_acpi_may_tunnel_pcie() &&
	    tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
		val |= ROUTER_CS_5_PTO;
		/*
		 * xHCI can be enabled if PCIe tunneling is supported
		 * and the parent does not have any USB3 downstream
		 * adapters (so we cannot do USB 3.x tunneling).
		 */
		if (xhci)
			val |= ROUTER_CS_5_HCO;
	}

	/* TBT3 supported by the CM */
	val &= ~ROUTER_CS_5_CNS;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
296
297 /**
298 * usb4_switch_configuration_valid() - Set tunneling configuration to be valid
299 * @sw: USB4 router
300 *
301 * Sets configuration valid bit for the router. Must be called before
302 * any tunnels can be set through the router and after
303 * usb4_switch_setup() has been called. Can be called to host and device
304 * routers (does nothing for the latter).
305 *
306 * Returns %0 in success and negative errno otherwise.
307 */
usb4_switch_configuration_valid(struct tb_switch * sw)308 int usb4_switch_configuration_valid(struct tb_switch *sw)
309 {
310 u32 val;
311 int ret;
312
313 if (!tb_route(sw))
314 return 0;
315
316 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
317 if (ret)
318 return ret;
319
320 val |= ROUTER_CS_5_CV;
321
322 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
323 if (ret)
324 return ret;
325
326 return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
327 ROUTER_CS_6_CR, 50);
328 }
329
/**
 * usb4_switch_read_uid() - Read UID from USB4 router
 * @sw: USB4 router
 * @uid: UID is stored here
 *
 * Reads 64-bit UID from USB4 router config space.
 */
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
	/* UID spans two dwords starting at ROUTER_CS_7 */
	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
341
/*
 * Block read callback for tb_nvm_read_data(): reads @dwords dwords of
 * DROM starting at dword address @dwaddress using the DROM_READ router
 * operation. @data is the router. Returns %0 on success, -EIO when the
 * operation reports a non-zero status.
 */
static int usb4_switch_drom_read_block(void *data,
				       unsigned int dwaddress, void *buf,
				       size_t dwords)
{
	struct tb_switch *sw = data;
	u32 metadata;
	u8 status = 0;
	int ret;

	/* Pack the dword address and transfer length into the metadata */
	metadata = ((dwaddress << USB4_DROM_ADDRESS_SHIFT) &
		    USB4_DROM_ADDRESS_MASK) |
		   ((dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK);

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;
	if (status)
		return -EIO;

	return 0;
}
362
/**
 * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
 * @sw: USB4 router
 * @address: Byte address inside DROM to start reading
 * @buf: Buffer where the DROM content is stored
 * @size: Number of bytes to read from DROM
 *
 * Uses USB4 router operations to read router DROM. For devices this
 * should always work but for hosts it may return %-EOPNOTSUPP in which
 * case the host router does not have DROM.
 */
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
			  size_t size)
{
	/* tb_nvm_read_data() splits the read into dword-aligned blocks and retries */
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_drom_read_block, sw);
}
380
381 /**
382 * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
383 * @sw: USB4 router
384 *
385 * Checks whether conditions are met so that lane bonding can be
386 * established with the upstream router. Call only for device routers.
387 */
usb4_switch_lane_bonding_possible(struct tb_switch * sw)388 bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
389 {
390 struct tb_port *up;
391 int ret;
392 u32 val;
393
394 up = tb_upstream_port(sw);
395 ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
396 if (ret)
397 return false;
398
399 return !!(val & PORT_CS_18_BE);
400 }
401
/**
 * usb4_switch_set_wake() - Enabled/disable wake
 * @sw: USB4 router
 * @flags: Wakeup flags (%0 to disable)
 * @runtime: Wake is being programmed during system runtime
 *
 * Enables/disables router to wake up from sleep.
 */
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
{
	struct tb_port *port;
	u64 route = tb_route(sw);
	u32 val;
	int ret;

	/*
	 * Enable wakes coming from all USB4 downstream ports (from
	 * child routers). For device routers do this also for the
	 * upstream USB4 port.
	 */
	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port))
			continue;
		/* Host router (route == 0) has no upstream port to program */
		if (!route && tb_is_upstream_port(port))
			continue;
		if (!port->cap_usb4)
			continue;

		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;

		/* Start from a clean slate for all three wake sources */
		val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);

		if (tb_is_upstream_port(port)) {
			val |= PORT_CS_19_WOU4;
		} else {
			bool configured = val & PORT_CS_19_PC;
			/* Runtime PM always wakes; system sleep honors userspace policy */
			bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);

			/* Connect wake only makes sense on unconfigured ports, disconnect on configured */
			if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
				val |= PORT_CS_19_WOC;
			if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
				val |= PORT_CS_19_WOD;
			if ((flags & TB_WAKE_ON_USB4) && configured)
				val |= PORT_CS_19_WOU4;
		}

		ret = tb_port_write(port, &val, TB_CFG_PORT,
				    port->cap_usb4 + PORT_CS_19, 1);
		if (ret)
			return ret;
	}

	/*
	 * Enable wakes from PCIe, USB 3.x and DP on this router. Only
	 * needed for device routers.
	 */
	if (route) {
		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;

		val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
		if (flags & TB_WAKE_ON_USB3)
			val |= ROUTER_CS_5_WOU;
		if (flags & TB_WAKE_ON_PCIE)
			val |= ROUTER_CS_5_WOP;
		if (flags & TB_WAKE_ON_DP)
			val |= ROUTER_CS_5_WOD;

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
		if (ret)
			return ret;
	}

	return 0;
}
481
482 /**
483 * usb4_switch_set_sleep() - Prepare the router to enter sleep
484 * @sw: USB4 router
485 *
486 * Sets sleep bit for the router. Returns when the router sleep ready
487 * bit has been asserted.
488 */
usb4_switch_set_sleep(struct tb_switch * sw)489 int usb4_switch_set_sleep(struct tb_switch *sw)
490 {
491 int ret;
492 u32 val;
493
494 /* Set sleep bit and wait for sleep ready to be asserted */
495 ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
496 if (ret)
497 return ret;
498
499 val |= ROUTER_CS_5_SLP;
500
501 ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
502 if (ret)
503 return ret;
504
505 return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
506 ROUTER_CS_6_SLPR, 500);
507 }
508
509 /**
510 * usb4_switch_nvm_sector_size() - Return router NVM sector size
511 * @sw: USB4 router
512 *
513 * If the router supports NVM operations this function returns the NVM
514 * sector size in bytes. If NVM operations are not supported returns
515 * %-EOPNOTSUPP.
516 */
usb4_switch_nvm_sector_size(struct tb_switch * sw)517 int usb4_switch_nvm_sector_size(struct tb_switch *sw)
518 {
519 u32 metadata;
520 u8 status;
521 int ret;
522
523 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
524 &status);
525 if (ret)
526 return ret;
527
528 if (status)
529 return status == 0x2 ? -EOPNOTSUPP : -EIO;
530
531 return metadata & USB4_NVM_SECTOR_SIZE_MASK;
532 }
533
/*
 * Block read callback for tb_nvm_read_data(): reads @dwords dwords of
 * NVM starting at dword address @dwaddress using the NVM_READ router
 * operation. @data is the router.
 */
static int usb4_switch_nvm_read_block(void *data,
	unsigned int dwaddress, void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u32 metadata;
	u8 status = 0;
	int ret;

	/* Encode offset and length into the NVM_READ metadata */
	metadata = ((dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
		    USB4_NVM_READ_OFFSET_MASK) |
		   ((dwords << USB4_NVM_READ_LENGTH_SHIFT) &
		    USB4_NVM_READ_LENGTH_MASK);

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
				  &status, NULL, 0, buf, dwords);
	if (ret)
		return ret;
	if (status)
		return -EIO;

	return 0;
}
554
/**
 * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
 * @sw: USB4 router
 * @address: Starting address in bytes
 * @buf: Read data is placed here
 * @size: How many bytes to read
 *
 * Reads NVM contents of the router. If NVM is not supported returns
 * %-EOPNOTSUPP.
 */
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
			 size_t size)
{
	/* tb_nvm_read_data() handles block splitting, alignment and retries */
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_switch_nvm_read_block, sw);
}
571
572 /**
573 * usb4_switch_nvm_set_offset() - Set NVM write offset
574 * @sw: USB4 router
575 * @address: Start offset
576 *
577 * Explicitly sets NVM write offset. Normally when writing to NVM this
578 * is done automatically by usb4_switch_nvm_write().
579 *
580 * Returns %0 in success and negative errno if there was a failure.
581 */
usb4_switch_nvm_set_offset(struct tb_switch * sw,unsigned int address)582 int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
583 {
584 u32 metadata, dwaddress;
585 u8 status = 0;
586 int ret;
587
588 dwaddress = address / 4;
589 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
590 USB4_NVM_SET_OFFSET_MASK;
591
592 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
593 &status);
594 if (ret)
595 return ret;
596
597 return status ? -EIO : 0;
598 }
599
/*
 * Block write callback for tb_nvm_write_data(): writes @dwords dwords
 * from @buf to the current NVM write offset using the NVM_WRITE router
 * operation. The dword address is implicit (set via NVM_SET_OFFSET and
 * advanced by the router), hence @dwaddress is unused.
 */
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
					    const void *buf, size_t dwords)
{
	struct tb_switch *sw = data;
	u8 status;
	int ret;

	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
				  buf, dwords, NULL, 0);
	if (ret)
		return ret;
	if (status)
		return -EIO;

	return 0;
}
614
/**
 * usb4_switch_nvm_write() - Write to the router NVM
 * @sw: USB4 router
 * @address: Start address where to write in bytes
 * @buf: Pointer to the data to write
 * @size: Size of @buf in bytes
 *
 * Writes @buf to the router NVM using USB4 router operations. If NVM
 * write is not supported returns %-EOPNOTSUPP.
 */
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
			  const void *buf, size_t size)
{
	int ret;

	/* Program the starting offset first; the router advances it per block */
	ret = usb4_switch_nvm_set_offset(sw, address);
	if (ret)
		return ret;

	return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
				 usb4_switch_nvm_write_next_block, sw);
}
637
638 /**
639 * usb4_switch_nvm_authenticate() - Authenticate new NVM
640 * @sw: USB4 router
641 *
642 * After the new NVM has been written via usb4_switch_nvm_write(), this
643 * function triggers NVM authentication process. The router gets power
644 * cycled and if the authentication is successful the new NVM starts
645 * running. In case of failure returns negative errno.
646 *
647 * The caller should call usb4_switch_nvm_authenticate_status() to read
648 * the status of the authentication after power cycle. It should be the
649 * first router operation to avoid the status being lost.
650 */
usb4_switch_nvm_authenticate(struct tb_switch * sw)651 int usb4_switch_nvm_authenticate(struct tb_switch *sw)
652 {
653 int ret;
654
655 ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
656 switch (ret) {
657 /*
658 * The router is power cycled once NVM_AUTH is started so it is
659 * expected to get any of the following errors back.
660 */
661 case -EACCES:
662 case -ENOTCONN:
663 case -ETIMEDOUT:
664 return 0;
665
666 default:
667 return ret;
668 }
669 }
670
/**
 * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
 * @sw: USB4 router
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last NVM
 * authenticate router operation. If there is status then %0 is returned
 * and the status code is placed in @status. Returns negative errno in case
 * of failure.
 *
 * Must be called before any other router operation.
 */
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
	const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
	u16 opcode;
	u32 val;
	int ret;

	/* Prefer the connection manager proxy when one is registered */
	if (cm_ops->usb4_switch_nvm_authenticate_status) {
		ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
	if (ret)
		return ret;

	/* Check that the opcode is correct */
	opcode = val & ROUTER_CS_26_OPCODE_MASK;
	if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
		/* OV still set means the operation has not completed yet */
		if (val & ROUTER_CS_26_OV)
			return -EBUSY;
		if (val & ROUTER_CS_26_ONS)
			return -EOPNOTSUPP;

		*status = (val & ROUTER_CS_26_STATUS_MASK) >>
			ROUTER_CS_26_STATUS_SHIFT;
	} else {
		/* A different opcode in the register means no NVM_AUTH status */
		*status = 0;
	}

	return 0;
}
716
/**
 * usb4_switch_credits_init() - Read buffer allocation parameters
 * @sw: USB4 router
 *
 * Reads @sw buffer allocation parameters and initializes @sw buffer
 * allocation fields accordingly. Specifically @sw->credits_allocation
 * is set to %true if these parameters can be used in tunneling.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int usb4_switch_credits_init(struct tb_switch *sw)
{
	int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
	int ret, length, i, nports;
	const struct tb_port *port;
	u32 data[USB4_DATA_DWORDS];
	u32 metadata = 0;
	u8 status = 0;

	memset(data, 0, sizeof(data));
	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
				  &status, NULL, 0, data, ARRAY_SIZE(data));
	if (ret)
		return ret;
	if (status)
		return -EIO;

	/* Number of valid (index, value) entries returned by the router */
	length = metadata & USB4_BA_LENGTH_MASK;
	if (WARN_ON(length > ARRAY_SIZE(data)))
		return -EMSGSIZE;

	/* -1 marks "not reported by the router" for each parameter */
	max_usb3 = -1;
	min_dp_aux = -1;
	min_dp_main = -1;
	max_pcie = -1;
	max_dma = -1;

	tb_sw_dbg(sw, "credit allocation parameters:\n");

	for (i = 0; i < length; i++) {
		u16 index, value;

		/* Each dword packs the parameter index and its value */
		index = data[i] & USB4_BA_INDEX_MASK;
		value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;

		switch (index) {
		case USB4_BA_MAX_USB3:
			tb_sw_dbg(sw, " USB3: %u\n", value);
			max_usb3 = value;
			break;
		case USB4_BA_MIN_DP_AUX:
			tb_sw_dbg(sw, " DP AUX: %u\n", value);
			min_dp_aux = value;
			break;
		case USB4_BA_MIN_DP_MAIN:
			tb_sw_dbg(sw, " DP main: %u\n", value);
			min_dp_main = value;
			break;
		case USB4_BA_MAX_PCIE:
			tb_sw_dbg(sw, " PCIe: %u\n", value);
			max_pcie = value;
			break;
		case USB4_BA_MAX_HI:
			tb_sw_dbg(sw, " DMA: %u\n", value);
			max_dma = value;
			break;
		default:
			tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
				  index);
			break;
		}
	}

	/*
	 * Validate the buffer allocation preferences. If we find
	 * issues, log a warning and fall back using the hard-coded
	 * values.
	 */

	/* Host router must report baMaxHI */
	if (!tb_route(sw) && max_dma < 0) {
		tb_sw_warn(sw, "host router is missing baMaxHI\n");
		goto err_invalid;
	}

	/* Count lane adapters to detect multi-USB4-port routers */
	nports = 0;
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_null(port))
			nports++;
	}

	/* Must have DP buffer allocation (multiple USB4 ports) */
	if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
		tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
		goto err_invalid;
	}

	/* Each adapter type present must have its parameter reported */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpout(port) && min_dp_main < 0) {
			tb_sw_warn(sw, "missing baMinDPmain");
			goto err_invalid;
		}
		if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
		    min_dp_aux < 0) {
			tb_sw_warn(sw, "missing baMinDPaux");
			goto err_invalid;
		}
		if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
		    max_usb3 < 0) {
			tb_sw_warn(sw, "missing baMaxUSB3");
			goto err_invalid;
		}
		if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
		    max_pcie < 0) {
			tb_sw_warn(sw, "missing baMaxPCIe");
			goto err_invalid;
		}
	}

	/*
	 * Buffer allocation passed the validation so we can use it in
	 * path creation.
	 */
	sw->credit_allocation = true;
	if (max_usb3 > 0)
		sw->max_usb3_credits = max_usb3;
	if (min_dp_aux > 0)
		sw->min_dp_aux_credits = min_dp_aux;
	if (min_dp_main > 0)
		sw->min_dp_main_credits = min_dp_main;
	if (max_pcie > 0)
		sw->max_pcie_credits = max_pcie;
	if (max_dma > 0)
		sw->max_dma_credits = max_dma;

	return 0;

err_invalid:
	return -EINVAL;
}
857
858 /**
859 * usb4_switch_query_dp_resource() - Query availability of DP IN resource
860 * @sw: USB4 router
861 * @in: DP IN adapter
862 *
863 * For DP tunneling this function can be used to query availability of
864 * DP IN resource. Returns true if the resource is available for DP
865 * tunneling, false otherwise.
866 */
usb4_switch_query_dp_resource(struct tb_switch * sw,struct tb_port * in)867 bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
868 {
869 u32 metadata = in->port;
870 u8 status;
871 int ret;
872
873 ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
874 &status);
875 /*
876 * If DP resource allocation is not supported assume it is
877 * always available.
878 */
879 if (ret == -EOPNOTSUPP)
880 return true;
881 if (ret)
882 return false;
883
884 return !status;
885 }
886
887 /**
888 * usb4_switch_alloc_dp_resource() - Allocate DP IN resource
889 * @sw: USB4 router
890 * @in: DP IN adapter
891 *
892 * Allocates DP IN resource for DP tunneling using USB4 router
893 * operations. If the resource was allocated returns %0. Otherwise
894 * returns negative errno, in particular %-EBUSY if the resource is
895 * already allocated.
896 */
usb4_switch_alloc_dp_resource(struct tb_switch * sw,struct tb_port * in)897 int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
898 {
899 u32 metadata = in->port;
900 u8 status;
901 int ret;
902
903 ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
904 &status);
905 if (ret == -EOPNOTSUPP)
906 return 0;
907 if (ret)
908 return ret;
909
910 return status ? -EBUSY : 0;
911 }
912
913 /**
914 * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
915 * @sw: USB4 router
916 * @in: DP IN adapter
917 *
918 * Releases the previously allocated DP IN resource.
919 */
usb4_switch_dealloc_dp_resource(struct tb_switch * sw,struct tb_port * in)920 int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
921 {
922 u32 metadata = in->port;
923 u8 status;
924 int ret;
925
926 ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
927 &status);
928 if (ret == -EOPNOTSUPP)
929 return 0;
930 if (ret)
931 return ret;
932
933 return status ? -EIO : 0;
934 }
935
/**
 * usb4_port_index() - Finds matching USB4 port index
 * @sw: USB4 router
 * @port: USB4 protocol or lane adapter
 *
 * Finds matching USB4 port index (starting from %0) that given @port goes
 * through.
 */
int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port)
{
	struct tb_port *p;
	int usb4_idx = 0;

	/* Assume port is primary */
	tb_switch_for_each_port(sw, p) {
		if (!tb_port_is_null(p))
			continue;
		if (tb_is_upstream_port(p))
			continue;
		/*
		 * Only primary lane adapters (link_nr == 0) count toward
		 * the index; the match breaks out before incrementing so
		 * @port itself is not counted.
		 */
		if (!p->link_nr) {
			if (p == port)
				break;
			usb4_idx++;
		}
	}

	return usb4_idx;
}
964
965 /**
966 * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
967 * @sw: USB4 router
968 * @port: USB4 port
969 *
970 * USB4 routers have direct mapping between USB4 ports and PCIe
971 * downstream adapters where the PCIe topology is extended. This
972 * function returns the corresponding downstream PCIe adapter or %NULL
973 * if no such mapping was possible.
974 */
usb4_switch_map_pcie_down(struct tb_switch * sw,const struct tb_port * port)975 struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
976 const struct tb_port *port)
977 {
978 int usb4_idx = usb4_port_index(sw, port);
979 struct tb_port *p;
980 int pcie_idx = 0;
981
982 /* Find PCIe down port matching usb4_port */
983 tb_switch_for_each_port(sw, p) {
984 if (!tb_port_is_pcie_down(p))
985 continue;
986
987 if (pcie_idx == usb4_idx)
988 return p;
989
990 pcie_idx++;
991 }
992
993 return NULL;
994 }
995
996 /**
997 * usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
998 * @sw: USB4 router
999 * @port: USB4 port
1000 *
1001 * USB4 routers have direct mapping between USB4 ports and USB 3.x
1002 * downstream adapters where the USB 3.x topology is extended. This
1003 * function returns the corresponding downstream USB 3.x adapter or
1004 * %NULL if no such mapping was possible.
1005 */
usb4_switch_map_usb3_down(struct tb_switch * sw,const struct tb_port * port)1006 struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
1007 const struct tb_port *port)
1008 {
1009 int usb4_idx = usb4_port_index(sw, port);
1010 struct tb_port *p;
1011 int usb_idx = 0;
1012
1013 /* Find USB3 down port matching usb4_port */
1014 tb_switch_for_each_port(sw, p) {
1015 if (!tb_port_is_usb3_down(p))
1016 continue;
1017
1018 if (usb_idx == usb4_idx)
1019 return p;
1020
1021 usb_idx++;
1022 }
1023
1024 return NULL;
1025 }
1026
1027 /**
1028 * usb4_switch_add_ports() - Add USB4 ports for this router
1029 * @sw: USB4 router
1030 *
1031 * For USB4 router finds all USB4 ports and registers devices for each.
1032 * Can be called to any router.
1033 *
1034 * Return %0 in case of success and negative errno in case of failure.
1035 */
usb4_switch_add_ports(struct tb_switch * sw)1036 int usb4_switch_add_ports(struct tb_switch *sw)
1037 {
1038 struct tb_port *port;
1039
1040 if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
1041 return 0;
1042
1043 tb_switch_for_each_port(sw, port) {
1044 struct usb4_port *usb4;
1045
1046 if (!tb_port_is_null(port))
1047 continue;
1048 if (!port->cap_usb4)
1049 continue;
1050
1051 usb4 = usb4_port_device_add(port);
1052 if (IS_ERR(usb4)) {
1053 usb4_switch_remove_ports(sw);
1054 return PTR_ERR(usb4);
1055 }
1056
1057 port->usb4 = usb4;
1058 }
1059
1060 return 0;
1061 }
1062
1063 /**
1064 * usb4_switch_remove_ports() - Removes USB4 ports from this router
1065 * @sw: USB4 router
1066 *
1067 * Unregisters previously registered USB4 ports.
1068 */
usb4_switch_remove_ports(struct tb_switch * sw)1069 void usb4_switch_remove_ports(struct tb_switch *sw)
1070 {
1071 struct tb_port *port;
1072
1073 tb_switch_for_each_port(sw, port) {
1074 if (port->usb4) {
1075 usb4_port_device_remove(port->usb4);
1076 port->usb4 = NULL;
1077 }
1078 }
1079 }
1080
1081 /**
1082 * usb4_port_unlock() - Unlock USB4 downstream port
1083 * @port: USB4 port to unlock
1084 *
1085 * Unlocks USB4 downstream port so that the connection manager can
1086 * access the router below this port.
1087 */
usb4_port_unlock(struct tb_port * port)1088 int usb4_port_unlock(struct tb_port *port)
1089 {
1090 int ret;
1091 u32 val;
1092
1093 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1094 if (ret)
1095 return ret;
1096
1097 val &= ~ADP_CS_4_LCK;
1098 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
1099 }
1100
1101 /**
1102 * usb4_port_hotplug_enable() - Enables hotplug for a port
1103 * @port: USB4 port to operate on
1104 *
1105 * Enables hot plug events on a given port. This is only intended
1106 * to be used on lane, DP-IN, and DP-OUT adapters.
1107 */
usb4_port_hotplug_enable(struct tb_port * port)1108 int usb4_port_hotplug_enable(struct tb_port *port)
1109 {
1110 int ret;
1111 u32 val;
1112
1113 ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
1114 if (ret)
1115 return ret;
1116
1117 val &= ~ADP_CS_5_DHP;
1118 return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
1119 }
1120
/**
 * usb4_port_reset() - Issue downstream port reset
 * @port: USB4 port to reset
 *
 * Issues downstream port reset to @port by asserting and then
 * de-asserting the DPR bit in PORT_CS_19.
 */
int usb4_port_reset(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Assert Downstream Port Reset */
	val |= PORT_CS_19_DPR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Hold the reset for 10 ms (presumably the spec minimum — confirm) */
	fsleep(10000);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* De-assert the reset to let the port come back up */
	val &= ~PORT_CS_19_DPR;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1159
/* Set or clear the "port configured" (PC) bit in PORT_CS_19 */
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
	u32 val;
	int ret;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val &= ~PORT_CS_19_PC;
	if (configured)
		val |= PORT_CS_19_PC;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1181
/**
 * usb4_port_configure() - Set USB4 port configured
 * @port: USB4 router
 *
 * Sets the USB4 link to be configured for power management purposes.
 */
int usb4_port_configure(struct tb_port *port)
{
	return usb4_port_set_configured(port, true);
}
1192
/**
 * usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 router
 *
 * Sets the USB4 link to be unconfigured for power management purposes.
 * Any write error is intentionally ignored here.
 */
void usb4_port_unconfigure(struct tb_port *port)
{
	usb4_port_set_configured(port, false);
}
1203
/* Set or clear the inter-domain (PID) bit in PORT_CS_19 */
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
	u32 val;
	int ret;

	if (!port->cap_usb4)
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	val &= ~PORT_CS_19_PID;
	if (configured)
		val |= PORT_CS_19_PID;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_19, 1);
}
1225
/**
 * usb4_port_configure_xdomain() - Configure port for XDomain
 * @port: USB4 port connected to another host
 * @xd: XDomain that is connected to the port
 *
 * Marks the USB4 port as being connected to another host and updates
 * the link type. Returns %0 in success and negative errno in failure.
 */
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
	/* Record whether the inter-domain link runs in USB4 mode */
	xd->link_usb4 = link_is_usb4(port);
	return usb4_set_xdomain_configured(port, true);
}
1239
/**
 * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
 * @port: USB4 port that was connected to another host
 *
 * Clears USB4 port from being marked as XDomain. Any write error is
 * intentionally ignored here.
 */
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
	usb4_set_xdomain_configured(port, false);
}
1250
/*
 * Poll @offset until (value & @bit) == @value or @timeout_msec elapses,
 * sleeping @delay_usec between reads. Returns %0 on match, %-ETIMEDOUT
 * on timeout or a negative errno on read failure.
 */
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
				  u32 value, int timeout_msec, unsigned long delay_usec)
{
	ktime_t deadline = ktime_add_ms(ktime_get(), timeout_msec);

	for (;;) {
		u32 reg;
		int ret;

		ret = tb_port_read(port, &reg, TB_CFG_PORT, offset, 1);
		if (ret)
			return ret;

		if ((reg & bit) == value)
			return 0;

		fsleep(delay_usec);

		if (!ktime_before(ktime_get(), deadline))
			return -ETIMEDOUT;
	}
}
1272
/* Read up to USB4_DATA_DWORDS dwords from the port data registers */
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_read(port, data, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_2, dwords);
}
1281
/* Write up to USB4_DATA_DWORDS dwords into the port data registers */
static int usb4_port_write_data(struct tb_port *port, const void *data,
				size_t dwords)
{
	if (dwords > USB4_DATA_DWORDS)
		return -EINVAL;

	return tb_port_write(port, data, TB_CFG_PORT,
			     port->cap_usb4 + PORT_CS_2, dwords);
}
1291
/**
 * usb4_port_sb_read() - Read from sideband register
 * @port: USB4 port to read
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Buffer where the sideband data is copied
 * @size: Size of @buf
 *
 * Reads data from sideband register @reg and copies it into @buf.
 * Returns %0 in case of success and negative errno in case of failure.
 */
int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
		      u8 reg, void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Build the access request: register index, length and target */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* The pending bit clears when the transaction has completed */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Check the completion status reported in PORT_CS_1 */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	/* A NULL @buf performs the transaction without copying data out */
	return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
1343
/**
 * usb4_port_sb_write() - Write to sideband register
 * @port: USB4 port to write
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @reg: Sideband register index
 * @buf: Data to write
 * @size: Size of @buf
 *
 * Writes @buf to sideband register @reg. Returns %0 in case of success
 * and negative errno in case of failure.
 */
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
		       u8 index, u8 reg, const void *buf, u8 size)
{
	size_t dwords = DIV_ROUND_UP(size, 4);
	int ret;
	u32 val;

	if (!port->cap_usb4)
		return -EINVAL;

	/* Stage the payload first; a NULL @buf writes no data */
	if (buf) {
		ret = usb4_port_write_data(port, buf, dwords);
		if (ret)
			return ret;
	}

	/* Build the access request: register index, length and target */
	val = reg;
	val |= size << PORT_CS_1_LENGTH_SHIFT;
	val |= PORT_CS_1_WNR_WRITE;
	val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
	if (target == USB4_SB_TARGET_RETIMER)
		val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
	val |= PORT_CS_1_PND;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* The pending bit clears when the transaction has completed */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
				     PORT_CS_1_PND, 0, 500, USB4_PORT_SB_DELAY);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_1, 1);
	if (ret)
		return ret;

	/* Check the completion status reported in PORT_CS_1 */
	if (val & PORT_CS_1_NR)
		return -ENODEV;
	if (val & PORT_CS_1_RC)
		return -EIO;

	return 0;
}
1402
/* Map a sideband opcode completion code to a negative errno (0 = success) */
static int usb4_port_sb_opcode_err_to_errno(u32 val)
{
	if (!val)
		return 0;
	if (val == USB4_SB_OPCODE_ERR)
		return -EAGAIN;
	if (val == USB4_SB_OPCODE_ONS)
		return -EOPNOTSUPP;
	return -EIO;
}
1416
/*
 * Run sideband operation @opcode against @target and poll for its
 * completion up to @timeout_msec. Returns %0 on success, a mapped
 * negative errno on failure or %-ETIMEDOUT.
 */
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
			   u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
	ktime_t timeout;
	u32 val;
	int ret;

	/* Writing the opcode to USB4_SB_OPCODE starts the operation */
	val = opcode;
	ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
				 sizeof(val));
	if (ret)
		return ret;

	timeout = ktime_add_ms(ktime_get(), timeout_msec);

	do {
		/* Check results */
		ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
					&val, sizeof(val));
		if (ret)
			return ret;

		/*
		 * The opcode register reads back the opcode until the
		 * target completes, then holds a completion code
		 * (0 for success) which we map to an errno.
		 */
		if (val != opcode)
			return usb4_port_sb_opcode_err_to_errno(val);

		fsleep(USB4_PORT_SB_DELAY);
	} while (ktime_before(ktime_get(), timeout));

	return -ETIMEDOUT;
}
1447
/* Metadata 0 takes the router offline, 1 brings it back online */
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
	u32 val;
	int ret;

	val = offline ? 0 : 1;
	ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				 USB4_SB_METADATA, &val, sizeof(val));
	if (ret)
		return ret;

	val = USB4_SB_OPCODE_ROUTER_OFFLINE;
	return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
				  USB4_SB_OPCODE, &val, sizeof(val));
}
1462
/**
 * usb4_port_router_offline() - Put the USB4 port to offline mode
 * @port: USB4 port
 *
 * This function puts the USB4 port into offline mode. In this mode the
 * port does not react on hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 links is not up.
 *
 * Returns %0 in case of success and negative errno if there was an
 * error.
 */
int usb4_port_router_offline(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, true);
}
1478
/**
 * usb4_port_router_online() - Put the USB4 port back to online
 * @port: USB4 port
 *
 * Makes the USB4 port functional again. The reverse of
 * usb4_port_router_offline().
 */
int usb4_port_router_online(struct tb_port *port)
{
	return usb4_port_set_router_offline(port, false);
}
1489
1490 /**
1491 * usb4_port_enumerate_retimers() - Send RT broadcast transaction
1492 * @port: USB4 port
1493 *
1494 * This forces the USB4 port to send broadcast RT transaction which
1495 * makes the retimers on the link to assign index to themselves. Returns
1496 * %0 in case of success and negative errno if there was an error.
1497 */
usb4_port_enumerate_retimers(struct tb_port * port)1498 int usb4_port_enumerate_retimers(struct tb_port *port)
1499 {
1500 u32 val;
1501
1502 val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
1503 return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
1504 USB4_SB_OPCODE, &val, sizeof(val));
1505 }
1506
1507 /**
1508 * usb4_port_clx_supported() - Check if CLx is supported by the link
1509 * @port: Port to check for CLx support for
1510 *
1511 * PORT_CS_18_CPS bit reflects if the link supports CLx including
1512 * active cables (if connected on the link).
1513 */
usb4_port_clx_supported(struct tb_port * port)1514 bool usb4_port_clx_supported(struct tb_port *port)
1515 {
1516 int ret;
1517 u32 val;
1518
1519 ret = tb_port_read(port, &val, TB_CFG_PORT,
1520 port->cap_usb4 + PORT_CS_18, 1);
1521 if (ret)
1522 return false;
1523
1524 return !!(val & PORT_CS_18_CPS);
1525 }
1526
1527 /**
1528 * usb4_port_asym_supported() - If the port supports asymmetric link
1529 * @port: USB4 port
1530 *
1531 * Checks if the port and the cable supports asymmetric link and returns
1532 * %true in that case.
1533 */
usb4_port_asym_supported(struct tb_port * port)1534 bool usb4_port_asym_supported(struct tb_port *port)
1535 {
1536 u32 val;
1537
1538 if (!port->cap_usb4)
1539 return false;
1540
1541 if (tb_port_read(port, &val, TB_CFG_PORT, port->cap_usb4 + PORT_CS_18, 1))
1542 return false;
1543
1544 return !!(val & PORT_CS_18_CSA);
1545 }
1546
1547 /**
1548 * usb4_port_asym_set_link_width() - Set link width to asymmetric or symmetric
1549 * @port: USB4 port
1550 * @width: Asymmetric width to configure
1551 *
1552 * Sets USB4 port link width to @width. Can be called for widths where
1553 * usb4_port_asym_width_supported() returned @true.
1554 */
usb4_port_asym_set_link_width(struct tb_port * port,enum tb_link_width width)1555 int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
1556 {
1557 u32 val;
1558 int ret;
1559
1560 if (!port->cap_phy)
1561 return -EINVAL;
1562
1563 ret = tb_port_read(port, &val, TB_CFG_PORT,
1564 port->cap_phy + LANE_ADP_CS_1, 1);
1565 if (ret)
1566 return ret;
1567
1568 val &= ~LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK;
1569 switch (width) {
1570 case TB_LINK_WIDTH_DUAL:
1571 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
1572 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_DUAL);
1573 break;
1574 case TB_LINK_WIDTH_ASYM_TX:
1575 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
1576 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_TX);
1577 break;
1578 case TB_LINK_WIDTH_ASYM_RX:
1579 val |= FIELD_PREP(LANE_ADP_CS_1_TARGET_WIDTH_ASYM_MASK,
1580 LANE_ADP_CS_1_TARGET_WIDTH_ASYM_RX);
1581 break;
1582 default:
1583 return -EINVAL;
1584 }
1585
1586 return tb_port_write(port, &val, TB_CFG_PORT,
1587 port->cap_phy + LANE_ADP_CS_1, 1);
1588 }
1589
/**
 * usb4_port_asym_start() - Start symmetry change and wait for completion
 * @port: USB4 port
 *
 * Start symmetry change of the link to asymmetric or symmetric
 * (according to what was previously set in tb_port_set_link_width().
 * Wait for completion of the change.
 *
 * Returns %0 in case of success, %-ETIMEDOUT if case of timeout or
 * a negative errno in case of a failure.
 */
int usb4_port_asym_start(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/* Kick off the transition by setting the start bit */
	val &= ~PORT_CS_19_START_ASYM;
	val |= FIELD_PREP(PORT_CS_19_START_ASYM, 1);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_usb4 + PORT_CS_19, 1);
	if (ret)
		return ret;

	/*
	 * Wait for PORT_CS_19_START_ASYM to be 0. This means the USB4
	 * port started the symmetry transition.
	 */
	ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_19,
				     PORT_CS_19_START_ASYM, 0, 1000,
				     USB4_PORT_DELAY);
	if (ret)
		return ret;

	/* Then wait for the transition to be completed */
	return usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_18,
				      PORT_CS_18_TIP, 0, 5000, USB4_PORT_DELAY);
}
1633
1634 /**
1635 * usb4_port_margining_caps() - Read USB4 port marginig capabilities
1636 * @port: USB4 port
1637 * @target: Sideband target
1638 * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
1639 * @caps: Array with at least two elements to hold the results
1640 * @ncaps: Number of elements in the caps array
1641 *
1642 * Reads the USB4 port lane margining capabilities into @caps.
1643 */
usb4_port_margining_caps(struct tb_port * port,enum usb4_sb_target target,u8 index,u32 * caps,size_t ncaps)1644 int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
1645 u8 index, u32 *caps, size_t ncaps)
1646 {
1647 int ret;
1648
1649 ret = usb4_port_sb_op(port, target, index,
1650 USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
1651 if (ret)
1652 return ret;
1653
1654 return usb4_port_sb_read(port, target, index, USB4_SB_DATA, caps,
1655 sizeof(*caps) * ncaps);
1656 }
1657
/**
 * usb4_port_hw_margin() - Run hardware lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 hardware margining
 * @results: Array to hold the results
 * @nresults: Number of elements in the results array
 *
 * Runs hardware lane margining on USB4 port and returns the result in
 * @results.
 */
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results, size_t nresults)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Encode the margining parameters into the metadata word */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_HW_TIME;
	if (params->right_high || params->upper_eye)
		val |= USB4_MARGIN_HW_RHU;
	if (params->ber_level)
		val |= FIELD_PREP(USB4_MARGIN_HW_BER_MASK, params->ber_level);
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_HW_OPT_VOLTAGE;

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	/* Run the operation and then read back the results */
	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results) * nresults);
}
1703
/**
 * usb4_port_sw_margin() - Run software lane margining on port
 * @port: USB4 port
 * @target: Sideband target
 * @index: Retimer index if target is %USB4_SB_TARGET_RETIMER
 * @params: Parameters for USB4 software margining
 * @results: Data word for the operation completion data
 *
 * Runs software lane margining on USB4 port. Read back the error
 * counters by calling usb4_port_sw_margin_errors(). Returns %0 in
 * success and negative errno otherwise.
 */
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
			u8 index, const struct usb4_port_margining_params *params,
			u32 *results)
{
	u32 val;
	int ret;

	if (WARN_ON_ONCE(!params))
		return -EINVAL;

	/* Encode the margining parameters into the metadata word */
	val = params->lanes;
	if (params->time)
		val |= USB4_MARGIN_SW_TIME;
	if (params->optional_voltage_offset_range)
		val |= USB4_MARGIN_SW_OPT_VOLTAGE;
	if (params->right_high)
		val |= USB4_MARGIN_SW_RH;
	if (params->upper_eye)
		val |= USB4_MARGIN_SW_UPPER_EYE;
	val |= FIELD_PREP(USB4_MARGIN_SW_COUNTER_MASK, params->error_counter);
	val |= FIELD_PREP(USB4_MARGIN_SW_VT_MASK, params->voltage_time_offset);

	ret = usb4_port_sb_write(port, target, index, USB4_SB_METADATA, &val,
				 sizeof(val));
	if (ret)
		return ret;

	/* Run the operation and then read back the completion data */
	ret = usb4_port_sb_op(port, target, index,
			      USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, target, index, USB4_SB_DATA, results,
				 sizeof(*results));

}
1752
1753 /**
1754 * usb4_port_sw_margin_errors() - Read the software margining error counters
1755 * @port: USB4 port
1756 * @target: Sideband target
1757 * @index: Retimer index if taget is %USB4_SB_TARGET_RETIMER
1758 * @errors: Error metadata is copied here.
1759 *
1760 * This reads back the software margining error counters from the port.
1761 * Returns %0 in success and negative errno otherwise.
1762 */
usb4_port_sw_margin_errors(struct tb_port * port,enum usb4_sb_target target,u8 index,u32 * errors)1763 int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
1764 u8 index, u32 *errors)
1765 {
1766 int ret;
1767
1768 ret = usb4_port_sb_op(port, target, index,
1769 USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
1770 if (ret)
1771 return ret;
1772
1773 return usb4_port_sb_read(port, target, index, USB4_SB_METADATA, errors,
1774 sizeof(*errors));
1775 }
1776
/* Run a sideband operation against the retimer at @index on @port */
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
				       enum usb4_sb_opcode opcode,
				       int timeout_msec)
{
	return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
			       timeout_msec);
}
1784
/**
 * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Enables sideband channel transactions on SBTX. Can be used when USB4
 * link does not go up, for example if there is no device connected.
 */
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
	int ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				   500);

	/* Any result other than "no response" is final */
	if (ret != -ENODEV)
		return ret;

	/*
	 * Per the USB4 retimer spec, the retimer is not required to
	 * send an RT (Retimer Transaction) response for the first
	 * SET_INBOUND_SBTX command, so retry once on -ENODEV.
	 */
	return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
				    500);
}
1811
/**
 * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
 * @port: USB4 port
 * @index: Retimer index
 *
 * Disables sideband channel transactions on SBTX. The reverse of
 * usb4_port_retimer_set_inbound_sbtx().
 */
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
	return usb4_port_retimer_op(port, index,
				    USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
1825
1826 /**
1827 * usb4_port_retimer_is_last() - Is the retimer last on-board retimer
1828 * @port: USB4 port
1829 * @index: Retimer index
1830 *
1831 * If the retimer at @index is last one (connected directly to the
1832 * Type-C port) this function returns %1. If it is not returns %0. If
1833 * the retimer is not present returns %-ENODEV. Otherwise returns
1834 * negative errno.
1835 */
usb4_port_retimer_is_last(struct tb_port * port,u8 index)1836 int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
1837 {
1838 u32 metadata;
1839 int ret;
1840
1841 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
1842 500);
1843 if (ret)
1844 return ret;
1845
1846 ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
1847 USB4_SB_METADATA, &metadata, sizeof(metadata));
1848 return ret ? ret : metadata & 1;
1849 }
1850
1851 /**
1852 * usb4_port_retimer_is_cable() - Is the retimer cable retimer
1853 * @port: USB4 port
1854 * @index: Retimer index
1855 *
1856 * If the retimer at @index is last cable retimer this function returns
1857 * %1 and %0 if it is on-board retimer. In case a retimer is not present
1858 * at @index returns %-ENODEV. Otherwise returns negative errno.
1859 */
usb4_port_retimer_is_cable(struct tb_port * port,u8 index)1860 int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
1861 {
1862 u32 metadata;
1863 int ret;
1864
1865 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_CABLE_RETIMER,
1866 500);
1867 if (ret)
1868 return ret;
1869
1870 ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
1871 USB4_SB_METADATA, &metadata, sizeof(metadata));
1872 return ret ? ret : metadata & 1;
1873 }
1874
1875 /**
1876 * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
1877 * @port: USB4 port
1878 * @index: Retimer index
1879 *
1880 * Reads NVM sector size (in bytes) of a retimer at @index. This
1881 * operation can be used to determine whether the retimer supports NVM
1882 * upgrade for example. Returns sector size in bytes or negative errno
1883 * in case of error. Specifically returns %-ENODEV if there is no
1884 * retimer at @index.
1885 */
usb4_port_retimer_nvm_sector_size(struct tb_port * port,u8 index)1886 int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
1887 {
1888 u32 metadata;
1889 int ret;
1890
1891 ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
1892 500);
1893 if (ret)
1894 return ret;
1895
1896 ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
1897 USB4_SB_METADATA, &metadata, sizeof(metadata));
1898 return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
1899 }
1900
1901 /**
1902 * usb4_port_retimer_nvm_set_offset() - Set NVM write offset
1903 * @port: USB4 port
1904 * @index: Retimer index
1905 * @address: Start offset
1906 *
1907 * Exlicitly sets NVM write offset. Normally when writing to NVM this is
1908 * done automatically by usb4_port_retimer_nvm_write().
1909 *
1910 * Returns %0 in success and negative errno if there was a failure.
1911 */
usb4_port_retimer_nvm_set_offset(struct tb_port * port,u8 index,unsigned int address)1912 int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
1913 unsigned int address)
1914 {
1915 u32 metadata, dwaddress;
1916 int ret;
1917
1918 dwaddress = address / 4;
1919 metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
1920 USB4_NVM_SET_OFFSET_MASK;
1921
1922 ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
1923 USB4_SB_METADATA, &metadata, sizeof(metadata));
1924 if (ret)
1925 return ret;
1926
1927 return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
1928 500);
1929 }
1930
/* Context passed to the retimer NVM block read/write callbacks */
struct retimer_info {
	struct tb_port *port;	/* USB4 port the retimer is reached through */
	u8 index;		/* Retimer index on the link */
};
1935
/* tb_nvm_write_data() callback: stage one block and tell the retimer to commit */
static int usb4_port_retimer_nvm_write_next_block(void *data,
	unsigned int dwaddress, const void *buf, size_t dwords)

{
	const struct retimer_info *info = data;
	int ret;

	ret = usb4_port_sb_write(info->port, USB4_SB_TARGET_RETIMER,
				 info->index, USB4_SB_DATA, buf, dwords * 4);
	if (ret)
		return ret;

	return usb4_port_retimer_op(info->port, info->index,
				    USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
1953
1954 /**
1955 * usb4_port_retimer_nvm_write() - Write to retimer NVM
1956 * @port: USB4 port
1957 * @index: Retimer index
1958 * @address: Byte address where to start the write
1959 * @buf: Data to write
1960 * @size: Size in bytes how much to write
1961 *
1962 * Writes @size bytes from @buf to the retimer NVM. Used for NVM
1963 * upgrade. Returns %0 if the data was written successfully and negative
1964 * errno in case of failure. Specifically returns %-ENODEV if there is
1965 * no retimer at @index.
1966 */
usb4_port_retimer_nvm_write(struct tb_port * port,u8 index,unsigned int address,const void * buf,size_t size)1967 int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
1968 const void *buf, size_t size)
1969 {
1970 struct retimer_info info = { .port = port, .index = index };
1971 int ret;
1972
1973 ret = usb4_port_retimer_nvm_set_offset(port, index, address);
1974 if (ret)
1975 return ret;
1976
1977 return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
1978 usb4_port_retimer_nvm_write_next_block, &info);
1979 }
1980
1981 /**
1982 * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
1983 * @port: USB4 port
1984 * @index: Retimer index
1985 *
1986 * After the new NVM image has been written via usb4_port_retimer_nvm_write()
1987 * this function can be used to trigger the NVM upgrade process. If
1988 * successful the retimer restarts with the new NVM and may not have the
1989 * index set so one needs to call usb4_port_enumerate_retimers() to
1990 * force index to be assigned.
1991 */
usb4_port_retimer_nvm_authenticate(struct tb_port * port,u8 index)1992 int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
1993 {
1994 u32 val;
1995
1996 /*
1997 * We need to use the raw operation here because once the
1998 * authentication completes the retimer index is not set anymore
1999 * so we do not get back the status now.
2000 */
2001 val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
2002 return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
2003 USB4_SB_OPCODE, &val, sizeof(val));
2004 }
2005
/**
 * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
 * @port: USB4 port
 * @index: Retimer index
 * @status: Raw status code read from metadata
 *
 * This can be called after usb4_port_retimer_nvm_authenticate() and
 * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
 *
 * Returns %0 if the authentication status was successfully read. The
 * completion metadata (the result) is then stored into @status. If
 * reading the status fails, returns negative errno.
 */
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
					      u32 *status)
{
	u32 metadata, val;
	int ret;

	ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				USB4_SB_OPCODE, &val, sizeof(val));
	if (ret)
		return ret;

	ret = usb4_port_sb_opcode_err_to_errno(val);
	switch (ret) {
	case 0:
		/* Upgrade succeeded, nothing extra to report */
		*status = 0;
		return 0;

	case -EAGAIN:
		/*
		 * Generic error reported; the detailed completion code
		 * is found in the metadata register.
		 */
		ret = usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
					USB4_SB_METADATA, &metadata,
					sizeof(metadata));
		if (ret)
			return ret;

		*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
		return 0;

	default:
		return ret;
	}
}
2050
/* tb_nvm_read_data() callback: read one block of retimer NVM into @buf */
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
					    void *buf, size_t dwords)
{
	const struct retimer_info *info = data;
	struct tb_port *port = info->port;
	u8 index = info->index;
	u32 metadata;
	int ret;

	/*
	 * Encode the dword address, and the length only for partial
	 * blocks (presumably a zero length field means a full
	 * USB4_DATA_DWORDS read — confirm against the USB4 spec).
	 */
	metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
	if (dwords < USB4_DATA_DWORDS)
		metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;

	ret = usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_METADATA, &metadata, sizeof(metadata));
	if (ret)
		return ret;

	ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
	if (ret)
		return ret;

	return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index,
				 USB4_SB_DATA, buf, dwords * 4);
}
2076
/**
 * usb4_port_retimer_nvm_read() - Read contents of retimer NVM
 * @port: USB4 port
 * @index: Retimer index
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 * Specifically returns %-ENODEV if there is no retimer at @index.
 */
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
			       unsigned int address, void *buf, size_t size)
{
	struct retimer_info info = { .port = port, .index = index };

	/* tb_nvm_read_data() splits the read into dword-sized blocks and
	 * retries each block up to USB4_DATA_RETRIES times.
	 */
	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
				usb4_port_retimer_nvm_read_block, &info);
}
2097
2098 static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port * port,unsigned int bw)2099 usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
2100 {
2101 /* Take the possible bandwidth limitation into account */
2102 if (port->max_bw)
2103 return min(bw, port->max_bw);
2104 return bw;
2105 }
2106
/**
 * usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
 * @port: USB3 adapter port
 *
 * Return maximum supported link rate of a USB3 adapter in Mb/s.
 * Negative errno in case of error.
 */
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
	int ret, lr;
	u32 val;

	/* Works for both upstream and downstream USB3 adapters */
	if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_4, 1);
	if (ret)
		return ret;

	/* Map the adapter's maximum supported link rate field to Mb/s */
	lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
	ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;

	return usb4_usb3_port_max_bandwidth(port, ret);
}
2132
/*
 * Asserts (or de-asserts) the CM request bit on a host router USB3
 * downstream adapter and waits for the adapter to acknowledge by mirroring
 * the value in the HCA bit. Only valid for the host router.
 */
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
	int ret;
	u32 val;

	if (!tb_port_is_usb3_down(port))
		return -EINVAL;
	/* Only allowed on the host router (route 0) */
	if (tb_route(port->sw))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	if (request)
		val |= ADP_USB3_CS_2_CMR;
	else
		val &= ~ADP_USB3_CS_2_CMR;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/*
	 * We can use val here directly as the CMR bit is in the same place
	 * as HCA. Just mask out others.
	 */
	val &= ADP_USB3_CS_2_CMR;
	return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
				      ADP_USB3_CS_1_HCA, val, 1500,
				      USB4_PORT_DELAY);
}
2167
/* Assert the CM request bit and wait for the adapter to acknowledge */
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, true);
}
2172
/* De-assert the CM request bit and wait for the adapter to acknowledge */
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
	return usb4_usb3_port_cm_request(port, false);
}
2177
/*
 * Convert a USB3 adapter bandwidth register value (units of 512 << scale
 * bytes per microframe) into Mb/s.
 */
static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
	unsigned long bytes_per_uframe = bw * 512UL << scale;

	/* 8000 microframes per second; MEGA converts to Mb/s */
	return DIV_ROUND_CLOSEST(bytes_per_uframe * 8000, MEGA);
}
2185
mbps_to_usb3_bw(unsigned int mbps,u8 scale)2186 static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
2187 {
2188 unsigned long uframes;
2189
2190 /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
2191 uframes = ((unsigned long)mbps * MEGA) / 8000;
2192 return DIV_ROUND_UP(uframes, 512UL << scale);
2193 }
2194
/*
 * Reads the currently allocated upstream/downstream USB3 bandwidth from
 * the adapter registers and converts the values to Mb/s. Caller must have
 * the CM request asserted (see usb4_usb3_port_set_cm_request()).
 */
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
						   int *upstream_bw,
						   int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	/* The scale register determines the granularity of the bw fields */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_2_AUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
2222
/**
 * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
 * @port: USB3 adapter port
 * @upstream_bw: Allocated upstream bandwidth is stored here
 * @downstream_bw: Allocated downstream bandwidth is stored here
 *
 * Stores currently allocated USB3 bandwidth into @upstream_bw and
 * @downstream_bw in Mb/s. Returns %0 in case of success and negative
 * errno in failure.
 */
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
				       int *downstream_bw)
{
	int ret;

	/* Bracket the register read with the CM request handshake */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
						      downstream_bw);
	usb4_usb3_port_clear_cm_request(port);

	return ret;
}
2248
/*
 * Reads the currently consumed upstream/downstream USB3 bandwidth from
 * the adapter registers and converts the values to Mb/s. Caller must have
 * the CM request asserted (see usb4_usb3_port_set_cm_request()).
 */
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
						  int *upstream_bw,
						  int *downstream_bw)
{
	u32 val, bw, scale;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_1, 1);
	if (ret)
		return ret;

	/* The scale register determines the granularity of the bw fields */
	ret = tb_port_read(port, &scale, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	scale &= ADP_USB3_CS_3_SCALE_MASK;

	bw = val & ADP_USB3_CS_1_CUBW_MASK;
	*upstream_bw = usb3_bw_to_mbps(bw, scale);

	bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
	*downstream_bw = usb3_bw_to_mbps(bw, scale);

	return 0;
}
2276
/*
 * Programs the allocated upstream/downstream USB3 bandwidth (in Mb/s)
 * into the adapter registers, picking the smallest scale under which the
 * larger of the two values fits in the 12-bit bandwidth fields. Caller
 * must have the CM request asserted.
 */
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
						    int upstream_bw,
						    int downstream_bw)
{
	u32 val, ubw, dbw, scale;
	int ret, max_bw;

	/* Figure out suitable scale */
	scale = 0;
	max_bw = max(upstream_bw, downstream_bw);
	while (scale < 64) {
		if (mbps_to_usb3_bw(max_bw, scale) < 4096)
			break;
		scale++;
	}

	if (WARN_ON(scale >= 64))
		return -EINVAL;

	/* The scale must be programmed before the bandwidth values */
	ret = tb_port_write(port, &scale, TB_CFG_PORT,
			    port->cap_adap + ADP_USB3_CS_3, 1);
	if (ret)
		return ret;

	ubw = mbps_to_usb3_bw(upstream_bw, scale);
	dbw = mbps_to_usb3_bw(downstream_bw, scale);

	tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_USB3_CS_2, 1);
	if (ret)
		return ret;

	val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
	val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
	val |= ubw;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_USB3_CS_2, 1);
}
2318
/**
 * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
 * @port: USB3 adapter port
 * @upstream_bw: New upstream bandwidth
 * @downstream_bw: New downstream bandwidth
 *
 * This can be used to set how much bandwidth is allocated for the USB3
 * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
 * new values programmed to the USB3 adapter allocation registers. If
 * the values are lower than what is currently consumed the allocation
 * is set to what is currently consumed instead (consumed bandwidth
 * cannot be taken away by CM). The actual new values are returned in
 * @upstream_bw and @downstream_bw.
 *
 * Returns %0 in case of success and negative errno if there was a
 * failure.
 */
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
				      int *downstream_bw)
{
	int ret, consumed_up, consumed_down, allocate_up, allocate_down;

	/* All register access below happens under the CM request handshake */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/* Don't allow it go lower than what is consumed */
	allocate_up = max(*upstream_bw, consumed_up);
	allocate_down = max(*downstream_bw, consumed_down);

	ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
						       allocate_down);
	if (ret)
		goto err_request;

	*upstream_bw = allocate_up;
	*downstream_bw = allocate_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
2366
/**
 * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
 * @port: USB3 adapter port
 * @upstream_bw: New allocated upstream bandwidth
 * @downstream_bw: New allocated downstream bandwidth
 *
 * Releases USB3 allocated bandwidth down to what is actually consumed.
 * The new bandwidth is returned in @upstream_bw and @downstream_bw.
 *
 * Returns %0 in success and negative errno in case of failure.
 */
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
				     int *downstream_bw)
{
	int ret, consumed_up, consumed_down;

	/* All register access below happens under the CM request handshake */
	ret = usb4_usb3_port_set_cm_request(port);
	if (ret)
		return ret;

	ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
						     &consumed_down);
	if (ret)
		goto err_request;

	/*
	 * Always keep 900 Mb/s to make sure xHCI has at least some
	 * bandwidth available for isochronous traffic.
	 */
	if (consumed_up < 900)
		consumed_up = 900;
	if (consumed_down < 900)
		consumed_down = 900;

	ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
						       consumed_down);
	if (ret)
		goto err_request;

	*upstream_bw = consumed_up;
	*downstream_bw = consumed_down;

err_request:
	usb4_usb3_port_clear_cm_request(port);
	return ret;
}
2413
is_usb4_dpin(const struct tb_port * port)2414 static bool is_usb4_dpin(const struct tb_port *port)
2415 {
2416 if (!tb_port_is_dpin(port))
2417 return false;
2418 if (!tb_switch_is_usb4(port->sw))
2419 return false;
2420 return true;
2421 }
2422
/**
 * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
 * @port: DP IN adapter
 * @cm_id: CM ID to assign
 *
 * Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
 * support this.
 */
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CM_ID_MASK;
	val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2451
/**
 * usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
 *					     supported
 * @port: DP IN adapter to check
 *
 * Can be called to any DP IN adapter. Returns true if the adapter
 * supports USB4 bandwidth allocation mode, false otherwise.
 */
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!is_usb4_dpin(port))
		return false;

	/* The capability is advertised in the DP local capabilities */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_LOCAL_CAP, 1);
	if (ret)
		return false;

	return !!(val & DP_COMMON_CAP_BW_MODE);
}
2475
/**
 * usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
 *					   enabled
 * @port: DP IN adapter to check
 *
 * Can be called to any DP IN adapter. Returns true if the bandwidth
 * allocation mode has been enabled, false otherwise.
 */
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
	int ret;
	u32 val;

	if (!is_usb4_dpin(port))
		return false;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_8, 1);
	if (ret)
		return false;

	/* DPME bit tells whether the DPTX enabled the mode */
	return !!(val & ADP_DP_CS_8_DPME);
}
2499
/**
 * usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
 *						    bandwidth allocation mode
 * @port: DP IN adapter
 * @supported: Does the CM support bandwidth allocation mode
 *
 * Can be called to any DP IN adapter. Sets or clears the CM support bit
 * of the DP IN adapter. Returns %0 in success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
 * does not support this.
 */
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
						 bool supported)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	if (supported)
		val |= ADP_DP_CS_2_CMMS;
	else
		val &= ~ADP_DP_CS_2_CMMS;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2533
/**
 * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
 * @port: DP IN adapter
 *
 * Reads bandwidth allocation Group ID from the DP IN adapter and
 * returns it. If the adapter does not support setting Group_ID
 * %-EOPNOTSUPP is returned.
 */
int usb4_dp_port_group_id(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}
2557
/**
 * usb4_dp_port_set_group_id() - Set adapter Group ID
 * @port: DP IN adapter
 * @group_id: Group ID for the adapter
 *
 * Sets bandwidth allocation mode Group ID for the DP IN adapter.
 * Returns %0 in case of success and negative errno otherwise.
 * Specifically returns %-EOPNOTSUPP if the adapter does not support
 * this.
 */
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
	val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2587
/**
 * usb4_dp_port_nrd() - Read non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s is placed here
 * @lanes: Non-reduced lanes are placed here
 *
 * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
 * %0 in success and negative errno otherwise. Specifically returns
 * %-EOPNOTSUPP if the adapter does not support this.
 */
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
	u32 val, tmp;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	/* Decode the NRD maximum link rate field to Mb/s */
	tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
	switch (tmp) {
	case DP_COMMON_CAP_RATE_RBR:
		*rate = 1620;
		break;
	case DP_COMMON_CAP_RATE_HBR:
		*rate = 2700;
		break;
	case DP_COMMON_CAP_RATE_HBR2:
		*rate = 5400;
		break;
	case DP_COMMON_CAP_RATE_HBR3:
		*rate = 8100;
		break;
	}

	/* Decode the NRD maximum lane count field */
	tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
	switch (tmp) {
	case DP_COMMON_CAP_1_LANE:
		*lanes = 1;
		break;
	case DP_COMMON_CAP_2_LANES:
		*lanes = 2;
		break;
	case DP_COMMON_CAP_4_LANES:
		*lanes = 4;
		break;
	}

	return 0;
}
2642
/**
 * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
 * @port: DP IN adapter
 * @rate: Non-reduced rate in Mb/s
 * @lanes: Non-reduced lanes
 *
 * Before the capabilities reduction this function can be used to set
 * the non-reduced values for the DP IN adapter. Returns %0 in success
 * and negative errno otherwise. If the adapter does not support this
 * %-EOPNOTSUPP is returned. Returns %-EINVAL for an unsupported @rate
 * or @lanes value.
 */
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_NRD_MLR_MASK;

	switch (rate) {
	case 1620:
		/* RBR encodes as 0 so nothing to set */
		break;
	case 2700:
		val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 5400:
		val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	case 8100:
		val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
			& ADP_DP_CS_2_NRD_MLR_MASK;
		break;
	default:
		return -EINVAL;
	}

	val &= ~ADP_DP_CS_2_NRD_MLC_MASK;

	switch (lanes) {
	case 1:
		/* One lane encodes as 0 so nothing to set */
		break;
	case 2:
		val |= DP_COMMON_CAP_2_LANES;
		break;
	case 4:
		val |= DP_COMMON_CAP_4_LANES;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2706
/**
 * usb4_dp_port_granularity() - Return granularity for the bandwidth values
 * @port: DP IN adapter
 *
 * Reads the programmed granularity from @port. If the DP IN adapter does
 * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
 * errno in other error cases. On success returns the granularity in Mb/s
 * (250, 500 or 1000).
 */
int usb4_dp_port_granularity(struct tb_port *port)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ADP_DP_CS_2_GR_MASK;
	val >>= ADP_DP_CS_2_GR_SHIFT;

	switch (val) {
	case ADP_DP_CS_2_GR_0_25G:
		return 250;
	case ADP_DP_CS_2_GR_0_5G:
		return 500;
	case ADP_DP_CS_2_GR_1G:
		return 1000;
	}

	/* Unknown encoding in the granularity field */
	return -EINVAL;
}
2742
/**
 * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
 * @port: DP IN adapter
 * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
 *
 * Sets the granularity used with the estimated, allocated and requested
 * bandwidth. Returns %0 in success and negative errno otherwise. If the
 * adapter does not support this %-EOPNOTSUPP is returned. Returns
 * %-EINVAL for an unsupported @granularity value.
 */
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
	u32 val;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_GR_MASK;

	switch (granularity) {
	case 250:
		val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 500:
		val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
		break;
	case 1000:
		val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2784
/**
 * usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
 * @port: DP IN adapter
 * @bw: Estimated bandwidth in Mb/s.
 *
 * Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is round
 * down to the closest granularity multiplier. Returns %0 in success
 * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support this.
 */
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
	val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2820
/**
 * usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
 * @port: DP IN adapter
 *
 * Reads and returns allocated bandwidth for @port in Mb/s (taking into
 * account the programmed granularity). Returns negative errno in case
 * of error.
 */
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= DP_STATUS_ALLOCATED_BW_MASK;
	val >>= DP_STATUS_ALLOCATED_BW_SHIFT;

	return val * granularity;
}
2852
/* Sets or clears the CM acknowledgment (CA) bit of the DP IN adapter */
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
	u32 val;
	int ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	if (ack)
		val |= ADP_DP_CS_2_CA;
	else
		val &= ~ADP_DP_CS_2_CA;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2871
/* Assert the CM acknowledgment bit */
static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
	return __usb4_dp_port_set_cm_ack(port, true);
}
2876
/*
 * Clears the CM ack, then polls for up to @timeout_msec for the DPTX
 * request (DR) bit to clear, and finally clears the CA bit again from
 * the current register value. Returns %-ETIMEDOUT if the DPTX request
 * does not clear in time.
 */
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
					      int timeout_msec)
{
	ktime_t end;
	u32 val;
	int ret;

	ret = __usb4_dp_port_set_cm_ack(port, false);
	if (ret)
		return ret;

	end = ktime_add_ms(ktime_get(), timeout_msec);
	do {
		ret = tb_port_read(port, &val, TB_CFG_PORT,
				   port->cap_adap + ADP_DP_CS_8, 1);
		if (ret)
			return ret;

		if (!(val & ADP_DP_CS_8_DR))
			break;

		usleep_range(50, 100);
	} while (ktime_before(ktime_get(), end));

	if (val & ADP_DP_CS_8_DR) {
		tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
		return -ETIMEDOUT;
	}

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_2, 1);
	if (ret)
		return ret;

	val &= ~ADP_DP_CS_2_CA;
	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_DP_CS_2, 1);
}
2915
/**
 * usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
 * @port: DP IN adapter
 * @bw: New allocated bandwidth in Mb/s
 *
 * Communicates the new allocated bandwidth with the DPCD (graphics
 * driver). Takes into account the programmed granularity. Returns %0 in
 * success and negative errno in case of error.
 */
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	val &= ~DP_STATUS_ALLOCATED_BW_MASK;
	val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_adap + DP_STATUS, 1);
	if (ret)
		return ret;

	/* Notify the DPTX via the CM ack handshake and wait for completion */
	ret = usb4_dp_port_set_cm_ack(port);
	if (ret)
		return ret;

	return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}
2957
/**
 * usb4_dp_port_requested_bandwidth() - Read requested bandwidth
 * @port: DP IN adapter
 *
 * Reads the DPCD (graphics driver) requested bandwidth and returns it
 * in Mb/s. Takes the programmed granularity into account. In case of
 * error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and %-ENODATA
 * if there is no active bandwidth request from the graphics driver.
 */
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
	u32 val, granularity;
	int ret;

	if (!is_usb4_dpin(port))
		return -EOPNOTSUPP;

	/* The register field is expressed in granularity units */
	ret = usb4_dp_port_granularity(port);
	if (ret < 0)
		return ret;
	granularity = ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_DP_CS_8, 1);
	if (ret)
		return ret;

	/* No active DPTX request means no requested bandwidth */
	if (!(val & ADP_DP_CS_8_DR))
		return -ENODATA;

	return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
2991
/**
 * usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
 * @port: PCIe adapter
 * @enable: Enable/disable extended encapsulation
 *
 * Enables or disables extended encapsulation used in PCIe tunneling. Caller
 * needs to make sure both adapters support this before enabling. Returns %0 on
 * success and negative errno otherwise.
 */
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
	u32 val;
	int ret;

	/* Valid for both upstream and downstream PCIe adapters */
	if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
		return -EINVAL;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_adap + ADP_PCIE_CS_1, 1);
	if (ret)
		return ret;

	if (enable)
		val |= ADP_PCIE_CS_1_EE;
	else
		val &= ~ADP_PCIE_CS_1_EE;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_adap + ADP_PCIE_CS_1, 1);
}
3022