1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Power Delivery protocol stack.
6 */
7
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/string_choices.h>
25 #include <linux/usb.h>
26 #include <linux/usb/pd.h>
27 #include <linux/usb/pd_ado.h>
28 #include <linux/usb/pd_bdo.h>
29 #include <linux/usb/pd_ext_sdb.h>
30 #include <linux/usb/pd_vdo.h>
31 #include <linux/usb/role.h>
32 #include <linux/usb/tcpm.h>
33 #include <linux/usb/typec_altmode.h>
34
35 #include <uapi/linux/sched/types.h>
36
/*
 * X-macro list of every TCPM state machine state. Expanded twice below:
 * once with GENERATE_ENUM to build enum tcpm_state, and once with
 * GENERATE_STRING to build the matching tcpm_states[] name table, keeping
 * the two automatically in sync.
 */
#define FOREACH_STATE(S)			\
	S(INVALID_STATE),			\
	S(TOGGLING),			\
	S(CHECK_CONTAMINANT),			\
	S(SRC_UNATTACHED),			\
	S(SRC_ATTACH_WAIT),			\
	S(SRC_ATTACHED),			\
	S(SRC_STARTUP),				\
	S(SRC_SEND_CAPABILITIES),		\
	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
	S(SRC_NEGOTIATE_CAPABILITIES),		\
	S(SRC_TRANSITION_SUPPLY),		\
	S(SRC_READY),				\
	S(SRC_WAIT_NEW_CAPABILITIES),		\
						\
	S(SNK_UNATTACHED),			\
	S(SNK_ATTACH_WAIT),			\
	S(SNK_DEBOUNCED),			\
	S(SNK_ATTACHED),			\
	S(SNK_STARTUP),				\
	S(SNK_DISCOVERY),			\
	S(SNK_DISCOVERY_DEBOUNCE),		\
	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
	S(SNK_WAIT_CAPABILITIES),		\
	S(SNK_WAIT_CAPABILITIES_TIMEOUT),	\
	S(SNK_NEGOTIATE_CAPABILITIES),		\
	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
	S(SNK_TRANSITION_SINK),			\
	S(SNK_TRANSITION_SINK_VBUS),		\
	S(SNK_READY),				\
						\
	S(ACC_UNATTACHED),			\
	S(DEBUG_ACC_ATTACHED),			\
	S(DEBUG_ACC_DEBOUNCE),			\
	S(AUDIO_ACC_ATTACHED),			\
	S(AUDIO_ACC_DEBOUNCE),			\
						\
	S(HARD_RESET_SEND),			\
	S(HARD_RESET_START),			\
	S(SRC_HARD_RESET_VBUS_OFF),		\
	S(SRC_HARD_RESET_VBUS_ON),		\
	S(SNK_HARD_RESET_SINK_OFF),		\
	S(SNK_HARD_RESET_WAIT_VBUS),		\
	S(SNK_HARD_RESET_SINK_ON),		\
						\
	S(SOFT_RESET),				\
	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
	S(SNK_SOFT_RESET),			\
	S(SOFT_RESET_SEND),			\
						\
	S(DR_SWAP_ACCEPT),			\
	S(DR_SWAP_SEND),			\
	S(DR_SWAP_SEND_TIMEOUT),		\
	S(DR_SWAP_CANCEL),			\
	S(DR_SWAP_CHANGE_DR),			\
						\
	S(PR_SWAP_ACCEPT),			\
	S(PR_SWAP_SEND),			\
	S(PR_SWAP_SEND_TIMEOUT),		\
	S(PR_SWAP_CANCEL),			\
	S(PR_SWAP_START),			\
	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
	S(PR_SWAP_SRC_SNK_SINK_ON),		\
	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
						\
	S(VCONN_SWAP_ACCEPT),			\
	S(VCONN_SWAP_SEND),			\
	S(VCONN_SWAP_SEND_TIMEOUT),		\
	S(VCONN_SWAP_CANCEL),			\
	S(VCONN_SWAP_START),			\
	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
	S(VCONN_SWAP_TURN_ON_VCONN),		\
	S(VCONN_SWAP_TURN_OFF_VCONN),		\
	S(VCONN_SWAP_SEND_SOFT_RESET),		\
						\
	S(FR_SWAP_SEND),			\
	S(FR_SWAP_SEND_TIMEOUT),		\
	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
	S(FR_SWAP_CANCEL),			\
						\
	S(SNK_TRY),				\
	S(SNK_TRY_WAIT),			\
	S(SNK_TRY_WAIT_DEBOUNCE),               \
	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
	S(SRC_TRYWAIT),				\
	S(SRC_TRYWAIT_DEBOUNCE),		\
	S(SRC_TRYWAIT_UNATTACHED),		\
						\
	S(SRC_TRY),				\
	S(SRC_TRY_WAIT),                        \
	S(SRC_TRY_DEBOUNCE),			\
	S(SNK_TRYWAIT),				\
	S(SNK_TRYWAIT_DEBOUNCE),		\
	S(SNK_TRYWAIT_VBUS),			\
	S(BIST_RX),				\
						\
	S(GET_STATUS_SEND),			\
	S(GET_STATUS_SEND_TIMEOUT),		\
	S(GET_PPS_STATUS_SEND),			\
	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
						\
	S(GET_SINK_CAP),			\
	S(GET_SINK_CAP_TIMEOUT),		\
						\
	S(ERROR_RECOVERY),			\
	S(PORT_RESET),				\
	S(PORT_RESET_WAIT_OFF),			\
						\
	S(AMS_START),				\
	S(CHUNK_NOT_SUPP),			\
						\
	S(SRC_VDM_IDENTITY_REQUEST)
155
/*
 * X-macro list of Atomic Message Sequences (AMS). Expanded with
 * GENERATE_ENUM / GENERATE_STRING below to produce enum tcpm_ams and the
 * matching tcpm_ams_str[] name table.
 */
#define FOREACH_AMS(S)			\
	S(NONE_AMS),			\
	S(POWER_NEGOTIATION),		\
	S(GOTOMIN),			\
	S(SOFT_RESET_AMS),		\
	S(HARD_RESET),			\
	S(CABLE_RESET),			\
	S(GET_SOURCE_CAPABILITIES),	\
	S(GET_SINK_CAPABILITIES),	\
	S(POWER_ROLE_SWAP),		\
	S(FAST_ROLE_SWAP),		\
	S(DATA_ROLE_SWAP),		\
	S(VCONN_SWAP),			\
	S(SOURCE_ALERT),		\
	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
	S(GETTING_SOURCE_SINK_STATUS),	\
	S(GETTING_BATTERY_CAPABILITIES), \
	S(GETTING_BATTERY_STATUS),	\
	S(GETTING_MANUFACTURER_INFORMATION), \
	S(SECURITY),			\
	S(FIRMWARE_UPDATE),		\
	S(DISCOVER_IDENTITY),		\
	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
	S(DISCOVER_SVIDS),		\
	S(DISCOVER_MODES),		\
	S(DFP_TO_UFP_ENTER_MODE),	\
	S(DFP_TO_UFP_EXIT_MODE),	\
	S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
	S(DFP_TO_CABLE_PLUG_EXIT_MODE),	\
	S(ATTENTION),			\
	S(BIST),			\
	S(UNSTRUCTURED_VDMS),		\
	S(STRUCTURED_VDMS),		\
	S(COUNTRY_INFO),		\
	S(COUNTRY_CODES),		\
	S(REVISION_INFORMATION)
192
/* X-macro expanders: emit the bare identifier, or its quoted name. */
#define GENERATE_ENUM(e)	e
#define GENERATE_STRING(s)	#s

/* State machine states; indices match the tcpm_states[] name table. */
enum tcpm_state {
	FOREACH_STATE(GENERATE_ENUM)
};

/* Printable name for each enum tcpm_state value, for logging. */
static const char * const tcpm_states[] = {
	FOREACH_STATE(GENERATE_STRING)
};

/* Atomic Message Sequences; indices match the tcpm_ams_str[] name table. */
enum tcpm_ams {
	FOREACH_AMS(GENERATE_ENUM)
};

/* Printable name for each enum tcpm_ams value, for logging. */
static const char * const tcpm_ams_str[] = {
	FOREACH_AMS(GENERATE_STRING)
};
211
/*
 * States of the Vendor Defined Message (VDM) sub-state-machine.
 * Negative values are terminal errors, 0 is idle/done, and positive
 * values indicate an exchange in progress.
 */
enum vdm_states {
	VDM_STATE_ERR_BUSY = -3,
	VDM_STATE_ERR_SEND = -2,
	VDM_STATE_ERR_TMOUT = -1,
	VDM_STATE_DONE = 0,
	/* Anything >0 represents an active state */
	VDM_STATE_READY = 1,
	VDM_STATE_BUSY = 2,
	VDM_STATE_WAIT_RSP_BUSY = 3,
	VDM_STATE_SEND_MESSAGE = 4,
};
223
/* Message kinds that can be queued for transmission (see queued_message). */
enum pd_msg_request {
	PD_MSG_NONE = 0,
	PD_MSG_CTRL_REJECT,
	PD_MSG_CTRL_WAIT,
	PD_MSG_CTRL_NOT_SUPP,
	PD_MSG_DATA_SINK_CAP,
	PD_MSG_DATA_SOURCE_CAP,
	PD_MSG_DATA_REV,
};
233
/* Actions to perform on an alternate-mode device after VDM handling. */
enum adev_actions {
	ADEV_NONE = 0,
	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
	ADEV_QUEUE_VDM,
	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
	ADEV_ATTENTION,
};
241
/*
 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
 * Version 1.2"
 */
enum frs_typec_current {
	FRS_NOT_SUPPORTED,
	FRS_DEFAULT_POWER,
	FRS_5V_1P5A,
	FRS_5V_3A,
};
253
/* Events from low level driver */

/* Bits OR-ed into port->pd_events by the low-level TCPC driver. */
#define TCPM_CC_EVENT		BIT(0)
#define TCPM_VBUS_EVENT		BIT(1)
#define TCPM_RESET_EVENT	BIT(2)
#define TCPM_FRS_EVENT		BIT(3)
#define TCPM_SOURCING_VBUS	BIT(4)
#define TCPM_PORT_CLEAN		BIT(5)
#define TCPM_PORT_ERROR		BIT(6)

/* Debugfs log ring buffer dimensions. */
#define LOG_BUFFER_ENTRIES	1024
#define LOG_BUFFER_ENTRY_SIZE	128

/* Alternate mode support */

#define SVID_DISCOVERY_MAX	16
#define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)

/* Retry intervals (ms) for Get_Sink_Cap and Discover_Identity. */
#define GET_SINK_CAP_RETRY_MS	100
#define SEND_DISCOVER_RETRY_MS	100
274
/* Discovered SVIDs and alternate modes for one SOP* target. */
struct pd_mode_data {
	int svid_index;		/* current SVID index		*/
	int nsvids;
	u16 svids[SVID_DISCOVERY_MAX];
	int altmodes;		/* number of alternate modes	*/
	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
};
282
/*
 * @min_volt: Actual min voltage at the local port
 * @req_min_volt: Requested min voltage to the port partner
 * @max_volt: Actual max voltage at the local port
 * @req_max_volt: Requested max voltage to the port partner
 * @max_curr: Actual max current at the local port
 * @req_max_curr: Requested max current of the port partner
 * @req_out_volt: Requested output voltage to the port partner
 * @req_op_curr: Requested operating current to the port partner
 * @supported: Partner has at least one APDO hence supports PPS
 * @active: PPS mode is active
 */
struct pd_pps_data {
	u32 min_volt;
	u32 req_min_volt;
	u32 max_volt;
	u32 req_max_volt;
	u32 max_curr;
	u32 req_max_curr;
	u32 req_out_volt;
	u32 req_op_curr;
	bool supported;
	bool active;
};
307
/* One selectable USB PD capability set and its registered class objects. */
struct pd_data {
	struct usb_power_delivery *pd;
	struct usb_power_delivery_capabilities *source_cap;
	struct usb_power_delivery_capabilities_desc source_desc;
	struct usb_power_delivery_capabilities *sink_cap;
	struct usb_power_delivery_capabilities_desc sink_desc;
	unsigned int operating_snk_mw;
};
316
/* PD specification revision encodings used in pd_revision_info. */
#define PD_CAP_REV10	0x1
#define PD_CAP_REV20	0x2
#define PD_CAP_REV30	0x3

/* Maximum PD (revision, version) supported, as major/minor pairs. */
struct pd_revision_info {
	u8 rev_major;
	u8 rev_minor;
	u8 ver_major;
	u8 ver_minor;
};
327
/*
 * @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
 * @ps_src_off_time: Deadline (in ms) for tPSSourceOff timer
 * @cc_debounce_time: Deadline (in ms) for tCCDebounce timer
 * @snk_bc12_cmpletion_time: Deadline (in ms) for BC1.2 detection completion
 *	while acting as a sink — TODO confirm exact semantics against users
 */
struct pd_timings {
	u32 sink_wait_cap_time;
	u32 ps_src_off_time;
	u32 cc_debounce_time;
	u32 snk_bc12_cmpletion_time;
};
339
/* Per-port state for the TCPM state machine. Protected by @lock. */
struct tcpm_port {
	struct device *dev;

	struct mutex lock;		/* tcpm state machine lock */
	struct kthread_worker *wq;

	struct typec_capability typec_caps;
	struct typec_port *typec_port;

	struct tcpc_dev	*tcpc;
	struct usb_role_switch *role_sw;

	enum typec_role vconn_role;
	enum typec_role pwr_role;
	enum typec_data_role data_role;
	enum typec_pwr_opmode pwr_opmode;

	struct usb_pd_identity partner_ident;
	struct typec_partner_desc partner_desc;
	struct typec_partner *partner;

	struct usb_pd_identity cable_ident;
	struct typec_cable_desc cable_desc;
	struct typec_cable *cable;
	struct typec_plug_desc plug_prime_desc;
	struct typec_plug *plug_prime;

	enum typec_cc_status cc_req;
	enum typec_cc_status src_rp;	/* work only if pd_supported == false */

	/* Last CC line states reported by the TCPC. */
	enum typec_cc_status cc1;
	enum typec_cc_status cc2;
	enum typec_cc_polarity polarity;

	bool attached;
	bool connected;
	bool registered;
	bool pd_supported;
	enum typec_port_type port_type;

	/*
	 * Set to true when vbus is greater than VSAFE5V min.
	 * Set to false when vbus falls below vSinkDisconnect max threshold.
	 */
	bool vbus_present;

	/*
	 * Set to true when vbus is less than VSAFE0V max.
	 * Set to false when vbus is greater than VSAFE0V max.
	 */
	bool vbus_vsafe0v;

	bool vbus_never_low;
	bool vbus_source;
	bool vbus_charge;

	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
	bool send_discover;
	bool op_vsafe5v;

	int try_role;
	int try_snk_count;
	int try_src_count;

	enum pd_msg_request queued_message;

	enum tcpm_state enter_state;
	enum tcpm_state prev_state;
	enum tcpm_state state;
	enum tcpm_state delayed_state;
	ktime_t delayed_runtime;
	unsigned long delay_ms;

	spinlock_t pd_event_lock;
	u32 pd_events;

	/* Deferred work items and the hrtimers that schedule them. */
	struct kthread_work event_work;
	struct hrtimer state_machine_timer;
	struct kthread_work state_machine;
	struct hrtimer vdm_state_machine_timer;
	struct kthread_work vdm_state_machine;
	struct hrtimer enable_frs_timer;
	struct kthread_work enable_frs;
	struct hrtimer send_discover_timer;
	struct kthread_work send_discover_work;
	bool state_machine_running;
	/* Set to true when VDM State Machine has following actions. */
	bool vdm_sm_running;

	struct completion tx_complete;
	enum tcpm_transmit_status tx_status;

	struct mutex swap_lock;		/* swap command lock */
	bool swap_pending;
	bool non_pd_role_swap;
	struct completion swap_complete;
	int swap_status;

	unsigned int negotiated_rev;
	unsigned int message_id;
	unsigned int caps_count;
	unsigned int hard_reset_count;
	bool pd_capable;
	bool explicit_contract;
	unsigned int rx_msgid;

	/* USB PD objects */
	struct usb_power_delivery **pds;
	struct pd_data **pd_list;
	struct usb_power_delivery_capabilities *port_source_caps;
	struct usb_power_delivery_capabilities *port_sink_caps;
	struct usb_power_delivery *partner_pd;
	struct usb_power_delivery_capabilities *partner_source_caps;
	struct usb_power_delivery_capabilities *partner_sink_caps;
	struct usb_power_delivery *selected_pd;

	/* Partner capabilities/requests */
	u32 sink_request;
	u32 source_caps[PDO_MAX_OBJECTS];
	unsigned int nr_source_caps;
	u32 sink_caps[PDO_MAX_OBJECTS];
	unsigned int nr_sink_caps;

	/* Local capabilities */
	unsigned int pd_count;
	u32 src_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_src_pdo;
	u32 snk_pdo[PDO_MAX_OBJECTS];
	unsigned int nr_snk_pdo;
	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo_v1;
	u32 snk_vdo[VDO_MAX_OBJECTS];
	unsigned int nr_snk_vdo;

	unsigned int operating_snk_mw;
	bool update_sink_caps;

	/* Requested current / voltage to the port partner */
	u32 req_current_limit;
	u32 req_supply_voltage;
	/* Actual current / voltage limit of the local port */
	u32 current_limit;
	u32 supply_voltage;

	/* Used to export TA voltage and current */
	struct power_supply *psy;
	struct power_supply_desc psy_desc;
	enum power_supply_usb_type usb_type;

	u32 bist_request;

	/* PD state for Vendor Defined Messages */
	enum vdm_states vdm_state;
	u32 vdm_retries;
	/* next Vendor Defined Message to send */
	u32 vdo_data[VDO_MAX_SIZE];
	u8 vdo_count;
	/* VDO to retry if UFP responder replied busy */
	u32 vdo_retry;

	/* PPS */
	struct pd_pps_data pps_data;
	struct completion pps_complete;
	bool pps_pending;
	int pps_status;

	/* Alternate mode data */
	struct pd_mode_data mode_data;
	struct pd_mode_data mode_data_prime;
	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];

	/* Deadline in jiffies to exit src_try_wait state */
	unsigned long max_wait;

	/* port belongs to a self powered device */
	bool self_powered;

	/* Sink FRS */
	enum frs_typec_current new_source_frs_current;

	/* Sink caps have been queried */
	bool sink_cap_done;

	/* Collision Avoidance and Atomic Message Sequence */
	enum tcpm_state upcoming_state;
	enum tcpm_ams ams;
	enum tcpm_ams next_ams;
	bool in_ams;

	/* Auto vbus discharge status */
	bool auto_vbus_discharge_enabled;

	/*
	 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
	 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
	 * SNK_READY for non-pd link.
	 */
	bool slow_charger_loop;

	/*
	 * When true indicates that the lower level drivers indicate potential presence
	 * of contaminant in the connector pins based on the tcpm state machine
	 * transitions.
	 */
	bool potential_contaminant;

	/* SOP* Related Fields */
	/*
	 * Flag to determine if SOP' Discover Identity is available. The flag
	 * is set if Discover Identity on SOP' does not immediately follow
	 * Discover Identity on SOP.
	 */
	bool send_discover_prime;
	/*
	 * tx_sop_type determines which SOP* a message is being sent on.
	 * For messages that are queued and not sent immediately such as in
	 * tcpm_queue_message or messages that send after state changes,
	 * the tx_sop_type is set accordingly.
	 */
	enum tcpm_transmit_type tx_sop_type;
	/*
	 * Prior to discovering the port partner's Specification Revision, the
	 * Vconn source and cable plug will use the lower of their two revisions.
	 *
	 * When the port partner's Specification Revision is discovered, the following
	 * rules are put in place.
	 * 1. If the cable revision (1) is lower than the revision negotiated
	 * between the port and partner (2), the port and partner will communicate
	 * on revision (2), but the port and cable will communicate on revision (1).
	 * 2. If the cable revision (1) is higher than the revision negotiated
	 * between the port and partner (2), the port and partner will communicate
	 * on revision (2), and the port and cable will communicate on revision (2)
	 * as well.
	 */
	unsigned int negotiated_rev_prime;
	/*
	 * Each SOP* type must maintain their own tx and rx message IDs
	 */
	unsigned int message_id_prime;
	unsigned int rx_msgid_prime;

	/* Timer deadline values configured at runtime */
	struct pd_timings timings;

	/* Indicates maximum (revision, version) supported */
	struct pd_revision_info pd_rev;
#ifdef CONFIG_DEBUG_FS
	/* Debugfs log ring buffer; entries allocated lazily in _tcpm_log(). */
	struct dentry *dentry;
	struct mutex logbuffer_lock;	/* log buffer access lock */
	int logbuffer_head;
	int logbuffer_tail;
	u8 *logbuffer[LOG_BUFFER_ENTRIES];
#endif
};
596
/* Work item carrying one received PD message into the worker thread. */
struct pd_rx_event {
	struct kthread_work work;
	struct tcpm_port *port;
	struct pd_message msg;
	enum tcpm_transmit_type rx_sop_type;	/* SOP* the message arrived on */
};
603
/* Work item carrying a VDM request issued by an alternate-mode driver. */
struct altmode_vdm_event {
	struct kthread_work work;
	struct tcpm_port *port;
	u32 header;
	u32 *data;
	int cnt;
	enum tcpm_transmit_type tx_sop_type;	/* SOP* to transmit on */
};
612
/* Printable names for negotiated PD specification revisions. */
static const char * const pd_rev[] = {
	[PD_REV10]		= "rev1",
	[PD_REV20]		= "rev2",
	[PD_REV30]		= "rev3",
};
618
/*
 * CC line classification helpers. A pull-up (Rp) seen on a CC line means
 * the remote end is a source, so the local port would attach as sink;
 * Rd means the remote is a sink; Ra marks an audio accessory.
 */
#define tcpm_cc_is_sink(cc) \
	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
	 (cc) == TYPEC_CC_RP_3_0)

/* As long as cc is pulled up, we can consider it as sink. */
#define tcpm_port_is_sink(port) \
	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))

#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)

/* Source attach requires Rd on exactly one CC line. */
#define tcpm_port_is_source(port) \
	((tcpm_cc_is_source((port)->cc1) && \
	 !tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_source((port)->cc2) && \
	  !tcpm_cc_is_source((port)->cc1)))

/* Both CC lines showing the same termination indicates a debug accessory. */
#define tcpm_port_is_debug(port) \
	((tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) || \
	 (tcpm_cc_is_sink((port)->cc1) && tcpm_cc_is_sink((port)->cc2)))

#define tcpm_port_is_audio(port) \
	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))

#define tcpm_port_is_audio_detached(port) \
	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))

/* Try.SNK / Try.SRC are attempted once per attach, on DRP ports only. */
#define tcpm_try_snk(port) \
	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
	(port)->port_type == TYPEC_PORT_DRP)

#define tcpm_try_src(port) \
	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
	(port)->port_type == TYPEC_PORT_DRP)

/* Initial data role implied by the power role, honoring fixed-role ports. */
#define tcpm_data_role_for_source(port) \
	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
	TYPEC_DEVICE : TYPEC_HOST)

#define tcpm_data_role_for_sink(port) \
	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
	TYPEC_HOST : TYPEC_DEVICE)

/* PD3.0 collision avoidance: sink may transmit only when Rp is 3.0 A (SinkTxOK). */
#define tcpm_sink_tx_ok(port) \
	(tcpm_port_is_sink(port) && \
	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))

#define tcpm_wait_for_discharge(port) \
	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
670
tcpm_default_state(struct tcpm_port * port)671 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
672 {
673 if (port->port_type == TYPEC_PORT_DRP) {
674 if (port->try_role == TYPEC_SINK)
675 return SNK_UNATTACHED;
676 else if (port->try_role == TYPEC_SOURCE)
677 return SRC_UNATTACHED;
678 /* Fall through to return SRC_UNATTACHED */
679 } else if (port->port_type == TYPEC_PORT_SNK) {
680 return SNK_UNATTACHED;
681 }
682 return SRC_UNATTACHED;
683 }
684
tcpm_port_is_disconnected(struct tcpm_port * port)685 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
686 {
687 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
688 port->cc2 == TYPEC_CC_OPEN) ||
689 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
690 port->cc1 == TYPEC_CC_OPEN) ||
691 (port->polarity == TYPEC_POLARITY_CC2 &&
692 port->cc2 == TYPEC_CC_OPEN)));
693 }
694
695 /*
696 * Logging
697 */
698
699 #ifdef CONFIG_DEBUG_FS
700
tcpm_log_full(struct tcpm_port * port)701 static bool tcpm_log_full(struct tcpm_port *port)
702 {
703 return port->logbuffer_tail ==
704 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
705 }
706
__printf(2, 0)
/*
 * Append one formatted, timestamped entry to the debugfs log ring.
 * Entry storage is allocated lazily; on allocation failure the message is
 * silently dropped. On ring overflow the newest entry is replaced by the
 * literal string "overflow". Caller must NOT hold logbuffer_lock.
 */
static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
{
	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec;

	mutex_lock(&port->logbuffer_lock);
	/* Lazily allocate the slot the head currently points at. */
	if (!port->logbuffer[port->logbuffer_head]) {
		port->logbuffer[port->logbuffer_head] =
			kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
		if (!port->logbuffer[port->logbuffer_head]) {
			mutex_unlock(&port->logbuffer_lock);
			return;
		}
	}

	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);

	/* On overflow, step head back and overwrite with a marker entry. */
	if (tcpm_log_full(port)) {
		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
		strcpy(tmpbuffer, "overflow");
	}

	/* Defensive checks; both indicate internal corruption. */
	if (port->logbuffer_head < 0 ||
	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
		dev_warn(port->dev,
			 "Bad log buffer index %d\n", port->logbuffer_head);
		goto abort;
	}

	if (!port->logbuffer[port->logbuffer_head]) {
		dev_warn(port->dev,
			 "Log buffer index %d is NULL\n", port->logbuffer_head);
		goto abort;
	}

	/* Prefix with seconds.microseconds derived from local_clock(). */
	rem_nsec = do_div(ts_nsec, 1000000000);
	scnprintf(port->logbuffer[port->logbuffer_head],
		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
		  (unsigned long)ts_nsec, rem_nsec / 1000,
		  tmpbuffer);
	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;

abort:
	mutex_unlock(&port->logbuffer_lock);
}
754
755 __printf(2, 3)
tcpm_log(struct tcpm_port * port,const char * fmt,...)756 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
757 {
758 va_list args;
759
760 /* Do not log while disconnected and unattached */
761 if (tcpm_port_is_disconnected(port) &&
762 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
763 port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
764 return;
765
766 va_start(args, fmt);
767 _tcpm_log(port, fmt, args);
768 va_end(args);
769 }
770
771 __printf(2, 3)
tcpm_log_force(struct tcpm_port * port,const char * fmt,...)772 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
773 {
774 va_list args;
775
776 va_start(args, fmt);
777 _tcpm_log(port, fmt, args);
778 va_end(args);
779 }
780
/* Log every source capability PDO received from the partner, decoded. */
static void tcpm_log_source_caps(struct tcpm_port *port)
{
	int i;

	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);
		char msg[64];

		switch (type) {
		case PDO_TYPE_FIXED:
			/* Flag letters: dual-Role, Suspend, Higher cap,
			 * Usb comm, Data swap, External power. */
			scnprintf(msg, sizeof(msg),
				  "%u mV, %u mA [%s%s%s%s%s%s]",
				  pdo_fixed_voltage(pdo),
				  pdo_max_current(pdo),
				  (pdo & PDO_FIXED_DUAL_ROLE) ?
							"R" : "",
				  (pdo & PDO_FIXED_SUSPEND) ?
							"S" : "",
				  (pdo & PDO_FIXED_HIGHER_CAP) ?
							"H" : "",
				  (pdo & PDO_FIXED_USB_COMM) ?
							"U" : "",
				  (pdo & PDO_FIXED_DATA_SWAP) ?
							"D" : "",
				  (pdo & PDO_FIXED_EXTPOWER) ?
							"E" : "");
			break;
		case PDO_TYPE_VAR:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mA",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_current(pdo));
			break;
		case PDO_TYPE_BATT:
			scnprintf(msg, sizeof(msg),
				  "%u-%u mV, %u mW",
				  pdo_min_voltage(pdo),
				  pdo_max_voltage(pdo),
				  pdo_max_power(pdo));
			break;
		case PDO_TYPE_APDO:
			/* Only the PPS flavor of APDO is decoded. */
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
				scnprintf(msg, sizeof(msg),
					  "%u-%u mV, %u mA",
					  pdo_pps_apdo_min_voltage(pdo),
					  pdo_pps_apdo_max_voltage(pdo),
					  pdo_pps_apdo_max_current(pdo));
			else
				strcpy(msg, "undefined APDO");
			break;
		default:
			strcpy(msg, "undefined");
			break;
		}
		tcpm_log(port, " PDO %d: type %d, %s",
			 i, type, msg);
	}
}
841
tcpm_debug_show(struct seq_file * s,void * v)842 static int tcpm_debug_show(struct seq_file *s, void *v)
843 {
844 struct tcpm_port *port = s->private;
845 int tail;
846
847 mutex_lock(&port->logbuffer_lock);
848 tail = port->logbuffer_tail;
849 while (tail != port->logbuffer_head) {
850 seq_printf(s, "%s\n", port->logbuffer[tail]);
851 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
852 }
853 if (!seq_has_overflowed(s))
854 port->logbuffer_tail = tail;
855 mutex_unlock(&port->logbuffer_lock);
856
857 return 0;
858 }
859 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
860
tcpm_debugfs_init(struct tcpm_port * port)861 static void tcpm_debugfs_init(struct tcpm_port *port)
862 {
863 char name[NAME_MAX];
864
865 mutex_init(&port->logbuffer_lock);
866 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
867 port->dentry = debugfs_create_dir(name, usb_debug_root);
868 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
869 &tcpm_debug_fops);
870 }
871
/* Free all log entries and remove the port's debugfs directory. */
static void tcpm_debugfs_exit(struct tcpm_port *port)
{
	int i;

	mutex_lock(&port->logbuffer_lock);
	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
		kfree(port->logbuffer[i]);
		port->logbuffer[i] = NULL;
	}
	mutex_unlock(&port->logbuffer_lock);

	debugfs_remove(port->dentry);
}
885
886 #else
887
/* No-op stubs used when debugfs logging is compiled out. */
__printf(2, 3)
static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
__printf(2, 3)
static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
static void tcpm_log_source_caps(struct tcpm_port *port) { }
static void tcpm_debugfs_init(const struct tcpm_port *port) { }
static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
895
896 #endif
897
tcpm_set_cc(struct tcpm_port * port,enum typec_cc_status cc)898 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
899 {
900 tcpm_log(port, "cc:=%d", cc);
901 port->cc_req = cc;
902 port->tcpc->set_cc(port->tcpc, cc);
903 }
904
/*
 * Enable/disable the TCPC's auto VBUS discharge on disconnect.
 * Optional TCPC feature: returns 0 without side effects when the driver
 * does not implement the hook. Tracks the resulting state on success.
 */
static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
{
	struct tcpc_dev *tcpc = port->tcpc;
	int ret;

	if (!tcpc->enable_auto_vbus_discharge)
		return 0;

	ret = tcpc->enable_auto_vbus_discharge(tcpc, enable);
	tcpm_log_force(port, "%s vbus discharge ret:%d",
		       str_enable_disable(enable), ret);
	if (!ret)
		port->auto_vbus_discharge_enabled = enable;

	return ret;
}
919
tcpm_apply_rc(struct tcpm_port * port)920 static void tcpm_apply_rc(struct tcpm_port *port)
921 {
922 /*
923 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
924 * when Vbus auto discharge on disconnect is enabled.
925 */
926 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
927 tcpm_log(port, "Apply_RC");
928 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
929 tcpm_enable_auto_vbus_discharge(port, false);
930 }
931 }
932
933 /*
934 * Determine RP value to set based on maximum current supported
935 * by a port if configured as source.
936 * Returns CC value to report to link partner.
937 */
tcpm_rp_cc(struct tcpm_port * port)938 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
939 {
940 const u32 *src_pdo = port->src_pdo;
941 int nr_pdo = port->nr_src_pdo;
942 int i;
943
944 if (!port->pd_supported)
945 return port->src_rp;
946
947 /*
948 * Search for first entry with matching voltage.
949 * It should report the maximum supported current.
950 */
951 for (i = 0; i < nr_pdo; i++) {
952 const u32 pdo = src_pdo[i];
953
954 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
955 pdo_fixed_voltage(pdo) == 5000) {
956 unsigned int curr = pdo_max_current(pdo);
957
958 if (curr >= 3000)
959 return TYPEC_CC_RP_3_0;
960 else if (curr >= 1500)
961 return TYPEC_CC_RP_1_5;
962 return TYPEC_CC_RP_DEF;
963 }
964 }
965
966 return TYPEC_CC_RP_DEF;
967 }
968
tcpm_ams_finish(struct tcpm_port * port)969 static void tcpm_ams_finish(struct tcpm_port *port)
970 {
971 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
972
973 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
974 if (port->negotiated_rev >= PD_REV30)
975 tcpm_set_cc(port, SINK_TX_OK);
976 else
977 tcpm_set_cc(port, SINK_TX_NG);
978 } else if (port->pwr_role == TYPEC_SOURCE) {
979 tcpm_set_cc(port, tcpm_rp_cc(port));
980 }
981
982 port->in_ams = false;
983 port->ams = NONE_AMS;
984 }
985
/*
 * Transmit one PD message (or a bare signal such as hard reset when
 * @msg is NULL) on the given SOP* and wait for the TCPC to report the
 * outcome via tcpm_pd_transmit_complete().
 *
 * Called with port->lock held; the lock is dropped while waiting for the
 * completion so the TCPC interrupt path can deliver the TX status.
 * Returns 0 on success, -ETIMEDOUT, -EAGAIN (discarded), -EIO (failed),
 * or the error from the driver's pd_transmit hook.
 */
static int tcpm_pd_transmit(struct tcpm_port *port,
			    enum tcpm_transmit_type tx_sop_type,
			    const struct pd_message *msg)
{
	unsigned long time_left;
	int ret;
	unsigned int negotiated_rev;

	/* Each SOP* negotiates its own spec revision. */
	switch (tx_sop_type) {
	case TCPC_TX_SOP_PRIME:
		negotiated_rev = port->negotiated_rev_prime;
		break;
	case TCPC_TX_SOP:
	default:
		negotiated_rev = port->negotiated_rev;
		break;
	}

	if (msg)
		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
	else
		tcpm_log(port, "PD TX, type: %#x", tx_sop_type);

	reinit_completion(&port->tx_complete);
	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
	if (ret < 0)
		return ret;

	/* Drop the state machine lock while waiting for the TX interrupt. */
	mutex_unlock(&port->lock);
	time_left = wait_for_completion_timeout(&port->tx_complete,
						msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
	mutex_lock(&port->lock);
	if (!time_left)
		return -ETIMEDOUT;

	switch (port->tx_status) {
	case TCPC_TX_SUCCESS:
		/* Advance the per-SOP* MessageID only on confirmed delivery. */
		switch (tx_sop_type) {
		case TCPC_TX_SOP_PRIME:
			port->message_id_prime = (port->message_id_prime + 1) &
						 PD_HEADER_ID_MASK;
			break;
		case TCPC_TX_SOP:
		default:
			port->message_id = (port->message_id + 1) &
					   PD_HEADER_ID_MASK;
			break;
		}
		/*
		 * USB PD rev 2.0, 8.3.2.2.1:
		 * USB PD rev 3.0, 8.3.2.1.3:
		 * "... Note that every AMS is Interruptible until the first
		 * Message in the sequence has been successfully sent (GoodCRC
		 * Message received)."
		 */
		if (port->ams != NONE_AMS)
			port->in_ams = true;
		break;
	case TCPC_TX_DISCARDED:
		ret = -EAGAIN;
		break;
	case TCPC_TX_FAILED:
	default:
		ret = -EIO;
		break;
	}

	/* Some AMS don't expect responses. Finish them here. */
	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
		tcpm_ams_finish(port);

	return ret;
}
1059
tcpm_pd_transmit_complete(struct tcpm_port * port,enum tcpm_transmit_status status)1060 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1061 enum tcpm_transmit_status status)
1062 {
1063 tcpm_log(port, "PD TX complete, status: %u", status);
1064 port->tx_status = status;
1065 complete(&port->tx_complete);
1066 }
1067 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1068
tcpm_mux_set(struct tcpm_port * port,int state,enum usb_role usb_role,enum typec_orientation orientation)1069 static int tcpm_mux_set(struct tcpm_port *port, int state,
1070 enum usb_role usb_role,
1071 enum typec_orientation orientation)
1072 {
1073 int ret;
1074
1075 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1076 state, usb_role, orientation);
1077
1078 ret = typec_set_orientation(port->typec_port, orientation);
1079 if (ret)
1080 return ret;
1081
1082 if (port->role_sw) {
1083 ret = usb_role_switch_set_role(port->role_sw, usb_role);
1084 if (ret)
1085 return ret;
1086 }
1087
1088 return typec_set_mode(port->typec_port, state);
1089 }
1090
tcpm_set_polarity(struct tcpm_port * port,enum typec_cc_polarity polarity)1091 static int tcpm_set_polarity(struct tcpm_port *port,
1092 enum typec_cc_polarity polarity)
1093 {
1094 int ret;
1095
1096 tcpm_log(port, "polarity %d", polarity);
1097
1098 ret = port->tcpc->set_polarity(port->tcpc, polarity);
1099 if (ret < 0)
1100 return ret;
1101
1102 port->polarity = polarity;
1103
1104 return 0;
1105 }
1106
/* Enable/disable Vconn sourcing and mirror the role to the typec class. */
static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
{
	int err;

	tcpm_log(port, "vconn:=%d", enable);

	err = port->tcpc->set_vconn(port->tcpc, enable);
	if (err)
		return err;

	port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
	typec_set_vconn_role(port->typec_port, port->vconn_role);
	return 0;
}
1121
tcpm_get_current_limit(struct tcpm_port * port)1122 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1123 {
1124 enum typec_cc_status cc;
1125 u32 limit;
1126
1127 cc = port->polarity ? port->cc2 : port->cc1;
1128 switch (cc) {
1129 case TYPEC_CC_RP_1_5:
1130 limit = 1500;
1131 break;
1132 case TYPEC_CC_RP_3_0:
1133 limit = 3000;
1134 break;
1135 case TYPEC_CC_RP_DEF:
1136 default:
1137 if (port->tcpc->get_current_limit)
1138 limit = port->tcpc->get_current_limit(port->tcpc);
1139 else
1140 limit = 0;
1141 break;
1142 }
1143
1144 return limit;
1145 }
1146
/*
 * Record the negotiated supply voltage/current and notify the power
 * supply core; also forward the limit to the TCPC if it supports that.
 * Returns -EOPNOTSUPP when the TCPC has no set_current_limit hook
 * (the cached values are still updated).
 */
static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
{
	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);

	port->supply_voltage = mv;
	port->current_limit = max_ma;
	power_supply_changed(port->psy);

	if (!port->tcpc->set_current_limit)
		return -EOPNOTSUPP;

	return port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
}
1162
/* Tell the TCPC about attach/detach with the current power/data roles. */
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
	return port->tcpc->set_roles(port->tcpc, attached,
				     port->pwr_role, port->data_role);
}
1168
/*
 * Apply a new power/data role combination: derive the matching USB role
 * and orientation, program the mux, the TCPC and (optionally) the TCPC
 * orientation, then publish the roles to the typec class.
 * Returns -ENOTSUPP when the requested data role is impossible for a
 * DFP-only or UFP-only port.
 */
static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
			  enum typec_role role, enum typec_data_role data)
{
	enum typec_orientation orientation;
	enum usb_role usb_role;
	int err;

	orientation = port->polarity == TYPEC_POLARITY_CC1 ?
			TYPEC_ORIENTATION_NORMAL : TYPEC_ORIENTATION_REVERSE;

	switch (port->typec_caps.data) {
	case TYPEC_PORT_DRD:
		usb_role = data == TYPEC_HOST ? USB_ROLE_HOST : USB_ROLE_DEVICE;
		break;
	case TYPEC_PORT_DFP:
		/* DFP-only: may only be a USB host, and only while sourcing */
		if (data != TYPEC_HOST)
			return -ENOTSUPP;
		usb_role = role == TYPEC_SOURCE ? USB_ROLE_HOST : USB_ROLE_NONE;
		break;
	default:
		/* UFP-only: may only be a USB device, and only while sinking */
		if (data != TYPEC_DEVICE)
			return -ENOTSUPP;
		usb_role = role == TYPEC_SINK ? USB_ROLE_DEVICE : USB_ROLE_NONE;
		break;
	}

	err = tcpm_mux_set(port, state, usb_role, orientation);
	if (err < 0)
		return err;

	err = port->tcpc->set_roles(port->tcpc, attached, role, data);
	if (err < 0)
		return err;

	/* Orientation reporting to the TCPC is optional */
	if (port->tcpc->set_orientation) {
		err = port->tcpc->set_orientation(port->tcpc, orientation);
		if (err < 0)
			return err;
	}

	port->pwr_role = role;
	port->data_role = data;
	typec_set_data_role(port->typec_port, data);
	typec_set_pwr_role(port->typec_port, role);

	return 0;
}
1227
tcpm_set_pwr_role(struct tcpm_port * port,enum typec_role role)1228 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1229 {
1230 int ret;
1231
1232 ret = port->tcpc->set_roles(port->tcpc, true, role,
1233 port->data_role);
1234 if (ret < 0)
1235 return ret;
1236
1237 port->pwr_role = role;
1238 typec_set_pwr_role(port->typec_port, role);
1239
1240 return 0;
1241 }
1242
1243 /*
1244 * Transform the PDO to be compliant to PD rev2.0.
1245 * Return 0 if the PDO type is not defined in PD rev2.0.
1246 * Otherwise, return the converted PDO.
1247 */
tcpm_forge_legacy_pdo(struct tcpm_port * port,u32 pdo,enum typec_role role)1248 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1249 {
1250 switch (pdo_type(pdo)) {
1251 case PDO_TYPE_FIXED:
1252 if (role == TYPEC_SINK)
1253 return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1254 else
1255 return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1256 case PDO_TYPE_VAR:
1257 case PDO_TYPE_BATT:
1258 return pdo;
1259 case PDO_TYPE_APDO:
1260 default:
1261 return 0;
1262 }
1263 }
1264
tcpm_pd_send_revision(struct tcpm_port * port)1265 static int tcpm_pd_send_revision(struct tcpm_port *port)
1266 {
1267 struct pd_message msg;
1268 u32 rmdo;
1269
1270 memset(&msg, 0, sizeof(msg));
1271 rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
1272 port->pd_rev.ver_major, port->pd_rev.ver_minor);
1273 msg.payload[0] = cpu_to_le32(rmdo);
1274 msg.header = PD_HEADER_LE(PD_DATA_REVISION,
1275 port->pwr_role,
1276 port->data_role,
1277 port->negotiated_rev,
1278 port->message_id,
1279 1);
1280 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1281 }
1282
tcpm_pd_send_source_caps(struct tcpm_port * port)1283 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1284 {
1285 struct pd_message msg;
1286 u32 pdo;
1287 unsigned int i, nr_pdo = 0;
1288
1289 memset(&msg, 0, sizeof(msg));
1290
1291 for (i = 0; i < port->nr_src_pdo; i++) {
1292 if (port->negotiated_rev >= PD_REV30) {
1293 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1294 } else {
1295 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1296 if (pdo)
1297 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1298 }
1299 }
1300
1301 if (!nr_pdo) {
1302 /* No source capabilities defined, sink only */
1303 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1304 port->pwr_role,
1305 port->data_role,
1306 port->negotiated_rev,
1307 port->message_id, 0);
1308 } else {
1309 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1310 port->pwr_role,
1311 port->data_role,
1312 port->negotiated_rev,
1313 port->message_id,
1314 nr_pdo);
1315 }
1316
1317 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1318 }
1319
tcpm_pd_send_sink_caps(struct tcpm_port * port)1320 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1321 {
1322 struct pd_message msg;
1323 u32 pdo;
1324 unsigned int i, nr_pdo = 0;
1325
1326 memset(&msg, 0, sizeof(msg));
1327
1328 for (i = 0; i < port->nr_snk_pdo; i++) {
1329 if (port->negotiated_rev >= PD_REV30) {
1330 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1331 } else {
1332 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1333 if (pdo)
1334 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1335 }
1336 }
1337
1338 if (!nr_pdo) {
1339 /* No sink capabilities defined, source only */
1340 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1341 port->pwr_role,
1342 port->data_role,
1343 port->negotiated_rev,
1344 port->message_id, 0);
1345 } else {
1346 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1347 port->pwr_role,
1348 port->data_role,
1349 port->negotiated_rev,
1350 port->message_id,
1351 nr_pdo);
1352 }
1353
1354 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1355 }
1356
mod_tcpm_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1357 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1358 {
1359 if (delay_ms) {
1360 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1361 } else {
1362 hrtimer_cancel(&port->state_machine_timer);
1363 kthread_queue_work(port->wq, &port->state_machine);
1364 }
1365 }
1366
mod_vdm_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1367 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1368 {
1369 if (delay_ms) {
1370 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1371 HRTIMER_MODE_REL);
1372 } else {
1373 hrtimer_cancel(&port->vdm_state_machine_timer);
1374 kthread_queue_work(port->wq, &port->vdm_state_machine);
1375 }
1376 }
1377
mod_enable_frs_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1378 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1379 {
1380 if (delay_ms) {
1381 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1382 } else {
1383 hrtimer_cancel(&port->enable_frs_timer);
1384 kthread_queue_work(port->wq, &port->enable_frs);
1385 }
1386 }
1387
mod_send_discover_delayed_work(struct tcpm_port * port,unsigned int delay_ms)1388 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1389 {
1390 if (delay_ms) {
1391 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1392 } else {
1393 hrtimer_cancel(&port->send_discover_timer);
1394 kthread_queue_work(port->wq, &port->send_discover_work);
1395 }
1396 }
1397
tcpm_set_state(struct tcpm_port * port,enum tcpm_state state,unsigned int delay_ms)1398 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1399 unsigned int delay_ms)
1400 {
1401 if (delay_ms) {
1402 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1403 tcpm_states[port->state], tcpm_states[state], delay_ms,
1404 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1405 port->delayed_state = state;
1406 mod_tcpm_delayed_work(port, delay_ms);
1407 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1408 port->delay_ms = delay_ms;
1409 } else {
1410 tcpm_log(port, "state change %s -> %s [%s %s]",
1411 tcpm_states[port->state], tcpm_states[state],
1412 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1413 port->delayed_state = INVALID_STATE;
1414 port->prev_state = port->state;
1415 port->state = state;
1416 /*
1417 * Don't re-queue the state machine work item if we're currently
1418 * in the state machine and we're immediately changing states.
1419 * tcpm_state_machine_work() will continue running the state
1420 * machine.
1421 */
1422 if (!port->state_machine_running)
1423 mod_tcpm_delayed_work(port, 0);
1424 }
1425 }
1426
tcpm_set_state_cond(struct tcpm_port * port,enum tcpm_state state,unsigned int delay_ms)1427 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1428 unsigned int delay_ms)
1429 {
1430 if (port->enter_state == port->state)
1431 tcpm_set_state(port, state, delay_ms);
1432 else
1433 tcpm_log(port,
1434 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1435 delay_ms ? "delayed " : "",
1436 tcpm_states[port->state], tcpm_states[state],
1437 delay_ms, tcpm_states[port->enter_state],
1438 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1439 }
1440
/*
 * Record a PD message request and kick the state machine so that it is
 * transmitted at the next opportunity.
 */
static void tcpm_queue_message(struct tcpm_port *port,
			       enum pd_msg_request message)
{
	port->queued_message = message;
	mod_tcpm_delayed_work(port, 0);
}
1447
tcpm_vdm_ams(struct tcpm_port * port)1448 static bool tcpm_vdm_ams(struct tcpm_port *port)
1449 {
1450 switch (port->ams) {
1451 case DISCOVER_IDENTITY:
1452 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1453 case DISCOVER_SVIDS:
1454 case DISCOVER_MODES:
1455 case DFP_TO_UFP_ENTER_MODE:
1456 case DFP_TO_UFP_EXIT_MODE:
1457 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1458 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1459 case ATTENTION:
1460 case UNSTRUCTURED_VDMS:
1461 case STRUCTURED_VDMS:
1462 break;
1463 default:
1464 return false;
1465 }
1466
1467 return true;
1468 }
1469
tcpm_ams_interruptible(struct tcpm_port * port)1470 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1471 {
1472 switch (port->ams) {
1473 /* Interruptible AMS */
1474 case NONE_AMS:
1475 case SECURITY:
1476 case FIRMWARE_UPDATE:
1477 case DISCOVER_IDENTITY:
1478 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1479 case DISCOVER_SVIDS:
1480 case DISCOVER_MODES:
1481 case DFP_TO_UFP_ENTER_MODE:
1482 case DFP_TO_UFP_EXIT_MODE:
1483 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1484 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1485 case UNSTRUCTURED_VDMS:
1486 case STRUCTURED_VDMS:
1487 case COUNTRY_INFO:
1488 case COUNTRY_CODES:
1489 break;
1490 /* Non-Interruptible AMS */
1491 default:
1492 if (port->in_ams)
1493 return false;
1494 break;
1495 }
1496
1497 return true;
1498 }
1499
/*
 * Begin a new Atomic Message Sequence (AMS) of type @ams.
 *
 * Returns -EAGAIN when a non-interruptible AMS is already in flight and
 * the new AMS is not a hard/soft reset, or when a PD3.0 sink is not
 * allowed to transmit (SinkTxNG). On success the port's state machine
 * is steered into the state that starts the sequence; for PD3.0 sources
 * the tSinkTx collision-avoidance delay is applied where required.
 */
static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
{
	int ret = 0;

	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);

	/* Only a hard or soft reset may pre-empt a non-interruptible AMS */
	if (!tcpm_ams_interruptible(port) &&
	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
		port->upcoming_state = INVALID_STATE;
		tcpm_log(port, "AMS %s not interruptible, aborting",
			 tcpm_ams_str[port->ams]);
		return -EAGAIN;
	}

	if (port->pwr_role == TYPEC_SOURCE) {
		/* Remember the Rp we advertised before changing it below */
		enum typec_cc_status cc_req = port->cc_req;

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (ams == SOFT_RESET_AMS) {
			if (!port->explicit_contract)
				tcpm_set_cc(port, tcpm_rp_cc(port));
			tcpm_set_state(port, SOFT_RESET_SEND, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			/* tSinkTx is enforced in vdm_run_state_machine */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_cc(port, SINK_TX_NG);
			return ret;
		}

		/* PD3.0 collision avoidance: signal the sink not to transmit */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_set_cc(port, SINK_TX_NG);

		switch (port->state) {
		case SRC_READY:
		case SRC_STARTUP:
		case SRC_SOFT_RESET_WAIT_SNK_TX:
		case SOFT_RESET:
		case SOFT_RESET_SEND:
			/*
			 * If the sink was previously allowed to transmit
			 * (SinkTxOk), wait tSinkTx before starting so an
			 * in-flight sink message is not collided with.
			 */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, AMS_START,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, AMS_START, 0);
			break;
		default:
			/* Re-enter SRC_READY first; the AMS runs from there */
			if (port->negotiated_rev >= PD_REV30)
				tcpm_set_state(port, SRC_READY,
					       cc_req == SINK_TX_OK ?
					       PD_T_SINK_TX : 0);
			else
				tcpm_set_state(port, SRC_READY, 0);
			break;
		}
	} else {
		/*
		 * Sink: on PD3.0 we may only start an AMS when the source
		 * advertises SinkTxOk (resets are always allowed).
		 */
		if (port->negotiated_rev >= PD_REV30 &&
		    !tcpm_sink_tx_ok(port) &&
		    ams != SOFT_RESET_AMS &&
		    ams != HARD_RESET) {
			port->upcoming_state = INVALID_STATE;
			tcpm_log(port, "Sink TX No Go");
			return -EAGAIN;
		}

		port->ams = ams;

		if (ams == HARD_RESET) {
			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
			tcpm_set_state(port, HARD_RESET_START, 0);
			return ret;
		} else if (tcpm_vdm_ams(port)) {
			return ret;
		}

		if (port->state == SNK_READY ||
		    port->state == SNK_SOFT_RESET)
			tcpm_set_state(port, AMS_START, 0);
		else
			tcpm_set_state(port, SNK_READY, 0);
	}

	return ret;
}
1590
1591 /*
1592 * VDM/VDO handling functions
1593 */
/*
 * Queue a VDM for transmission by the VDM state machine.
 *
 * @header:      VDM header word (becomes vdo_data[0])
 * @data:        remaining VDOs (may be NULL when @cnt is 0)
 * @cnt:         number of VDOs in @data, excluding the header
 * @tx_sop_type: destination packet type (SOP or SOP')
 *
 * Caller must hold port->lock.
 */
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
			   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
	u32 vdo_hdr = port->vdo_data[0];

	WARN_ON(!mutex_is_locked(&port->lock));

	/* If is sending discover_identity, handle received message first */
	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
		/* Re-arm the interrupted Discover Identity for a later retry */
		if (tx_sop_type == TCPC_TX_SOP_PRIME)
			port->send_discover_prime = true;
		else
			port->send_discover = true;
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
	} else {
		/* Make sure we are not still processing a previous VDM packet */
		WARN_ON(port->vdm_state > VDM_STATE_DONE);
	}

	/* vdo_data[0] is the header, followed by @cnt payload VDOs */
	port->vdo_count = cnt + 1;
	port->vdo_data[0] = header;
	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
	/* Set ready, vdm state machine will actually send */
	port->vdm_retries = 0;
	port->vdm_state = VDM_STATE_READY;
	port->vdm_sm_running = true;

	port->tx_sop_type = tx_sop_type;

	mod_vdm_delayed_work(port, 0);
}
1625
tcpm_queue_vdm_work(struct kthread_work * work)1626 static void tcpm_queue_vdm_work(struct kthread_work *work)
1627 {
1628 struct altmode_vdm_event *event = container_of(work,
1629 struct altmode_vdm_event,
1630 work);
1631 struct tcpm_port *port = event->port;
1632
1633 mutex_lock(&port->lock);
1634 if (port->state != SRC_READY && port->state != SNK_READY &&
1635 port->state != SRC_VDM_IDENTITY_REQUEST) {
1636 tcpm_log_force(port, "dropping altmode_vdm_event");
1637 goto port_unlock;
1638 }
1639
1640 tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type);
1641
1642 port_unlock:
1643 kfree(event->data);
1644 kfree(event);
1645 mutex_unlock(&port->lock);
1646 }
1647
tcpm_queue_vdm_unlocked(struct tcpm_port * port,const u32 header,const u32 * data,int cnt,enum tcpm_transmit_type tx_sop_type)1648 static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1649 const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1650 {
1651 struct altmode_vdm_event *event;
1652 u32 *data_cpy;
1653 int ret = -ENOMEM;
1654
1655 event = kzalloc(sizeof(*event), GFP_KERNEL);
1656 if (!event)
1657 goto err_event;
1658
1659 data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
1660 if (!data_cpy)
1661 goto err_data;
1662
1663 kthread_init_work(&event->work, tcpm_queue_vdm_work);
1664 event->port = port;
1665 event->header = header;
1666 memcpy(data_cpy, data, sizeof(u32) * cnt);
1667 event->data = data_cpy;
1668 event->cnt = cnt;
1669 event->tx_sop_type = tx_sop_type;
1670
1671 ret = kthread_queue_work(port->wq, &event->work);
1672 if (!ret) {
1673 ret = -EBUSY;
1674 goto err_queue;
1675 }
1676
1677 return 0;
1678
1679 err_queue:
1680 kfree(data_cpy);
1681 err_data:
1682 kfree(event);
1683 err_event:
1684 tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret);
1685 return ret;
1686 }
1687
svdm_consume_identity(struct tcpm_port * port,const u32 * p,int cnt)1688 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1689 {
1690 u32 vdo = p[VDO_INDEX_IDH];
1691 u32 product = p[VDO_INDEX_PRODUCT];
1692
1693 memset(&port->mode_data, 0, sizeof(port->mode_data));
1694
1695 port->partner_ident.id_header = vdo;
1696 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1697 port->partner_ident.product = product;
1698
1699 if (port->partner)
1700 typec_partner_set_identity(port->partner);
1701
1702 tcpm_log(port, "Identity: %04x:%04x.%04x",
1703 PD_IDH_VID(vdo),
1704 PD_PRODUCT_PID(product), product & 0xffff);
1705 }
1706
/*
 * Consume a Discover Identity response received on SOP' (cable plug):
 * record the cable identity, register the cable with the typec class if
 * not done yet, and register the SOP' plug device.
 */
static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
{
	u32 idh = p[VDO_INDEX_IDH];
	u32 product = p[VDO_INDEX_PRODUCT];
	int svdm_version;

	/*
	 * Attempt to consume identity only if cable currently is not set
	 */
	if (!IS_ERR_OR_NULL(port->cable))
		goto register_plug;

	/* Reset cable identity */
	memset(&port->cable_ident, 0, sizeof(port->cable_ident));

	/* Fill out id header, cert, product, cable VDO 1 */
	port->cable_ident.id_header = idh;
	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
	port->cable_ident.product = product;
	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];

	/* Fill out cable desc, infer svdm_version from pd revision */
	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
							USB_PLUG_TYPE_A);
	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
	/* Log PD Revision and additional cable VDO from negotiated revision */
	switch (port->negotiated_rev_prime) {
	case PD_REV30:
		port->cable_desc.pd_revision = 0x0300;
		/* Active cables carry a second cable VDO in PD 3.0 */
		if (port->cable_desc.active)
			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
		break;
	case PD_REV20:
		port->cable_desc.pd_revision = 0x0200;
		break;
	default:
		/* Unknown revision: conservatively report PD 2.0 */
		port->cable_desc.pd_revision = 0x0200;
		break;
	}
	port->cable_desc.identity = &port->cable_ident;
	/* Register Cable, set identity and svdm_version */
	port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
	if (IS_ERR_OR_NULL(port->cable))
		return;
	typec_cable_set_identity(port->cable);
	/* Get SVDM version */
	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
	typec_cable_set_svdm_version(port->cable, svdm_version);

register_plug:
	/* Register the SOP' plug device once, if not already present */
	if (IS_ERR_OR_NULL(port->plug_prime)) {
		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
		port->plug_prime = typec_register_plug(port->cable,
						       &port->plug_prime_desc);
	}
}
1763
svdm_consume_svids(struct tcpm_port * port,const u32 * p,int cnt,enum tcpm_transmit_type rx_sop_type)1764 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1765 enum tcpm_transmit_type rx_sop_type)
1766 {
1767 struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1768 &port->mode_data_prime : &port->mode_data;
1769 int i;
1770
1771 for (i = 1; i < cnt; i++) {
1772 u16 svid;
1773
1774 svid = (p[i] >> 16) & 0xffff;
1775 if (!svid)
1776 return false;
1777
1778 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1779 goto abort;
1780
1781 pmdata->svids[pmdata->nsvids++] = svid;
1782 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1783
1784 svid = p[i] & 0xffff;
1785 if (!svid)
1786 return false;
1787
1788 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1789 goto abort;
1790
1791 pmdata->svids[pmdata->nsvids++] = svid;
1792 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1793 }
1794
1795 /*
1796 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1797 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
1798 * 6-19). If the Respondersupports 12 or more SVID then the Discover
1799 * SVIDs Command Shall be executed multiple times until a Discover
1800 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1801 * the last part of the last VDO or with a VDO containing two SVIDs
1802 * with values of 0x0000.
1803 *
1804 * However, some odd dockers support SVIDs less than 12 but without
1805 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1806 * request and return false here.
1807 */
1808 return cnt == 7;
1809 abort:
1810 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1811 return false;
1812 }
1813
svdm_consume_modes(struct tcpm_port * port,const u32 * p,int cnt,enum tcpm_transmit_type rx_sop_type)1814 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1815 enum tcpm_transmit_type rx_sop_type)
1816 {
1817 struct pd_mode_data *pmdata = &port->mode_data;
1818 struct typec_altmode_desc *paltmode;
1819 int i;
1820
1821 switch (rx_sop_type) {
1822 case TCPC_TX_SOP_PRIME:
1823 pmdata = &port->mode_data_prime;
1824 if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1825 /* Already logged in svdm_consume_svids() */
1826 return;
1827 }
1828 break;
1829 case TCPC_TX_SOP:
1830 pmdata = &port->mode_data;
1831 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1832 /* Already logged in svdm_consume_svids() */
1833 return;
1834 }
1835 break;
1836 default:
1837 return;
1838 }
1839
1840 for (i = 1; i < cnt; i++) {
1841 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1842 memset(paltmode, 0, sizeof(*paltmode));
1843
1844 paltmode->svid = pmdata->svids[pmdata->svid_index];
1845 paltmode->mode = i;
1846 paltmode->vdo = p[i];
1847
1848 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1849 pmdata->altmodes, paltmode->svid,
1850 paltmode->mode, paltmode->vdo);
1851
1852 pmdata->altmodes++;
1853 }
1854 }
1855
tcpm_register_partner_altmodes(struct tcpm_port * port)1856 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1857 {
1858 struct pd_mode_data *modep = &port->mode_data;
1859 struct typec_altmode *altmode;
1860 int i;
1861
1862 if (!port->partner)
1863 return;
1864
1865 for (i = 0; i < modep->altmodes; i++) {
1866 altmode = typec_partner_register_altmode(port->partner,
1867 &modep->altmode_desc[i]);
1868 if (IS_ERR(altmode)) {
1869 tcpm_log(port, "Failed to register partner SVID 0x%04x",
1870 modep->altmode_desc[i].svid);
1871 altmode = NULL;
1872 }
1873 port->partner_altmode[i] = altmode;
1874 }
1875 }
1876
tcpm_register_plug_altmodes(struct tcpm_port * port)1877 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1878 {
1879 struct pd_mode_data *modep = &port->mode_data_prime;
1880 struct typec_altmode *altmode;
1881 int i;
1882
1883 typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1884
1885 for (i = 0; i < modep->altmodes; i++) {
1886 altmode = typec_plug_register_altmode(port->plug_prime,
1887 &modep->altmode_desc[i]);
1888 if (IS_ERR(altmode)) {
1889 tcpm_log(port, "Failed to register plug SVID 0x%04x",
1890 modep->altmode_desc[i].svid);
1891 altmode = NULL;
1892 }
1893 port->plug_prime_altmode[i] = altmode;
1894 }
1895 }
1896
/*
 * Identity-header capability checks. Note the macro argument is
 * parenthesized before dereferencing, so any pointer expression may be
 * passed (fixes the previously unhygienic supports_host()).
 */
#define supports_modal(port)		PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
#define supports_modal_cable(port)	PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
#define supports_host(port)		PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
1900
1901 /*
1902 * Helper to determine whether the port is capable of SOP' communication at the
1903 * current point in time.
1904 */
tcpm_can_communicate_sop_prime(struct tcpm_port * port)1905 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1906 {
1907 /* Check to see if tcpc supports SOP' communication */
1908 if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1909 return false;
1910 /*
1911 * Power Delivery 2.0 Section 6.3.11
1912 * Before communicating with a Cable Plug a Port Should ensure that it
1913 * is the Vconn Source and that the Cable Plugs are powered by
1914 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1915 * that the present Vconn Source is supplying Vconn, the only means to
1916 * ensure that the Cable Plugs are powered is for a Port wishing to
1917 * communicate with a Cable Plug is to become the Vconn Source.
1918 *
1919 * Power Delivery 3.0 Section 6.3.11
1920 * Before communicating with a Cable Plug a Port Shall ensure that it
1921 * is the Vconn source.
1922 */
1923 if (port->vconn_role != TYPEC_SOURCE)
1924 return false;
1925 /*
1926 * Power Delivery 2.0 Section 2.4.4
1927 * When no Contract or an Implicit Contract is in place the Source can
1928 * communicate with a Cable Plug using SOP' packets in order to discover
1929 * its characteristics.
1930 *
1931 * Power Delivery 3.0 Section 2.4.4
1932 * When no Contract or an Implicit Contract is in place only the Source
1933 * port that is supplying Vconn is allowed to send packets to a Cable
1934 * Plug and is allowed to respond to packets from the Cable Plug.
1935 */
1936 if (!port->explicit_contract)
1937 return port->pwr_role == TYPEC_SOURCE;
1938 if (port->negotiated_rev == PD_REV30)
1939 return true;
1940 /*
1941 * Power Delivery 2.0 Section 2.4.4
1942 *
1943 * When an Explicit Contract is in place the DFP (either the Source or
1944 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1945 * Packets (see Figure 2-3).
1946 */
1947 if (port->negotiated_rev == PD_REV20)
1948 return port->data_role == TYPEC_HOST;
1949 return false;
1950 }
1951
tcpm_attempt_vconn_swap_discovery(struct tcpm_port * port)1952 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1953 {
1954 if (!port->tcpc->attempt_vconn_swap_discovery)
1955 return false;
1956
1957 /* Port is already source, no need to perform swap */
1958 if (port->vconn_role == TYPEC_SOURCE)
1959 return false;
1960
1961 /*
1962 * Partner needs to support Alternate Modes with modal support. If
1963 * partner is also capable of being a USB Host, it could be a device
1964 * that supports Alternate Modes as the DFP.
1965 */
1966 if (!supports_modal(port) || supports_host(port))
1967 return false;
1968
1969 if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1970 port->negotiated_rev == PD_REV30)
1971 return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1972
1973 return false;
1974 }
1975
1976
tcpm_cable_vdm_supported(struct tcpm_port * port)1977 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1978 {
1979 return !IS_ERR_OR_NULL(port->cable) &&
1980 typec_cable_is_active(port->cable) &&
1981 supports_modal_cable(port) &&
1982 tcpm_can_communicate_sop_prime(port);
1983 }
1984
tcpm_pd_svdm(struct tcpm_port * port,struct typec_altmode * adev,const u32 * p,int cnt,u32 * response,enum adev_actions * adev_action,enum tcpm_transmit_type rx_sop_type,enum tcpm_transmit_type * response_tx_sop_type)1985 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1986 const u32 *p, int cnt, u32 *response,
1987 enum adev_actions *adev_action,
1988 enum tcpm_transmit_type rx_sop_type,
1989 enum tcpm_transmit_type *response_tx_sop_type)
1990 {
1991 struct typec_port *typec = port->typec_port;
1992 struct typec_altmode *pdev, *pdev_prime;
1993 struct pd_mode_data *modep, *modep_prime;
1994 int svdm_version;
1995 int rlen = 0;
1996 int cmd_type;
1997 int cmd;
1998 int i;
1999 int ret;
2000
2001 cmd_type = PD_VDO_CMDT(p[0]);
2002 cmd = PD_VDO_CMD(p[0]);
2003
2004 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
2005 p[0], cmd_type, cmd, cnt);
2006
2007 switch (rx_sop_type) {
2008 case TCPC_TX_SOP_PRIME:
2009 modep_prime = &port->mode_data_prime;
2010 pdev_prime = typec_match_altmode(port->plug_prime_altmode,
2011 ALTMODE_DISCOVERY_MAX,
2012 PD_VDO_VID(p[0]),
2013 PD_VDO_OPOS(p[0]));
2014 svdm_version = typec_get_cable_svdm_version(typec);
2015 /*
2016 * Update SVDM version if cable was discovered before port partner.
2017 */
2018 if (!IS_ERR_OR_NULL(port->cable) &&
2019 PD_VDO_SVDM_VER(p[0]) < svdm_version)
2020 typec_cable_set_svdm_version(port->cable, svdm_version);
2021 break;
2022 case TCPC_TX_SOP:
2023 modep = &port->mode_data;
2024 pdev = typec_match_altmode(port->partner_altmode,
2025 ALTMODE_DISCOVERY_MAX,
2026 PD_VDO_VID(p[0]),
2027 PD_VDO_OPOS(p[0]));
2028 svdm_version = typec_get_negotiated_svdm_version(typec);
2029 if (svdm_version < 0)
2030 return 0;
2031 break;
2032 default:
2033 modep = &port->mode_data;
2034 pdev = typec_match_altmode(port->partner_altmode,
2035 ALTMODE_DISCOVERY_MAX,
2036 PD_VDO_VID(p[0]),
2037 PD_VDO_OPOS(p[0]));
2038 svdm_version = typec_get_negotiated_svdm_version(typec);
2039 if (svdm_version < 0)
2040 return 0;
2041 break;
2042 }
2043
2044 switch (cmd_type) {
2045 case CMDT_INIT:
2046 /*
2047 * Only the port or port partner is allowed to initialize SVDM
2048 * commands over SOP'. In case the port partner initializes a
2049 * sequence when it is not allowed to send SOP' messages, drop
2050 * the message should the TCPM port try to process it.
2051 */
2052 if (rx_sop_type == TCPC_TX_SOP_PRIME)
2053 return 0;
2054
2055 switch (cmd) {
2056 case CMD_DISCOVER_IDENT:
2057 if (PD_VDO_VID(p[0]) != USB_SID_PD)
2058 break;
2059
2060 if (IS_ERR_OR_NULL(port->partner))
2061 break;
2062
2063 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2064 typec_partner_set_svdm_version(port->partner,
2065 PD_VDO_SVDM_VER(p[0]));
2066 svdm_version = PD_VDO_SVDM_VER(p[0]);
2067 }
2068
2069 port->ams = DISCOVER_IDENTITY;
2070 /*
2071 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
2072 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
2073 * "wrong configuation" or "Unrecognized"
2074 */
2075 if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
2076 port->nr_snk_vdo) {
2077 if (svdm_version < SVDM_VER_2_0) {
2078 for (i = 0; i < port->nr_snk_vdo_v1; i++)
2079 response[i + 1] = port->snk_vdo_v1[i];
2080 rlen = port->nr_snk_vdo_v1 + 1;
2081
2082 } else {
2083 for (i = 0; i < port->nr_snk_vdo; i++)
2084 response[i + 1] = port->snk_vdo[i];
2085 rlen = port->nr_snk_vdo + 1;
2086 }
2087 }
2088 break;
2089 case CMD_DISCOVER_SVID:
2090 port->ams = DISCOVER_SVIDS;
2091 break;
2092 case CMD_DISCOVER_MODES:
2093 port->ams = DISCOVER_MODES;
2094 break;
2095 case CMD_ENTER_MODE:
2096 port->ams = DFP_TO_UFP_ENTER_MODE;
2097 break;
2098 case CMD_EXIT_MODE:
2099 port->ams = DFP_TO_UFP_EXIT_MODE;
2100 break;
2101 case CMD_ATTENTION:
2102 /* Attention command does not have response */
2103 *adev_action = ADEV_ATTENTION;
2104 return 0;
2105 default:
2106 break;
2107 }
2108 if (rlen >= 1) {
2109 response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
2110 } else if (rlen == 0) {
2111 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2112 rlen = 1;
2113 } else {
2114 response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2115 rlen = 1;
2116 }
2117 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2118 (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2119 break;
2120 case CMDT_RSP_ACK:
2121 /*
2122 * Silently drop message if we are not connected, but can process
2123 * if SOP' Discover Identity prior to explicit contract.
2124 */
2125 if (IS_ERR_OR_NULL(port->partner) &&
2126 !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2127 break;
2128
2129 tcpm_ams_finish(port);
2130
2131 switch (cmd) {
2132 /*
2133 * SVDM Command Flow for SOP and SOP':
2134 * SOP Discover Identity
2135 * SOP' Discover Identity
2136 * SOP Discover SVIDs
2137 * Discover Modes
2138 * (Active Cables)
2139 * SOP' Discover SVIDs
2140 * Discover Modes
2141 *
2142 * Perform Discover SOP' if the port can communicate with cable
2143 * plug.
2144 */
2145 case CMD_DISCOVER_IDENT:
2146 switch (rx_sop_type) {
2147 case TCPC_TX_SOP:
2148 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2149 typec_partner_set_svdm_version(port->partner,
2150 PD_VDO_SVDM_VER(p[0]));
2151 /* If cable is discovered before partner, downgrade svdm */
2152 if (!IS_ERR_OR_NULL(port->cable) &&
2153 (typec_get_cable_svdm_version(port->typec_port) >
2154 svdm_version))
2155 typec_cable_set_svdm_version(port->cable,
2156 svdm_version);
2157 }
2158 /* 6.4.4.3.1 */
2159 svdm_consume_identity(port, p, cnt);
2160 /* Attempt Vconn swap, delay SOP' discovery if necessary */
2161 if (tcpm_attempt_vconn_swap_discovery(port)) {
2162 port->send_discover_prime = true;
2163 port->upcoming_state = VCONN_SWAP_SEND;
2164 ret = tcpm_ams_start(port, VCONN_SWAP);
2165 if (!ret)
2166 return 0;
2167 /* Cannot perform Vconn swap */
2168 port->upcoming_state = INVALID_STATE;
2169 port->send_discover_prime = false;
2170 }
2171
2172 /*
2173 * Attempt Discover Identity on SOP' if the
2174 * cable was not discovered previously, and use
2175 * the SVDM version of the partner to probe.
2176 */
2177 if (IS_ERR_OR_NULL(port->cable) &&
2178 tcpm_can_communicate_sop_prime(port)) {
2179 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2180 port->send_discover_prime = true;
2181 response[0] = VDO(USB_SID_PD, 1,
2182 typec_get_negotiated_svdm_version(typec),
2183 CMD_DISCOVER_IDENT);
2184 rlen = 1;
2185 } else {
2186 *response_tx_sop_type = TCPC_TX_SOP;
2187 response[0] = VDO(USB_SID_PD, 1,
2188 typec_get_negotiated_svdm_version(typec),
2189 CMD_DISCOVER_SVID);
2190 rlen = 1;
2191 }
2192 break;
2193 case TCPC_TX_SOP_PRIME:
2194 /*
2195 * svdm_consume_identity_sop_prime will determine
2196 * the svdm_version for the cable moving forward.
2197 */
2198 svdm_consume_identity_sop_prime(port, p, cnt);
2199
2200 /*
2201 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2202 * to SRC_SEND_CAPABILITIES
2203 */
2204 if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2205 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2206 return 0;
2207 }
2208
2209 *response_tx_sop_type = TCPC_TX_SOP;
2210 response[0] = VDO(USB_SID_PD, 1,
2211 typec_get_negotiated_svdm_version(typec),
2212 CMD_DISCOVER_SVID);
2213 rlen = 1;
2214 break;
2215 default:
2216 return 0;
2217 }
2218 break;
2219 case CMD_DISCOVER_SVID:
2220 *response_tx_sop_type = rx_sop_type;
2221 /* 6.4.4.3.2 */
2222 if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2223 response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2224 rlen = 1;
2225 } else {
2226 if (rx_sop_type == TCPC_TX_SOP) {
2227 if (modep->nsvids && supports_modal(port)) {
2228 response[0] = VDO(modep->svids[0], 1, svdm_version,
2229 CMD_DISCOVER_MODES);
2230 rlen = 1;
2231 }
2232 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2233 if (modep_prime->nsvids) {
2234 response[0] = VDO(modep_prime->svids[0], 1,
2235 svdm_version, CMD_DISCOVER_MODES);
2236 rlen = 1;
2237 }
2238 }
2239 }
2240 break;
2241 case CMD_DISCOVER_MODES:
2242 if (rx_sop_type == TCPC_TX_SOP) {
2243 /* 6.4.4.3.3 */
2244 svdm_consume_modes(port, p, cnt, rx_sop_type);
2245 modep->svid_index++;
2246 if (modep->svid_index < modep->nsvids) {
2247 u16 svid = modep->svids[modep->svid_index];
2248 *response_tx_sop_type = TCPC_TX_SOP;
2249 response[0] = VDO(svid, 1, svdm_version,
2250 CMD_DISCOVER_MODES);
2251 rlen = 1;
2252 } else if (tcpm_cable_vdm_supported(port)) {
2253 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2254 response[0] = VDO(USB_SID_PD, 1,
2255 typec_get_cable_svdm_version(typec),
2256 CMD_DISCOVER_SVID);
2257 rlen = 1;
2258 } else {
2259 tcpm_register_partner_altmodes(port);
2260 }
2261 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2262 /* 6.4.4.3.3 */
2263 svdm_consume_modes(port, p, cnt, rx_sop_type);
2264 modep_prime->svid_index++;
2265 if (modep_prime->svid_index < modep_prime->nsvids) {
2266 u16 svid = modep_prime->svids[modep_prime->svid_index];
2267 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2268 response[0] = VDO(svid, 1,
2269 typec_get_cable_svdm_version(typec),
2270 CMD_DISCOVER_MODES);
2271 rlen = 1;
2272 } else {
2273 tcpm_register_plug_altmodes(port);
2274 tcpm_register_partner_altmodes(port);
2275 }
2276 }
2277 break;
2278 case CMD_ENTER_MODE:
2279 *response_tx_sop_type = rx_sop_type;
2280 if (rx_sop_type == TCPC_TX_SOP) {
2281 if (adev && pdev) {
2282 typec_altmode_update_active(pdev, true);
2283 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2284 }
2285 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2286 if (adev && pdev_prime) {
2287 typec_altmode_update_active(pdev_prime, true);
2288 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2289 }
2290 }
2291 return 0;
2292 case CMD_EXIT_MODE:
2293 *response_tx_sop_type = rx_sop_type;
2294 if (rx_sop_type == TCPC_TX_SOP) {
2295 if (adev && pdev) {
2296 typec_altmode_update_active(pdev, false);
2297 /* Back to USB Operation */
2298 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2299 return 0;
2300 }
2301 }
2302 break;
2303 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2304 break;
2305 default:
2306 /* Unrecognized SVDM */
2307 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2308 rlen = 1;
2309 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2310 (VDO_SVDM_VERS(svdm_version));
2311 break;
2312 }
2313 break;
2314 case CMDT_RSP_NAK:
2315 tcpm_ams_finish(port);
2316 switch (cmd) {
2317 case CMD_DISCOVER_IDENT:
2318 case CMD_DISCOVER_SVID:
2319 case CMD_DISCOVER_MODES:
2320 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2321 break;
2322 case CMD_ENTER_MODE:
2323 /* Back to USB Operation */
2324 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2325 return 0;
2326 default:
2327 /* Unrecognized SVDM */
2328 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2329 rlen = 1;
2330 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2331 (VDO_SVDM_VERS(svdm_version));
2332 break;
2333 }
2334 break;
2335 default:
2336 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2337 rlen = 1;
2338 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2339 (VDO_SVDM_VERS(svdm_version));
2340 break;
2341 }
2342
2343 /* Informing the alternate mode drivers about everything */
2344 *adev_action = ADEV_QUEUE_VDM;
2345 return rlen;
2346 }
2347
2348 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2349 enum pd_msg_request message,
2350 enum tcpm_ams ams);
2351
/*
 * Handle a received Vendor Defined Message.
 *
 * Converts the little-endian payload, matches it against the port's own
 * registered alt modes, dispatches Structured VDMs to tcpm_pd_svdm(), and
 * then notifies the matched alt-mode device outside of the port lock.
 * Any response produced is queued for transmission at the end.
 *
 * Called with port->lock held; the lock is temporarily dropped around the
 * typec_altmode_* notifications (see the lock-inversion comment below).
 */
static void tcpm_handle_vdm_request(struct tcpm_port *port,
				    const __le32 *payload, int cnt,
				    enum tcpm_transmit_type rx_sop_type)
{
	enum adev_actions adev_action = ADEV_NONE;
	struct typec_altmode *adev;
	u32 p[PD_MAX_PAYLOAD];
	u32 response[8] = { };
	int i, rlen = 0;
	enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;

	/* Convert payload from wire (LE) to CPU byte order */
	for (i = 0; i < cnt; i++)
		p[i] = le32_to_cpu(payload[i]);

	/* Look up the port alt mode matching the VDM header's SVID/opos */
	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));

	if (port->vdm_state == VDM_STATE_BUSY) {
		/* If UFP responded busy retry after timeout */
		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
			/* Remember the original request (as INIT) for the retry */
			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
				CMDT_INIT;
			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
			return;
		}
		port->vdm_state = VDM_STATE_DONE;
	}

	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
		/*
		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
		 * advance because we are dropping the lock but may send VDMs soon.
		 * For the cases of INIT received:
		 *  - If no response to send, it will be cleared later in this function.
		 *  - If there are responses to send, it will be cleared in the state machine.
		 * For the cases of RSP received:
		 *  - If no further INIT to send, it will be cleared later in this function.
		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
		 *    back here until no further INIT to send.
		 * For the cases of unknown type received:
		 *  - We will send NAK and the flag will be cleared in the state machine.
		 */
		port->vdm_sm_running = true;
		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
				    rx_sop_type, &response_tx_sop_type);
	} else {
		/* Unstructured VDM (or nothing to handle it): PD3.0 replies Not_Supported */
		if (port->negotiated_rev >= PD_REV30)
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
	}

	/*
	 * We are done with any state stored in the port struct now, except
	 * for any port struct changes done by the tcpm_queue_vdm() call
	 * below, which is a separate operation.
	 *
	 * So we can safely release the lock here; and we MUST release the
	 * lock here to avoid an AB BA lock inversion:
	 *
	 * If we keep the lock here then the lock ordering in this path is:
	 * 1. tcpm_pd_rx_handler take the tcpm port lock
	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
	 *
	 * And we also have this ordering:
	 * 1. alt-mode driver takes the alt-mode's lock
	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
	 *    tcpm port lock
	 *
	 * Dropping our lock here avoids this.
	 */
	mutex_unlock(&port->lock);

	if (adev) {
		/* Forward the VDM / event to the alt-mode driver as decided above */
		switch (adev_action) {
		case ADEV_NONE:
			break;
		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
			typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM:
			if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
			else
				typec_altmode_vdm(adev, p[0], &p[1], cnt);
			break;
		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
			/* If the alt-mode driver rejects the VDM, exit the mode again */
			if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
							    p[0], &p[1], cnt)) {
					int svdm_version = typec_get_cable_svdm_version(
								port->typec_port);
					if (svdm_version < 0)
						break;

					response[0] = VDO(adev->svid, 1, svdm_version,
							  CMD_EXIT_MODE);
					response[0] |= VDO_OPOS(adev->mode);
					rlen = 1;
				}
			} else {
				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
					int svdm_version = typec_get_negotiated_svdm_version(
								port->typec_port);
					if (svdm_version < 0)
						break;

					response[0] = VDO(adev->svid, 1, svdm_version,
							  CMD_EXIT_MODE);
					response[0] |= VDO_OPOS(adev->mode);
					rlen = 1;
				}
			}
			break;
		case ADEV_ATTENTION:
			if (typec_altmode_attention(adev, p[1]))
				tcpm_log(port, "typec_altmode_attention no port partner altmode");
			break;
		}
	}

	/*
	 * We must re-take the lock here to balance the unlock in
	 * tcpm_pd_rx_handler, note that no changes, other then the
	 * tcpm_queue_vdm call, are made while the lock is held again.
	 * All that is done after the call is unwinding the call stack until
	 * we return to tcpm_pd_rx_handler and do the unlock there.
	 */
	mutex_lock(&port->lock);

	if (rlen > 0)
		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
	else
		port->vdm_sm_running = false;
}
2487
/*
 * Build a VDM header for @vid/@cmd and queue the message (plus @count
 * trailing VDOs from @data) for transmission on @tx_sop_type.
 */
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
			  const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
{
	int svdm_version;
	u32 vdm_hdr;

	if (tx_sop_type == TCPC_TX_SOP_PRIME) {
		/*
		 * If the port partner is discovered, then the port partner's
		 * SVDM Version will be returned
		 */
		svdm_version = typec_get_cable_svdm_version(port->typec_port);
		if (svdm_version < 0)
			svdm_version = SVDM_VER_MAX;
	} else {
		/* SOP (and any other type) uses the negotiated partner version */
		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
		if (svdm_version < 0)
			return;
	}

	if (WARN_ON(count > VDO_MAX_SIZE - 1))
		count = VDO_MAX_SIZE - 1;

	/* set VDM header with VID & CMD */
	vdm_hdr = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
		      1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
		      svdm_version, cmd);
	tcpm_queue_vdm(port, vdm_hdr, data, count, tx_sop_type);
}
2525
/*
 * Return the response timeout (ms) to arm after transmitting the VDM whose
 * header is @vdm_hdr: unstructured VDMs get a fixed timeout, structured
 * VDMs depend on command type (initiator vs responder) and on whether the
 * command is an Enter/Exit Mode request.
 */
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
{
	int cmd = PD_VDO_CMD(vdm_hdr);
	bool mode_op;

	/* its not a structured VDM command */
	if (!PD_VDO_SVDM(vdm_hdr))
		return PD_T_VDM_UNSTRUCTURED;

	mode_op = (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE);

	if (PD_VDO_CMDT(vdm_hdr) == CMDT_INIT)
		return mode_op ? PD_T_VDM_WAIT_MODE_E : PD_T_VDM_SNDR_RSP;

	return mode_op ? PD_T_VDM_E_MODE : PD_T_VDM_RCVR_RSP;
}
2551
/*
 * Run one step of the VDM transmit state machine.
 *
 * VDM_STATE_READY:        start the matching AMS (for SVDM INITs) and move
 *                         to SEND_MESSAGE, possibly after the SinkTxOk wait.
 * VDM_STATE_WAIT_RSP_BUSY: re-arm the retried request saved in vdo_retry.
 * VDM_STATE_BUSY:         response timeout expired -> error state.
 * VDM_STATE_ERR_SEND:     handle transmit failure (retry or give up).
 * VDM_STATE_SEND_MESSAGE: build the PD header + payload and transmit.
 *
 * Called via vdm_state_machine_work() with port->lock held.
 */
static void vdm_run_state_machine(struct tcpm_port *port)
{
	struct pd_message msg;
	int i, res = 0;
	u32 vdo_hdr = port->vdo_data[0];
	u32 response[8] = { };

	switch (port->vdm_state) {
	case VDM_STATE_READY:
		/* Only transmit VDM if attached */
		if (!port->attached) {
			port->vdm_state = VDM_STATE_ERR_BUSY;
			break;
		}

		/*
		 * if there's traffic or we're not in PDO ready state don't send
		 * a VDM.
		 */
		if (port->state != SRC_READY && port->state != SNK_READY &&
		    port->state != SRC_VDM_IDENTITY_REQUEST) {
			port->vdm_sm_running = false;
			break;
		}

		/* TODO: AMS operation for Unstructured VDM */
		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
			/* Map the SVDM command to its AMS and try to start it */
			switch (PD_VDO_CMD(vdo_hdr)) {
			case CMD_DISCOVER_IDENT:
				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
				if (res == 0) {
					/* AMS started: clear the pending-discover flag for this SOP* */
					switch (port->tx_sop_type) {
					case TCPC_TX_SOP_PRIME:
						port->send_discover_prime = false;
						break;
					case TCPC_TX_SOP:
						port->send_discover = false;
						break;
					default:
						port->send_discover = false;
						break;
					}
				} else if (res == -EAGAIN) {
					/* Cannot start now: drop the queued VDM and retry later */
					port->vdo_data[0] = 0;
					mod_send_discover_delayed_work(port,
								       SEND_DISCOVER_RETRY_MS);
				}
				break;
			case CMD_DISCOVER_SVID:
				res = tcpm_ams_start(port, DISCOVER_SVIDS);
				break;
			case CMD_DISCOVER_MODES:
				res = tcpm_ams_start(port, DISCOVER_MODES);
				break;
			case CMD_ENTER_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
				break;
			case CMD_EXIT_MODE:
				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
				break;
			case CMD_ATTENTION:
				res = tcpm_ams_start(port, ATTENTION);
				break;
			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
				res = tcpm_ams_start(port, STRUCTURED_VDMS);
				break;
			default:
				res = -EOPNOTSUPP;
				break;
			}

			if (res < 0) {
				port->vdm_state = VDM_STATE_ERR_BUSY;
				return;
			}
		}

		port->vdm_state = VDM_STATE_SEND_MESSAGE;
		/*
		 * PD3.0 source initiating an SVDM must wait tSinkTx (SinkTxOk)
		 * before transmitting; otherwise send immediately.
		 */
		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
					    port->pwr_role == TYPEC_SOURCE &&
					    PD_VDO_SVDM(vdo_hdr) &&
					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
					   PD_T_SINK_TX : 0);
		break;
	case VDM_STATE_WAIT_RSP_BUSY:
		/* Partner answered BUSY earlier: requeue the saved request */
		port->vdo_data[0] = port->vdo_retry;
		port->vdo_count = 1;
		port->vdm_state = VDM_STATE_READY;
		tcpm_ams_finish(port);
		break;
	case VDM_STATE_BUSY:
		/* No response within the armed timeout */
		port->vdm_state = VDM_STATE_ERR_TMOUT;
		if (port->ams != NONE_AMS)
			tcpm_ams_finish(port);
		break;
	case VDM_STATE_ERR_SEND:
		/*
		 * When sending Discover Identity to SOP' before establishing an
		 * explicit contract, do not retry. Instead, weave sending
		 * Source_Capabilities over SOP and Discover Identity over SOP'.
		 */
		if (port->state == SRC_VDM_IDENTITY_REQUEST) {
			tcpm_ams_finish(port);
			port->vdm_state = VDM_STATE_DONE;
			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
		/*
		 * A partner which does not support USB PD will not reply,
		 * so this is not a fatal error. At the same time, some
		 * devices may not return GoodCRC under some circumstances,
		 * so we need to retry.
		 */
		} else if (port->vdm_retries < 3) {
			tcpm_log(port, "VDM Tx error, retry");
			port->vdm_retries++;
			port->vdm_state = VDM_STATE_READY;
			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
				tcpm_ams_finish(port);
		} else {
			tcpm_ams_finish(port);
			if (port->tx_sop_type == TCPC_TX_SOP)
				break;
			/* Handle SOP' Transmission Errors */
			switch (PD_VDO_CMD(vdo_hdr)) {
			/*
			 * If Discover Identity fails on SOP', then resume
			 * discovery process on SOP only.
			 */
			case CMD_DISCOVER_IDENT:
				port->vdo_data[0] = 0;
				response[0] = VDO(USB_SID_PD, 1,
						  typec_get_negotiated_svdm_version(
							port->typec_port),
						  CMD_DISCOVER_SVID);
				tcpm_queue_vdm(port, response[0], &response[1],
					       0, TCPC_TX_SOP);
				break;
			/*
			 * If Discover SVIDs or Discover Modes fail, then
			 * proceed with Alt Mode discovery process on SOP.
			 */
			case CMD_DISCOVER_SVID:
				tcpm_register_partner_altmodes(port);
				break;
			case CMD_DISCOVER_MODES:
				tcpm_register_partner_altmodes(port);
				break;
			default:
				break;
			}
		}
		break;
	case VDM_STATE_SEND_MESSAGE:
		/* Prepare and send VDM */
		memset(&msg, 0, sizeof(msg));
		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
						  0,	/* Cable Plug Indicator for DFP/UFP */
						  0,	/* Reserved */
						  port->negotiated_rev_prime,
						  port->message_id_prime,
						  port->vdo_count);
		} else {
			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
						  port->pwr_role,
						  port->data_role,
						  port->negotiated_rev,
						  port->message_id,
						  port->vdo_count);
		}
		for (i = 0; i < port->vdo_count; i++)
			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
		res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
		if (res < 0) {
			port->vdm_state = VDM_STATE_ERR_SEND;
		} else {
			unsigned long timeout;

			/* Sent: arm the response timeout for this command */
			port->vdm_retries = 0;
			port->vdo_data[0] = 0;
			port->vdm_state = VDM_STATE_BUSY;
			timeout = vdm_ready_timeout(vdo_hdr);
			mod_vdm_delayed_work(port, timeout);
		}
		break;
	default:
		break;
	}
}
2740
vdm_state_machine_work(struct kthread_work * work)2741 static void vdm_state_machine_work(struct kthread_work *work)
2742 {
2743 struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2744 enum vdm_states prev_state;
2745
2746 mutex_lock(&port->lock);
2747
2748 /*
2749 * Continue running as long as the port is not busy and there was
2750 * a state change.
2751 */
2752 do {
2753 prev_state = port->vdm_state;
2754 vdm_run_state_machine(port);
2755 } while (port->vdm_state != prev_state &&
2756 port->vdm_state != VDM_STATE_BUSY &&
2757 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2758
2759 if (port->vdm_state < VDM_STATE_READY)
2760 port->vdm_sm_running = false;
2761
2762 mutex_unlock(&port->lock);
2763 }
2764
/* Validation results for a source/sink capabilities table (see tcpm_caps_err()). */
enum pdo_err {
	PDO_NO_ERR,				/* table is valid */
	PDO_ERR_NO_VSAFE5V,			/* table is empty */
	PDO_ERR_VSAFE5V_NOT_FIRST,		/* first PDO is not vSafe5V fixed */
	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,		/* PDO types out of required order */
	PDO_ERR_FIXED_NOT_SORTED,		/* fixed PDOs not in increasing voltage */
	PDO_ERR_VARIABLE_BATT_NOT_SORTED,	/* var/batt PDOs not in increasing min voltage */
	PDO_ERR_DUPE_PDO,			/* duplicate var/batt voltage range */
	PDO_ERR_PPS_APDO_NOT_SORTED,		/* PPS APDOs not in increasing max voltage */
	PDO_ERR_DUPE_PPS_APDO,			/* duplicate PPS APDO range/current */
};
2776
/* Log messages indexed by enum pdo_err (PDO_NO_ERR intentionally has none). */
static const char * const pdo_err_msg[] = {
	[PDO_ERR_NO_VSAFE5V] =
	" err: source/sink caps should at least have vSafe5V",
	[PDO_ERR_VSAFE5V_NOT_FIRST] =
	" err: vSafe5V Fixed Supply Object Shall always be the first object",
	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
	" err: PDOs should be in the following order: Fixed; Battery; Variable",
	[PDO_ERR_FIXED_NOT_SORTED] =
	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
	[PDO_ERR_DUPE_PDO] =
	" err: Variable/Batt supply pdos cannot have same min/max voltage",
	[PDO_ERR_PPS_APDO_NOT_SORTED] =
	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
	[PDO_ERR_DUPE_PPS_APDO] =
	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
};
2795
tcpm_caps_err(struct tcpm_port * port,const u32 * pdo,unsigned int nr_pdo)2796 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2797 unsigned int nr_pdo)
2798 {
2799 unsigned int i;
2800
2801 /* Should at least contain vSafe5v */
2802 if (nr_pdo < 1)
2803 return PDO_ERR_NO_VSAFE5V;
2804
2805 /* The vSafe5V Fixed Supply Object Shall always be the first object */
2806 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2807 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2808 return PDO_ERR_VSAFE5V_NOT_FIRST;
2809
2810 for (i = 1; i < nr_pdo; i++) {
2811 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2812 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2813 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2814 enum pd_pdo_type type = pdo_type(pdo[i]);
2815
2816 switch (type) {
2817 /*
2818 * The remaining Fixed Supply Objects, if
2819 * present, shall be sent in voltage order;
2820 * lowest to highest.
2821 */
2822 case PDO_TYPE_FIXED:
2823 if (pdo_fixed_voltage(pdo[i]) <=
2824 pdo_fixed_voltage(pdo[i - 1]))
2825 return PDO_ERR_FIXED_NOT_SORTED;
2826 break;
2827 /*
2828 * The Battery Supply Objects and Variable
2829 * supply, if present shall be sent in Minimum
2830 * Voltage order; lowest to highest.
2831 */
2832 case PDO_TYPE_VAR:
2833 case PDO_TYPE_BATT:
2834 if (pdo_min_voltage(pdo[i]) <
2835 pdo_min_voltage(pdo[i - 1]))
2836 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2837 else if ((pdo_min_voltage(pdo[i]) ==
2838 pdo_min_voltage(pdo[i - 1])) &&
2839 (pdo_max_voltage(pdo[i]) ==
2840 pdo_max_voltage(pdo[i - 1])))
2841 return PDO_ERR_DUPE_PDO;
2842 break;
2843 /*
2844 * The Programmable Power Supply APDOs, if present,
2845 * shall be sent in Maximum Voltage order;
2846 * lowest to highest.
2847 */
2848 case PDO_TYPE_APDO:
2849 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2850 break;
2851
2852 if (pdo_pps_apdo_max_voltage(pdo[i]) <
2853 pdo_pps_apdo_max_voltage(pdo[i - 1]))
2854 return PDO_ERR_PPS_APDO_NOT_SORTED;
2855 else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2856 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2857 pdo_pps_apdo_max_voltage(pdo[i]) ==
2858 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2859 pdo_pps_apdo_max_current(pdo[i]) ==
2860 pdo_pps_apdo_max_current(pdo[i - 1]))
2861 return PDO_ERR_DUPE_PPS_APDO;
2862 break;
2863 default:
2864 tcpm_log_force(port, " Unknown pdo type");
2865 }
2866 }
2867 }
2868
2869 return PDO_NO_ERR;
2870 }
2871
tcpm_validate_caps(struct tcpm_port * port,const u32 * pdo,unsigned int nr_pdo)2872 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2873 unsigned int nr_pdo)
2874 {
2875 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2876
2877 if (err_index != PDO_NO_ERR) {
2878 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2879 return -EINVAL;
2880 }
2881
2882 return 0;
2883 }
2884
/*
 * typec_altmode_ops .enter hook: queue an Enter Mode SVDM over SOP,
 * with an optional single VDO.
 */
static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
	int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
	u32 hdr;

	if (svdm_version < 0)
		return svdm_version;

	hdr = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE) |
	      VDO_OPOS(altmode->mode);

	return tcpm_queue_vdm_unlocked(port, hdr, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
}
2900
tcpm_altmode_exit(struct typec_altmode * altmode)2901 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2902 {
2903 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2904 int svdm_version;
2905 u32 header;
2906
2907 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2908 if (svdm_version < 0)
2909 return svdm_version;
2910
2911 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2912 header |= VDO_OPOS(altmode->mode);
2913
2914 return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2915 }
2916
/*
 * typec_altmode_ops .vdm hook: queue a raw VDM over SOP on behalf of an
 * alt-mode driver. @count includes the header, so count - 1 VDOs follow.
 */
static int tcpm_altmode_vdm(struct typec_altmode *altmode,
			    u32 header, const u32 *data, int count)
{
	struct tcpm_port *tcpm = typec_altmode_get_drvdata(altmode);

	return tcpm_queue_vdm_unlocked(tcpm, header, data, count - 1, TCPC_TX_SOP);
}
2924
/* Alt-mode operations exposed to SOP (port partner) alt-mode drivers. */
static const struct typec_altmode_ops tcpm_altmode_ops = {
	.enter = tcpm_altmode_enter,
	.exit = tcpm_altmode_exit,
	.vdm = tcpm_altmode_vdm,
};
2930
2931
/*
 * typec_cable_ops .enter hook: queue an Enter Mode SVDM over SOP' for a
 * cable-plug alt mode, with an optional single VDO.
 */
static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
				    u32 *vdo)
{
	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
	int svdm_version = typec_get_cable_svdm_version(port->typec_port);
	u32 hdr;

	if (svdm_version < 0)
		return svdm_version;

	hdr = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE) |
	      VDO_OPOS(altmode->mode);

	return tcpm_queue_vdm_unlocked(port, hdr, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
}
2948
tcpm_cable_altmode_exit(struct typec_altmode * altmode,enum typec_plug_index sop)2949 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2950 {
2951 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2952 int svdm_version;
2953 u32 header;
2954
2955 svdm_version = typec_get_cable_svdm_version(port->typec_port);
2956 if (svdm_version < 0)
2957 return svdm_version;
2958
2959 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2960 header |= VDO_OPOS(altmode->mode);
2961
2962 return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2963 }
2964
/*
 * typec_cable_ops .vdm hook: queue a raw VDM over SOP'. @count includes
 * the header, so count - 1 VDOs follow it in @data.
 */
static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
				  u32 header, const u32 *data, int count)
{
	struct tcpm_port *tcpm = typec_altmode_get_drvdata(altmode);

	return tcpm_queue_vdm_unlocked(tcpm, header, data, count - 1, TCPC_TX_SOP_PRIME);
}
2972
/* Alt-mode operations exposed to SOP' (cable plug) alt-mode drivers. */
static const struct typec_cable_ops tcpm_cable_ops = {
	.enter = tcpm_cable_altmode_enter,
	.exit = tcpm_cable_altmode_exit,
	.vdm = tcpm_cable_altmode_vdm,
};
2978
2979 /*
2980 * PD (data, control) command handling functions
2981 */
ready_state(struct tcpm_port * port)2982 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2983 {
2984 if (port->pwr_role == TYPEC_SOURCE)
2985 return SRC_READY;
2986 else
2987 return SNK_READY;
2988 }
2989
2990 static int tcpm_pd_send_control(struct tcpm_port *port,
2991 enum pd_ctrl_msg_type type,
2992 enum tcpm_transmit_type tx_sop_type);
2993
tcpm_handle_alert(struct tcpm_port * port,const __le32 * payload,int cnt)2994 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2995 int cnt)
2996 {
2997 u32 p0 = le32_to_cpu(payload[0]);
2998 unsigned int type = usb_pd_ado_type(p0);
2999
3000 if (!type) {
3001 tcpm_log(port, "Alert message received with no type");
3002 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3003 return;
3004 }
3005
3006 /* Just handling non-battery alerts for now */
3007 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
3008 if (port->pwr_role == TYPEC_SOURCE) {
3009 port->upcoming_state = GET_STATUS_SEND;
3010 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
3011 } else {
3012 /*
3013 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
3014 * SinkTxOk in time.
3015 */
3016 port->ams = GETTING_SOURCE_SINK_STATUS;
3017 tcpm_set_state(port, GET_STATUS_SEND, 0);
3018 }
3019 } else {
3020 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3021 }
3022 }
3023
/*
 * Program the TCPC's automatic VBUS-discharge threshold for the given
 * power mode and requested VBUS voltage, passing along the PPS minimum
 * voltage. Returns 0 when the TCPC does not implement the hook, otherwise
 * the hook's return value. The outcome is always force-logged.
 */
static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
						  enum typec_pwr_opmode mode, bool pps_active,
						  u32 requested_vbus_voltage)
{
	int ret;

	/* Optional TCPC callback; silently succeed when absent */
	if (!port->tcpc->set_auto_vbus_discharge_threshold)
		return 0;

	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
							    requested_vbus_voltage,
							    port->pps_data.min_volt);
	tcpm_log_force(port,
		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
		       port->pps_data.min_volt, ret);

	return ret;
}
3043
tcpm_pd_handle_state(struct tcpm_port * port,enum tcpm_state state,enum tcpm_ams ams,unsigned int delay_ms)3044 static void tcpm_pd_handle_state(struct tcpm_port *port,
3045 enum tcpm_state state,
3046 enum tcpm_ams ams,
3047 unsigned int delay_ms)
3048 {
3049 switch (port->state) {
3050 case SRC_READY:
3051 case SNK_READY:
3052 port->ams = ams;
3053 tcpm_set_state(port, state, delay_ms);
3054 break;
3055 /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
3056 case SNK_TRANSITION_SINK:
3057 case SNK_TRANSITION_SINK_VBUS:
3058 case SRC_TRANSITION_SUPPLY:
3059 tcpm_set_state(port, HARD_RESET_SEND, 0);
3060 break;
3061 default:
3062 if (!tcpm_ams_interruptible(port)) {
3063 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3064 SRC_SOFT_RESET_WAIT_SNK_TX :
3065 SNK_SOFT_RESET,
3066 0);
3067 } else {
3068 /* process the Message 6.8.1 */
3069 port->upcoming_state = state;
3070 port->next_ams = ams;
3071 tcpm_set_state(port, ready_state(port), delay_ms);
3072 }
3073 break;
3074 }
3075 }
3076
tcpm_pd_handle_msg(struct tcpm_port * port,enum pd_msg_request message,enum tcpm_ams ams)3077 static void tcpm_pd_handle_msg(struct tcpm_port *port,
3078 enum pd_msg_request message,
3079 enum tcpm_ams ams)
3080 {
3081 switch (port->state) {
3082 case SRC_READY:
3083 case SNK_READY:
3084 port->ams = ams;
3085 tcpm_queue_message(port, message);
3086 break;
3087 /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
3088 case SNK_TRANSITION_SINK:
3089 case SNK_TRANSITION_SINK_VBUS:
3090 case SRC_TRANSITION_SUPPLY:
3091 tcpm_set_state(port, HARD_RESET_SEND, 0);
3092 break;
3093 default:
3094 if (!tcpm_ams_interruptible(port)) {
3095 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3096 SRC_SOFT_RESET_WAIT_SNK_TX :
3097 SNK_SOFT_RESET,
3098 0);
3099 } else {
3100 port->next_ams = ams;
3101 tcpm_set_state(port, ready_state(port), 0);
3102 /* 6.8.1 process the Message */
3103 tcpm_queue_message(port, message);
3104 }
3105 break;
3106 }
3107 }
3108
tcpm_register_source_caps(struct tcpm_port * port)3109 static int tcpm_register_source_caps(struct tcpm_port *port)
3110 {
3111 struct usb_power_delivery_desc desc = { port->negotiated_rev };
3112 struct usb_power_delivery_capabilities_desc caps = { };
3113 struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3114
3115 if (!port->partner_pd)
3116 port->partner_pd = usb_power_delivery_register(NULL, &desc);
3117 if (IS_ERR(port->partner_pd))
3118 return PTR_ERR(port->partner_pd);
3119
3120 memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3121 caps.role = TYPEC_SOURCE;
3122
3123 if (cap) {
3124 usb_power_delivery_unregister_capabilities(cap);
3125 port->partner_source_caps = NULL;
3126 }
3127
3128 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3129 if (IS_ERR(cap))
3130 return PTR_ERR(cap);
3131
3132 port->partner_source_caps = cap;
3133
3134 return 0;
3135 }
3136
tcpm_register_sink_caps(struct tcpm_port * port)3137 static int tcpm_register_sink_caps(struct tcpm_port *port)
3138 {
3139 struct usb_power_delivery_desc desc = { port->negotiated_rev };
3140 struct usb_power_delivery_capabilities_desc caps = { };
3141 struct usb_power_delivery_capabilities *cap;
3142
3143 if (!port->partner_pd)
3144 port->partner_pd = usb_power_delivery_register(NULL, &desc);
3145 if (IS_ERR(port->partner_pd))
3146 return PTR_ERR(port->partner_pd);
3147
3148 memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3149 caps.role = TYPEC_SINK;
3150
3151 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3152 if (IS_ERR(cap))
3153 return PTR_ERR(cap);
3154
3155 port->partner_sink_caps = cap;
3156
3157 return 0;
3158 }
3159
/*
 * tcpm_pd_data_request - dispatch a received PD data message
 * @port: port that received the message
 * @msg: the received message (header + payload)
 * @rx_sop_type: which SOP* the message arrived on
 *
 * Called from the RX work handler with port->lock held.  Handles
 * Source_Capabilities, Request, Sink_Capabilities, Vendor_Defined, BIST
 * and Alert; all other data message types are answered with Reject
 * (PD 2.0 partners) or Not_Supported (PD 3.0 partners).
 */
static void tcpm_pd_data_request(struct tcpm_port *port,
				 const struct pd_message *msg,
				 enum tcpm_transmit_type rx_sop_type)
{
	enum pd_data_msg_type type = pd_header_type_le(msg->header);
	unsigned int cnt = pd_header_cnt_le(msg->header);
	unsigned int rev = pd_header_rev_le(msg->header);
	unsigned int i;
	enum frs_typec_current partner_frs_current;
	bool frs_enable;
	int ret;

	/* Any non-VDM data message interrupts a VDM AMS: abort the VDM SM */
	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_DATA_SOURCE_CAP:
		/* Cache the partner's source PDOs for later request building */
		for (i = 0; i < cnt; i++)
			port->source_caps[i] = le32_to_cpu(msg->payload[i]);

		port->nr_source_caps = cnt;

		tcpm_log_source_caps(port);

		tcpm_validate_caps(port, port->source_caps,
				   port->nr_source_caps);

		tcpm_register_source_caps(port);

		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just do nothing in that scenario.
		 */
		if (rev == PD_REV10) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			break;
		}

		if (rev < PD_MAX_REV) {
			port->negotiated_rev = rev;
			/* Keep the SOP' revision from exceeding the SOP one */
			if (port->negotiated_rev_prime > port->negotiated_rev)
				port->negotiated_rev_prime = port->negotiated_rev;
		}

		if (port->pwr_role == TYPEC_SOURCE) {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
			/* Unexpected Source Capabilities */
			else
				tcpm_pd_handle_msg(port,
						   port->negotiated_rev < PD_REV30 ?
						   PD_MSG_CTRL_REJECT :
						   PD_MSG_CTRL_NOT_SUPP,
						   NONE_AMS);
		} else if (port->state == SNK_WAIT_CAPABILITIES ||
			   port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) {
			/*
			 * This message may be received even if VBUS is not
			 * present. This is quite unexpected; see USB PD
			 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
			 * However, at the same time, we must be ready to
			 * receive this message and respond to it 15ms after
			 * receiving PS_RDY during power swap operations, no matter
			 * if VBUS is available or not (USB PD specification,
			 * section 6.5.9.2).
			 * So we need to accept the message either way,
			 * but be prepared to keep waiting for VBUS after it was
			 * handled.
			 */
			port->ams = POWER_NEGOTIATION;
			port->in_ams = true;
			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
		} else {
			if (port->ams == GET_SOURCE_CAPABILITIES)
				tcpm_ams_finish(port);
			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		}
		break;
	case PD_DATA_REQUEST:
		/*
		 * Adjust revision in subsequent message headers, as required,
		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
		 * support Rev 1.0 so just reject in that scenario.
		 */
		if (rev == PD_REV10) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		if (rev < PD_MAX_REV) {
			port->negotiated_rev = rev;
			if (port->negotiated_rev_prime > port->negotiated_rev)
				port->negotiated_rev_prime = port->negotiated_rev;
		}

		/* A Request is only valid to us as Source, with exactly one RDO */
		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
			break;
		}

		port->sink_request = le32_to_cpu(msg->payload[0]);

		/* Defer power negotiation while a VDM sequence is running */
		if (port->vdm_sm_running && port->explicit_contract) {
			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
			break;
		}

		if (port->state == SRC_SEND_CAPABILITIES)
			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
		else
			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
					     POWER_NEGOTIATION, 0);
		break;
	case PD_DATA_SINK_CAP:
		/* We don't do anything with this at the moment... */
		for (i = 0; i < cnt; i++)
			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);

		/* First PDO carries the partner's FRS current requirement */
		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
			PDO_FIXED_FRS_CURR_SHIFT;
		frs_enable = partner_frs_current && (partner_frs_current <=
						     port->new_source_frs_current);
		tcpm_log(port,
			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
		if (frs_enable) {
			/*
			 * NOTE(review): enable_frs is called unconditionally
			 * here — presumably the tcpc provides it whenever FRS
			 * currents are configured; confirm against the driver.
			 */
			ret = port->tcpc->enable_frs(port->tcpc, true);
			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
		}

		port->nr_sink_caps = cnt;
		port->sink_cap_done = true;
		tcpm_register_sink_caps(port);

		if (port->ams == GET_SINK_CAPABILITIES)
			tcpm_set_state(port, ready_state(port), 0);
		/* Unexpected Sink Capabilities */
		else
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		break;
	case PD_DATA_VENDOR_DEF:
		tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
		break;
	case PD_DATA_BIST:
		port->bist_request = le32_to_cpu(msg->payload[0]);
		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
		break;
	case PD_DATA_ALERT:
		/* Alerts are only processed from the ready states */
		if (port->state != SRC_READY && port->state != SNK_READY)
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		else
			tcpm_handle_alert(port, msg->payload, cnt);
		break;
	case PD_DATA_BATT_STATUS:
	case PD_DATA_GET_COUNTRY_INFO:
		/* Currently unsupported */
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized data message type %#x", type);
		break;
	}
}
3350
tcpm_pps_complete(struct tcpm_port * port,int result)3351 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3352 {
3353 if (port->pps_pending) {
3354 port->pps_status = result;
3355 port->pps_pending = false;
3356 complete(&port->pps_complete);
3357 }
3358 }
3359
/*
 * tcpm_pd_ctrl_request - dispatch a received PD control message
 * @port: port that received the message
 * @msg: the received message
 * @rx_sop_type: which SOP* the message arrived on
 *
 * Called from the RX work handler with port->lock held.  Reactions are
 * keyed on both the control message type and the current port state;
 * messages that arrive in an unexpected state generally trigger a Soft
 * Reset, and unsupported types are answered with Reject (PD 2.0) or
 * Not_Supported (PD 3.0).
 */
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
				 const struct pd_message *msg,
				 enum tcpm_transmit_type rx_sop_type)
{
	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
	enum tcpm_state next_state;
	unsigned int rev = pd_header_rev_le(msg->header);

	/*
	 * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
	 * VDM AMS if waiting for VDM responses and will be handled later.
	 */
	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	switch (type) {
	case PD_CTRL_GOOD_CRC:
	case PD_CTRL_PING:
		/* Nothing to do */
		break;
	case PD_CTRL_GET_SOURCE_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
		break;
	case PD_CTRL_GET_SINK_CAP:
		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
		break;
	case PD_CTRL_GOTO_MIN:
		/* GotoMin is silently ignored */
		break;
	case PD_CTRL_PS_RDY:
		/* Partner's power supply is ready; meaning depends on state */
		switch (port->state) {
		case SNK_TRANSITION_SINK:
			if (port->vbus_present) {
				tcpm_set_current_limit(port,
						       port->req_current_limit,
						       port->req_supply_voltage);
				port->explicit_contract = true;
				tcpm_set_auto_vbus_discharge_threshold(port,
								       TYPEC_PWR_MODE_PD,
								       port->pps_data.active,
								       port->supply_voltage);
				tcpm_set_state(port, SNK_READY, 0);
			} else {
				/*
				 * Seen after power swap. Keep waiting for VBUS
				 * in a transitional state.
				 */
				tcpm_set_state(port,
					       SNK_TRANSITION_SINK_VBUS, 0);
			}
			break;
		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
			break;
		case PR_SWAP_SNK_SRC_SINK_OFF:
			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
			break;
		case VCONN_SWAP_WAIT_FOR_VCONN:
			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
			break;
		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
			break;
		default:
			/* Unexpected PS_RDY: recover via Soft Reset */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_REJECT:
	case PD_CTRL_WAIT:
	case PD_CTRL_NOT_SUPP:
		/* Negative responses: unwind whatever request was in flight */
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			/* USB PD specification, Figure 8-43 */
			if (port->explicit_contract)
				next_state = SNK_READY;
			else
				next_state = SNK_WAIT_CAPABILITIES;

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);
			tcpm_set_state(port, next_state, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* Revert data back from any requested PPS updates */
			port->pps_data.req_out_volt = port->supply_voltage;
			port->pps_data.req_op_curr = port->current_limit;
			port->pps_status = (type == PD_CTRL_WAIT ?
					    -EAGAIN : -EOPNOTSUPP);

			/* Threshold was relaxed before sending Request. Restore it back. */
			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
							       port->pps_data.active,
							       port->supply_voltage);

			tcpm_set_state(port, SNK_READY, 0);
			break;
		case DR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
			break;
		case PR_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
			break;
		case VCONN_SWAP_SEND:
			port->swap_status = (type == PD_CTRL_WAIT ?
					     -EAGAIN : -EOPNOTSUPP);
			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
			break;
		case GET_SINK_CAP:
			port->sink_cap_done = true;
			tcpm_set_state(port, ready_state(port), 0);
			break;
		/*
		 * Some port partners do not support GET_STATUS, avoid soft reset the link to
		 * prevent redundant power re-negotiation
		 */
		case GET_STATUS_SEND:
			tcpm_set_state(port, ready_state(port), 0);
			break;
		case SRC_READY:
		case SNK_READY:
			/* A NOT_SUPP in ready may be answering an in-flight VDM */
			if (port->vdm_state > VDM_STATE_READY) {
				port->vdm_state = VDM_STATE_DONE;
				if (tcpm_vdm_ams(port))
					tcpm_ams_finish(port);
				mod_vdm_delayed_work(port, 0);
				break;
			}
			fallthrough;
		default:
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_ACCEPT:
		switch (port->state) {
		case SNK_NEGOTIATE_CAPABILITIES:
			port->pps_data.active = false;
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SNK_NEGOTIATE_PPS_CAPABILITIES:
			/* Commit the requested PPS parameters */
			port->pps_data.active = true;
			port->pps_data.min_volt = port->pps_data.req_min_volt;
			port->pps_data.max_volt = port->pps_data.req_max_volt;
			port->pps_data.max_curr = port->pps_data.req_max_curr;
			port->req_supply_voltage = port->pps_data.req_out_volt;
			port->req_current_limit = port->pps_data.req_op_curr;
			power_supply_changed(port->psy);
			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
			break;
		case SOFT_RESET_SEND:
			if (port->ams == SOFT_RESET_AMS)
				tcpm_ams_finish(port);
			/*
			 * SOP' Soft Reset is done after Vconn Swap,
			 * which returns to ready state
			 */
			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
				if (rev < port->negotiated_rev_prime)
					port->negotiated_rev_prime = rev;
				tcpm_set_state(port, ready_state(port), 0);
				break;
			}
			if (port->pwr_role == TYPEC_SOURCE) {
				port->upcoming_state = SRC_SEND_CAPABILITIES;
				tcpm_ams_start(port, POWER_NEGOTIATION);
			} else {
				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
			}
			break;
		case DR_SWAP_SEND:
			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
			break;
		case PR_SWAP_SEND:
			tcpm_set_state(port, PR_SWAP_START, 0);
			break;
		case VCONN_SWAP_SEND:
			tcpm_set_state(port, VCONN_SWAP_START, 0);
			break;
		case FR_SWAP_SEND:
			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
			break;
		default:
			/* Unexpected Accept: recover via Soft Reset */
			tcpm_pd_handle_state(port,
					     port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX :
					     SNK_SOFT_RESET,
					     NONE_AMS, 0);
			break;
		}
		break;
	case PD_CTRL_SOFT_RESET:
		port->ams = SOFT_RESET_AMS;
		tcpm_set_state(port, SOFT_RESET, 0);
		break;
	case PD_CTRL_DR_SWAP:
		/*
		 * XXX
		 * 6.3.9: If an alternate mode is active, a request to swap
		 * alternate modes shall trigger a port reset.
		 */
		if (port->typec_caps.data != TYPEC_PORT_DRD) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* PD 2.0: finish Discover Identity before swapping */
			if (port->send_discover && port->negotiated_rev < PD_REV30) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_PR_SWAP:
		if (port->port_type != TYPEC_PORT_DRP) {
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		} else {
			/* PD 2.0: finish Discover Identity before swapping */
			if (port->send_discover && port->negotiated_rev < PD_REV30) {
				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
				break;
			}

			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
		}
		break;
	case PD_CTRL_VCONN_SWAP:
		/* PD 2.0: finish Discover Identity before swapping */
		if (port->send_discover && port->negotiated_rev < PD_REV30) {
			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
			break;
		}

		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
		break;
	case PD_CTRL_GET_SOURCE_CAP_EXT:
	case PD_CTRL_GET_STATUS:
	case PD_CTRL_FR_SWAP:
	case PD_CTRL_GET_PPS_STATUS:
	case PD_CTRL_GET_COUNTRY_CODES:
		/* Currently not supported */
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		break;
	case PD_CTRL_GET_REVISION:
		/* Only answerable when PD 3.x and a revision is configured */
		if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
			tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
					   REVISION_INFORMATION);
		else
			tcpm_pd_handle_msg(port,
					   port->negotiated_rev < PD_REV30 ?
					   PD_MSG_CTRL_REJECT :
					   PD_MSG_CTRL_NOT_SUPP,
					   NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port,
				   port->negotiated_rev < PD_REV30 ?
				   PD_MSG_CTRL_REJECT :
				   PD_MSG_CTRL_NOT_SUPP,
				   NONE_AMS);
		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
		break;
	}
}
3651
/*
 * tcpm_pd_ext_msg_request - dispatch a received PD extended message
 * @port: port that received the message
 * @msg: the received message
 *
 * Called from the RX work handler with port->lock held.  Only chunked
 * extended messages that fit in a single chunk are handled; everything
 * else is answered with Not_Supported.
 */
static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
				    const struct pd_message *msg)
{
	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);

	/* stopping VDM state machine if interrupted by other Messages */
	if (tcpm_vdm_ams(port)) {
		port->vdm_state = VDM_STATE_ERR_BUSY;
		tcpm_ams_finish(port);
		mod_vdm_delayed_work(port, 0);
	}

	/* Unchunked extended messages are not implemented */
	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unchunked extended messages unsupported");
		return;
	}

	/* Multi-chunk reassembly is not implemented either */
	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
		tcpm_log(port, "Chunk handling not yet supported");
		return;
	}

	switch (type) {
	case PD_EXT_STATUS:
	case PD_EXT_PPS_STATUS:
		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
			tcpm_ams_finish(port);
			tcpm_set_state(port, ready_state(port), 0);
		} else {
			/* unexpected Status or PPS_Status Message */
			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
					     NONE_AMS, 0);
		}
		break;
	case PD_EXT_SOURCE_CAP_EXT:
	case PD_EXT_GET_BATT_CAP:
	case PD_EXT_GET_BATT_STATUS:
	case PD_EXT_BATT_CAP:
	case PD_EXT_GET_MANUFACTURER_INFO:
	case PD_EXT_MANUFACTURER_INFO:
	case PD_EXT_SECURITY_REQUEST:
	case PD_EXT_SECURITY_RESPONSE:
	case PD_EXT_FW_UPDATE_REQUEST:
	case PD_EXT_FW_UPDATE_RESPONSE:
	case PD_EXT_COUNTRY_INFO:
	case PD_EXT_COUNTRY_CODES:
		/* Known but unsupported extended message types */
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		break;
	default:
		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
		tcpm_log(port, "Unrecognized extended message type %#x", type);
		break;
	}
}
3710
/*
 * tcpm_pd_rx_handler - work handler for messages queued by tcpm_pd_receive()
 *
 * Runs on the port's kthread worker.  Takes port->lock for the duration
 * of message processing, performs MessageID-based duplicate filtering and
 * data-role-mismatch detection, then dispatches to the extended/data/ctrl
 * handlers.  The event is freed here in all cases (it was allocated in
 * tcpm_pd_receive()).
 */
static void tcpm_pd_rx_handler(struct kthread_work *work)
{
	struct pd_rx_event *event = container_of(work,
						 struct pd_rx_event, work);
	const struct pd_message *msg = &event->msg;
	unsigned int cnt = pd_header_cnt_le(msg->header);
	struct tcpm_port *port = event->port;
	enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;

	mutex_lock(&port->lock);

	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
		 port->attached);

	/* Messages received while detached are logged but dropped */
	if (port->attached) {
		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
		unsigned int msgid = pd_header_msgid_le(msg->header);

		/*
		 * Drop SOP' messages if cannot receive via
		 * tcpm_can_communicate_sop_prime
		 */
		if (rx_sop_type == TCPC_TX_SOP_PRIME &&
		    !tcpm_can_communicate_sop_prime(port))
			goto done;

		/*
		 * USB PD standard, 6.6.1.2:
		 * "... if MessageID value in a received Message is the
		 * same as the stored value, the receiver shall return a
		 * GoodCRC Message with that MessageID value and drop
		 * the Message (this is a retry of an already received
		 * Message). Note: this shall not apply to the Soft_Reset
		 * Message which always has a MessageID value of zero."
		 */
		switch (rx_sop_type) {
		case TCPC_TX_SOP_PRIME:
			/* SOP' traffic keeps its own MessageID counter */
			if (msgid == port->rx_msgid_prime)
				goto done;
			port->rx_msgid_prime = msgid;
			break;
		case TCPC_TX_SOP:
		default:
			if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
				goto done;
			port->rx_msgid = msgid;
			break;
		}

		/*
		 * If both ends believe to be DFP/host, we have a data role
		 * mismatch.
		 */
		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
		    (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
			tcpm_log(port,
				 "Data role mismatch, initiating error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		} else {
			/* Dispatch on message class: extended, data, or control */
			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
				tcpm_pd_ext_msg_request(port, msg);
			else if (cnt)
				tcpm_pd_data_request(port, msg, rx_sop_type);
			else
				tcpm_pd_ctrl_request(port, msg, rx_sop_type);
		}
	}

done:
	mutex_unlock(&port->lock);
	kfree(event);	/* allocated in tcpm_pd_receive() */
}
3783
tcpm_pd_receive(struct tcpm_port * port,const struct pd_message * msg,enum tcpm_transmit_type rx_sop_type)3784 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3785 enum tcpm_transmit_type rx_sop_type)
3786 {
3787 struct pd_rx_event *event;
3788
3789 event = kzalloc(sizeof(*event), GFP_ATOMIC);
3790 if (!event)
3791 return;
3792
3793 kthread_init_work(&event->work, tcpm_pd_rx_handler);
3794 event->port = port;
3795 event->rx_sop_type = rx_sop_type;
3796 memcpy(&event->msg, msg, sizeof(*msg));
3797 kthread_queue_work(port->wq, &event->work);
3798 }
3799 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
3800
tcpm_pd_send_control(struct tcpm_port * port,enum pd_ctrl_msg_type type,enum tcpm_transmit_type tx_sop_type)3801 static int tcpm_pd_send_control(struct tcpm_port *port,
3802 enum pd_ctrl_msg_type type,
3803 enum tcpm_transmit_type tx_sop_type)
3804 {
3805 struct pd_message msg;
3806
3807 memset(&msg, 0, sizeof(msg));
3808 switch (tx_sop_type) {
3809 case TCPC_TX_SOP_PRIME:
3810 msg.header = PD_HEADER_LE(type,
3811 0, /* Cable Plug Indicator for DFP/UFP */
3812 0, /* Reserved */
3813 port->negotiated_rev,
3814 port->message_id_prime,
3815 0);
3816 break;
3817 case TCPC_TX_SOP:
3818 msg.header = PD_HEADER_LE(type,
3819 port->pwr_role,
3820 port->data_role,
3821 port->negotiated_rev,
3822 port->message_id,
3823 0);
3824 break;
3825 default:
3826 msg.header = PD_HEADER_LE(type,
3827 port->pwr_role,
3828 port->data_role,
3829 port->negotiated_rev,
3830 port->message_id,
3831 0);
3832 break;
3833 }
3834
3835 return tcpm_pd_transmit(port, tx_sop_type, &msg);
3836 }
3837
3838 /*
3839 * Send queued message without affecting state.
3840 * Return true if state machine should go back to sleep,
3841 * false otherwise.
3842 */
static bool tcpm_send_queued_message(struct tcpm_port *port)
{
	enum pd_msg_request queued_message;
	int ret;

	/*
	 * Drain the queue: sending a message may itself queue another one,
	 * so loop until queued_message stays PD_MSG_NONE.
	 */
	do {
		queued_message = port->queued_message;
		port->queued_message = PD_MSG_NONE;

		switch (queued_message) {
		case PD_MSG_CTRL_WAIT:
			tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
			break;
		case PD_MSG_CTRL_REJECT:
			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
			break;
		case PD_MSG_CTRL_NOT_SUPP:
			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
			break;
		case PD_MSG_DATA_SINK_CAP:
			/* Failure to send sink caps triggers a Soft Reset */
			ret = tcpm_pd_send_sink_caps(port);
			if (ret < 0) {
				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
				tcpm_set_state(port, SNK_SOFT_RESET, 0);
			}
			tcpm_ams_finish(port);
			break;
		case PD_MSG_DATA_SOURCE_CAP:
			ret = tcpm_pd_send_source_caps(port);
			if (ret < 0) {
				tcpm_log(port,
					 "Unable to send src caps, ret=%d",
					 ret);
				tcpm_set_state(port, SOFT_RESET_SEND, 0);
			} else if (port->pwr_role == TYPEC_SOURCE) {
				/*
				 * As Source, arm a Hard Reset in case the
				 * partner never answers with a Request.
				 */
				tcpm_ams_finish(port);
				tcpm_set_state(port, HARD_RESET_SEND,
					       PD_T_SENDER_RESPONSE);
			} else {
				tcpm_ams_finish(port);
			}
			break;
		case PD_MSG_DATA_REV:
			/* Revision send failure is logged but not fatal */
			ret = tcpm_pd_send_revision(port);
			if (ret)
				tcpm_log(port,
					 "Unable to send revision msg, ret=%d",
					 ret);
			tcpm_ams_finish(port);
			break;
		default:
			break;
		}
	} while (port->queued_message != PD_MSG_NONE);

	/*
	 * If a delayed state transition is pending and not yet due,
	 * re-arm the timer and tell the state machine to go back to sleep.
	 */
	if (port->delayed_state != INVALID_STATE) {
		if (ktime_after(port->delayed_runtime, ktime_get())) {
			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
									  ktime_get())));
			return true;
		}
		port->delayed_state = INVALID_STATE;
	}
	return false;
}
3908
tcpm_pd_check_request(struct tcpm_port * port)3909 static int tcpm_pd_check_request(struct tcpm_port *port)
3910 {
3911 u32 pdo, rdo = port->sink_request;
3912 unsigned int max, op, pdo_max, index;
3913 enum pd_pdo_type type;
3914
3915 index = rdo_index(rdo);
3916 if (!index || index > port->nr_src_pdo)
3917 return -EINVAL;
3918
3919 pdo = port->src_pdo[index - 1];
3920 type = pdo_type(pdo);
3921 switch (type) {
3922 case PDO_TYPE_FIXED:
3923 case PDO_TYPE_VAR:
3924 max = rdo_max_current(rdo);
3925 op = rdo_op_current(rdo);
3926 pdo_max = pdo_max_current(pdo);
3927
3928 if (op > pdo_max)
3929 return -EINVAL;
3930 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3931 return -EINVAL;
3932
3933 if (type == PDO_TYPE_FIXED)
3934 tcpm_log(port,
3935 "Requested %u mV, %u mA for %u / %u mA",
3936 pdo_fixed_voltage(pdo), pdo_max, op, max);
3937 else
3938 tcpm_log(port,
3939 "Requested %u -> %u mV, %u mA for %u / %u mA",
3940 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3941 pdo_max, op, max);
3942 break;
3943 case PDO_TYPE_BATT:
3944 max = rdo_max_power(rdo);
3945 op = rdo_op_power(rdo);
3946 pdo_max = pdo_max_power(pdo);
3947
3948 if (op > pdo_max)
3949 return -EINVAL;
3950 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3951 return -EINVAL;
3952 tcpm_log(port,
3953 "Requested %u -> %u mV, %u mW for %u / %u mW",
3954 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3955 pdo_max, op, max);
3956 break;
3957 default:
3958 return -EINVAL;
3959 }
3960
3961 port->op_vsafe5v = index == 1;
3962
3963 return 0;
3964 }
3965
/* Tightest common limit between a source PDO and the matching sink PDO */
#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3968
/*
 * tcpm_pd_select_pdo - choose the best (source PDO, sink PDO) pair
 * @port: port whose cached source_caps/snk_pdo tables are matched
 * @sink_pdo: out, index into port->snk_pdo of the matching sink PDO
 * @src_pdo: out, index into port->source_caps of the selected source PDO
 *
 * Scans every (source PDO, sink PDO) combination whose voltage ranges
 * nest (source range inside sink range) and picks the one offering the
 * most power, preferring higher voltage on power ties.  APDOs are not
 * selected here, but their presence sets pps_data.supported and switches
 * the power-supply type to PD_PPS as a side effect.
 *
 * Returns 0 when a match was found, -EINVAL otherwise.
 */
static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
			      int *src_pdo)
{
	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
		     min_snk_mv = 0;
	int ret = -EINVAL;

	port->pps_data.supported = false;
	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
	power_supply_changed(port->psy);

	/*
	 * Select the source PDO providing the most power which has a
	 * matching sink cap.
	 */
	for (i = 0; i < port->nr_source_caps; i++) {
		u32 pdo = port->source_caps[i];
		enum pd_pdo_type type = pdo_type(pdo);

		/* First pass: the source PDO's voltage window */
		switch (type) {
		case PDO_TYPE_FIXED:
			max_src_mv = pdo_fixed_voltage(pdo);
			min_src_mv = max_src_mv;
			break;
		case PDO_TYPE_BATT:
		case PDO_TYPE_VAR:
			max_src_mv = pdo_max_voltage(pdo);
			min_src_mv = pdo_min_voltage(pdo);
			break;
		case PDO_TYPE_APDO:
			/* PPS APDOs only flag support; they are not selected here */
			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
				port->pps_data.supported = true;
				port->usb_type =
					POWER_SUPPLY_USB_TYPE_PD_PPS;
				power_supply_changed(port->psy);
			}
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Second pass: the source PDO's power budget in mW */
		switch (type) {
		case PDO_TYPE_FIXED:
		case PDO_TYPE_VAR:
			src_ma = pdo_max_current(pdo);
			src_mw = src_ma * min_src_mv / 1000;
			break;
		case PDO_TYPE_BATT:
			src_mw = pdo_max_power(pdo);
			break;
		case PDO_TYPE_APDO:
			continue;
		default:
			tcpm_log(port, "Invalid source PDO type, ignoring");
			continue;
		}

		/* Try each of our sink PDOs against this source PDO */
		for (j = 0; j < port->nr_snk_pdo; j++) {
			pdo = port->snk_pdo[j];

			switch (pdo_type(pdo)) {
			case PDO_TYPE_FIXED:
				max_snk_mv = pdo_fixed_voltage(pdo);
				min_snk_mv = max_snk_mv;
				break;
			case PDO_TYPE_BATT:
			case PDO_TYPE_VAR:
				max_snk_mv = pdo_max_voltage(pdo);
				min_snk_mv = pdo_min_voltage(pdo);
				break;
			case PDO_TYPE_APDO:
				continue;
			default:
				tcpm_log(port, "Invalid sink PDO type, ignoring");
				continue;
			}

			/* Source voltage window must nest inside the sink's */
			if (max_src_mv <= max_snk_mv &&
			    min_src_mv >= min_snk_mv) {
				/* Prefer higher voltages if available */
				if ((src_mw == max_mw && min_src_mv > max_mv) ||
				    src_mw > max_mw) {
					*src_pdo = i;
					*sink_pdo = j;
					max_mw = src_mw;
					max_mv = min_src_mv;
					ret = 0;
				}
			}
		}
	}

	return ret;
}
4065
/*
 * tcpm_pd_select_pps_apdo - pick the PPS APDO matching the requested output
 * @port: port whose pps_data.req_* fields describe the desired operating point
 *
 * Scans the partner's source PDOs (skipping index 0 — per the PD spec
 * the first PDO is the fixed vSafe5V PDO, never an APDO) for PPS APDOs
 * whose voltage range covers req_out_volt, and selects the one yielding
 * the highest operating power at the requested current.  On success the
 * pps_data.req_{min,max}_volt, req_max_curr and (clamped) req_op_curr
 * fields are updated from the chosen APDO.
 *
 * Returns the selected source-caps index, or 0 if no suitable APDO exists.
 */
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
	unsigned int src_pdo = 0;
	u32 pdo, src;

	for (i = 1; i < port->nr_source_caps; ++i) {
		pdo = port->source_caps[i];

		switch (pdo_type(pdo)) {
		case PDO_TYPE_APDO:
			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
				tcpm_log(port, "Not PPS APDO (source), ignoring");
				continue;
			}

			/* Requested voltage must lie within the APDO's range */
			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
				continue;

			/* Achievable operating power at the requested point */
			src_ma = pdo_pps_apdo_max_current(pdo);
			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
			if (op_mw > max_temp_mw) {
				src_pdo = i;
				max_temp_mw = op_mw;
			}
			break;
		default:
			tcpm_log(port, "Not APDO type (source), ignoring");
			continue;
		}
	}

	if (src_pdo) {
		src = port->source_caps[src_pdo];

		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
		/* Clamp the requested current to what the APDO can deliver */
		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
						 port->pps_data.req_op_curr);
	}

	return src_pdo;
}
4112
/*
 * Build a Request Data Object (RDO) for the best matching non-PPS
 * (fixed/variable/battery) source PDO.
 *
 * Selects a (sink PDO, source PDO) pair via tcpm_pd_select_pdo(), caps
 * the requested current (or power, for battery PDOs) by the matching
 * sink PDO, sets RDO_CAP_MISMATCH when the offer cannot cover
 * port->operating_snk_mw, and records the requested voltage/current in
 * the port for later use.
 *
 * @port: port to build the request for
 * @rdo:  filled with the encoded RDO on success
 *
 * Returns 0 on success or a negative error code when no PDO matches or
 * the selected PDO type is invalid.
 */
static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int mv, ma, mw, flags;
	unsigned int max_ma, max_mw;
	enum pd_pdo_type type;
	u32 pdo, matching_snk_pdo;
	int src_pdo_index = 0;
	int snk_pdo_index = 0;
	int ret;

	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
	if (ret < 0)
		return ret;

	pdo = port->source_caps[src_pdo_index];
	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
	type = pdo_type(pdo);

	switch (type) {
	case PDO_TYPE_FIXED:
		mv = pdo_fixed_voltage(pdo);
		break;
	case PDO_TYPE_BATT:
	case PDO_TYPE_VAR:
		/* Use the lowest voltage of the range for the request. */
		mv = pdo_min_voltage(pdo);
		break;
	default:
		tcpm_log(port, "Invalid PDO selected!");
		return -EINVAL;
	}

	/* Select maximum available current within the sink pdo's limit */
	if (type == PDO_TYPE_BATT) {
		mw = min_power(pdo, matching_snk_pdo);
		ma = 1000 * mw / mv;
	} else {
		ma = min_current(pdo, matching_snk_pdo);
		mw = ma * mv / 1000;
	}

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	/* Set mismatch bit if offered power is less than operating power */
	max_ma = ma;
	max_mw = mw;
	if (mw < port->operating_snk_mw) {
		flags |= RDO_CAP_MISMATCH;
		/* Advertise the sink's real maximum in the max fields. */
		if (type == PDO_TYPE_BATT &&
		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
			max_mw = pdo_max_power(matching_snk_pdo);
		else if (pdo_max_current(matching_snk_pdo) >
			 pdo_max_current(pdo))
			max_ma = pdo_max_current(matching_snk_pdo);
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	/* Object position in the RDO is 1-based, hence src_pdo_index + 1. */
	if (type == PDO_TYPE_BATT) {
		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
			 src_pdo_index, mv, mw,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	} else {
		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);

		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
			 src_pdo_index, mv, ma,
			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
	}

	port->req_current_limit = ma;
	port->req_supply_voltage = mv;

	return 0;
}
4192
/*
 * Build an RDO for the selected fixed/variable/battery PDO and transmit
 * it as a PD_DATA_REQUEST message on SOP.
 *
 * Returns 0 on successful transmission, or a negative error code if no
 * usable PDO could be selected or the transmit failed.
 */
static int tcpm_pd_send_request(struct tcpm_port *port)
{
	struct pd_message msg;
	int ret;
	u32 rdo;

	ret = tcpm_pd_build_request(port, &rdo);
	if (ret < 0)
		return ret;

	/*
	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
	 * It is safer to modify the threshold here.
	 */
	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);

	memset(&msg, 0, sizeof(msg));
	/* Data message with a single data object (the RDO). */
	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
				  port->pwr_role,
				  port->data_role,
				  port->negotiated_rev,
				  port->message_id, 1);
	msg.payload[0] = cpu_to_le32(rdo);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
4219
/*
 * Build a programmable (PPS) Request Data Object for the APDO chosen by
 * tcpm_pd_select_pps_apdo().
 *
 * If the requested voltage/current pair does not reach
 * port->operating_snk_mw, first raise the current (rounded up to
 * RDO_PROG_CURR_MA_STEP), then — if the APDO's current limit is hit —
 * raise the voltage (rounded up to RDO_PROG_VOLT_MV_STEP).  Fails if
 * even the maximum voltage cannot cover the operating power.
 *
 * @port: port to build the request for
 * @rdo:  filled with the encoded programmable RDO on success
 *
 * Returns 0 on success, -EOPNOTSUPP when no PPS APDO is available, or
 * -EINVAL when the selected APDO cannot supply the operating power.
 */
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
	unsigned int src_pdo_index;

	src_pdo_index = tcpm_pd_select_pps_apdo(port);
	if (!src_pdo_index)
		return -EOPNOTSUPP;

	max_mv = port->pps_data.req_max_volt;
	max_ma = port->pps_data.req_max_curr;
	out_mv = port->pps_data.req_out_volt;
	op_ma = port->pps_data.req_op_curr;

	flags = RDO_USB_COMM | RDO_NO_SUSPEND;

	op_mw = (op_ma * out_mv) / 1000;
	if (op_mw < port->operating_snk_mw) {
		/*
		 * Try raising current to meet power needs. If that's not enough
		 * then try upping the voltage. If that's still not enough
		 * then we've obviously chosen a PPS APDO which really isn't
		 * suitable so abandon ship.
		 */
		op_ma = (port->operating_snk_mw * 1000) / out_mv;
		/* Round the division result up, then up to the next step. */
		if ((port->operating_snk_mw * 1000) % out_mv)
			++op_ma;
		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);

		if (op_ma > max_ma) {
			op_ma = max_ma;
			out_mv = (port->operating_snk_mw * 1000) / op_ma;
			if ((port->operating_snk_mw * 1000) % op_ma)
				++out_mv;
			out_mv += RDO_PROG_VOLT_MV_STEP -
				  (out_mv % RDO_PROG_VOLT_MV_STEP);

			if (out_mv > max_mv) {
				tcpm_log(port, "Invalid PPS APDO selected!");
				return -EINVAL;
			}
		}
	}

	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
		 port->polarity);

	/* Object position in the RDO is 1-based. */
	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);

	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
		 src_pdo_index, out_mv, op_ma);

	/* Remember the (possibly adjusted) values actually requested. */
	port->pps_data.req_op_curr = op_ma;
	port->pps_data.req_out_volt = out_mv;

	return 0;
}
4279
/*
 * Build a programmable (PPS) RDO and transmit it as a PD_DATA_REQUEST
 * message on SOP.
 *
 * Returns 0 on successful transmission, or a negative error code from
 * tcpm_pd_build_pps_request() / tcpm_pd_transmit().
 */
static int tcpm_pd_send_pps_request(struct tcpm_port *port)
{
	struct pd_message msg;
	int ret;
	u32 rdo;

	ret = tcpm_pd_build_pps_request(port, &rdo);
	if (ret < 0)
		return ret;

	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);

	memset(&msg, 0, sizeof(msg));
	/* Data message with a single data object (the programmable RDO). */
	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
				  port->pwr_role,
				  port->data_role,
				  port->negotiated_rev,
				  port->message_id, 1);
	msg.payload[0] = cpu_to_le32(rdo);

	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
4303
/*
 * Enable or disable VBUS sourcing through the TCPC.
 *
 * Refuses to source VBUS while the port is configured to sink charge.
 * On success, port->vbus_source tracks the new state.
 *
 * Returns 0 on success or a negative error code.
 */
static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
{
	int err;

	/* Sourcing and sinking VBUS at the same time makes no sense. */
	if (enable && port->vbus_charge)
		return -EINVAL;

	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);

	err = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
	if (err < 0)
		return err;

	port->vbus_source = enable;
	return 0;
}
4320
/*
 * Enable or disable sinking (charging from) VBUS through the TCPC.
 *
 * Refuses to sink VBUS while the port is sourcing it.  The TCPC is only
 * touched when the charge state actually changes; port->vbus_charge is
 * updated and a power-supply change event is raised either way.
 *
 * Returns 0 on success or a negative error code.
 */
static int tcpm_set_charge(struct tcpm_port *port, bool charge)
{
	int err;

	/* Cannot sink VBUS while we are the one driving it. */
	if (charge && port->vbus_source)
		return -EINVAL;

	if (charge != port->vbus_charge) {
		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
		err = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
					   charge);
		if (err < 0)
			return err;
	}

	port->vbus_charge = charge;
	power_supply_changed(port->psy);
	return 0;
}
4339
tcpm_start_toggling(struct tcpm_port * port,enum typec_cc_status cc)4340 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4341 {
4342 int ret;
4343
4344 if (!port->tcpc->start_toggling)
4345 return false;
4346
4347 tcpm_log_force(port, "Start toggling");
4348 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4349 return ret == 0;
4350 }
4351
tcpm_init_vbus(struct tcpm_port * port)4352 static int tcpm_init_vbus(struct tcpm_port *port)
4353 {
4354 int ret;
4355
4356 ret = port->tcpc->set_vbus(port->tcpc, false, false);
4357 port->vbus_source = false;
4358 port->vbus_charge = false;
4359 return ret;
4360 }
4361
tcpm_init_vconn(struct tcpm_port * port)4362 static int tcpm_init_vconn(struct tcpm_port *port)
4363 {
4364 int ret;
4365
4366 ret = port->tcpc->set_vconn(port->tcpc, false);
4367 port->vconn_role = TYPEC_SINK;
4368 return ret;
4369 }
4370
tcpm_typec_connect(struct tcpm_port * port)4371 static void tcpm_typec_connect(struct tcpm_port *port)
4372 {
4373 struct typec_partner *partner;
4374
4375 if (!port->connected) {
4376 port->connected = true;
4377 /* Make sure we don't report stale identity information */
4378 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4379 port->partner_desc.usb_pd = port->pd_capable;
4380 if (tcpm_port_is_debug(port))
4381 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4382 else if (tcpm_port_is_audio(port))
4383 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4384 else
4385 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4386 partner = typec_register_partner(port->typec_port, &port->partner_desc);
4387 if (IS_ERR(partner)) {
4388 dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4389 return;
4390 }
4391
4392 port->partner = partner;
4393 typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4394 }
4395 }
4396
/*
 * Transition the port hardware into the attached source role: set CC
 * polarity (CC2 if the sink's Rd is seen there, else CC1), enable auto
 * VBUS discharge, configure source/host roles, enable PD receive when
 * supported, source VCONN when the partner presents Ra on the unused CC
 * line, and finally drive VBUS.
 *
 * Returns 0 on success (or if already attached) or a negative error
 * code; partial setup is undone through the out_* unwind labels.
 */
static int tcpm_src_attach(struct tcpm_port *port)
{
	enum typec_cc_polarity polarity =
				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
							 : TYPEC_POLARITY_CC1;
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, polarity);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, true);

	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
			     TYPEC_SOURCE, tcpm_data_role_for_source(port));
	if (ret < 0)
		return ret;

	if (port->pd_supported) {
		ret = port->tcpc->set_pd_rx(port->tcpc, true);
		if (ret < 0)
			goto out_disable_mux;
	}

	/*
	 * USB Type-C specification, version 1.2,
	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
	 * Enable VCONN only if the non-RD port is set to RA.
	 */
	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
		ret = tcpm_set_vconn(port, true);
		if (ret < 0)
			goto out_disable_pd;
	}

	ret = tcpm_set_vbus(port, true);
	if (ret < 0)
		goto out_disable_vconn;

	/* PD capability is only known after the first successful exchange. */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	/* Schedule Discover Identity on SOP for the new contract. */
	port->send_discover = true;
	port->send_discover_prime = false;

	return 0;

out_disable_vconn:
	tcpm_set_vconn(port, false);
out_disable_pd:
	if (port->pd_supported)
		port->tcpc->set_pd_rx(port->tcpc, false);
out_disable_mux:
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	return ret;
}
4460
tcpm_typec_disconnect(struct tcpm_port * port)4461 static void tcpm_typec_disconnect(struct tcpm_port *port)
4462 {
4463 /*
4464 * Unregister plug/cable outside of port->connected because cable can
4465 * be discovered before SRC_READY/SNK_READY states where port->connected
4466 * is set.
4467 */
4468 typec_unregister_plug(port->plug_prime);
4469 typec_unregister_cable(port->cable);
4470 port->plug_prime = NULL;
4471 port->cable = NULL;
4472 if (port->connected) {
4473 if (port->partner) {
4474 typec_partner_set_usb_power_delivery(port->partner, NULL);
4475 typec_unregister_partner(port->partner);
4476 port->partner = NULL;
4477 }
4478 port->connected = false;
4479 }
4480 }
4481
tcpm_unregister_altmodes(struct tcpm_port * port)4482 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4483 {
4484 struct pd_mode_data *modep = &port->mode_data;
4485 struct pd_mode_data *modep_prime = &port->mode_data_prime;
4486 int i;
4487
4488 for (i = 0; i < modep->altmodes; i++) {
4489 typec_unregister_altmode(port->partner_altmode[i]);
4490 port->partner_altmode[i] = NULL;
4491 }
4492 for (i = 0; i < modep_prime->altmodes; i++) {
4493 typec_unregister_altmode(port->plug_prime_altmode[i]);
4494 port->plug_prime_altmode[i] = NULL;
4495 }
4496
4497 memset(modep, 0, sizeof(*modep));
4498 memset(modep_prime, 0, sizeof(*modep_prime));
4499 }
4500
/*
 * Tell the TCPC whether the partner is USB-communication capable.
 * The callback is optional; only the log entry is emitted without it.
 */
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
{
	tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));

	if (!port->tcpc->set_partner_usb_comm_capable)
		return;

	port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
}
4508
/*
 * Return the port to its disconnected baseline: abort any active AMS,
 * drop the logical connection (altmodes, partner, cable), disable PD
 * receive, VBUS, VCONN and charging, reset CC polarity and the mux,
 * and release all partner PD capability objects.
 */
static void tcpm_reset_port(struct tcpm_port *port)
{
	tcpm_enable_auto_vbus_discharge(port, false);
	port->in_ams = false;
	port->ams = NONE_AMS;
	port->vdm_sm_running = false;
	tcpm_unregister_altmodes(port);
	tcpm_typec_disconnect(port);
	port->attached = false;
	port->pd_capable = false;
	port->pps_data.supported = false;
	tcpm_set_partner_usb_comm_capable(port, false);

	/*
	 * First Rx ID should be 0; set this to a sentinel of -1 so that
	 * we can check tcpm_pd_rx_handler() if we had seen it before.
	 */
	port->rx_msgid = -1;
	port->rx_msgid_prime = -1;

	port->tcpc->set_pd_rx(port->tcpc, false);
	tcpm_init_vbus(port);	/* also disables charging */
	tcpm_init_vconn(port);
	tcpm_set_current_limit(port, 0, 0);
	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
		     TYPEC_ORIENTATION_NONE);
	tcpm_set_attached_state(port, false);
	port->try_src_count = 0;
	port->try_snk_count = 0;
	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
	power_supply_changed(port->psy);
	port->nr_sink_caps = 0;
	port->sink_cap_done = false;
	/* enable_frs is an optional TCPC operation. */
	if (port->tcpc->enable_frs)
		port->tcpc->enable_frs(port->tcpc, false);

	/* Release the partner's registered PD capability objects. */
	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
	port->partner_sink_caps = NULL;
	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
	port->partner_source_caps = NULL;
	usb_power_delivery_unregister(port->partner_pd);
	port->partner_pd = NULL;
}
4553
/*
 * Common detach path: reset the Hard Reset retry counter on a clean
 * disconnect, leave BIST test-data mode if the TCPC supports it, and
 * reset the port to its disconnected baseline.  Does nothing beyond
 * the counter reset when the port was never attached.
 */
static void tcpm_detach(struct tcpm_port *port)
{
	if (tcpm_port_is_disconnected(port))
		port->hard_reset_count = 0;

	if (!port->attached)
		return;

	/* set_bist_data is an optional TCPC operation. */
	if (port->tcpc->set_bist_data) {
		tcpm_log(port, "disable BIST MODE TESTDATA");
		port->tcpc->set_bist_data(port->tcpc, false);
	}

	tcpm_reset_port(port);
}
4569
/* Source-side detach; currently identical to the common detach path. */
static void tcpm_src_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4574
/*
 * Transition the port hardware into the attached sink role: pick CC
 * polarity from whichever CC line is terminated, enable auto VBUS
 * discharge and configure sink/device roles.
 *
 * Returns 0 on success (or if already attached) or a negative error
 * code.
 */
static int tcpm_snk_attach(struct tcpm_port *port)
{
	int ret;

	if (port->attached)
		return 0;

	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
	if (ret < 0)
		return ret;

	tcpm_enable_auto_vbus_discharge(port, true);

	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
			     TYPEC_SINK, tcpm_data_role_for_sink(port));
	if (ret < 0)
		return ret;

	/* PD capability is only known after the first successful exchange. */
	port->pd_capable = false;

	port->partner = NULL;

	port->attached = true;
	/* Schedule Discover Identity on SOP for the new contract. */
	port->send_discover = true;
	port->send_discover_prime = false;

	return 0;
}
4604
/* Sink-side detach; currently identical to the common detach path. */
static void tcpm_snk_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4609
/*
 * Attach as an audio or debug accessory: derive power/data roles from
 * the current CC state, select the matching mux mode (TYPEC_MODE_AUDIO
 * or TYPEC_MODE_DEBUG) and register the Type-C partner.
 *
 * Returns 0 on success (or if already attached) or a negative error
 * code from tcpm_set_roles().
 */
static int tcpm_acc_attach(struct tcpm_port *port)
{
	int ret;
	enum typec_role role;
	enum typec_data_role data;
	int state = TYPEC_STATE_USB;

	if (port->attached)
		return 0;

	role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
	data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
				       : tcpm_data_role_for_source(port);

	if (tcpm_port_is_audio(port))
		state = TYPEC_MODE_AUDIO;

	/* Debug takes precedence if both accessory checks match. */
	if (tcpm_port_is_debug(port))
		state = TYPEC_MODE_DEBUG;

	ret = tcpm_set_roles(port, true, state, role, data);
	if (ret < 0)
		return ret;

	port->partner = NULL;

	tcpm_typec_connect(port);

	port->attached = true;

	return 0;
}
4642
/* Accessory detach; currently identical to the common detach path. */
static void tcpm_acc_detach(struct tcpm_port *port)
{
	tcpm_detach(port);
}
4647
/*
 * Pick the next state after a failed exchange that warrants a Hard
 * Reset: retry while the counter allows it, escalate to error recovery
 * for a PD-capable partner, otherwise fall back to a state matching
 * the current power role.
 */
static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
{
	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
		return HARD_RESET_SEND;
	if (port->pd_capable)
		return ERROR_RECOVERY;
	if (port->pwr_role == TYPEC_SOURCE)
		return SRC_UNATTACHED;
	/* Non-PD partner while still waiting for caps: sink is usable as-is. */
	if (port->state == SNK_WAIT_CAPABILITIES ||
	    port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
		return SNK_READY;
	return SNK_UNATTACHED;
}
4661
unattached_state(struct tcpm_port * port)4662 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4663 {
4664 if (port->port_type == TYPEC_PORT_DRP) {
4665 if (port->pwr_role == TYPEC_SOURCE)
4666 return SRC_UNATTACHED;
4667 else
4668 return SNK_UNATTACHED;
4669 } else if (port->port_type == TYPEC_PORT_SRC) {
4670 return SRC_UNATTACHED;
4671 }
4672
4673 return SNK_UNATTACHED;
4674 }
4675
tcpm_swap_complete(struct tcpm_port * port,int result)4676 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4677 {
4678 if (port->swap_pending) {
4679 port->swap_status = result;
4680 port->swap_pending = false;
4681 port->non_pd_role_swap = false;
4682 complete(&port->swap_complete);
4683 }
4684 }
4685
tcpm_get_pwr_opmode(enum typec_cc_status cc)4686 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4687 {
4688 switch (cc) {
4689 case TYPEC_CC_RP_1_5:
4690 return TYPEC_PWR_MODE_1_5A;
4691 case TYPEC_CC_RP_3_0:
4692 return TYPEC_PWR_MODE_3_0A;
4693 case TYPEC_CC_RP_DEF:
4694 default:
4695 return TYPEC_PWR_MODE_USB;
4696 }
4697 }
4698
tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)4699 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4700 {
4701 switch (opmode) {
4702 case TYPEC_PWR_MODE_USB:
4703 return TYPEC_CC_RP_DEF;
4704 case TYPEC_PWR_MODE_1_5A:
4705 return TYPEC_CC_RP_1_5;
4706 case TYPEC_PWR_MODE_3_0A:
4707 case TYPEC_PWR_MODE_PD:
4708 default:
4709 return TYPEC_CC_RP_3_0;
4710 }
4711 }
4712
tcpm_set_initial_svdm_version(struct tcpm_port * port)4713 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4714 {
4715 if (!port->partner)
4716 return;
4717
4718 switch (port->negotiated_rev) {
4719 case PD_REV30:
4720 break;
4721 /*
4722 * 6.4.4.2.3 Structured VDM Version
4723 * 2.0 states "At this time, there is only one version (1.0) defined.
4724 * This field Shall be set to zero to indicate Version 1.0."
4725 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4726 * To ensure that we follow the Power Delivery revision we are currently
4727 * operating on, downgrade the SVDM version to the highest one supported
4728 * by the Power Delivery revision.
4729 */
4730 case PD_REV20:
4731 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4732 break;
4733 default:
4734 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4735 break;
4736 }
4737 }
4738
tcpm_set_initial_negotiated_rev(struct tcpm_port * port)4739 static void tcpm_set_initial_negotiated_rev(struct tcpm_port *port)
4740 {
4741 switch (port->pd_rev.rev_major) {
4742 case PD_CAP_REV10:
4743 port->negotiated_rev = PD_REV10;
4744 break;
4745 case PD_CAP_REV20:
4746 port->negotiated_rev = PD_REV20;
4747 break;
4748 case PD_CAP_REV30:
4749 port->negotiated_rev = PD_REV30;
4750 break;
4751 default:
4752 port->negotiated_rev = PD_MAX_REV;
4753 break;
4754 }
4755 port->negotiated_rev_prime = port->negotiated_rev;
4756 }
4757
run_state_machine(struct tcpm_port * port)4758 static void run_state_machine(struct tcpm_port *port)
4759 {
4760 int ret;
4761 enum typec_pwr_opmode opmode;
4762 unsigned int msecs;
4763 enum tcpm_state upcoming_state;
4764
4765 if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4766 port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4767 port->state == SRC_UNATTACHED) ||
4768 (port->enter_state == SNK_ATTACH_WAIT &&
4769 port->state == SNK_UNATTACHED) ||
4770 (port->enter_state == SNK_DEBOUNCED &&
4771 port->state == SNK_UNATTACHED));
4772
4773 port->enter_state = port->state;
4774 switch (port->state) {
4775 case TOGGLING:
4776 break;
4777 case CHECK_CONTAMINANT:
4778 port->tcpc->check_contaminant(port->tcpc);
4779 break;
4780 /* SRC states */
4781 case SRC_UNATTACHED:
4782 if (!port->non_pd_role_swap)
4783 tcpm_swap_complete(port, -ENOTCONN);
4784 tcpm_src_detach(port);
4785 if (port->potential_contaminant) {
4786 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4787 break;
4788 }
4789 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4790 tcpm_set_state(port, TOGGLING, 0);
4791 break;
4792 }
4793 tcpm_set_cc(port, tcpm_rp_cc(port));
4794 if (port->port_type == TYPEC_PORT_DRP)
4795 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4796 break;
4797 case SRC_ATTACH_WAIT:
4798 if (tcpm_port_is_debug(port))
4799 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4800 port->timings.cc_debounce_time);
4801 else if (tcpm_port_is_audio(port))
4802 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4803 port->timings.cc_debounce_time);
4804 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4805 tcpm_set_state(port,
4806 tcpm_try_snk(port) ? SNK_TRY
4807 : SRC_ATTACHED,
4808 port->timings.cc_debounce_time);
4809 break;
4810
4811 case SNK_TRY:
4812 port->try_snk_count++;
4813 /*
4814 * Requirements:
4815 * - Do not drive vconn or vbus
4816 * - Terminate CC pins (both) to Rd
4817 * Action:
4818 * - Wait for tDRPTry (PD_T_DRP_TRY).
4819 * Until then, ignore any state changes.
4820 */
4821 tcpm_set_cc(port, TYPEC_CC_RD);
4822 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4823 break;
4824 case SNK_TRY_WAIT:
4825 if (tcpm_port_is_sink(port)) {
4826 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4827 } else {
4828 tcpm_set_state(port, SRC_TRYWAIT, 0);
4829 port->max_wait = 0;
4830 }
4831 break;
4832 case SNK_TRY_WAIT_DEBOUNCE:
4833 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4834 PD_T_TRY_CC_DEBOUNCE);
4835 break;
4836 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4837 if (port->vbus_present && tcpm_port_is_sink(port))
4838 tcpm_set_state(port, SNK_ATTACHED, 0);
4839 else
4840 port->max_wait = 0;
4841 break;
4842 case SRC_TRYWAIT:
4843 tcpm_set_cc(port, tcpm_rp_cc(port));
4844 if (port->max_wait == 0) {
4845 port->max_wait = jiffies +
4846 msecs_to_jiffies(PD_T_DRP_TRY);
4847 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4848 PD_T_DRP_TRY);
4849 } else {
4850 if (time_is_after_jiffies(port->max_wait))
4851 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4852 jiffies_to_msecs(port->max_wait -
4853 jiffies));
4854 else
4855 tcpm_set_state(port, SNK_UNATTACHED, 0);
4856 }
4857 break;
4858 case SRC_TRYWAIT_DEBOUNCE:
4859 tcpm_set_state(port, SRC_ATTACHED, port->timings.cc_debounce_time);
4860 break;
4861 case SRC_TRYWAIT_UNATTACHED:
4862 tcpm_set_state(port, SNK_UNATTACHED, 0);
4863 break;
4864
4865 case SRC_ATTACHED:
4866 ret = tcpm_src_attach(port);
4867 tcpm_set_state(port, SRC_UNATTACHED,
4868 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4869 break;
4870 case SRC_STARTUP:
4871 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4872 typec_set_pwr_opmode(port->typec_port, opmode);
4873 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4874 port->caps_count = 0;
4875 tcpm_set_initial_negotiated_rev(port);
4876 port->message_id = 0;
4877 port->message_id_prime = 0;
4878 port->rx_msgid = -1;
4879 port->rx_msgid_prime = -1;
4880 port->explicit_contract = false;
4881 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4882 if (port->ams == POWER_ROLE_SWAP ||
4883 port->ams == FAST_ROLE_SWAP)
4884 tcpm_ams_finish(port);
4885 if (!port->pd_supported) {
4886 tcpm_set_state(port, SRC_READY, 0);
4887 break;
4888 }
4889 port->upcoming_state = SRC_SEND_CAPABILITIES;
4890 tcpm_ams_start(port, POWER_NEGOTIATION);
4891 break;
4892 case SRC_SEND_CAPABILITIES:
4893 port->caps_count++;
4894 if (port->caps_count > PD_N_CAPS_COUNT) {
4895 tcpm_set_state(port, SRC_READY, 0);
4896 break;
4897 }
4898 ret = tcpm_pd_send_source_caps(port);
4899 if (ret < 0) {
4900 if (tcpm_can_communicate_sop_prime(port) &&
4901 IS_ERR_OR_NULL(port->cable))
4902 tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4903 else
4904 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4905 PD_T_SEND_SOURCE_CAP);
4906 } else {
4907 /*
4908 * Per standard, we should clear the reset counter here.
4909 * However, that can result in state machine hang-ups.
4910 * Reset it only in READY state to improve stability.
4911 */
4912 /* port->hard_reset_count = 0; */
4913 port->caps_count = 0;
4914 port->pd_capable = true;
4915 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4916 PD_T_SENDER_RESPONSE);
4917 }
4918 break;
4919 case SRC_SEND_CAPABILITIES_TIMEOUT:
4920 /*
4921 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4922 *
4923 * PD 2.0 sinks are supposed to accept src-capabilities with a
4924 * 3.0 header and simply ignore any src PDOs which the sink does
4925 * not understand such as PPS but some 2.0 sinks instead ignore
4926 * the entire PD_DATA_SOURCE_CAP message, causing contract
4927 * negotiation to fail.
4928 *
4929 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4930 * sending src-capabilities with a lower PD revision to
4931 * make these broken sinks work.
4932 */
4933 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4934 tcpm_set_state(port, HARD_RESET_SEND, 0);
4935 } else if (port->negotiated_rev > PD_REV20) {
4936 port->negotiated_rev--;
4937 port->hard_reset_count = 0;
4938 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4939 } else {
4940 tcpm_set_state(port, hard_reset_state(port), 0);
4941 }
4942 break;
4943 case SRC_NEGOTIATE_CAPABILITIES:
4944 ret = tcpm_pd_check_request(port);
4945 if (ret < 0) {
4946 tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4947 if (!port->explicit_contract) {
4948 tcpm_set_state(port,
4949 SRC_WAIT_NEW_CAPABILITIES, 0);
4950 } else {
4951 tcpm_set_state(port, SRC_READY, 0);
4952 }
4953 } else {
4954 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4955 tcpm_set_partner_usb_comm_capable(port,
4956 !!(port->sink_request & RDO_USB_COMM));
4957 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4958 PD_T_SRC_TRANSITION);
4959 }
4960 break;
4961 case SRC_TRANSITION_SUPPLY:
4962 /* XXX: regulator_set_voltage(vbus, ...) */
4963 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4964 port->explicit_contract = true;
4965 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4966 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4967 tcpm_set_state_cond(port, SRC_READY, 0);
4968 break;
4969 case SRC_READY:
4970 #if 1
4971 port->hard_reset_count = 0;
4972 #endif
4973 port->try_src_count = 0;
4974
4975 tcpm_swap_complete(port, 0);
4976 tcpm_typec_connect(port);
4977
4978 if (port->ams != NONE_AMS)
4979 tcpm_ams_finish(port);
4980 if (port->next_ams != NONE_AMS) {
4981 port->ams = port->next_ams;
4982 port->next_ams = NONE_AMS;
4983 }
4984
4985 /*
4986 * If previous AMS is interrupted, switch to the upcoming
4987 * state.
4988 */
4989 if (port->upcoming_state != INVALID_STATE) {
4990 upcoming_state = port->upcoming_state;
4991 port->upcoming_state = INVALID_STATE;
4992 tcpm_set_state(port, upcoming_state, 0);
4993 break;
4994 }
4995
4996 /*
4997 * 6.4.4.3.1 Discover Identity
4998 * "The Discover Identity Command Shall only be sent to SOP when there is an
4999 * Explicit Contract."
5000 *
5001 * Discover Identity on SOP' should be discovered prior to the
5002 * ready state, but if done after a Vconn Swap following Discover
5003 * Identity on SOP then the discovery process can be run here
5004 * as well.
5005 */
5006 if (port->explicit_contract) {
5007 if (port->send_discover_prime) {
5008 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5009 } else {
5010 port->tx_sop_type = TCPC_TX_SOP;
5011 tcpm_set_initial_svdm_version(port);
5012 }
5013 mod_send_discover_delayed_work(port, 0);
5014 } else {
5015 port->send_discover = false;
5016 port->send_discover_prime = false;
5017 }
5018
5019 /*
5020 * 6.3.5
5021 * Sending ping messages is not necessary if
5022 * - the source operates at vSafe5V
5023 * or
5024 * - The system is not operating in PD mode
5025 * or
5026 * - Both partners are connected using a Type-C connector
5027 *
5028 * There is no actual need to send PD messages since the local
5029 * port type-c and the spec does not clearly say whether PD is
5030 * possible when type-c is connected to Type-A/B
5031 */
5032 break;
5033 case SRC_WAIT_NEW_CAPABILITIES:
5034 /* Nothing to do... */
5035 break;
5036
5037 /* SNK states */
5038 case SNK_UNATTACHED:
5039 if (!port->non_pd_role_swap)
5040 tcpm_swap_complete(port, -ENOTCONN);
5041 tcpm_pps_complete(port, -ENOTCONN);
5042 tcpm_snk_detach(port);
5043 if (port->potential_contaminant) {
5044 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
5045 break;
5046 }
5047 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
5048 tcpm_set_state(port, TOGGLING, 0);
5049 break;
5050 }
5051 tcpm_set_cc(port, TYPEC_CC_RD);
5052 if (port->port_type == TYPEC_PORT_DRP)
5053 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
5054 break;
5055 case SNK_ATTACH_WAIT:
5056 if (tcpm_port_is_debug(port))
5057 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
5058 PD_T_CC_DEBOUNCE);
5059 else if (tcpm_port_is_audio(port))
5060 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
5061 PD_T_CC_DEBOUNCE);
5062 else if ((port->cc1 == TYPEC_CC_OPEN &&
5063 port->cc2 != TYPEC_CC_OPEN) ||
5064 (port->cc1 != TYPEC_CC_OPEN &&
5065 port->cc2 == TYPEC_CC_OPEN))
5066 tcpm_set_state(port, SNK_DEBOUNCED,
5067 port->timings.cc_debounce_time);
5068 else if (tcpm_port_is_disconnected(port))
5069 tcpm_set_state(port, SNK_UNATTACHED,
5070 PD_T_PD_DEBOUNCE);
5071 break;
5072 case SNK_DEBOUNCED:
5073 if (tcpm_port_is_disconnected(port))
5074 tcpm_set_state(port, SNK_UNATTACHED,
5075 PD_T_PD_DEBOUNCE);
5076 else if (tcpm_port_is_debug(port))
5077 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
5078 PD_T_CC_DEBOUNCE);
5079 else if (tcpm_port_is_audio(port))
5080 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
5081 PD_T_CC_DEBOUNCE);
5082 else if (port->vbus_present)
5083 tcpm_set_state(port,
5084 tcpm_try_src(port) ? SRC_TRY
5085 : SNK_ATTACHED,
5086 0);
5087 break;
5088 case SRC_TRY:
5089 port->try_src_count++;
5090 tcpm_set_cc(port, tcpm_rp_cc(port));
5091 port->max_wait = 0;
5092 tcpm_set_state(port, SRC_TRY_WAIT, 0);
5093 break;
5094 case SRC_TRY_WAIT:
5095 if (port->max_wait == 0) {
5096 port->max_wait = jiffies +
5097 msecs_to_jiffies(PD_T_DRP_TRY);
5098 msecs = PD_T_DRP_TRY;
5099 } else {
5100 if (time_is_after_jiffies(port->max_wait))
5101 msecs = jiffies_to_msecs(port->max_wait -
5102 jiffies);
5103 else
5104 msecs = 0;
5105 }
5106 tcpm_set_state(port, SNK_TRYWAIT, msecs);
5107 break;
5108 case SRC_TRY_DEBOUNCE:
5109 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
5110 break;
5111 case SNK_TRYWAIT:
5112 tcpm_set_cc(port, TYPEC_CC_RD);
5113 tcpm_set_state(port, SNK_TRYWAIT_VBUS, port->timings.cc_debounce_time);
5114 break;
5115 case SNK_TRYWAIT_VBUS:
5116 /*
5117 * TCPM stays in this state indefinitely until VBUS
5118 * is detected as long as Rp is not detected for
5119 * more than a time period of tPDDebounce.
5120 */
5121 if (port->vbus_present && tcpm_port_is_sink(port)) {
5122 tcpm_set_state(port, SNK_ATTACHED, 0);
5123 break;
5124 }
5125 if (!tcpm_port_is_sink(port))
5126 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5127 break;
5128 case SNK_TRYWAIT_DEBOUNCE:
5129 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
5130 break;
5131 case SNK_ATTACHED:
5132 ret = tcpm_snk_attach(port);
5133 if (ret < 0)
5134 tcpm_set_state(port, SNK_UNATTACHED, 0);
5135 else
5136 /*
5137 * For Type C port controllers that use Battery Charging
5138 * Detection (based on BCv1.2 spec) to detect USB
5139 * charger type, add a delay of "snk_bc12_cmpletion_time"
5140 * before transitioning to SNK_STARTUP to allow BC1.2
5141 * detection to complete before PD is eventually enabled
5142 * in later states.
5143 */
5144 tcpm_set_state(port, SNK_STARTUP,
5145 port->timings.snk_bc12_cmpletion_time);
5146 break;
5147 case SNK_STARTUP:
5148 opmode = tcpm_get_pwr_opmode(port->polarity ?
5149 port->cc2 : port->cc1);
5150 typec_set_pwr_opmode(port->typec_port, opmode);
5151 port->pwr_opmode = TYPEC_PWR_MODE_USB;
5152 tcpm_set_initial_negotiated_rev(port);
5153 port->message_id = 0;
5154 port->message_id_prime = 0;
5155 port->rx_msgid = -1;
5156 port->rx_msgid_prime = -1;
5157 port->explicit_contract = false;
5158
5159 if (port->ams == POWER_ROLE_SWAP ||
5160 port->ams == FAST_ROLE_SWAP)
5161 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
5162 tcpm_ams_finish(port);
5163
5164 tcpm_set_state(port, SNK_DISCOVERY, 0);
5165 break;
5166 case SNK_DISCOVERY:
5167 if (port->vbus_present) {
5168 u32 current_lim = tcpm_get_current_limit(port);
5169
5170 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
5171 current_lim = PD_P_SNK_STDBY_MW / 5;
5172 tcpm_set_current_limit(port, current_lim, 5000);
5173 /* Not sink vbus if operational current is 0mA */
5174 tcpm_set_charge(port, !port->pd_supported ||
5175 pdo_max_current(port->snk_pdo[0]));
5176
5177 if (!port->pd_supported)
5178 tcpm_set_state(port, SNK_READY, 0);
5179 else
5180 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5181 break;
5182 }
5183 /*
5184 * For DRP, timeouts differ. Also, handling is supposed to be
5185 * different and much more complex (dead battery detection;
5186 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5187 */
5188 tcpm_set_state(port, hard_reset_state(port),
5189 port->port_type == TYPEC_PORT_DRP ?
5190 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5191 break;
5192 case SNK_DISCOVERY_DEBOUNCE:
5193 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5194 port->timings.cc_debounce_time);
5195 break;
5196 case SNK_DISCOVERY_DEBOUNCE_DONE:
5197 if (!tcpm_port_is_disconnected(port) &&
5198 tcpm_port_is_sink(port) &&
5199 ktime_after(port->delayed_runtime, ktime_get())) {
5200 tcpm_set_state(port, SNK_DISCOVERY,
5201 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5202 break;
5203 }
5204 tcpm_set_state(port, unattached_state(port), 0);
5205 break;
5206 case SNK_WAIT_CAPABILITIES:
5207 ret = port->tcpc->set_pd_rx(port->tcpc, true);
5208 if (ret < 0) {
5209 tcpm_set_state(port, SNK_READY, 0);
5210 break;
5211 }
5212 /*
5213 * If VBUS has never been low, and we time out waiting
5214 * for source cap, try a soft reset first, in case we
5215 * were already in a stable contract before this boot.
5216 * Do this only once.
5217 */
5218 if (port->vbus_never_low) {
5219 port->vbus_never_low = false;
5220 upcoming_state = SNK_SOFT_RESET;
5221 } else {
5222 if (!port->self_powered)
5223 upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
5224 else
5225 upcoming_state = hard_reset_state(port);
5226 }
5227
5228 tcpm_set_state(port, upcoming_state,
5229 port->timings.sink_wait_cap_time);
5230 break;
5231 case SNK_WAIT_CAPABILITIES_TIMEOUT:
5232 /*
5233 * There are some USB PD sources in the field, which do not
5234 * properly implement the specification and fail to start
5235 * sending Source Capability messages after a soft reset. The
5236 * specification suggests to do a hard reset when no Source
5237 * capability message is received within PD_T_SINK_WAIT_CAP,
	 * but that might effectively kill the machine's power source.
5239 *
5240 * This slightly diverges from the specification and tries to
5241 * recover from this by explicitly asking for the capabilities
5242 * using the Get_Source_Cap control message before falling back
5243 * to a hard reset. The control message should also be supported
5244 * and handled by all USB PD source and dual role devices
5245 * according to the specification.
5246 */
5247 if (tcpm_pd_send_control(port, PD_CTRL_GET_SOURCE_CAP, TCPC_TX_SOP))
5248 tcpm_set_state_cond(port, hard_reset_state(port), 0);
5249 else
5250 tcpm_set_state(port, hard_reset_state(port),
5251 port->timings.sink_wait_cap_time);
5252 break;
5253 case SNK_NEGOTIATE_CAPABILITIES:
5254 port->pd_capable = true;
5255 tcpm_set_partner_usb_comm_capable(port,
5256 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5257 port->hard_reset_count = 0;
5258 ret = tcpm_pd_send_request(port);
5259 if (ret < 0) {
5260 /* Restore back to the original state */
5261 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5262 port->pps_data.active,
5263 port->supply_voltage);
5264 /* Let the Source send capabilities again. */
5265 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5266 } else {
5267 tcpm_set_state_cond(port, hard_reset_state(port),
5268 PD_T_SENDER_RESPONSE);
5269 }
5270 break;
5271 case SNK_NEGOTIATE_PPS_CAPABILITIES:
5272 ret = tcpm_pd_send_pps_request(port);
5273 if (ret < 0) {
5274 /* Restore back to the original state */
5275 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5276 port->pps_data.active,
5277 port->supply_voltage);
5278 port->pps_status = ret;
5279 /*
5280 * If this was called due to updates to sink
5281 * capabilities, and pps is no longer valid, we should
5282 * safely fall back to a standard PDO.
5283 */
5284 if (port->update_sink_caps)
5285 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5286 else
5287 tcpm_set_state(port, SNK_READY, 0);
5288 } else {
5289 tcpm_set_state_cond(port, hard_reset_state(port),
5290 PD_T_SENDER_RESPONSE);
5291 }
5292 break;
5293 case SNK_TRANSITION_SINK:
5294 /* From the USB PD spec:
5295 * "The Sink Shall transition to Sink Standby before a positive or
5296 * negative voltage transition of VBUS. During Sink Standby
5297 * the Sink Shall reduce its power draw to pSnkStdby."
5298 *
5299 * This is not applicable to PPS though as the port can continue
5300 * to draw negotiated power without switching to standby.
5301 */
5302 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5303 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5304 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5305
5306 tcpm_log(port, "Setting standby current %u mV @ %u mA",
5307 port->supply_voltage, stdby_ma);
5308 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
5309 }
5310 fallthrough;
5311 case SNK_TRANSITION_SINK_VBUS:
5312 tcpm_set_state(port, hard_reset_state(port),
5313 PD_T_PS_TRANSITION);
5314 break;
5315 case SNK_READY:
5316 port->try_snk_count = 0;
5317 port->update_sink_caps = false;
5318 if (port->explicit_contract) {
5319 typec_set_pwr_opmode(port->typec_port,
5320 TYPEC_PWR_MODE_PD);
5321 port->pwr_opmode = TYPEC_PWR_MODE_PD;
5322 }
5323
5324 if (!port->pd_capable && port->slow_charger_loop)
5325 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5326 tcpm_swap_complete(port, 0);
5327 tcpm_typec_connect(port);
5328 if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5329 mod_enable_frs_delayed_work(port, 0);
5330 tcpm_pps_complete(port, port->pps_status);
5331
5332 if (port->ams != NONE_AMS)
5333 tcpm_ams_finish(port);
5334 if (port->next_ams != NONE_AMS) {
5335 port->ams = port->next_ams;
5336 port->next_ams = NONE_AMS;
5337 }
5338
5339 /*
5340 * If previous AMS is interrupted, switch to the upcoming
5341 * state.
5342 */
5343 if (port->upcoming_state != INVALID_STATE) {
5344 upcoming_state = port->upcoming_state;
5345 port->upcoming_state = INVALID_STATE;
5346 tcpm_set_state(port, upcoming_state, 0);
5347 break;
5348 }
5349
5350 /*
5351 * 6.4.4.3.1 Discover Identity
5352 * "The Discover Identity Command Shall only be sent to SOP when there is an
5353 * Explicit Contract."
5354 *
5355 * Discover Identity on SOP' should be discovered prior to the
5356 * ready state, but if done after a Vconn Swap following Discover
5357 * Identity on SOP then the discovery process can be run here
5358 * as well.
5359 */
5360 if (port->explicit_contract) {
5361 if (port->send_discover_prime) {
5362 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5363 } else {
5364 port->tx_sop_type = TCPC_TX_SOP;
5365 tcpm_set_initial_svdm_version(port);
5366 }
5367 mod_send_discover_delayed_work(port, 0);
5368 } else {
5369 port->send_discover = false;
5370 port->send_discover_prime = false;
5371 }
5372
5373 power_supply_changed(port->psy);
5374 break;
5375
5376 /* Accessory states */
5377 case ACC_UNATTACHED:
5378 tcpm_acc_detach(port);
5379 if (port->port_type == TYPEC_PORT_SRC)
5380 tcpm_set_state(port, SRC_UNATTACHED, 0);
5381 else
5382 tcpm_set_state(port, SNK_UNATTACHED, 0);
5383 break;
5384 case DEBUG_ACC_ATTACHED:
5385 case AUDIO_ACC_ATTACHED:
5386 ret = tcpm_acc_attach(port);
5387 if (ret < 0)
5388 tcpm_set_state(port, ACC_UNATTACHED, 0);
5389 break;
5390 case DEBUG_ACC_DEBOUNCE:
5391 case AUDIO_ACC_DEBOUNCE:
5392 tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
5393 break;
5394
5395 /* Hard_Reset states */
5396 case HARD_RESET_SEND:
5397 if (port->ams != NONE_AMS)
5398 tcpm_ams_finish(port);
5399 if (!port->self_powered && port->port_type == TYPEC_PORT_SNK)
5400 dev_err(port->dev, "Initiating hard-reset, which might result in machine power-loss.\n");
5401 /*
5402 * State machine will be directed to HARD_RESET_START,
5403 * thus set upcoming_state to INVALID_STATE.
5404 */
5405 port->upcoming_state = INVALID_STATE;
5406 tcpm_ams_start(port, HARD_RESET);
5407 break;
5408 case HARD_RESET_START:
5409 port->sink_cap_done = false;
5410 if (port->tcpc->enable_frs)
5411 port->tcpc->enable_frs(port->tcpc, false);
5412 port->hard_reset_count++;
5413 port->tcpc->set_pd_rx(port->tcpc, false);
5414 tcpm_unregister_altmodes(port);
5415 port->nr_sink_caps = 0;
5416 port->send_discover = true;
5417 port->send_discover_prime = false;
5418 if (port->pwr_role == TYPEC_SOURCE)
5419 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5420 PD_T_PS_HARD_RESET);
5421 else
5422 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5423 break;
5424 case SRC_HARD_RESET_VBUS_OFF:
5425 /*
5426 * 7.1.5 Response to Hard Resets
5427 * Hard Reset Signaling indicates a communication failure has occurred and the
5428 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5429 * drive VBUS to vSafe0V as shown in Figure 7-9.
5430 */
5431 tcpm_set_vconn(port, false);
5432 tcpm_set_vbus(port, false);
5433 tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
5434 tcpm_data_role_for_source(port));
5435 /*
5436 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5437 * PD_T_SRC_RECOVER before turning vbus back on.
5438 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5439 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5440 * tells the Device Policy Manager to instruct the power supply to perform a
5441 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5442 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5443 * re-establish communication with the Sink and resume USB Default Operation.
5444 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5445 */
5446 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
5447 break;
5448 case SRC_HARD_RESET_VBUS_ON:
5449 tcpm_set_vconn(port, true);
5450 tcpm_set_vbus(port, true);
5451 if (port->ams == HARD_RESET)
5452 tcpm_ams_finish(port);
5453 if (port->pd_supported)
5454 port->tcpc->set_pd_rx(port->tcpc, true);
5455 tcpm_set_attached_state(port, true);
5456 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5457 break;
5458 case SNK_HARD_RESET_SINK_OFF:
5459 /* Do not discharge/disconnect during hard reset */
5460 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5461 memset(&port->pps_data, 0, sizeof(port->pps_data));
5462 tcpm_set_vconn(port, false);
5463 if (port->pd_capable)
5464 tcpm_set_charge(port, false);
5465 tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
5466 tcpm_data_role_for_sink(port));
5467 /*
5468 * VBUS may or may not toggle, depending on the adapter.
5469 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5470 * directly after timeout.
5471 */
5472 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5473 break;
5474 case SNK_HARD_RESET_WAIT_VBUS:
5475 if (port->ams == HARD_RESET)
5476 tcpm_ams_finish(port);
5477 /* Assume we're disconnected if VBUS doesn't come back. */
5478 tcpm_set_state(port, SNK_UNATTACHED,
5479 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5480 break;
5481 case SNK_HARD_RESET_SINK_ON:
5482 /* Note: There is no guarantee that VBUS is on in this state */
5483 /*
5484 * XXX:
5485 * The specification suggests that dual mode ports in sink
5486 * mode should transition to state PE_SRC_Transition_to_default.
5487 * See USB power delivery specification chapter 8.3.3.6.1.3.
5488 * This would mean to
5489 * - turn off VCONN, reset power supply
5490 * - request hardware reset
5491 * - turn on VCONN
5492 * - Transition to state PE_Src_Startup
5493 * SNK only ports shall transition to state Snk_Startup
5494 * (see chapter 8.3.3.3.8).
5495 * Similar, dual-mode ports in source mode should transition
5496 * to PE_SNK_Transition_to_default.
5497 */
5498 if (port->pd_capable) {
5499 tcpm_set_current_limit(port,
5500 tcpm_get_current_limit(port),
5501 5000);
5502 /* Not sink vbus if operational current is 0mA */
5503 tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5504 }
5505 if (port->ams == HARD_RESET)
5506 tcpm_ams_finish(port);
5507 tcpm_set_attached_state(port, true);
5508 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5509 tcpm_set_state(port, SNK_STARTUP, 0);
5510 break;
5511
5512 /* Soft_Reset states */
5513 case SOFT_RESET:
5514 port->message_id = 0;
5515 port->rx_msgid = -1;
5516 /* remove existing capabilities */
5517 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5518 port->partner_source_caps = NULL;
5519 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5520 tcpm_ams_finish(port);
5521 if (port->pwr_role == TYPEC_SOURCE) {
5522 port->upcoming_state = SRC_SEND_CAPABILITIES;
5523 tcpm_ams_start(port, POWER_NEGOTIATION);
5524 } else {
5525 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5526 }
5527 break;
5528 case SRC_SOFT_RESET_WAIT_SNK_TX:
5529 case SNK_SOFT_RESET:
5530 if (port->ams != NONE_AMS)
5531 tcpm_ams_finish(port);
5532 port->upcoming_state = SOFT_RESET_SEND;
5533 tcpm_ams_start(port, SOFT_RESET_AMS);
5534 break;
5535 case SOFT_RESET_SEND:
5536 /*
5537 * Power Delivery 3.0 Section 6.3.13
5538 *
5539 * A Soft_Reset Message Shall be targeted at a specific entity
5540 * depending on the type of SOP* packet used.
5541 */
5542 if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5543 port->message_id_prime = 0;
5544 port->rx_msgid_prime = -1;
5545 tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5546 tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5547 } else {
5548 port->message_id = 0;
5549 port->rx_msgid = -1;
5550 /* remove existing capabilities */
5551 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5552 port->partner_source_caps = NULL;
5553 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5554 tcpm_set_state_cond(port, hard_reset_state(port), 0);
5555 else
5556 tcpm_set_state_cond(port, hard_reset_state(port),
5557 PD_T_SENDER_RESPONSE);
5558 }
5559 break;
5560
5561 /* DR_Swap states */
5562 case DR_SWAP_SEND:
5563 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
5564 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5565 port->send_discover = true;
5566 port->send_discover_prime = false;
5567 }
5568 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5569 PD_T_SENDER_RESPONSE);
5570 break;
5571 case DR_SWAP_ACCEPT:
5572 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5573 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5574 port->send_discover = true;
5575 port->send_discover_prime = false;
5576 }
5577 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5578 break;
5579 case DR_SWAP_SEND_TIMEOUT:
5580 tcpm_swap_complete(port, -ETIMEDOUT);
5581 port->send_discover = false;
5582 port->send_discover_prime = false;
5583 tcpm_ams_finish(port);
5584 tcpm_set_state(port, ready_state(port), 0);
5585 break;
5586 case DR_SWAP_CHANGE_DR:
5587 tcpm_unregister_altmodes(port);
5588 if (port->data_role == TYPEC_HOST)
5589 tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5590 TYPEC_DEVICE);
5591 else
5592 tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5593 TYPEC_HOST);
5594 tcpm_ams_finish(port);
5595 tcpm_set_state(port, ready_state(port), 0);
5596 break;
5597
5598 case FR_SWAP_SEND:
5599 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5600 tcpm_set_state(port, ERROR_RECOVERY, 0);
5601 break;
5602 }
5603 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5604 break;
5605 case FR_SWAP_SEND_TIMEOUT:
5606 tcpm_set_state(port, ERROR_RECOVERY, 0);
5607 break;
5608 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5609 tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5610 break;
5611 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5612 if (port->vbus_source)
5613 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5614 else
5615 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5616 break;
5617 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5618 tcpm_set_pwr_role(port, TYPEC_SOURCE);
5619 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5620 tcpm_set_state(port, ERROR_RECOVERY, 0);
5621 break;
5622 }
5623 tcpm_set_cc(port, tcpm_rp_cc(port));
5624 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5625 break;
5626
5627 /* PR_Swap states */
5628 case PR_SWAP_ACCEPT:
5629 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5630 tcpm_set_state(port, PR_SWAP_START, 0);
5631 break;
5632 case PR_SWAP_SEND:
5633 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5634 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5635 PD_T_SENDER_RESPONSE);
5636 break;
5637 case PR_SWAP_SEND_TIMEOUT:
5638 tcpm_swap_complete(port, -ETIMEDOUT);
5639 tcpm_set_state(port, ready_state(port), 0);
5640 break;
5641 case PR_SWAP_START:
5642 tcpm_apply_rc(port);
5643 if (port->pwr_role == TYPEC_SOURCE)
5644 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5645 PD_T_SRC_TRANSITION);
5646 else
5647 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5648 break;
5649 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5650 /*
5651 * Prevent vbus discharge circuit from turning on during PR_SWAP
5652 * as this is not a disconnect.
5653 */
5654 tcpm_set_vbus(port, false);
5655 port->explicit_contract = false;
5656 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
5657 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5658 PD_T_SRCSWAPSTDBY);
5659 break;
5660 case PR_SWAP_SRC_SNK_SOURCE_OFF:
5661 tcpm_set_cc(port, TYPEC_CC_RD);
5662 /* allow CC debounce */
5663 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5664 port->timings.cc_debounce_time);
5665 break;
5666 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5667 /*
5668 * USB-PD standard, 6.2.1.4, Port Power Role:
5669 * "During the Power Role Swap Sequence, for the initial Source
5670 * Port, the Port Power Role field shall be set to Sink in the
5671 * PS_RDY Message indicating that the initial Source’s power
5672 * supply is turned off"
5673 */
5674 tcpm_set_pwr_role(port, TYPEC_SINK);
5675 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5676 tcpm_set_state(port, ERROR_RECOVERY, 0);
5677 break;
5678 }
5679 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5680 break;
5681 case PR_SWAP_SRC_SNK_SINK_ON:
5682 tcpm_enable_auto_vbus_discharge(port, true);
5683 /* Set the vbus disconnect threshold for implicit contract */
5684 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5685 tcpm_set_state(port, SNK_STARTUP, 0);
5686 break;
5687 case PR_SWAP_SNK_SRC_SINK_OFF:
5688 /* will be source, remove existing capabilities */
5689 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5690 port->partner_source_caps = NULL;
5691 /*
5692 * Prevent vbus discharge circuit from turning on during PR_SWAP
5693 * as this is not a disconnect.
5694 */
5695 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5696 port->pps_data.active, 0);
5697 tcpm_set_charge(port, false);
5698 tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5699 break;
5700 case PR_SWAP_SNK_SRC_SOURCE_ON:
5701 tcpm_enable_auto_vbus_discharge(port, true);
5702 tcpm_set_cc(port, tcpm_rp_cc(port));
5703 tcpm_set_vbus(port, true);
5704 /*
5705 * allow time VBUS ramp-up, must be < tNewSrc
5706 * Also, this window overlaps with CC debounce as well.
5707 * So, Wait for the max of two which is PD_T_NEWSRC
5708 */
5709 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5710 PD_T_NEWSRC);
5711 break;
5712 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5713 /*
5714 * USB PD standard, 6.2.1.4:
5715 * "Subsequent Messages initiated by the Policy Engine,
5716 * such as the PS_RDY Message sent to indicate that Vbus
5717 * is ready, will have the Port Power Role field set to
5718 * Source."
5719 */
5720 tcpm_set_pwr_role(port, TYPEC_SOURCE);
5721 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5722 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5723 break;
5724
5725 case VCONN_SWAP_ACCEPT:
5726 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5727 tcpm_ams_finish(port);
5728 tcpm_set_state(port, VCONN_SWAP_START, 0);
5729 break;
5730 case VCONN_SWAP_SEND:
5731 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5732 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5733 PD_T_SENDER_RESPONSE);
5734 break;
5735 case VCONN_SWAP_SEND_TIMEOUT:
5736 tcpm_swap_complete(port, -ETIMEDOUT);
5737 tcpm_set_state(port, ready_state(port), 0);
5738 break;
5739 case VCONN_SWAP_START:
5740 if (port->vconn_role == TYPEC_SOURCE)
5741 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5742 else
5743 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5744 break;
5745 case VCONN_SWAP_WAIT_FOR_VCONN:
5746 tcpm_set_state(port, hard_reset_state(port),
5747 PD_T_VCONN_SOURCE_ON);
5748 break;
5749 case VCONN_SWAP_TURN_ON_VCONN:
5750 ret = tcpm_set_vconn(port, true);
5751 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5752 /*
5753 * USB PD 3.0 Section 6.4.4.3.1
5754 *
5755 * Note that a Cable Plug or VPD will not be ready for PD
5756 * Communication until tVCONNStable after VCONN has been applied
5757 */
5758 if (!ret)
5759 tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5760 PD_T_VCONN_STABLE);
5761 else
5762 tcpm_set_state(port, ready_state(port), 0);
5763 break;
5764 case VCONN_SWAP_TURN_OFF_VCONN:
5765 tcpm_set_vconn(port, false);
5766 tcpm_set_state(port, ready_state(port), 0);
5767 break;
5768 case VCONN_SWAP_SEND_SOFT_RESET:
5769 tcpm_swap_complete(port, port->swap_status);
5770 if (tcpm_can_communicate_sop_prime(port)) {
5771 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5772 port->upcoming_state = SOFT_RESET_SEND;
5773 tcpm_ams_start(port, SOFT_RESET_AMS);
5774 } else {
5775 tcpm_set_state(port, ready_state(port), 0);
5776 }
5777 break;
5778
5779 case DR_SWAP_CANCEL:
5780 case PR_SWAP_CANCEL:
5781 case VCONN_SWAP_CANCEL:
5782 tcpm_swap_complete(port, port->swap_status);
5783 if (port->pwr_role == TYPEC_SOURCE)
5784 tcpm_set_state(port, SRC_READY, 0);
5785 else
5786 tcpm_set_state(port, SNK_READY, 0);
5787 break;
5788 case FR_SWAP_CANCEL:
5789 if (port->pwr_role == TYPEC_SOURCE)
5790 tcpm_set_state(port, SRC_READY, 0);
5791 else
5792 tcpm_set_state(port, SNK_READY, 0);
5793 break;
5794
5795 case BIST_RX:
5796 switch (BDO_MODE_MASK(port->bist_request)) {
5797 case BDO_MODE_CARRIER2:
5798 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5799 tcpm_set_state(port, unattached_state(port),
5800 PD_T_BIST_CONT_MODE);
5801 break;
5802 case BDO_MODE_TESTDATA:
5803 if (port->tcpc->set_bist_data) {
5804 tcpm_log(port, "Enable BIST MODE TESTDATA");
5805 port->tcpc->set_bist_data(port->tcpc, true);
5806 }
5807 break;
5808 default:
5809 break;
5810 }
5811 break;
5812 case GET_STATUS_SEND:
5813 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5814 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5815 PD_T_SENDER_RESPONSE);
5816 break;
5817 case GET_STATUS_SEND_TIMEOUT:
5818 tcpm_set_state(port, ready_state(port), 0);
5819 break;
5820 case GET_PPS_STATUS_SEND:
5821 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5822 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5823 PD_T_SENDER_RESPONSE);
5824 break;
5825 case GET_PPS_STATUS_SEND_TIMEOUT:
5826 tcpm_set_state(port, ready_state(port), 0);
5827 break;
5828 case GET_SINK_CAP:
5829 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5830 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5831 break;
5832 case GET_SINK_CAP_TIMEOUT:
5833 port->sink_cap_done = true;
5834 tcpm_set_state(port, ready_state(port), 0);
5835 break;
5836 case ERROR_RECOVERY:
5837 tcpm_swap_complete(port, -EPROTO);
5838 tcpm_pps_complete(port, -EPROTO);
5839 tcpm_set_state(port, PORT_RESET, 0);
5840 break;
5841 case PORT_RESET:
5842 tcpm_reset_port(port);
5843 if (port->self_powered)
5844 tcpm_set_cc(port, TYPEC_CC_OPEN);
5845 else
5846 tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5847 TYPEC_CC_RD : tcpm_rp_cc(port));
5848 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5849 PD_T_ERROR_RECOVERY);
5850 break;
5851 case PORT_RESET_WAIT_OFF:
5852 tcpm_set_state(port,
5853 tcpm_default_state(port),
5854 port->vbus_present ? port->timings.ps_src_off_time : 0);
5855 break;
5856
5857 /* AMS intermediate state */
5858 case AMS_START:
5859 if (port->upcoming_state == INVALID_STATE) {
5860 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5861 SRC_READY : SNK_READY, 0);
5862 break;
5863 }
5864
5865 upcoming_state = port->upcoming_state;
5866 port->upcoming_state = INVALID_STATE;
5867 tcpm_set_state(port, upcoming_state, 0);
5868 break;
5869
5870 /* Chunk state */
5871 case CHUNK_NOT_SUPP:
5872 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5873 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5874 break;
5875
5876 /* Cable states */
5877 case SRC_VDM_IDENTITY_REQUEST:
5878 port->send_discover_prime = true;
5879 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5880 mod_send_discover_delayed_work(port, 0);
5881 port->upcoming_state = SRC_SEND_CAPABILITIES;
5882 break;
5883
5884 default:
5885 WARN(1, "Unexpected port state %d\n", port->state);
5886 break;
5887 }
5888 }
5889
tcpm_state_machine_work(struct kthread_work * work)5890 static void tcpm_state_machine_work(struct kthread_work *work)
5891 {
5892 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5893 enum tcpm_state prev_state;
5894
5895 mutex_lock(&port->lock);
5896 port->state_machine_running = true;
5897
5898 if (port->queued_message && tcpm_send_queued_message(port))
5899 goto done;
5900
5901 /* If we were queued due to a delayed state change, update it now */
5902 if (port->delayed_state) {
5903 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5904 tcpm_states[port->state],
5905 tcpm_states[port->delayed_state], port->delay_ms);
5906 port->prev_state = port->state;
5907 port->state = port->delayed_state;
5908 port->delayed_state = INVALID_STATE;
5909 }
5910
5911 /*
5912 * Continue running as long as we have (non-delayed) state changes
5913 * to make.
5914 */
5915 do {
5916 prev_state = port->state;
5917 run_state_machine(port);
5918 if (port->queued_message)
5919 tcpm_send_queued_message(port);
5920 } while (port->state != prev_state && !port->delayed_state);
5921
5922 done:
5923 port->state_machine_running = false;
5924 mutex_unlock(&port->lock);
5925 }
5926
static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
			    enum typec_cc_status cc2)
5929 {
5930 enum typec_cc_status old_cc1, old_cc2;
5931 enum tcpm_state new_state;
5932
5933 old_cc1 = port->cc1;
5934 old_cc2 = port->cc2;
5935 port->cc1 = cc1;
5936 port->cc2 = cc2;
5937
5938 tcpm_log_force(port,
5939 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5940 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5941 port->polarity,
5942 tcpm_port_is_disconnected(port) ? "disconnected"
5943 : "connected");
5944
5945 switch (port->state) {
5946 case TOGGLING:
5947 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5948 tcpm_port_is_source(port))
5949 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5950 else if (tcpm_port_is_sink(port))
5951 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5952 break;
5953 case CHECK_CONTAMINANT:
5954 /* Wait for Toggling to be resumed */
5955 break;
5956 case SRC_UNATTACHED:
5957 case ACC_UNATTACHED:
5958 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5959 tcpm_port_is_source(port))
5960 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5961 break;
5962 case SRC_ATTACH_WAIT:
5963 if (tcpm_port_is_disconnected(port) ||
5964 tcpm_port_is_audio_detached(port))
5965 tcpm_set_state(port, SRC_UNATTACHED, 0);
5966 else if (cc1 != old_cc1 || cc2 != old_cc2)
5967 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5968 break;
5969 case SRC_ATTACHED:
5970 case SRC_STARTUP:
5971 case SRC_SEND_CAPABILITIES:
5972 case SRC_READY:
5973 if (tcpm_port_is_disconnected(port) ||
5974 !tcpm_port_is_source(port)) {
5975 if (port->port_type == TYPEC_PORT_SRC)
5976 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5977 else
5978 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5979 }
5980 break;
5981 case SNK_UNATTACHED:
5982 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5983 tcpm_port_is_sink(port))
5984 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5985 break;
5986 case SNK_ATTACH_WAIT:
5987 if ((port->cc1 == TYPEC_CC_OPEN &&
5988 port->cc2 != TYPEC_CC_OPEN) ||
5989 (port->cc1 != TYPEC_CC_OPEN &&
5990 port->cc2 == TYPEC_CC_OPEN))
5991 new_state = SNK_DEBOUNCED;
5992 else if (tcpm_port_is_disconnected(port))
5993 new_state = SNK_UNATTACHED;
5994 else
5995 break;
5996 if (new_state != port->delayed_state)
5997 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5998 break;
5999 case SNK_DEBOUNCED:
6000 if (tcpm_port_is_disconnected(port))
6001 new_state = SNK_UNATTACHED;
6002 else if (port->vbus_present)
6003 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
6004 else
6005 new_state = SNK_UNATTACHED;
6006 if (new_state != port->delayed_state)
6007 tcpm_set_state(port, SNK_DEBOUNCED, 0);
6008 break;
6009 case SNK_READY:
6010 /*
6011 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
6012 * "A port that has entered into USB PD communications with the Source and
6013 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
6014 * cable disconnect in addition to monitoring VBUS.
6015 *
6016 * A port that is monitoring the CC voltage for disconnect (but is not in
6017 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
6018 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
6019 * vRd-USB for tPDDebounce."
6020 *
6021 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
6022 * away before vbus decays to disconnect threshold. Allow
6023 * disconnect to be driven by vbus disconnect when auto vbus
6024 * discharge is enabled.
6025 */
6026 if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
6027 tcpm_set_state(port, unattached_state(port), 0);
6028 else if (!port->pd_capable &&
6029 (cc1 != old_cc1 || cc2 != old_cc2))
6030 tcpm_set_current_limit(port,
6031 tcpm_get_current_limit(port),
6032 5000);
6033 break;
6034
6035 case AUDIO_ACC_ATTACHED:
6036 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6037 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
6038 break;
6039 case AUDIO_ACC_DEBOUNCE:
6040 if (tcpm_port_is_audio(port))
6041 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
6042 break;
6043
6044 case DEBUG_ACC_ATTACHED:
6045 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6046 tcpm_set_state(port, DEBUG_ACC_DEBOUNCE, 0);
6047 break;
6048
6049 case DEBUG_ACC_DEBOUNCE:
6050 if (tcpm_port_is_debug(port))
6051 tcpm_set_state(port, DEBUG_ACC_ATTACHED, 0);
6052 break;
6053
6054 case SNK_TRY:
6055 /* Do nothing, waiting for timeout */
6056 break;
6057
6058 case SNK_DISCOVERY:
6059 /* CC line is unstable, wait for debounce */
6060 if (tcpm_port_is_disconnected(port))
6061 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
6062 break;
6063 case SNK_DISCOVERY_DEBOUNCE:
6064 break;
6065
6066 case SRC_TRYWAIT:
6067 /* Hand over to state machine if needed */
6068 if (!port->vbus_present && tcpm_port_is_source(port))
6069 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6070 break;
6071 case SRC_TRYWAIT_DEBOUNCE:
6072 if (port->vbus_present || !tcpm_port_is_source(port))
6073 tcpm_set_state(port, SRC_TRYWAIT, 0);
6074 break;
6075 case SNK_TRY_WAIT_DEBOUNCE:
6076 if (!tcpm_port_is_sink(port)) {
6077 port->max_wait = 0;
6078 tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
6079 }
6080 break;
6081 case SRC_TRY_WAIT:
6082 if (tcpm_port_is_source(port))
6083 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
6084 break;
6085 case SRC_TRY_DEBOUNCE:
6086 tcpm_set_state(port, SRC_TRY_WAIT, 0);
6087 break;
6088 case SNK_TRYWAIT_DEBOUNCE:
6089 if (tcpm_port_is_sink(port))
6090 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
6091 break;
6092 case SNK_TRYWAIT_VBUS:
6093 if (!tcpm_port_is_sink(port))
6094 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
6095 break;
6096 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6097 if (!tcpm_port_is_sink(port))
6098 tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
6099 else
6100 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
6101 break;
6102 case SNK_TRYWAIT:
6103 /* Do nothing, waiting for tCCDebounce */
6104 break;
6105 case PR_SWAP_SNK_SRC_SINK_OFF:
6106 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6107 case PR_SWAP_SRC_SNK_SOURCE_OFF:
6108 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
6109 case PR_SWAP_SNK_SRC_SOURCE_ON:
6110 /*
6111 * CC state change is expected in PR_SWAP
6112 * Ignore it.
6113 */
6114 break;
6115 case FR_SWAP_SEND:
6116 case FR_SWAP_SEND_TIMEOUT:
6117 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6118 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6119 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6120 /* Do nothing, CC change expected */
6121 break;
6122
6123 case PORT_RESET:
6124 case PORT_RESET_WAIT_OFF:
6125 /*
6126 * State set back to default mode once the timer completes.
6127 * Ignore CC changes here.
6128 */
6129 break;
6130 default:
6131 /*
6132 * While acting as sink and auto vbus discharge is enabled, Allow disconnect
6133 * to be driven by vbus disconnect.
6134 */
6135 if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
6136 port->auto_vbus_discharge_enabled))
6137 tcpm_set_state(port, unattached_state(port), 0);
6138 break;
6139 }
6140 }
6141
/*
 * VBUS-present notification from the TCPC.
 *
 * Runs from tcpm_pd_event_handler() with port->lock held. Records the new
 * VBUS state and advances whichever state-machine transition was waiting
 * for VBUS to come up; states where a VBUS rise is expected ignore it.
 */
static void _tcpm_pd_vbus_on(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS on");
	port->vbus_present = true;
	/*
	 * When vbus_present is true i.e. Voltage at VBUS is greater than VSAFE5V implicitly
	 * states that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
	 */
	port->vbus_vsafe0v = false;

	switch (port->state) {
	case SNK_TRANSITION_SINK_VBUS:
		/* Supply transition finished: the negotiated contract is now live. */
		port->explicit_contract = true;
		tcpm_set_state(port, SNK_READY, 0);
		break;
	case SNK_DISCOVERY:
		/* Re-enter SNK_DISCOVERY so its entry handling runs with VBUS up. */
		tcpm_set_state(port, SNK_DISCOVERY, 0);
		break;

	case SNK_DEBOUNCED:
		/* CC already debounced; VBUS completes the attach (or starts Try.SRC). */
		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
							: SNK_ATTACHED,
			       0);
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Source restored VBUS after the hard reset; resume as sink. */
		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
		break;
	case SRC_ATTACHED:
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;
	case SRC_HARD_RESET_VBUS_ON:
		/* Our own supply is back up after the hard reset; restart as source. */
		tcpm_set_state(port, SRC_STARTUP, 0);
		break;

	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Do nothing, Waiting for Rd to be detected */
		break;
	case SRC_TRYWAIT_DEBOUNCE:
		tcpm_set_state(port, SRC_TRYWAIT, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to do be done */
		break;
	case SNK_TRYWAIT:
		/* Do nothing, waiting for tCCDebounce */
		break;
	case SNK_TRYWAIT_VBUS:
		if (tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SNK_TRYWAIT_DEBOUNCE:
		/* Do nothing, waiting for Rp */
		break;
	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
		if (port->vbus_present && tcpm_port_is_sink(port))
			tcpm_set_state(port, SNK_ATTACHED, 0);
		break;
	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;
	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* During FRS the TCPC sources VBUS autonomously; inform the driver. */
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		break;
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
		if (port->tcpc->frs_sourcing_vbus)
			port->tcpc->frs_sourcing_vbus(port->tcpc);
		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
		break;

	case PORT_RESET:
	case PORT_RESET_WAIT_OFF:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;

	default:
		break;
	}
}
6231
/*
 * VBUS-absent notification from the TCPC.
 *
 * Runs from tcpm_pd_event_handler() with port->lock held. Clears the cached
 * VBUS state, then either ignores the drop (states in which it is expected,
 * e.g. hard reset, PR/FR swap) or forces a detach/recovery transition.
 */
static void _tcpm_pd_vbus_off(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS off");
	port->vbus_present = false;
	port->vbus_never_low = false;
	switch (port->state) {
	case SNK_HARD_RESET_SINK_OFF:
		/* Source dropped VBUS as part of the hard reset; wait for it to return. */
		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
		break;
	case HARD_RESET_SEND:
		break;
	case SNK_TRY:
		/* Do nothing, waiting for timeout */
		break;
	case SRC_TRYWAIT:
		/* Hand over to state machine if needed */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
		break;
	case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to do be done */
		break;
	case SNK_TRYWAIT:
	case SNK_TRYWAIT_VBUS:
	case SNK_TRYWAIT_DEBOUNCE:
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
		break;

	case SNK_NEGOTIATE_CAPABILITIES:
		break;

	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
		/* Expected during SRC->SNK swap; advance once VBUS is off. */
		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
		break;

	case PR_SWAP_SNK_SRC_SINK_OFF:
		/* Do nothing, expected */
		break;

	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/*
		 * Do nothing when vbus off notification is received.
		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
		 * for the vbus source to ramp up.
		 */
		break;

	case PORT_RESET_WAIT_OFF:
		/* VBUS is down; the port reset is complete. */
		tcpm_set_state(port, tcpm_default_state(port), 0);
		break;

	case SRC_TRY_WAIT:
	case SRC_TRY_DEBOUNCE:
		/* Do nothing, waiting for sink detection */
		break;

	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/*
		 * Force to unattached state to re-initiate connection.
		 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
		 * sink removed. Although sink removal here is due to source's vbus collapse,
		 * treat it the same way for consistency.
		 */
		if (port->port_type == TYPEC_PORT_SRC)
			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
		else
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;

	case PORT_RESET:
		/*
		 * State set back to default mode once the timer completes.
		 * Ignore vbus changes here.
		 */
		break;

	case FR_SWAP_SEND:
	case FR_SWAP_SEND_TIMEOUT:
	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
		/* Do nothing, vbus drop expected */
		break;

	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, its OK to receive vbus off events */
		break;

	default:
		/* Losing VBUS while attached as sink means disconnect. */
		if (port->pwr_role == TYPEC_SINK && port->attached)
			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
		break;
	}
}
6335
/*
 * VBUS-at-vSafe0V notification from the TCPC.
 *
 * Runs from tcpm_pd_event_handler() with port->lock held. vSafe0V either
 * completes a hard-reset/attach sequence, or — when auto VBUS discharge is
 * enabled — acts as the definitive disconnect signal in place of CC.
 */
static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
{
	tcpm_log_force(port, "VBUS VSAFE0V");
	port->vbus_vsafe0v = true;
	switch (port->state) {
	case SRC_HARD_RESET_VBUS_OFF:
		/*
		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
		 */
		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
		break;
	case SRC_ATTACH_WAIT:
		/* Attach as source (or start Try.SNK) after CC debounce. */
		if (tcpm_port_is_source(port))
			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
				       port->timings.cc_debounce_time);
		break;
	case SRC_STARTUP:
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_TRANSITION_SUPPLY:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/* With auto discharge enabled, vSafe0V drives the disconnect. */
		if (port->auto_vbus_discharge_enabled) {
			if (port->port_type == TYPEC_PORT_SRC)
				tcpm_set_state(port, SRC_UNATTACHED, 0);
			else
				tcpm_set_state(port, SNK_UNATTACHED, 0);
		}
		break;
	case PR_SWAP_SNK_SRC_SINK_OFF:
	case PR_SWAP_SNK_SRC_SOURCE_ON:
		/* Do nothing, vsafe0v is expected during transition */
		break;
	case SNK_ATTACH_WAIT:
	case SNK_DEBOUNCED:
		/*Do nothing, still waiting for VSAFE5V for connect */
		break;
	case SNK_HARD_RESET_WAIT_VBUS:
		/* Do nothing, its OK to receive vbus off events */
		break;
	default:
		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
			tcpm_set_state(port, SNK_UNATTACHED, 0);
		break;
	}
}
6384
_tcpm_pd_hard_reset(struct tcpm_port * port)6385 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6386 {
6387 tcpm_log_force(port, "Received hard reset");
6388 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6389 port->tcpc->set_bist_data(port->tcpc, false);
6390
6391 switch (port->state) {
6392 case TOGGLING:
6393 case ERROR_RECOVERY:
6394 case PORT_RESET:
6395 case PORT_RESET_WAIT_OFF:
6396 return;
6397 default:
6398 break;
6399 }
6400
6401 if (port->ams != NONE_AMS)
6402 port->ams = NONE_AMS;
6403 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6404 port->ams = HARD_RESET;
6405 /*
6406 * If we keep receiving hard reset requests, executing the hard reset
6407 * must have failed. Revert to error recovery if that happens.
6408 */
6409 tcpm_set_state(port,
6410 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6411 HARD_RESET_START : ERROR_RECOVERY,
6412 0);
6413 }
6414
/*
 * kthread worker that drains and dispatches queued port events.
 *
 * Events are posted by the exported notification helpers (tcpm_cc_change(),
 * tcpm_vbus_change(), ...) as bits in port->pd_events under pd_event_lock.
 * The handler runs with port->lock held; pd_event_lock is dropped while a
 * claimed batch is processed so new events can be posted concurrently, and
 * the while loop re-checks for anything that arrived meanwhile.
 */
static void tcpm_pd_event_handler(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port,
					      event_work);
	u32 events;

	mutex_lock(&port->lock);

	spin_lock(&port->pd_event_lock);
	while (port->pd_events) {
		/* Claim the pending batch, then release the lock while handling it. */
		events = port->pd_events;
		port->pd_events = 0;
		spin_unlock(&port->pd_event_lock);
		if (events & TCPM_RESET_EVENT)
			_tcpm_pd_hard_reset(port);
		if (events & TCPM_VBUS_EVENT) {
			bool vbus;

			/* The event carries no payload; query the live VBUS state. */
			vbus = port->tcpc->get_vbus(port->tcpc);
			if (vbus) {
				_tcpm_pd_vbus_on(port);
			} else {
				_tcpm_pd_vbus_off(port);
				/*
				 * When TCPC does not support detecting vsafe0v voltage level,
				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
				 * to see if vbus has discharge to VSAFE0V.
				 */
				if (!port->tcpc->is_vbus_vsafe0v ||
				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
					_tcpm_pd_vbus_vsafe0v(port);
			}
		}
		if (events & TCPM_CC_EVENT) {
			enum typec_cc_status cc1, cc2;

			/* Likewise, read the current CC status from the TCPC. */
			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
				_tcpm_cc_change(port, cc1, cc2);
		}
		if (events & TCPM_FRS_EVENT) {
			/* FRS signal is only actionable from an idle sink. */
			if (port->state == SNK_READY) {
				int ret;

				port->upcoming_state = FR_SWAP_SEND;
				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
				if (ret == -EAGAIN)
					port->upcoming_state = INVALID_STATE;
			} else {
				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
			}
		}
		if (events & TCPM_SOURCING_VBUS) {
			tcpm_log(port, "sourcing vbus");
			/*
			 * In fast role swap case TCPC autonomously sources vbus. Set vbus_source
			 * true as TCPM wouldn't have called tcpm_set_vbus.
			 *
			 * When vbus is sourced on the command on TCPM i.e. TCPM called
			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
			 */
			port->vbus_source = true;
			_tcpm_pd_vbus_on(port);
		}
		if (events & TCPM_PORT_CLEAN) {
			tcpm_log(port, "port clean");
			/* Contaminant check finished; resume toggling if possible. */
			if (port->state == CHECK_CONTAMINANT) {
				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
					tcpm_set_state(port, TOGGLING, 0);
				else
					tcpm_set_state(port, tcpm_default_state(port), 0);
			}
		}
		if (events & TCPM_PORT_ERROR) {
			tcpm_log(port, "port triggering error recovery");
			tcpm_set_state(port, ERROR_RECOVERY, 0);
		}

		/* Re-acquire the lock for the loop-condition re-check. */
		spin_lock(&port->pd_event_lock);
	}
	spin_unlock(&port->pd_event_lock);
	mutex_unlock(&port->lock);
}
6497
/*
 * tcpm_cc_change - Notify TCPM of a CC line status change.
 * @port: tcpm port
 *
 * Queues a TCPM_CC_EVENT for tcpm_pd_event_handler(); the new CC state is
 * read back from the TCPC when the event is processed. Safe to call from
 * the low-level driver's interrupt handling path.
 */
void tcpm_cc_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_CC_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_cc_change);
6506
/*
 * tcpm_vbus_change - Notify TCPM of a VBUS level change.
 * @port: tcpm port
 *
 * Queues a TCPM_VBUS_EVENT for tcpm_pd_event_handler(); the actual VBUS
 * level is queried from the TCPC when the event is processed.
 */
void tcpm_vbus_change(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_VBUS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6515
/*
 * tcpm_pd_hard_reset - Notify TCPM that a hard reset was received.
 * @port: tcpm port
 *
 * NOTE(review): unlike the other notifiers this assigns (rather than ORs)
 * pd_events, discarding any not-yet-processed events — presumably because a
 * hard reset supersedes them; confirm before changing to |=.
 */
void tcpm_pd_hard_reset(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events = TCPM_RESET_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6524
/*
 * tcpm_sink_frs - Notify TCPM that a Fast Role Swap signal was detected.
 * @port: tcpm port
 *
 * Queues a TCPM_FRS_EVENT; the handler acts on it only if the port is in
 * SNK_READY, otherwise the signal is discarded with a log message.
 */
void tcpm_sink_frs(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_FRS_EVENT;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6533
/*
 * tcpm_sourcing_vbus - Notify TCPM that the TCPC started sourcing VBUS.
 * @port: tcpm port
 *
 * Queues a TCPM_SOURCING_VBUS event, used when the TCPC sources VBUS
 * autonomously (e.g. during fast role swap) so TCPM can update
 * vbus_source without having called tcpm_set_vbus() itself.
 */
void tcpm_sourcing_vbus(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_SOURCING_VBUS;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6542
/*
 * tcpm_port_clean - Notify TCPM that the contaminant check completed.
 * @port: tcpm port
 *
 * Queues a TCPM_PORT_CLEAN event; if the port is in CHECK_CONTAMINANT the
 * handler restarts toggling (or falls back to the default state).
 */
void tcpm_port_clean(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_PORT_CLEAN;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_port_clean);
6551
tcpm_port_is_toggling(struct tcpm_port * port)6552 bool tcpm_port_is_toggling(struct tcpm_port *port)
6553 {
6554 return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6555 }
6556 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6557
/*
 * tcpm_port_error_recovery - Ask TCPM to run error recovery on the port.
 * @port: tcpm port
 *
 * Queues a TCPM_PORT_ERROR event; the handler unconditionally moves the
 * state machine to ERROR_RECOVERY.
 */
void tcpm_port_error_recovery(struct tcpm_port *port)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= TCPM_PORT_ERROR;
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}
EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
6566
/*
 * Delayed worker that queries the partner's sink capabilities (a
 * prerequisite for enabling FRS). Bails out permanently when the port is
 * not FRS-capable or the caps were already fetched; otherwise reschedules
 * itself until the port is idle in SNK_READY and the AMS can start.
 */
static void tcpm_enable_frs_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
	int ret;

	mutex_lock(&port->lock);
	/* Not FRS capable */
	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
	    !port->tcpc->enable_frs ||
	    /* Sink caps queried */
	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
		goto unlock;

	/* Send when the state machine is idle */
	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
	    port->send_discover_prime)
		goto resched;

	port->upcoming_state = GET_SINK_CAP;
	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
	if (ret == -EAGAIN) {
		/* AMS busy right now; fall through and retry later. */
		port->upcoming_state = INVALID_STATE;
	} else {
		port->sink_cap_done = true;
		goto unlock;
	}
resched:
	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
	mutex_unlock(&port->lock);
}
6599
/*
 * Delayed worker that issues the Discover Identity VDM once the port is
 * idle. Clears the request entirely for a device-role port on a pre-PD3.0
 * contract, and reschedules itself while the state machine or the VDM
 * engine is busy.
 */
static void tcpm_send_discover_work(struct kthread_work *work)
{
	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);

	mutex_lock(&port->lock);
	/* No need to send DISCOVER_IDENTITY anymore */
	if (!port->send_discover && !port->send_discover_prime)
		goto unlock;

	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
		port->send_discover = false;
		port->send_discover_prime = false;
		goto unlock;
	}

	/* Retry if the port is not idle */
	if ((port->state != SRC_READY && port->state != SNK_READY &&
	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
		goto unlock;
	}

	/* tx_sop_type selects SOP vs SOP' (port partner vs cable) addressing. */
	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);

unlock:
	mutex_unlock(&port->lock);
}
6627
/*
 * Data role swap request (typec class callback).
 *
 * swap_lock is held across the whole operation so only one swap runs at a
 * time; port->lock protects the state checks and updates but is dropped
 * while waiting (up to PD_ROLE_SWAP_TIMEOUT) for the state machine to
 * complete the swap. For a non-PD partner the swap is performed by a port
 * reset, which only works when a matching preferred role is configured;
 * for a PD partner a DR_Swap AMS is started.
 */
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->typec_caps.data != TYPEC_PORT_DRD) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role. */
	if (port->data_role == data) {
		ret = 0;
		goto port_unlock;
	}

	/*
	 * XXX
	 * 6.3.9: If an alternate mode is active, a request to swap
	 * alternate modes shall trigger a port reset.
	 * Reject data role swap request in this case.
	 */

	if (!port->pd_capable) {
		/*
		 * If the partner is not PD capable, reset the port to
		 * trigger a role change. This can only work if a preferred
		 * role is configured, and if it matches the requested role.
		 */
		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
		    port->try_role == port->pwr_role) {
			ret = -EINVAL;
			goto port_unlock;
		}
		port->non_pd_role_swap = true;
		tcpm_set_state(port, PORT_RESET, 0);
	} else {
		port->upcoming_state = DR_SWAP_SEND;
		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto port_unlock;
		}
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);

	/* Wait (without port->lock) for the state machine to finish the swap. */
	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	port->non_pd_role_swap = false;
	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
6699
/*
 * Power role swap request (typec class callback).
 *
 * Same locking scheme as tcpm_dr_set(): swap_lock serialises swaps,
 * port->lock is dropped while blocking (up to PD_ROLE_SWAP_TIMEOUT) on
 * swap_complete. Only valid on a DRP port that is idle in SRC/SNK_READY.
 */
static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->port_type != TYPEC_PORT_DRP) {
		ret = -EINVAL;
		goto port_unlock;
	}
	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role. */
	if (role == port->pwr_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = PR_SWAP_SEND;
	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
6748
/*
 * VCONN source swap request (typec class callback).
 *
 * Same locking scheme as tcpm_dr_set()/tcpm_pr_set(): swap_lock serialises
 * swaps, port->lock is dropped while blocking on swap_complete. Starts a
 * VCONN_Swap AMS when the port is idle and the role actually changes.
 */
static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (port->state != SRC_READY && port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* Already in the requested role. */
	if (role == port->vconn_role) {
		ret = 0;
		goto port_unlock;
	}

	port->upcoming_state = VCONN_SWAP_SEND;
	ret = tcpm_ams_start(port, VCONN_SWAP);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	port->swap_status = 0;
	port->swap_pending = true;
	reinit_completion(&port->swap_complete);
	mutex_unlock(&port->lock);

	if (!wait_for_completion_timeout(&port->swap_complete,
				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->swap_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);
	return ret;
}
6793
tcpm_try_role(struct typec_port * p,int role)6794 static int tcpm_try_role(struct typec_port *p, int role)
6795 {
6796 struct tcpm_port *port = typec_get_drvdata(p);
6797 struct tcpc_dev *tcpc = port->tcpc;
6798 int ret = 0;
6799
6800 mutex_lock(&port->lock);
6801 if (tcpc->try_role)
6802 ret = tcpc->try_role(tcpc, role);
6803 if (!ret)
6804 port->try_role = role;
6805 port->try_src_count = 0;
6806 port->try_snk_count = 0;
6807 mutex_unlock(&port->lock);
6808
6809 return ret;
6810 }
6811
/*
 * Request a new PPS operating current.
 * @port: tcpm port
 * @req_op_curr: requested operating current, in mA (per RDO_PROG_CURR_MA_STEP)
 *
 * Only valid while a PPS contract is active and the port is idle in
 * SNK_READY. Rejects values above the APDO maximum or that would drop the
 * operating power below operating_snk_mw. Blocks up to PD_PPS_CTRL_TIMEOUT
 * for the renegotiation result; returns 0 or a negative error code.
 */
static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (req_op_curr > port->pps_data.max_curr) {
		ret = -EINVAL;
		goto port_unlock;
	}

	/* mA * mV / 1000 -> mW; must still meet the configured sink power. */
	target_mw = (req_op_curr * port->supply_voltage) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down operating current to align with PPS valid steps */
	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_op_curr = req_op_curr;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);

	/* Wait (without port->lock) for the negotiation to conclude. */
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
6872
/*
 * Request a new PPS output voltage.
 * @port: tcpm port
 * @req_out_volt: requested output voltage, in mV (per RDO_PROG_VOLT_MV_STEP)
 *
 * Counterpart of tcpm_pps_set_op_curr(): only valid with an active PPS
 * contract while idle in SNK_READY, and the resulting power must still
 * meet operating_snk_mw. Blocks up to PD_PPS_CTRL_TIMEOUT for the result.
 */
static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
{
	unsigned int target_mw;
	int ret;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.active) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	/* mA * mV / 1000 -> mW; must still meet the configured sink power. */
	target_mw = (port->current_limit * req_out_volt) / 1000;
	if (target_mw < port->operating_snk_mw) {
		ret = -EINVAL;
		goto port_unlock;
	}

	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	/* Round down output voltage to align with PPS valid steps */
	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);

	reinit_completion(&port->pps_complete);
	port->pps_data.req_out_volt = req_out_volt;
	port->pps_status = 0;
	port->pps_pending = true;
	mutex_unlock(&port->lock);

	/* Wait (without port->lock) for the negotiation to conclude. */
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
6928
/*
 * Enter or leave PPS mode.
 * @port: tcpm port
 * @activate: true to negotiate a PPS APDO at the present voltage/current,
 *	      false to fall back to a standard PDO contract
 *
 * Requires PPS support and the port idle in SNK_READY. Blocks up to
 * PD_PPS_CTRL_TIMEOUT for the negotiation result.
 */
static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
{
	int ret = 0;

	mutex_lock(&port->swap_lock);
	mutex_lock(&port->lock);

	if (!port->pps_data.supported) {
		ret = -EOPNOTSUPP;
		goto port_unlock;
	}

	/* Trying to deactivate PPS when already deactivated so just bail */
	if (!port->pps_data.active && !activate)
		goto port_unlock;

	if (port->state != SNK_READY) {
		ret = -EAGAIN;
		goto port_unlock;
	}

	if (activate)
		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
	else
		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
	if (ret == -EAGAIN) {
		port->upcoming_state = INVALID_STATE;
		goto port_unlock;
	}

	reinit_completion(&port->pps_complete);
	port->pps_status = 0;
	port->pps_pending = true;

	/* Trigger PPS request or move back to standard PDO contract */
	if (activate) {
		port->pps_data.req_out_volt = port->supply_voltage;
		port->pps_data.req_op_curr = port->current_limit;
	}
	mutex_unlock(&port->lock);

	/* Wait (without port->lock) for the negotiation to conclude. */
	if (!wait_for_completion_timeout(&port->pps_complete,
				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
		ret = -ETIMEDOUT;
	else
		ret = port->pps_status;

	goto swap_unlock;

port_unlock:
	mutex_unlock(&port->lock);
swap_unlock:
	mutex_unlock(&port->swap_lock);

	return ret;
}
6986
/*
 * One-time port bring-up: initialise the TCPC, reset TCPM's own state,
 * seed the cached VBUS/vSafe0V/CC status from the hardware, and finally
 * force a PORT_RESET so the link starts from a clean slate.
 */
static void tcpm_init(struct tcpm_port *port)
{
	enum typec_cc_status cc1, cc2;

	port->tcpc->init(port->tcpc);

	tcpm_reset_port(port);

	/*
	 * XXX
	 * Should possibly wait for VBUS to settle if it was enabled locally
	 * since tcpm_reset_port() will disable VBUS.
	 */
	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
	if (port->vbus_present)
		port->vbus_never_low = true;

	/*
	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
	 * So implicitly vbus_vsafe0v = false.
	 *
	 * 2. When vbus_present is false and TCPC does NOT support querying
	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
	 * vbus_vsafe0v is true.
	 *
	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
	 * then, query tcpc for vsafe0v status.
	 */
	if (port->vbus_present)
		port->vbus_vsafe0v = false;
	else if (!port->tcpc->is_vbus_vsafe0v)
		port->vbus_vsafe0v = true;
	else
		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);

	tcpm_set_state(port, tcpm_default_state(port), 0);

	/* Prime the state machine with the current CC status. */
	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
		_tcpm_cc_change(port, cc1, cc2);

	/*
	 * Some adapters need a clean slate at startup, and won't recover
	 * otherwise. So do not try to be fancy and force a clean disconnect.
	 */
	tcpm_set_state(port, PORT_RESET, 0);
}
7033
tcpm_port_type_set(struct typec_port * p,enum typec_port_type type)7034 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
7035 {
7036 struct tcpm_port *port = typec_get_drvdata(p);
7037
7038 mutex_lock(&port->lock);
7039 if (type == port->port_type)
7040 goto port_unlock;
7041
7042 port->port_type = type;
7043
7044 if (!port->connected) {
7045 tcpm_set_state(port, PORT_RESET, 0);
7046 } else if (type == TYPEC_PORT_SNK) {
7047 if (!(port->pwr_role == TYPEC_SINK &&
7048 port->data_role == TYPEC_DEVICE))
7049 tcpm_set_state(port, PORT_RESET, 0);
7050 } else if (type == TYPEC_PORT_SRC) {
7051 if (!(port->pwr_role == TYPEC_SOURCE &&
7052 port->data_role == TYPEC_HOST))
7053 tcpm_set_state(port, PORT_RESET, 0);
7054 }
7055
7056 port_unlock:
7057 mutex_unlock(&port->lock);
7058 return 0;
7059 }
7060
tcpm_find_pd_data(struct tcpm_port * port,struct usb_power_delivery * pd)7061 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
7062 {
7063 int i;
7064
7065 for (i = 0; port->pd_list[i]; i++) {
7066 if (port->pd_list[i]->pd == pd)
7067 return port->pd_list[i];
7068 }
7069
7070 return ERR_PTR(-ENODATA);
7071 }
7072
/*
 * typec class pd_get callback: return the array of USB Power Delivery
 * capability objects registered for this port.
 */
static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
{
	struct tcpm_port *port = typec_get_drvdata(p);

	return port->pds;
}
7079
/*
 * Switch the port to one of its pre-registered USB PD capability sets and,
 * when a partner is attached, renegotiate so the partner sees the update.
 * Returns 0 on success or a negative errno.
 */
static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
{
	struct tcpm_port *port = typec_get_drvdata(p);
	struct pd_data *data;
	int i, ret = 0;

	mutex_lock(&port->lock);

	/* Nothing to do if this PD instance is already selected. */
	if (port->selected_pd == pd)
		goto unlock;

	data = tcpm_find_pd_data(port, pd);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto unlock;
	}

	/* Adopt the sink PDOs of the new set, if it defines any. */
	if (data->sink_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
			port->snk_pdo[i] = data->sink_desc.pdo[i];
		port->nr_snk_pdo = i;
		port->operating_snk_mw = data->operating_snk_mw;
	}

	/* Likewise for the source PDOs. */
	if (data->source_desc.pdo[0]) {
		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
			port->src_pdo[i] = data->source_desc.pdo[i];
		port->nr_src_pdo = i;
	}

	/*
	 * Depending on the connection state either refresh the advertised Rp
	 * or start a power-negotiation AMS so the new capabilities take
	 * effect on the wire.
	 */
	switch (port->state) {
	case SRC_UNATTACHED:
	case SRC_ATTACH_WAIT:
	case SRC_TRYWAIT:
		/* Not yet negotiating: just update the CC pull-up. */
		tcpm_set_cc(port, tcpm_rp_cc(port));
		break;
	case SRC_SEND_CAPABILITIES:
	case SRC_SEND_CAPABILITIES_TIMEOUT:
	case SRC_NEGOTIATE_CAPABILITIES:
	case SRC_READY:
	case SRC_WAIT_NEW_CAPABILITIES:
		/* Re-send source capabilities from scratch. */
		port->caps_count = 0;
		port->upcoming_state = SRC_SEND_CAPABILITIES;
		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	case SNK_NEGOTIATE_CAPABILITIES:
	case SNK_NEGOTIATE_PPS_CAPABILITIES:
	case SNK_READY:
	case SNK_TRANSITION_SINK:
	case SNK_TRANSITION_SINK_VBUS:
		/* Renegotiate as sink; PPS contracts stay on the PPS path. */
		if (port->pps_data.active)
			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
		else if (port->pd_capable)
			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
		else
			break;

		port->update_sink_caps = true;

		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
		if (ret == -EAGAIN) {
			port->upcoming_state = INVALID_STATE;
			goto unlock;
		}
		break;
	default:
		break;
	}

	/*
	 * Clear the port's PD association before installing the new one so
	 * the typec class observes a clean transition.
	 */
	port->port_source_caps = data->source_cap;
	port->port_sink_caps = data->sink_cap;
	typec_port_set_usb_power_delivery(p, NULL);
	port->selected_pd = pd;
	typec_port_set_usb_power_delivery(p, port->selected_pd);
unlock:
	mutex_unlock(&port->lock);
	return ret;
}
7162
/* typec class callbacks backed by the TCPM state machine. */
static const struct typec_operations tcpm_ops = {
	.try_role = tcpm_try_role,
	.dr_set = tcpm_dr_set,
	.pr_set = tcpm_pr_set,
	.vconn_set = tcpm_vconn_set,
	.port_type_set = tcpm_port_type_set,
	.pd_get = tcpm_pd_get,
	.pd_set = tcpm_pd_set
};
7172
/*
 * Re-initialize the port after the low-level TCPC was reset by its driver.
 * Runs the full init path, which drops any active connection.
 */
void tcpm_tcpc_reset(struct tcpm_port *port)
{
	mutex_lock(&port->lock);
	/* XXX: Maintain PD connection if possible? */
	tcpm_init(port);
	mutex_unlock(&port->lock);
}
EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
7181
tcpm_port_unregister_pd(struct tcpm_port * port)7182 static void tcpm_port_unregister_pd(struct tcpm_port *port)
7183 {
7184 int i;
7185
7186 port->port_sink_caps = NULL;
7187 port->port_source_caps = NULL;
7188 for (i = 0; i < port->pd_count; i++) {
7189 usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
7190 usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
7191 devm_kfree(port->dev, port->pd_list[i]);
7192 port->pd_list[i] = NULL;
7193 usb_power_delivery_unregister(port->pds[i]);
7194 port->pds[i] = NULL;
7195 }
7196 }
7197
/*
 * Register each parsed capability set (port->pd_list) with the USB PD class
 * and publish the first set as the port's default. On any failure all
 * partially registered objects are unwound. Returns 0 or a negative errno.
 */
static int tcpm_port_register_pd(struct tcpm_port *port)
{
	u16 pd_revision = port->typec_caps.pd_revision;
	u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
	struct usb_power_delivery_desc desc = { pd_revision, pd_version };
	struct usb_power_delivery_capabilities *cap;
	int ret, i;

	/* Nothing to register when the port advertises no PDOs at all. */
	if (!port->nr_src_pdo && !port->nr_snk_pdo)
		return 0;

	for (i = 0; i < port->pd_count; i++) {
		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
		if (IS_ERR(port->pds[i])) {
			ret = PTR_ERR(port->pds[i]);
			goto err_unregister;
		}
		port->pd_list[i]->pd = port->pds[i];

		/* Source capabilities are optional per set. */
		if (port->pd_list[i]->source_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								       &port->pd_list[i]->source_desc);
			if (IS_ERR(cap)) {
				ret = PTR_ERR(cap);
				goto err_unregister;
			}
			port->pd_list[i]->source_cap = cap;
		}

		/* Sink capabilities are optional per set. */
		if (port->pd_list[i]->sink_desc.pdo[0]) {
			cap = usb_power_delivery_register_capabilities(port->pds[i],
								       &port->pd_list[i]->sink_desc);
			if (IS_ERR(cap)) {
				ret = PTR_ERR(cap);
				goto err_unregister;
			}
			port->pd_list[i]->sink_cap = cap;
		}
	}

	/* The first capability set is the initially selected one. */
	port->port_source_caps = port->pd_list[0]->source_cap;
	port->port_sink_caps = port->pd_list[0]->sink_cap;
	port->selected_pd = port->pds[0];
	return 0;

err_unregister:
	tcpm_port_unregister_pd(port);

	return ret;
}
7248
tcpm_fw_get_timings(struct tcpm_port * port,struct fwnode_handle * fwnode)7249 static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fwnode)
7250 {
7251 int ret;
7252 u32 val;
7253
7254 ret = fwnode_property_read_u32(fwnode, "sink-wait-cap-time-ms", &val);
7255 if (!ret)
7256 port->timings.sink_wait_cap_time = val;
7257 else
7258 port->timings.sink_wait_cap_time = PD_T_SINK_WAIT_CAP;
7259
7260 ret = fwnode_property_read_u32(fwnode, "ps-source-off-time-ms", &val);
7261 if (!ret)
7262 port->timings.ps_src_off_time = val;
7263 else
7264 port->timings.ps_src_off_time = PD_T_PS_SOURCE_OFF;
7265
7266 ret = fwnode_property_read_u32(fwnode, "cc-debounce-time-ms", &val);
7267 if (!ret)
7268 port->timings.cc_debounce_time = val;
7269 else
7270 port->timings.cc_debounce_time = PD_T_CC_DEBOUNCE;
7271
7272 ret = fwnode_property_read_u32(fwnode, "sink-bc12-completion-time-ms", &val);
7273 if (!ret)
7274 port->timings.snk_bc12_cmpletion_time = val;
7275 }
7276
/*
 * Parse the port's firmware description (DT/ACPI) into the tcpm_port:
 * typec capabilities, accessory modes, FRS current, and one or more
 * source/sink PDO capability sets. Returns 0 or a negative errno.
 */
static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
{
	struct fwnode_handle *capabilities, *caps = NULL;
	unsigned int nr_src_pdo, nr_snk_pdo;
	const char *opmode_str;
	u32 *src_pdo, *snk_pdo;
	u32 uw, frs_current;
	int ret = 0, i;
	int mode;

	if (!fwnode)
		return -EINVAL;

	/*
	 * This fwnode has a "compatible" property, but is never populated as a
	 * struct device. Instead we simply parse it to read the properties.
	 * This breaks fw_devlink=on. To maintain backward compatibility
	 * with existing DT files, we work around this by deleting any
	 * fwnode_links to/from this fwnode.
	 */
	fw_devlink_purge_absent_suppliers(fwnode);

	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
	if (ret < 0)
		return ret;

	/* Collect supported accessory modes (at most two slots filled here). */
	mode = 0;

	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;

	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;

	port->port_type = port->typec_caps.type;
	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");

	/*
	 * Non-PD ports only need a fixed Type-C power operation mode; no PDO
	 * parsing applies.
	 */
	if (!port->pd_supported) {
		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
		if (ret)
			return ret;
		ret = typec_find_pwr_opmode(opmode_str);
		if (ret < 0)
			return ret;
		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
		return 0;
	}

	/* The following code are applicable to pd-capable ports, i.e. pd_supported is true. */

	/* FRS can only be supported by DRP ports */
	if (port->port_type == TYPEC_PORT_DRP) {
		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
					       &frs_current);
		if (!ret && frs_current <= FRS_5V_3A)
			port->new_source_frs_current = frs_current;

		/* A missing FRS property is not an error. */
		if (ret)
			ret = 0;
	}

	/* For the backward compatibility, "capabilities" node is optional. */
	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
	if (!capabilities) {
		port->pd_count = 1;
	} else {
		port->pd_count = fwnode_get_child_node_count(capabilities);
		if (!port->pd_count) {
			ret = -ENODATA;
			goto put_capabilities;
		}
	}

	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
				 GFP_KERNEL);
	if (!port->pds) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
				     GFP_KERNEL);
	if (!port->pd_list) {
		ret = -ENOMEM;
		goto put_capabilities;
	}

	/* Parse one pd_data entry per capability child node (or the port node). */
	for (i = 0; i < port->pd_count; i++) {
		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
		if (!port->pd_list[i]) {
			ret = -ENOMEM;
			goto put_capabilities;
		}

		src_pdo = port->pd_list[i]->source_desc.pdo;
		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
		snk_pdo = port->pd_list[i]->sink_desc.pdo;
		port->pd_list[i]->sink_desc.role = TYPEC_SINK;

		/* If "capabilities" is NULL, fall back to single pd cap population. */
		if (!capabilities)
			caps = fwnode;
		else
			caps = fwnode_get_next_child_node(capabilities, caps);

		/* source-pdos are mandatory unless the port is sink-only. */
		if (port->port_type != TYPEC_PORT_SNK) {
			ret = fwnode_property_count_u32(caps, "source-pdos");
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}
			if (ret < 0)
				goto put_caps;

			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
							     nr_src_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
			if (ret)
				goto put_caps;

			/* The first set also seeds the port-level working copy. */
			if (i == 0) {
				port->nr_src_pdo = nr_src_pdo;
				memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
					       port->pd_list[0]->source_desc.pdo,
					       sizeof(u32) * nr_src_pdo,
					       0);
			}
		}

		/* sink-pdos are mandatory unless the port is source-only. */
		if (port->port_type != TYPEC_PORT_SRC) {
			ret = fwnode_property_count_u32(caps, "sink-pdos");
			if (ret == 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			if (ret < 0)
				goto put_caps;

			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
							     nr_snk_pdo);
			if (ret)
				goto put_caps;

			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
			if (ret)
				goto put_caps;

			/* Sinks must state their operating power requirement. */
			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
				ret = -EINVAL;
				goto put_caps;
			}

			port->pd_list[i]->operating_snk_mw = uw / 1000;

			/* The first set also seeds the port-level working copy. */
			if (i == 0) {
				port->nr_snk_pdo = nr_snk_pdo;
				memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
					       port->pd_list[0]->sink_desc.pdo,
					       sizeof(u32) * nr_snk_pdo,
					       0);
				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
			}
		}
	}

put_caps:
	/* Only drop the reference when caps is a real child node. */
	if (caps != fwnode)
		fwnode_handle_put(caps);
put_capabilities:
	fwnode_handle_put(capabilities);
	return ret;
}
7457
tcpm_fw_get_snk_vdos(struct tcpm_port * port,struct fwnode_handle * fwnode)7458 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
7459 {
7460 int ret;
7461
7462 /* sink-vdos is optional */
7463 ret = fwnode_property_count_u32(fwnode, "sink-vdos");
7464 if (ret < 0)
7465 return 0;
7466
7467 port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
7468 if (port->nr_snk_vdo) {
7469 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
7470 port->snk_vdo,
7471 port->nr_snk_vdo);
7472 if (ret < 0)
7473 return ret;
7474 }
7475
7476 /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
7477 if (port->nr_snk_vdo) {
7478 ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
7479 if (ret < 0)
7480 return ret;
7481 else if (ret == 0)
7482 return -ENODATA;
7483
7484 port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
7485 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
7486 port->snk_vdo_v1,
7487 port->nr_snk_vdo_v1);
7488 if (ret < 0)
7489 return ret;
7490 }
7491
7492 return 0;
7493 }
7494
tcpm_fw_get_pd_revision(struct tcpm_port * port,struct fwnode_handle * fwnode)7495 static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
7496 {
7497 int ret;
7498 u8 val[4];
7499
7500 ret = fwnode_property_count_u8(fwnode, "pd-revision");
7501 if (!ret || ret != 4) {
7502 tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
7503 return;
7504 }
7505
7506 ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
7507 if (ret) {
7508 tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
7509 return;
7510 }
7511
7512 port->pd_rev.rev_major = val[0];
7513 port->pd_rev.rev_minor = val[1];
7514 port->pd_rev.ver_major = val[2];
7515 port->pd_rev.ver_minor = val[3];
7516 }
7517
/* Power Supply access to expose source power information */
enum tcpm_psy_online_states {
	TCPM_PSY_OFFLINE = 0,		/* no VBUS charging */
	TCPM_PSY_FIXED_ONLINE,		/* charging under a fixed contract */
	TCPM_PSY_PROG_ONLINE,		/* charging under a PPS (programmable) contract */
};
7524
/* Properties exposed through the per-port source power supply. */
static enum power_supply_property tcpm_psy_props[] = {
	POWER_SUPPLY_PROP_USB_TYPE,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_MIN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_MAX,
	POWER_SUPPLY_PROP_CURRENT_NOW,
};
7534
tcpm_psy_get_online(struct tcpm_port * port,union power_supply_propval * val)7535 static int tcpm_psy_get_online(struct tcpm_port *port,
7536 union power_supply_propval *val)
7537 {
7538 if (port->vbus_charge) {
7539 if (port->pps_data.active)
7540 val->intval = TCPM_PSY_PROG_ONLINE;
7541 else
7542 val->intval = TCPM_PSY_FIXED_ONLINE;
7543 } else {
7544 val->intval = TCPM_PSY_OFFLINE;
7545 }
7546
7547 return 0;
7548 }
7549
tcpm_psy_get_voltage_min(struct tcpm_port * port,union power_supply_propval * val)7550 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
7551 union power_supply_propval *val)
7552 {
7553 if (port->pps_data.active)
7554 val->intval = port->pps_data.min_volt * 1000;
7555 else
7556 val->intval = port->supply_voltage * 1000;
7557
7558 return 0;
7559 }
7560
tcpm_psy_get_voltage_max(struct tcpm_port * port,union power_supply_propval * val)7561 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
7562 union power_supply_propval *val)
7563 {
7564 if (port->pps_data.active)
7565 val->intval = port->pps_data.max_volt * 1000;
7566 else
7567 val->intval = port->supply_voltage * 1000;
7568
7569 return 0;
7570 }
7571
/* Present supply voltage, converted from mV to uV. */
static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->supply_voltage * 1000;

	return 0;
}
7579
tcpm_psy_get_current_max(struct tcpm_port * port,union power_supply_propval * val)7580 static int tcpm_psy_get_current_max(struct tcpm_port *port,
7581 union power_supply_propval *val)
7582 {
7583 if (port->pps_data.active)
7584 val->intval = port->pps_data.max_curr * 1000;
7585 else
7586 val->intval = port->current_limit * 1000;
7587
7588 return 0;
7589 }
7590
/* Present current limit, converted from mA to uA. */
static int tcpm_psy_get_current_now(struct tcpm_port *port,
				    union power_supply_propval *val)
{
	val->intval = port->current_limit * 1000;

	return 0;
}
7598
tcpm_psy_get_input_power_limit(struct tcpm_port * port,union power_supply_propval * val)7599 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
7600 union power_supply_propval *val)
7601 {
7602 unsigned int src_mv, src_ma, max_src_uw = 0;
7603 unsigned int i, tmp;
7604
7605 for (i = 0; i < port->nr_source_caps; i++) {
7606 u32 pdo = port->source_caps[i];
7607
7608 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
7609 src_mv = pdo_fixed_voltage(pdo);
7610 src_ma = pdo_max_current(pdo);
7611 tmp = src_mv * src_ma;
7612 max_src_uw = max(tmp, max_src_uw);
7613 }
7614 }
7615
7616 val->intval = max_src_uw;
7617 return 0;
7618 }
7619
tcpm_psy_get_prop(struct power_supply * psy,enum power_supply_property psp,union power_supply_propval * val)7620 static int tcpm_psy_get_prop(struct power_supply *psy,
7621 enum power_supply_property psp,
7622 union power_supply_propval *val)
7623 {
7624 struct tcpm_port *port = power_supply_get_drvdata(psy);
7625 int ret = 0;
7626
7627 switch (psp) {
7628 case POWER_SUPPLY_PROP_USB_TYPE:
7629 val->intval = port->usb_type;
7630 break;
7631 case POWER_SUPPLY_PROP_ONLINE:
7632 ret = tcpm_psy_get_online(port, val);
7633 break;
7634 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
7635 ret = tcpm_psy_get_voltage_min(port, val);
7636 break;
7637 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
7638 ret = tcpm_psy_get_voltage_max(port, val);
7639 break;
7640 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7641 ret = tcpm_psy_get_voltage_now(port, val);
7642 break;
7643 case POWER_SUPPLY_PROP_CURRENT_MAX:
7644 ret = tcpm_psy_get_current_max(port, val);
7645 break;
7646 case POWER_SUPPLY_PROP_CURRENT_NOW:
7647 ret = tcpm_psy_get_current_now(port, val);
7648 break;
7649 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
7650 tcpm_psy_get_input_power_limit(port, val);
7651 break;
7652 default:
7653 ret = -EINVAL;
7654 break;
7655 }
7656
7657 return ret;
7658 }
7659
tcpm_psy_set_online(struct tcpm_port * port,const union power_supply_propval * val)7660 static int tcpm_psy_set_online(struct tcpm_port *port,
7661 const union power_supply_propval *val)
7662 {
7663 int ret;
7664
7665 switch (val->intval) {
7666 case TCPM_PSY_FIXED_ONLINE:
7667 ret = tcpm_pps_activate(port, false);
7668 break;
7669 case TCPM_PSY_PROG_ONLINE:
7670 ret = tcpm_pps_activate(port, true);
7671 break;
7672 default:
7673 ret = -EINVAL;
7674 break;
7675 }
7676
7677 return ret;
7678 }
7679
/*
 * power_supply .set_property callback: lets user space drive PPS (online
 * state, requested voltage, requested current). Only valid on PD ports.
 */
static int tcpm_psy_set_prop(struct power_supply *psy,
			     enum power_supply_property psp,
			     const union power_supply_propval *val)
{
	struct tcpm_port *port = power_supply_get_drvdata(psy);
	int ret;

	/*
	 * All the properties below are related to USB PD. The check needs to be
	 * property specific when a non-pd related property is added.
	 */
	if (!port->pd_supported)
		return -EOPNOTSUPP;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		ret = tcpm_psy_set_online(port, val);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		/* uV from user space, mV internally. */
		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		/* Reject requests above the PPS contract limit (uA vs mA). */
		if (val->intval > port->pps_data.max_curr * 1000)
			ret = -EINVAL;
		else
			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	/* Notified unconditionally, even when the request was rejected. */
	power_supply_changed(port->psy);
	return ret;
}
7714
tcpm_psy_prop_writeable(struct power_supply * psy,enum power_supply_property psp)7715 static int tcpm_psy_prop_writeable(struct power_supply *psy,
7716 enum power_supply_property psp)
7717 {
7718 switch (psp) {
7719 case POWER_SUPPLY_PROP_ONLINE:
7720 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7721 case POWER_SUPPLY_PROP_CURRENT_NOW:
7722 return 1;
7723 default:
7724 return 0;
7725 }
7726 }
7727
/* Prefix for the per-port power supply name: "tcpm-source-psy-<dev name>". */
static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
7729
devm_tcpm_psy_register(struct tcpm_port * port)7730 static int devm_tcpm_psy_register(struct tcpm_port *port)
7731 {
7732 struct power_supply_config psy_cfg = {};
7733 const char *port_dev_name = dev_name(port->dev);
7734 size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
7735 strlen(port_dev_name) + 1;
7736 char *psy_name;
7737
7738 psy_cfg.drv_data = port;
7739 psy_cfg.fwnode = dev_fwnode(port->dev);
7740 psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
7741 if (!psy_name)
7742 return -ENOMEM;
7743
7744 snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
7745 port_dev_name);
7746 port->psy_desc.name = psy_name;
7747 port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
7748 port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
7749 BIT(POWER_SUPPLY_USB_TYPE_PD) |
7750 BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
7751 port->psy_desc.properties = tcpm_psy_props;
7752 port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
7753 port->psy_desc.get_property = tcpm_psy_get_prop;
7754 port->psy_desc.set_property = tcpm_psy_set_prop;
7755 port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
7756
7757 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
7758
7759 port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
7760 &psy_cfg);
7761
7762 return PTR_ERR_OR_ZERO(port->psy);
7763 }
7764
state_machine_timer_handler(struct hrtimer * timer)7765 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
7766 {
7767 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
7768
7769 if (port->registered)
7770 kthread_queue_work(port->wq, &port->state_machine);
7771 return HRTIMER_NORESTART;
7772 }
7773
vdm_state_machine_timer_handler(struct hrtimer * timer)7774 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
7775 {
7776 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
7777
7778 if (port->registered)
7779 kthread_queue_work(port->wq, &port->vdm_state_machine);
7780 return HRTIMER_NORESTART;
7781 }
7782
enable_frs_timer_handler(struct hrtimer * timer)7783 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
7784 {
7785 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
7786
7787 if (port->registered)
7788 kthread_queue_work(port->wq, &port->enable_frs);
7789 return HRTIMER_NORESTART;
7790 }
7791
send_discover_timer_handler(struct hrtimer * timer)7792 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
7793 {
7794 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
7795
7796 if (port->registered)
7797 kthread_queue_work(port->wq, &port->send_discover_work);
7798 return HRTIMER_NORESTART;
7799 }
7800
/*
 * Allocate and bring up a TCPM port on top of the given low-level TCPC.
 * Sets up the worker thread, timers, firmware-described capabilities, the
 * power supply, the typec port and its alternate modes, then runs the
 * initial state machine pass. Returns the port or an ERR_PTR.
 */
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
	struct tcpm_port *port;
	int err;

	/* All of these TCPC callbacks are mandatory for operation. */
	if (!dev || !tcpc ||
	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
		return ERR_PTR(-EINVAL);

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	port->dev = dev;
	port->tcpc = tcpc;

	mutex_init(&port->lock);
	mutex_init(&port->swap_lock);

	/* Dedicated FIFO-priority worker: PD timing is latency sensitive. */
	port->wq = kthread_run_worker(0, dev_name(dev));
	if (IS_ERR(port->wq))
		return ERR_CAST(port->wq);
	sched_set_fifo(port->wq->task);

	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
	hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	spin_lock_init(&port->pd_event_lock);

	init_completion(&port->tx_complete);
	init_completion(&port->swap_complete);
	init_completion(&port->pps_complete);
	tcpm_debugfs_init(port);

	/* Parse the firmware description of the port. */
	err = tcpm_fw_get_caps(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;
	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
	if (err < 0)
		goto out_destroy_wq;

	tcpm_fw_get_timings(port, tcpc->fwnode);
	tcpm_fw_get_pd_revision(port, tcpc->fwnode);

	port->try_role = port->typec_caps.prefer_role;

	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */

	/* Prefer the firmware-provided PD revision when one was given. */
	if (port->pd_rev.rev_major)
		port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
					       port->pd_rev.rev_minor;
	else
		port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */

	port->typec_caps.svdm_version = SVDM_VER_2_0;
	port->typec_caps.driver_data = port;
	port->typec_caps.ops = &tcpm_ops;
	port->typec_caps.orientation_aware = 1;

	port->partner_desc.identity = &port->partner_ident;

	/* The role switch is optional; only a real error aborts. */
	port->role_sw = usb_role_switch_get(port->dev);
	if (!port->role_sw)
		port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
	if (IS_ERR(port->role_sw)) {
		err = PTR_ERR(port->role_sw);
		goto out_destroy_wq;
	}

	err = devm_tcpm_psy_register(port);
	if (err)
		goto out_role_sw_put;
	power_supply_changed(port->psy);

	err = tcpm_port_register_pd(port);
	if (err)
		goto out_role_sw_put;

	if (port->pds)
		port->typec_caps.pd = port->pds[0];

	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
	if (IS_ERR(port->typec_port)) {
		err = PTR_ERR(port->typec_port);
		goto out_unregister_pd;
	}

	typec_port_register_altmodes(port->typec_port,
				     &tcpm_altmode_ops, port,
				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
	typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
				      &tcpm_cable_ops);
	/* From here on the timer handlers may queue work. */
	port->registered = true;

	mutex_lock(&port->lock);
	tcpm_init(port);
	mutex_unlock(&port->lock);

	tcpm_log(port, "%s: registered", dev_name(dev));
	return port;

out_unregister_pd:
	tcpm_port_unregister_pd(port);
out_role_sw_put:
	usb_role_switch_put(port->role_sw);
out_destroy_wq:
	tcpm_debugfs_exit(port);
	kthread_destroy_worker(port->wq);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(tcpm_register_port);
7925
/*
 * Tear down a port registered with tcpm_register_port(). The worker is
 * destroyed first (with ->registered cleared so timer callbacks stop
 * queueing work), then the timers, the port state, and the class objects.
 */
void tcpm_unregister_port(struct tcpm_port *port)
{
	int i;

	/* Stop timer handlers from queueing new work, then drain the worker. */
	port->registered = false;
	kthread_destroy_worker(port->wq);

	hrtimer_cancel(&port->send_discover_timer);
	hrtimer_cancel(&port->enable_frs_timer);
	hrtimer_cancel(&port->vdm_state_machine_timer);
	hrtimer_cancel(&port->state_machine_timer);

	tcpm_reset_port(port);

	tcpm_port_unregister_pd(port);

	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
		typec_unregister_altmode(port->port_altmode[i]);
	typec_unregister_port(port->typec_port);
	usb_role_switch_put(port->role_sw);
	tcpm_debugfs_exit(port);
}
EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7949
7950 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
7951 MODULE_DESCRIPTION("USB Type-C Port Manager");
7952 MODULE_LICENSE("GPL");
7953