xref: /linux/drivers/usb/typec/tcpm/tcpm.c (revision b82779648dfd3814df4e381f086326ec70fd791f)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2015-2017 Google, Inc
4  *
5  * USB Power Delivery protocol stack.
6  */
7 
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
33 
34 #include <uapi/linux/sched/types.h>
35 
36 #define FOREACH_STATE(S)			\
37 	S(INVALID_STATE),			\
38 	S(TOGGLING),			\
39 	S(CHECK_CONTAMINANT),			\
40 	S(SRC_UNATTACHED),			\
41 	S(SRC_ATTACH_WAIT),			\
42 	S(SRC_ATTACHED),			\
43 	S(SRC_STARTUP),				\
44 	S(SRC_SEND_CAPABILITIES),		\
45 	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
46 	S(SRC_NEGOTIATE_CAPABILITIES),		\
47 	S(SRC_TRANSITION_SUPPLY),		\
48 	S(SRC_READY),				\
49 	S(SRC_WAIT_NEW_CAPABILITIES),		\
50 						\
51 	S(SNK_UNATTACHED),			\
52 	S(SNK_ATTACH_WAIT),			\
53 	S(SNK_DEBOUNCED),			\
54 	S(SNK_ATTACHED),			\
55 	S(SNK_STARTUP),				\
56 	S(SNK_DISCOVERY),			\
57 	S(SNK_DISCOVERY_DEBOUNCE),		\
58 	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
59 	S(SNK_WAIT_CAPABILITIES),		\
60 	S(SNK_NEGOTIATE_CAPABILITIES),		\
61 	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
62 	S(SNK_TRANSITION_SINK),			\
63 	S(SNK_TRANSITION_SINK_VBUS),		\
64 	S(SNK_READY),				\
65 						\
66 	S(ACC_UNATTACHED),			\
67 	S(DEBUG_ACC_ATTACHED),			\
68 	S(AUDIO_ACC_ATTACHED),			\
69 	S(AUDIO_ACC_DEBOUNCE),			\
70 						\
71 	S(HARD_RESET_SEND),			\
72 	S(HARD_RESET_START),			\
73 	S(SRC_HARD_RESET_VBUS_OFF),		\
74 	S(SRC_HARD_RESET_VBUS_ON),		\
75 	S(SNK_HARD_RESET_SINK_OFF),		\
76 	S(SNK_HARD_RESET_WAIT_VBUS),		\
77 	S(SNK_HARD_RESET_SINK_ON),		\
78 						\
79 	S(SOFT_RESET),				\
80 	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
81 	S(SNK_SOFT_RESET),			\
82 	S(SOFT_RESET_SEND),			\
83 						\
84 	S(DR_SWAP_ACCEPT),			\
85 	S(DR_SWAP_SEND),			\
86 	S(DR_SWAP_SEND_TIMEOUT),		\
87 	S(DR_SWAP_CANCEL),			\
88 	S(DR_SWAP_CHANGE_DR),			\
89 						\
90 	S(PR_SWAP_ACCEPT),			\
91 	S(PR_SWAP_SEND),			\
92 	S(PR_SWAP_SEND_TIMEOUT),		\
93 	S(PR_SWAP_CANCEL),			\
94 	S(PR_SWAP_START),			\
95 	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
96 	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
97 	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
98 	S(PR_SWAP_SRC_SNK_SINK_ON),		\
99 	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
100 	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
101 	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
102 						\
103 	S(VCONN_SWAP_ACCEPT),			\
104 	S(VCONN_SWAP_SEND),			\
105 	S(VCONN_SWAP_SEND_TIMEOUT),		\
106 	S(VCONN_SWAP_CANCEL),			\
107 	S(VCONN_SWAP_START),			\
108 	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
109 	S(VCONN_SWAP_TURN_ON_VCONN),		\
110 	S(VCONN_SWAP_TURN_OFF_VCONN),		\
111 	S(VCONN_SWAP_SEND_SOFT_RESET),		\
112 						\
113 	S(FR_SWAP_SEND),			\
114 	S(FR_SWAP_SEND_TIMEOUT),		\
115 	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
116 	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
117 	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
118 	S(FR_SWAP_CANCEL),			\
119 						\
120 	S(SNK_TRY),				\
121 	S(SNK_TRY_WAIT),			\
122 	S(SNK_TRY_WAIT_DEBOUNCE),               \
123 	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
124 	S(SRC_TRYWAIT),				\
125 	S(SRC_TRYWAIT_DEBOUNCE),		\
126 	S(SRC_TRYWAIT_UNATTACHED),		\
127 						\
128 	S(SRC_TRY),				\
129 	S(SRC_TRY_WAIT),                        \
130 	S(SRC_TRY_DEBOUNCE),			\
131 	S(SNK_TRYWAIT),				\
132 	S(SNK_TRYWAIT_DEBOUNCE),		\
133 	S(SNK_TRYWAIT_VBUS),			\
134 	S(BIST_RX),				\
135 						\
136 	S(GET_STATUS_SEND),			\
137 	S(GET_STATUS_SEND_TIMEOUT),		\
138 	S(GET_PPS_STATUS_SEND),			\
139 	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
140 						\
141 	S(GET_SINK_CAP),			\
142 	S(GET_SINK_CAP_TIMEOUT),		\
143 						\
144 	S(ERROR_RECOVERY),			\
145 	S(PORT_RESET),				\
146 	S(PORT_RESET_WAIT_OFF),			\
147 						\
148 	S(AMS_START),				\
149 	S(CHUNK_NOT_SUPP),			\
150 						\
151 	S(SRC_VDM_IDENTITY_REQUEST)
152 
153 #define FOREACH_AMS(S)				\
154 	S(NONE_AMS),				\
155 	S(POWER_NEGOTIATION),			\
156 	S(GOTOMIN),				\
157 	S(SOFT_RESET_AMS),			\
158 	S(HARD_RESET),				\
159 	S(CABLE_RESET),				\
160 	S(GET_SOURCE_CAPABILITIES),		\
161 	S(GET_SINK_CAPABILITIES),		\
162 	S(POWER_ROLE_SWAP),			\
163 	S(FAST_ROLE_SWAP),			\
164 	S(DATA_ROLE_SWAP),			\
165 	S(VCONN_SWAP),				\
166 	S(SOURCE_ALERT),			\
167 	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
168 	S(GETTING_SOURCE_SINK_STATUS),		\
169 	S(GETTING_BATTERY_CAPABILITIES),	\
170 	S(GETTING_BATTERY_STATUS),		\
171 	S(GETTING_MANUFACTURER_INFORMATION),	\
172 	S(SECURITY),				\
173 	S(FIRMWARE_UPDATE),			\
174 	S(DISCOVER_IDENTITY),			\
175 	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
176 	S(DISCOVER_SVIDS),			\
177 	S(DISCOVER_MODES),			\
178 	S(DFP_TO_UFP_ENTER_MODE),		\
179 	S(DFP_TO_UFP_EXIT_MODE),		\
180 	S(DFP_TO_CABLE_PLUG_ENTER_MODE),	\
181 	S(DFP_TO_CABLE_PLUG_EXIT_MODE),		\
182 	S(ATTENTION),				\
183 	S(BIST),				\
184 	S(UNSTRUCTURED_VDMS),			\
185 	S(STRUCTURED_VDMS),			\
186 	S(COUNTRY_INFO),			\
187 	S(COUNTRY_CODES)
188 
189 #define GENERATE_ENUM(e)	e
190 #define GENERATE_STRING(s)	#s
191 
192 enum tcpm_state {
193 	FOREACH_STATE(GENERATE_ENUM)
194 };
195 
196 static const char * const tcpm_states[] = {
197 	FOREACH_STATE(GENERATE_STRING)
198 };
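
/*
 * Illustrative expansion (not part of the build): FOREACH_STATE(GENERATE_ENUM)
 * expands to INVALID_STATE, TOGGLING, CHECK_CONTAMINANT, ..., while
 * FOREACH_STATE(GENERATE_STRING) expands to "INVALID_STATE", "TOGGLING",
 * "CHECK_CONTAMINANT", ..., so tcpm_states[] can be indexed by enum tcpm_state.
 * The same X-macro pattern is used below for tcpm_ams and tcpm_ams_str[].
 */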
199 
200 enum tcpm_ams {
201 	FOREACH_AMS(GENERATE_ENUM)
202 };
203 
204 static const char * const tcpm_ams_str[] = {
205 	FOREACH_AMS(GENERATE_STRING)
206 };
207 
208 enum vdm_states {
209 	VDM_STATE_ERR_BUSY = -3,
210 	VDM_STATE_ERR_SEND = -2,
211 	VDM_STATE_ERR_TMOUT = -1,
212 	VDM_STATE_DONE = 0,
213 	/* Anything >0 represents an active state */
214 	VDM_STATE_READY = 1,
215 	VDM_STATE_BUSY = 2,
216 	VDM_STATE_WAIT_RSP_BUSY = 3,
217 	VDM_STATE_SEND_MESSAGE = 4,
218 };
219 
220 enum pd_msg_request {
221 	PD_MSG_NONE = 0,
222 	PD_MSG_CTRL_REJECT,
223 	PD_MSG_CTRL_WAIT,
224 	PD_MSG_CTRL_NOT_SUPP,
225 	PD_MSG_DATA_SINK_CAP,
226 	PD_MSG_DATA_SOURCE_CAP,
227 };
228 
229 enum adev_actions {
230 	ADEV_NONE = 0,
231 	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
232 	ADEV_QUEUE_VDM,
233 	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
234 	ADEV_ATTENTION,
235 };
236 
237 /*
238  * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
239  * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
240  * Version 1.2"
241  */
242 enum frs_typec_current {
243 	FRS_NOT_SUPPORTED,
244 	FRS_DEFAULT_POWER,
245 	FRS_5V_1P5A,
246 	FRS_5V_3A,
247 };
248 
249 /* Events from low level driver */
250 
251 #define TCPM_CC_EVENT		BIT(0)
252 #define TCPM_VBUS_EVENT		BIT(1)
253 #define TCPM_RESET_EVENT	BIT(2)
254 #define TCPM_FRS_EVENT		BIT(3)
255 #define TCPM_SOURCING_VBUS	BIT(4)
256 #define TCPM_PORT_CLEAN		BIT(5)
257 #define TCPM_PORT_ERROR		BIT(6)
258 
259 #define LOG_BUFFER_ENTRIES	1024
260 #define LOG_BUFFER_ENTRY_SIZE	128
261 
262 /* Alternate mode support */
263 
264 #define SVID_DISCOVERY_MAX	16
265 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
266 
267 #define GET_SINK_CAP_RETRY_MS	100
268 #define SEND_DISCOVER_RETRY_MS	100
269 
270 struct pd_mode_data {
271 	int svid_index;		/* current SVID index		*/
272 	int nsvids;
273 	u16 svids[SVID_DISCOVERY_MAX];
274 	int altmodes;		/* number of alternate modes	*/
275 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
276 };
277 
278 /*
279  * @min_volt: Actual min voltage at the local port
280  * @req_min_volt: Requested min voltage to the port partner
281  * @max_volt: Actual max voltage at the local port
282  * @req_max_volt: Requested max voltage to the port partner
283  * @max_curr: Actual max current at the local port
284  * @req_max_curr: Requested max current to the port partner
285  * @req_out_volt: Requested output voltage to the port partner
286  * @req_op_curr: Requested operating current to the port partner
287  * @supported: Partner has at least one APDO, hence supports PPS
288  * @active: PPS mode is active
289  */
290 struct pd_pps_data {
291 	u32 min_volt;
292 	u32 req_min_volt;
293 	u32 max_volt;
294 	u32 req_max_volt;
295 	u32 max_curr;
296 	u32 req_max_curr;
297 	u32 req_out_volt;
298 	u32 req_op_curr;
299 	bool supported;
300 	bool active;
301 };
302 
303 struct pd_data {
304 	struct usb_power_delivery *pd;
305 	struct usb_power_delivery_capabilities *source_cap;
306 	struct usb_power_delivery_capabilities_desc source_desc;
307 	struct usb_power_delivery_capabilities *sink_cap;
308 	struct usb_power_delivery_capabilities_desc sink_desc;
309 	unsigned int operating_snk_mw;
310 };
311 
312 struct tcpm_port {
313 	struct device *dev;
314 
315 	struct mutex lock;		/* tcpm state machine lock */
316 	struct kthread_worker *wq;
317 
318 	struct typec_capability typec_caps;
319 	struct typec_port *typec_port;
320 
321 	struct tcpc_dev	*tcpc;
322 	struct usb_role_switch *role_sw;
323 
324 	enum typec_role vconn_role;
325 	enum typec_role pwr_role;
326 	enum typec_data_role data_role;
327 	enum typec_pwr_opmode pwr_opmode;
328 
329 	struct usb_pd_identity partner_ident;
330 	struct typec_partner_desc partner_desc;
331 	struct typec_partner *partner;
332 
333 	struct usb_pd_identity cable_ident;
334 	struct typec_cable_desc cable_desc;
335 	struct typec_cable *cable;
336 	struct typec_plug_desc plug_prime_desc;
337 	struct typec_plug *plug_prime;
338 
339 	enum typec_cc_status cc_req;
340 	enum typec_cc_status src_rp;	/* used only if pd_supported == false */
341 
342 	enum typec_cc_status cc1;
343 	enum typec_cc_status cc2;
344 	enum typec_cc_polarity polarity;
345 
346 	bool attached;
347 	bool connected;
348 	bool registered;
349 	bool pd_supported;
350 	enum typec_port_type port_type;
351 
352 	/*
353 	 * Set to true when vbus is greater than VSAFE5V min.
354 	 * Set to false when vbus falls below vSinkDisconnect max threshold.
355 	 */
356 	bool vbus_present;
357 
358 	/*
359 	 * Set to true when vbus is less than VSAFE0V max.
360 	 * Set to false when vbus is greater than VSAFE0V max.
361 	 */
362 	bool vbus_vsafe0v;
363 
364 	bool vbus_never_low;
365 	bool vbus_source;
366 	bool vbus_charge;
367 
368 	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
369 	bool send_discover;
370 	bool op_vsafe5v;
371 
372 	int try_role;
373 	int try_snk_count;
374 	int try_src_count;
375 
376 	enum pd_msg_request queued_message;
377 
378 	enum tcpm_state enter_state;
379 	enum tcpm_state prev_state;
380 	enum tcpm_state state;
381 	enum tcpm_state delayed_state;
382 	ktime_t delayed_runtime;
383 	unsigned long delay_ms;
384 
385 	spinlock_t pd_event_lock;
386 	u32 pd_events;
387 
388 	struct kthread_work event_work;
389 	struct hrtimer state_machine_timer;
390 	struct kthread_work state_machine;
391 	struct hrtimer vdm_state_machine_timer;
392 	struct kthread_work vdm_state_machine;
393 	struct hrtimer enable_frs_timer;
394 	struct kthread_work enable_frs;
395 	struct hrtimer send_discover_timer;
396 	struct kthread_work send_discover_work;
397 	bool state_machine_running;
398 	/* Set to true while the VDM State Machine has follow-up actions to run. */
399 	bool vdm_sm_running;
400 
401 	struct completion tx_complete;
402 	enum tcpm_transmit_status tx_status;
403 
404 	struct mutex swap_lock;		/* swap command lock */
405 	bool swap_pending;
406 	bool non_pd_role_swap;
407 	struct completion swap_complete;
408 	int swap_status;
409 
410 	unsigned int negotiated_rev;
411 	unsigned int message_id;
412 	unsigned int caps_count;
413 	unsigned int hard_reset_count;
414 	bool pd_capable;
415 	bool explicit_contract;
416 	unsigned int rx_msgid;
417 
418 	/* USB PD objects */
419 	struct usb_power_delivery **pds;
420 	struct pd_data **pd_list;
421 	struct usb_power_delivery_capabilities *port_source_caps;
422 	struct usb_power_delivery_capabilities *port_sink_caps;
423 	struct usb_power_delivery *partner_pd;
424 	struct usb_power_delivery_capabilities *partner_source_caps;
425 	struct usb_power_delivery_capabilities *partner_sink_caps;
426 	struct usb_power_delivery *selected_pd;
427 
428 	/* Partner capabilities/requests */
429 	u32 sink_request;
430 	u32 source_caps[PDO_MAX_OBJECTS];
431 	unsigned int nr_source_caps;
432 	u32 sink_caps[PDO_MAX_OBJECTS];
433 	unsigned int nr_sink_caps;
434 
435 	/* Local capabilities */
436 	unsigned int pd_count;
437 	u32 src_pdo[PDO_MAX_OBJECTS];
438 	unsigned int nr_src_pdo;
439 	u32 snk_pdo[PDO_MAX_OBJECTS];
440 	unsigned int nr_snk_pdo;
441 	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
442 	unsigned int nr_snk_vdo_v1;
443 	u32 snk_vdo[VDO_MAX_OBJECTS];
444 	unsigned int nr_snk_vdo;
445 
446 	unsigned int operating_snk_mw;
447 	bool update_sink_caps;
448 
449 	/* Requested current / voltage to the port partner */
450 	u32 req_current_limit;
451 	u32 req_supply_voltage;
452 	/* Actual current / voltage limit of the local port */
453 	u32 current_limit;
454 	u32 supply_voltage;
455 
456 	/* Used to export TA voltage and current */
457 	struct power_supply *psy;
458 	struct power_supply_desc psy_desc;
459 	enum power_supply_usb_type usb_type;
460 
461 	u32 bist_request;
462 
463 	/* PD state for Vendor Defined Messages */
464 	enum vdm_states vdm_state;
465 	u32 vdm_retries;
466 	/* next Vendor Defined Message to send */
467 	u32 vdo_data[VDO_MAX_SIZE];
468 	u8 vdo_count;
469 	/* VDO to retry if UFP responder replied busy */
470 	u32 vdo_retry;
471 
472 	/* PPS */
473 	struct pd_pps_data pps_data;
474 	struct completion pps_complete;
475 	bool pps_pending;
476 	int pps_status;
477 
478 	/* Alternate mode data */
479 	struct pd_mode_data mode_data;
480 	struct pd_mode_data mode_data_prime;
481 	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
482 	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
483 	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
484 
485 	/* Deadline in jiffies to exit src_try_wait state */
486 	unsigned long max_wait;
487 
488 	/* port belongs to a self powered device */
489 	bool self_powered;
490 
491 	/* Sink FRS */
492 	enum frs_typec_current new_source_frs_current;
493 
494 	/* Sink caps have been queried */
495 	bool sink_cap_done;
496 
497 	/* Collision Avoidance and Atomic Message Sequence */
498 	enum tcpm_state upcoming_state;
499 	enum tcpm_ams ams;
500 	enum tcpm_ams next_ams;
501 	bool in_ams;
502 
503 	/* Auto vbus discharge status */
504 	bool auto_vbus_discharge_enabled;
505 
506 	/*
507 	 * When set, port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
508 	 * the actual current limit after RX of PD_CTRL_PSRDY for PD link,
509 	 * SNK_READY for non-pd link.
510 	 */
511 	bool slow_charger_loop;
512 
513 	/*
514 	 * When true, the lower level driver has indicated potential presence of
515 	 * contaminant in the connector pins, based on tcpm state machine
516 	 * transitions.
517 	 */
518 	bool potential_contaminant;
519 
520 	/* SOP* Related Fields */
521 	/*
522 	 * Flag to determine if SOP' Discover Identity is available. The flag
523 	 * is set if Discover Identity on SOP' does not immediately follow
524 	 * Discover Identity on SOP.
525 	 */
526 	bool send_discover_prime;
527 	/*
528 	 * tx_sop_type determines which SOP* a message is being sent on.
529 	 * For messages that are queued rather than sent immediately, such as in
530 	 * tcpm_queue_message(), or messages that are sent after state changes,
531 	 * the tx_sop_type is set accordingly.
532 	 */
533 	enum tcpm_transmit_type tx_sop_type;
534 	/*
535 	 * Prior to discovering the port partner's Specification Revision, the
536 	 * Vconn source and cable plug will use the lower of their two revisions.
537 	 *
538 	 * When the port partner's Specification Revision is discovered, the following
539 	 * rules are put in place.
540 	 *	1. If the cable revision (1) is lower than the revision negotiated
541 	 * between the port and partner (2), the port and partner will communicate
542 	 * on revision (2), but the port and cable will communicate on revision (1).
543 	 *	2. If the cable revision (1) is higher than the revision negotiated
544 	 * between the port and partner (2), the port and partner will communicate
545 	 * on revision (2), and the port and cable will communicate on revision (2)
546 	 * as well.
547 	 */
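	/*
	 * Illustrative example (values assumed for illustration): a cable plug
	 * reporting rev 2.0 under a rev 3.0 port-partner contract keeps SOP
	 * traffic on rev 3.0 and SOP' traffic on rev 2.0 (rule 1); a rev 3.0
	 * cable under a rev 2.0 contract is driven at rev 2.0 on both (rule 2).
	 */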
548 	unsigned int negotiated_rev_prime;
549 	/*
550 	 * Each SOP* type must maintain its own tx and rx message IDs
551 	 */
552 	unsigned int message_id_prime;
553 	unsigned int rx_msgid_prime;
554 #ifdef CONFIG_DEBUG_FS
555 	struct dentry *dentry;
556 	struct mutex logbuffer_lock;	/* log buffer access lock */
557 	int logbuffer_head;
558 	int logbuffer_tail;
559 	u8 *logbuffer[LOG_BUFFER_ENTRIES];
560 #endif
561 };
562 
563 struct pd_rx_event {
564 	struct kthread_work work;
565 	struct tcpm_port *port;
566 	struct pd_message msg;
567 	enum tcpm_transmit_type rx_sop_type;
568 };
569 
570 static const char * const pd_rev[] = {
571 	[PD_REV10]		= "rev1",
572 	[PD_REV20]		= "rev2",
573 	[PD_REV30]		= "rev3",
574 };
575 
576 #define tcpm_cc_is_sink(cc) \
577 	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
578 	 (cc) == TYPEC_CC_RP_3_0)
579 
580 /* As long as cc is pulled up, we can consider it as a sink. */
581 #define tcpm_port_is_sink(port) \
582 	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
583 
584 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
585 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
586 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
587 
588 #define tcpm_port_is_source(port) \
589 	((tcpm_cc_is_source((port)->cc1) && \
590 	 !tcpm_cc_is_source((port)->cc2)) || \
591 	 (tcpm_cc_is_source((port)->cc2) && \
592 	  !tcpm_cc_is_source((port)->cc1)))
593 
594 #define tcpm_port_is_debug(port) \
595 	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
596 
597 #define tcpm_port_is_audio(port) \
598 	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
599 
600 #define tcpm_port_is_audio_detached(port) \
601 	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
602 	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
603 
604 #define tcpm_try_snk(port) \
605 	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
606 	(port)->port_type == TYPEC_PORT_DRP)
607 
608 #define tcpm_try_src(port) \
609 	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
610 	(port)->port_type == TYPEC_PORT_DRP)
611 
612 #define tcpm_data_role_for_source(port) \
613 	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
614 	TYPEC_DEVICE : TYPEC_HOST)
615 
616 #define tcpm_data_role_for_sink(port) \
617 	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
618 	TYPEC_HOST : TYPEC_DEVICE)
619 
620 #define tcpm_sink_tx_ok(port) \
621 	(tcpm_port_is_sink(port) && \
622 	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
623 
624 #define tcpm_wait_for_discharge(port) \
625 	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
626 
627 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
628 {
629 	if (port->port_type == TYPEC_PORT_DRP) {
630 		if (port->try_role == TYPEC_SINK)
631 			return SNK_UNATTACHED;
632 		else if (port->try_role == TYPEC_SOURCE)
633 			return SRC_UNATTACHED;
634 		/* Fall through to return SRC_UNATTACHED */
635 	} else if (port->port_type == TYPEC_PORT_SNK) {
636 		return SNK_UNATTACHED;
637 	}
638 	return SRC_UNATTACHED;
639 }
640 
641 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
642 {
643 	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
644 		port->cc2 == TYPEC_CC_OPEN) ||
645 	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
646 				    port->cc1 == TYPEC_CC_OPEN) ||
647 				   (port->polarity == TYPEC_POLARITY_CC2 &&
648 				    port->cc2 == TYPEC_CC_OPEN)));
649 }
650 
651 /*
652  * Logging
653  */
654 
655 #ifdef CONFIG_DEBUG_FS
656 
657 static bool tcpm_log_full(struct tcpm_port *port)
658 {
659 	return port->logbuffer_tail ==
660 		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
661 }
662 
663 __printf(2, 0)
664 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
665 {
666 	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
667 	u64 ts_nsec = local_clock();
668 	unsigned long rem_nsec;
669 
670 	mutex_lock(&port->logbuffer_lock);
671 	if (!port->logbuffer[port->logbuffer_head]) {
672 		port->logbuffer[port->logbuffer_head] =
673 				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
674 		if (!port->logbuffer[port->logbuffer_head]) {
675 			mutex_unlock(&port->logbuffer_lock);
676 			return;
677 		}
678 	}
679 
680 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
681 
682 	if (tcpm_log_full(port)) {
683 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
684 		strcpy(tmpbuffer, "overflow");
685 	}
686 
687 	if (port->logbuffer_head < 0 ||
688 	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
689 		dev_warn(port->dev,
690 			 "Bad log buffer index %d\n", port->logbuffer_head);
691 		goto abort;
692 	}
693 
694 	if (!port->logbuffer[port->logbuffer_head]) {
695 		dev_warn(port->dev,
696 			 "Log buffer index %d is NULL\n", port->logbuffer_head);
697 		goto abort;
698 	}
699 
700 	rem_nsec = do_div(ts_nsec, 1000000000);
701 	scnprintf(port->logbuffer[port->logbuffer_head],
702 		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
703 		  (unsigned long)ts_nsec, rem_nsec / 1000,
704 		  tmpbuffer);
705 	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
706 
707 abort:
708 	mutex_unlock(&port->logbuffer_lock);
709 }
710 
711 __printf(2, 3)
712 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
713 {
714 	va_list args;
715 
716 	/* Do not log while disconnected and unattached */
717 	if (tcpm_port_is_disconnected(port) &&
718 	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
719 	     port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
720 		return;
721 
722 	va_start(args, fmt);
723 	_tcpm_log(port, fmt, args);
724 	va_end(args);
725 }
726 
727 __printf(2, 3)
728 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
729 {
730 	va_list args;
731 
732 	va_start(args, fmt);
733 	_tcpm_log(port, fmt, args);
734 	va_end(args);
735 }
736 
737 static void tcpm_log_source_caps(struct tcpm_port *port)
738 {
739 	int i;
740 
741 	for (i = 0; i < port->nr_source_caps; i++) {
742 		u32 pdo = port->source_caps[i];
743 		enum pd_pdo_type type = pdo_type(pdo);
744 		char msg[64];
745 
746 		switch (type) {
747 		case PDO_TYPE_FIXED:
748 			scnprintf(msg, sizeof(msg),
749 				  "%u mV, %u mA [%s%s%s%s%s%s]",
750 				  pdo_fixed_voltage(pdo),
751 				  pdo_max_current(pdo),
752 				  (pdo & PDO_FIXED_DUAL_ROLE) ?
753 							"R" : "",
754 				  (pdo & PDO_FIXED_SUSPEND) ?
755 							"S" : "",
756 				  (pdo & PDO_FIXED_HIGHER_CAP) ?
757 							"H" : "",
758 				  (pdo & PDO_FIXED_USB_COMM) ?
759 							"U" : "",
760 				  (pdo & PDO_FIXED_DATA_SWAP) ?
761 							"D" : "",
762 				  (pdo & PDO_FIXED_EXTPOWER) ?
763 							"E" : "");
764 			break;
765 		case PDO_TYPE_VAR:
766 			scnprintf(msg, sizeof(msg),
767 				  "%u-%u mV, %u mA",
768 				  pdo_min_voltage(pdo),
769 				  pdo_max_voltage(pdo),
770 				  pdo_max_current(pdo));
771 			break;
772 		case PDO_TYPE_BATT:
773 			scnprintf(msg, sizeof(msg),
774 				  "%u-%u mV, %u mW",
775 				  pdo_min_voltage(pdo),
776 				  pdo_max_voltage(pdo),
777 				  pdo_max_power(pdo));
778 			break;
779 		case PDO_TYPE_APDO:
780 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
781 				scnprintf(msg, sizeof(msg),
782 					  "%u-%u mV, %u mA",
783 					  pdo_pps_apdo_min_voltage(pdo),
784 					  pdo_pps_apdo_max_voltage(pdo),
785 					  pdo_pps_apdo_max_current(pdo));
786 			else
787 				strcpy(msg, "undefined APDO");
788 			break;
789 		default:
790 			strcpy(msg, "undefined");
791 			break;
792 		}
793 		tcpm_log(port, " PDO %d: type %d, %s",
794 			 i, type, msg);
795 	}
796 }
797 
798 static int tcpm_debug_show(struct seq_file *s, void *v)
799 {
800 	struct tcpm_port *port = s->private;
801 	int tail;
802 
803 	mutex_lock(&port->logbuffer_lock);
804 	tail = port->logbuffer_tail;
805 	while (tail != port->logbuffer_head) {
806 		seq_printf(s, "%s\n", port->logbuffer[tail]);
807 		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
808 	}
809 	if (!seq_has_overflowed(s))
810 		port->logbuffer_tail = tail;
811 	mutex_unlock(&port->logbuffer_lock);
812 
813 	return 0;
814 }
815 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
816 
817 static void tcpm_debugfs_init(struct tcpm_port *port)
818 {
819 	char name[NAME_MAX];
820 
821 	mutex_init(&port->logbuffer_lock);
822 	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
823 	port->dentry = debugfs_create_dir(name, usb_debug_root);
824 	debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
825 			    &tcpm_debug_fops);
826 }
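
/*
 * Note (assuming debugfs is mounted at /sys/kernel/debug): the "log" file
 * created above is then typically readable at
 * /sys/kernel/debug/usb/tcpm-<dev_name>/log.
 */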
827 
828 static void tcpm_debugfs_exit(struct tcpm_port *port)
829 {
830 	int i;
831 
832 	mutex_lock(&port->logbuffer_lock);
833 	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
834 		kfree(port->logbuffer[i]);
835 		port->logbuffer[i] = NULL;
836 	}
837 	mutex_unlock(&port->logbuffer_lock);
838 
839 	debugfs_remove(port->dentry);
840 }
841 
842 #else
843 
844 __printf(2, 3)
845 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
846 __printf(2, 3)
847 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
848 static void tcpm_log_source_caps(struct tcpm_port *port) { }
849 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
850 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
851 
852 #endif
853 
854 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
855 {
856 	tcpm_log(port, "cc:=%d", cc);
857 	port->cc_req = cc;
858 	port->tcpc->set_cc(port->tcpc, cc);
859 }
860 
861 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
862 {
863 	int ret = 0;
864 
865 	if (port->tcpc->enable_auto_vbus_discharge) {
866 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
867 		tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
868 			       ret);
869 		if (!ret)
870 			port->auto_vbus_discharge_enabled = enable;
871 	}
872 
873 	return ret;
874 }
875 
876 static void tcpm_apply_rc(struct tcpm_port *port)
877 {
878 	/*
879 	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
880 	 * when Vbus auto discharge on disconnect is enabled.
881 	 */
882 	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
883 		tcpm_log(port, "Apply_RC");
884 		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
885 		tcpm_enable_auto_vbus_discharge(port, false);
886 	}
887 }
888 
889 /*
890  * Determine RP value to set based on maximum current supported
891  * by a port if configured as source.
892  * Returns CC value to report to link partner.
893  */
894 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
895 {
896 	const u32 *src_pdo = port->src_pdo;
897 	int nr_pdo = port->nr_src_pdo;
898 	int i;
899 
900 	if (!port->pd_supported)
901 		return port->src_rp;
902 
903 	/*
904 	 * Search for first entry with matching voltage.
905 	 * It should report the maximum supported current.
906 	 */
907 	for (i = 0; i < nr_pdo; i++) {
908 		const u32 pdo = src_pdo[i];
909 
910 		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
911 		    pdo_fixed_voltage(pdo) == 5000) {
912 			unsigned int curr = pdo_max_current(pdo);
913 
914 			if (curr >= 3000)
915 				return TYPEC_CC_RP_3_0;
916 			else if (curr >= 1500)
917 				return TYPEC_CC_RP_1_5;
918 			return TYPEC_CC_RP_DEF;
919 		}
920 	}
921 
922 	return TYPEC_CC_RP_DEF;
923 }
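
/*
 * Illustrative example: a first 5V fixed PDO advertising 3000 mA maps to
 * TYPEC_CC_RP_3_0, 1500-2999 mA to TYPEC_CC_RP_1_5, and anything lower (or no
 * matching 5V fixed PDO) falls back to TYPEC_CC_RP_DEF.
 */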
924 
925 static void tcpm_ams_finish(struct tcpm_port *port)
926 {
927 	tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
928 
929 	if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
930 		if (port->negotiated_rev >= PD_REV30)
931 			tcpm_set_cc(port, SINK_TX_OK);
932 		else
933 			tcpm_set_cc(port, SINK_TX_NG);
934 	} else if (port->pwr_role == TYPEC_SOURCE) {
935 		tcpm_set_cc(port, tcpm_rp_cc(port));
936 	}
937 
938 	port->in_ams = false;
939 	port->ams = NONE_AMS;
940 }
941 
942 static int tcpm_pd_transmit(struct tcpm_port *port,
943 			    enum tcpm_transmit_type tx_sop_type,
944 			    const struct pd_message *msg)
945 {
946 	unsigned long timeout;
947 	int ret;
948 	unsigned int negotiated_rev;
949 
950 	switch (tx_sop_type) {
951 	case TCPC_TX_SOP_PRIME:
952 		negotiated_rev = port->negotiated_rev_prime;
953 		break;
954 	case TCPC_TX_SOP:
955 	default:
956 		negotiated_rev = port->negotiated_rev;
957 		break;
958 	}
959 
960 	if (msg)
961 		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
962 	else
963 		tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
964 
965 	reinit_completion(&port->tx_complete);
966 	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
967 	if (ret < 0)
968 		return ret;
969 
970 	mutex_unlock(&port->lock);
971 	timeout = wait_for_completion_timeout(&port->tx_complete,
972 				msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
973 	mutex_lock(&port->lock);
974 	if (!timeout)
975 		return -ETIMEDOUT;
976 
977 	switch (port->tx_status) {
978 	case TCPC_TX_SUCCESS:
979 		switch (tx_sop_type) {
980 		case TCPC_TX_SOP_PRIME:
981 			port->message_id_prime = (port->message_id_prime + 1) &
982 						 PD_HEADER_ID_MASK;
983 			break;
984 		case TCPC_TX_SOP:
985 		default:
986 			port->message_id = (port->message_id + 1) &
987 					   PD_HEADER_ID_MASK;
988 			break;
989 		}
990 		/*
991 		 * USB PD rev 2.0, 8.3.2.2.1:
992 		 * USB PD rev 3.0, 8.3.2.1.3:
993 		 * "... Note that every AMS is Interruptible until the first
994 		 * Message in the sequence has been successfully sent (GoodCRC
995 		 * Message received)."
996 		 */
997 		if (port->ams != NONE_AMS)
998 			port->in_ams = true;
999 		break;
1000 	case TCPC_TX_DISCARDED:
1001 		ret = -EAGAIN;
1002 		break;
1003 	case TCPC_TX_FAILED:
1004 	default:
1005 		ret = -EIO;
1006 		break;
1007 	}
1008 
1009 	/* Some AMS don't expect responses. Finish them here. */
1010 	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
1011 		tcpm_ams_finish(port);
1012 
1013 	return ret;
1014 }
1015 
1016 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1017 			       enum tcpm_transmit_status status)
1018 {
1019 	tcpm_log(port, "PD TX complete, status: %u", status);
1020 	port->tx_status = status;
1021 	complete(&port->tx_complete);
1022 }
1023 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1024 
1025 static int tcpm_mux_set(struct tcpm_port *port, int state,
1026 			enum usb_role usb_role,
1027 			enum typec_orientation orientation)
1028 {
1029 	int ret;
1030 
1031 	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1032 		 state, usb_role, orientation);
1033 
1034 	ret = typec_set_orientation(port->typec_port, orientation);
1035 	if (ret)
1036 		return ret;
1037 
1038 	if (port->role_sw) {
1039 		ret = usb_role_switch_set_role(port->role_sw, usb_role);
1040 		if (ret)
1041 			return ret;
1042 	}
1043 
1044 	return typec_set_mode(port->typec_port, state);
1045 }
1046 
1047 static int tcpm_set_polarity(struct tcpm_port *port,
1048 			     enum typec_cc_polarity polarity)
1049 {
1050 	int ret;
1051 
1052 	tcpm_log(port, "polarity %d", polarity);
1053 
1054 	ret = port->tcpc->set_polarity(port->tcpc, polarity);
1055 	if (ret < 0)
1056 		return ret;
1057 
1058 	port->polarity = polarity;
1059 
1060 	return 0;
1061 }
1062 
1063 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
1064 {
1065 	int ret;
1066 
1067 	tcpm_log(port, "vconn:=%d", enable);
1068 
1069 	ret = port->tcpc->set_vconn(port->tcpc, enable);
1070 	if (!ret) {
1071 		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1072 		typec_set_vconn_role(port->typec_port, port->vconn_role);
1073 	}
1074 
1075 	return ret;
1076 }
1077 
1078 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1079 {
1080 	enum typec_cc_status cc;
1081 	u32 limit;
1082 
1083 	cc = port->polarity ? port->cc2 : port->cc1;
1084 	switch (cc) {
1085 	case TYPEC_CC_RP_1_5:
1086 		limit = 1500;
1087 		break;
1088 	case TYPEC_CC_RP_3_0:
1089 		limit = 3000;
1090 		break;
1091 	case TYPEC_CC_RP_DEF:
1092 	default:
1093 		if (port->tcpc->get_current_limit)
1094 			limit = port->tcpc->get_current_limit(port->tcpc);
1095 		else
1096 			limit = 0;
1097 		break;
1098 	}
1099 
1100 	return limit;
1101 }
1102 
1103 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1104 {
1105 	int ret = -EOPNOTSUPP;
1106 
1107 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1108 
1109 	port->supply_voltage = mv;
1110 	port->current_limit = max_ma;
1111 	power_supply_changed(port->psy);
1112 
1113 	if (port->tcpc->set_current_limit)
1114 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1115 
1116 	return ret;
1117 }
1118 
1119 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1120 {
1121 	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1122 				     port->data_role);
1123 }
1124 
1125 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1126 			  enum typec_role role, enum typec_data_role data)
1127 {
1128 	enum typec_orientation orientation;
1129 	enum usb_role usb_role;
1130 	int ret;
1131 
1132 	if (port->polarity == TYPEC_POLARITY_CC1)
1133 		orientation = TYPEC_ORIENTATION_NORMAL;
1134 	else
1135 		orientation = TYPEC_ORIENTATION_REVERSE;
1136 
1137 	if (port->typec_caps.data == TYPEC_PORT_DRD) {
1138 		if (data == TYPEC_HOST)
1139 			usb_role = USB_ROLE_HOST;
1140 		else
1141 			usb_role = USB_ROLE_DEVICE;
1142 	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1143 		if (data == TYPEC_HOST) {
1144 			if (role == TYPEC_SOURCE)
1145 				usb_role = USB_ROLE_HOST;
1146 			else
1147 				usb_role = USB_ROLE_NONE;
1148 		} else {
1149 			return -ENOTSUPP;
1150 		}
1151 	} else {
1152 		if (data == TYPEC_DEVICE) {
1153 			if (role == TYPEC_SINK)
1154 				usb_role = USB_ROLE_DEVICE;
1155 			else
1156 				usb_role = USB_ROLE_NONE;
1157 		} else {
1158 			return -ENOTSUPP;
1159 		}
1160 	}
1161 
1162 	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1163 	if (ret < 0)
1164 		return ret;
1165 
1166 	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1167 	if (ret < 0)
1168 		return ret;
1169 
1170 	if (port->tcpc->set_orientation) {
1171 		ret = port->tcpc->set_orientation(port->tcpc, orientation);
1172 		if (ret < 0)
1173 			return ret;
1174 	}
1175 
1176 	port->pwr_role = role;
1177 	port->data_role = data;
1178 	typec_set_data_role(port->typec_port, data);
1179 	typec_set_pwr_role(port->typec_port, role);
1180 
1181 	return 0;
1182 }
1183 
1184 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1185 {
1186 	int ret;
1187 
1188 	ret = port->tcpc->set_roles(port->tcpc, true, role,
1189 				    port->data_role);
1190 	if (ret < 0)
1191 		return ret;
1192 
1193 	port->pwr_role = role;
1194 	typec_set_pwr_role(port->typec_port, role);
1195 
1196 	return 0;
1197 }
1198 
1199 /*
1200  * Transform the PDO to be compliant to PD rev2.0.
1201  * Return 0 if the PDO type is not defined in PD rev2.0.
1202  * Otherwise, return the converted PDO.
1203  */
1204 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1205 {
1206 	switch (pdo_type(pdo)) {
1207 	case PDO_TYPE_FIXED:
1208 		if (role == TYPEC_SINK)
1209 			return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1210 		else
1211 			return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1212 	case PDO_TYPE_VAR:
1213 	case PDO_TYPE_BATT:
1214 		return pdo;
1215 	case PDO_TYPE_APDO:
1216 	default:
1217 		return 0;
1218 	}
1219 }
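
/*
 * For example (illustrative): when building caps for a rev 2.0 partner, a
 * fixed sink PDO has its rev 3.0 FRS-required-current field
 * (PDO_FIXED_FRS_CURR_MASK) cleared, a fixed source PDO has
 * PDO_FIXED_UNCHUNK_EXT cleared, and an APDO (PPS) is dropped by returning 0.
 */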
1220 
1221 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1222 {
1223 	struct pd_message msg;
1224 	u32 pdo;
1225 	unsigned int i, nr_pdo = 0;
1226 
1227 	memset(&msg, 0, sizeof(msg));
1228 
1229 	for (i = 0; i < port->nr_src_pdo; i++) {
1230 		if (port->negotiated_rev >= PD_REV30) {
1231 			msg.payload[nr_pdo++] =	cpu_to_le32(port->src_pdo[i]);
1232 		} else {
1233 			pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1234 			if (pdo)
1235 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1236 		}
1237 	}
1238 
1239 	if (!nr_pdo) {
1240 		/* No source capabilities defined, sink only */
1241 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1242 					  port->pwr_role,
1243 					  port->data_role,
1244 					  port->negotiated_rev,
1245 					  port->message_id, 0);
1246 	} else {
1247 		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1248 					  port->pwr_role,
1249 					  port->data_role,
1250 					  port->negotiated_rev,
1251 					  port->message_id,
1252 					  nr_pdo);
1253 	}
1254 
1255 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1256 }
1257 
1258 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1259 {
1260 	struct pd_message msg;
1261 	u32 pdo;
1262 	unsigned int i, nr_pdo = 0;
1263 
1264 	memset(&msg, 0, sizeof(msg));
1265 
1266 	for (i = 0; i < port->nr_snk_pdo; i++) {
1267 		if (port->negotiated_rev >= PD_REV30) {
1268 			msg.payload[nr_pdo++] =	cpu_to_le32(port->snk_pdo[i]);
1269 		} else {
1270 			pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1271 			if (pdo)
1272 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1273 		}
1274 	}
1275 
1276 	if (!nr_pdo) {
1277 		/* No sink capabilities defined, source only */
1278 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1279 					  port->pwr_role,
1280 					  port->data_role,
1281 					  port->negotiated_rev,
1282 					  port->message_id, 0);
1283 	} else {
1284 		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1285 					  port->pwr_role,
1286 					  port->data_role,
1287 					  port->negotiated_rev,
1288 					  port->message_id,
1289 					  nr_pdo);
1290 	}
1291 
1292 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1293 }
1294 
1295 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1296 {
1297 	if (delay_ms) {
1298 		hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1299 	} else {
1300 		hrtimer_cancel(&port->state_machine_timer);
1301 		kthread_queue_work(port->wq, &port->state_machine);
1302 	}
1303 }
1304 
1305 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1306 {
1307 	if (delay_ms) {
1308 		hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1309 			      HRTIMER_MODE_REL);
1310 	} else {
1311 		hrtimer_cancel(&port->vdm_state_machine_timer);
1312 		kthread_queue_work(port->wq, &port->vdm_state_machine);
1313 	}
1314 }
1315 
1316 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1317 {
1318 	if (delay_ms) {
1319 		hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1320 	} else {
1321 		hrtimer_cancel(&port->enable_frs_timer);
1322 		kthread_queue_work(port->wq, &port->enable_frs);
1323 	}
1324 }
1325 
1326 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1327 {
1328 	if (delay_ms) {
1329 		hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1330 	} else {
1331 		hrtimer_cancel(&port->send_discover_timer);
1332 		kthread_queue_work(port->wq, &port->send_discover_work);
1333 	}
1334 }
1335 
1336 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1337 			   unsigned int delay_ms)
1338 {
1339 	if (delay_ms) {
1340 		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1341 			 tcpm_states[port->state], tcpm_states[state], delay_ms,
1342 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1343 		port->delayed_state = state;
1344 		mod_tcpm_delayed_work(port, delay_ms);
1345 		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1346 		port->delay_ms = delay_ms;
1347 	} else {
1348 		tcpm_log(port, "state change %s -> %s [%s %s]",
1349 			 tcpm_states[port->state], tcpm_states[state],
1350 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1351 		port->delayed_state = INVALID_STATE;
1352 		port->prev_state = port->state;
1353 		port->state = state;
1354 		/*
1355 		 * Don't re-queue the state machine work item if we're currently
1356 		 * in the state machine and we're immediately changing states.
1357 		 * tcpm_state_machine_work() will continue running the state
1358 		 * machine.
1359 		 */
1360 		if (!port->state_machine_running)
1361 			mod_tcpm_delayed_work(port, 0);
1362 	}
1363 }
1364 
1365 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1366 				unsigned int delay_ms)
1367 {
1368 	if (port->enter_state == port->state)
1369 		tcpm_set_state(port, state, delay_ms);
1370 	else
1371 		tcpm_log(port,
1372 			 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1373 			 delay_ms ? "delayed " : "",
1374 			 tcpm_states[port->state], tcpm_states[state],
1375 			 delay_ms, tcpm_states[port->enter_state],
1376 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1377 }
1378 
1379 static void tcpm_queue_message(struct tcpm_port *port,
1380 			       enum pd_msg_request message)
1381 {
1382 	port->queued_message = message;
1383 	mod_tcpm_delayed_work(port, 0);
1384 }
1385 
1386 static bool tcpm_vdm_ams(struct tcpm_port *port)
1387 {
1388 	switch (port->ams) {
1389 	case DISCOVER_IDENTITY:
1390 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1391 	case DISCOVER_SVIDS:
1392 	case DISCOVER_MODES:
1393 	case DFP_TO_UFP_ENTER_MODE:
1394 	case DFP_TO_UFP_EXIT_MODE:
1395 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1396 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1397 	case ATTENTION:
1398 	case UNSTRUCTURED_VDMS:
1399 	case STRUCTURED_VDMS:
1400 		break;
1401 	default:
1402 		return false;
1403 	}
1404 
1405 	return true;
1406 }
1407 
1408 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1409 {
1410 	switch (port->ams) {
1411 	/* Interruptible AMS */
1412 	case NONE_AMS:
1413 	case SECURITY:
1414 	case FIRMWARE_UPDATE:
1415 	case DISCOVER_IDENTITY:
1416 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1417 	case DISCOVER_SVIDS:
1418 	case DISCOVER_MODES:
1419 	case DFP_TO_UFP_ENTER_MODE:
1420 	case DFP_TO_UFP_EXIT_MODE:
1421 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1422 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1423 	case UNSTRUCTURED_VDMS:
1424 	case STRUCTURED_VDMS:
1425 	case COUNTRY_INFO:
1426 	case COUNTRY_CODES:
1427 		break;
1428 	/* Non-Interruptible AMS */
1429 	default:
1430 		if (port->in_ams)
1431 			return false;
1432 		break;
1433 	}
1434 
1435 	return true;
1436 }
1437 
1438 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1439 {
1440 	int ret = 0;
1441 
1442 	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1443 
1444 	if (!tcpm_ams_interruptible(port) &&
1445 	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1446 		port->upcoming_state = INVALID_STATE;
1447 		tcpm_log(port, "AMS %s not interruptible, aborting",
1448 			 tcpm_ams_str[port->ams]);
1449 		return -EAGAIN;
1450 	}
1451 
1452 	if (port->pwr_role == TYPEC_SOURCE) {
1453 		enum typec_cc_status cc_req = port->cc_req;
1454 
1455 		port->ams = ams;
1456 
1457 		if (ams == HARD_RESET) {
1458 			tcpm_set_cc(port, tcpm_rp_cc(port));
1459 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1460 			tcpm_set_state(port, HARD_RESET_START, 0);
1461 			return ret;
1462 		} else if (ams == SOFT_RESET_AMS) {
1463 			if (!port->explicit_contract)
1464 				tcpm_set_cc(port, tcpm_rp_cc(port));
1465 			tcpm_set_state(port, SOFT_RESET_SEND, 0);
1466 			return ret;
1467 		} else if (tcpm_vdm_ams(port)) {
1468 			/* tSinkTx is enforced in vdm_run_state_machine */
1469 			if (port->negotiated_rev >= PD_REV30)
1470 				tcpm_set_cc(port, SINK_TX_NG);
1471 			return ret;
1472 		}
1473 
1474 		if (port->negotiated_rev >= PD_REV30)
1475 			tcpm_set_cc(port, SINK_TX_NG);
1476 
1477 		switch (port->state) {
1478 		case SRC_READY:
1479 		case SRC_STARTUP:
1480 		case SRC_SOFT_RESET_WAIT_SNK_TX:
1481 		case SOFT_RESET:
1482 		case SOFT_RESET_SEND:
1483 			if (port->negotiated_rev >= PD_REV30)
1484 				tcpm_set_state(port, AMS_START,
1485 					       cc_req == SINK_TX_OK ?
1486 					       PD_T_SINK_TX : 0);
1487 			else
1488 				tcpm_set_state(port, AMS_START, 0);
1489 			break;
1490 		default:
1491 			if (port->negotiated_rev >= PD_REV30)
1492 				tcpm_set_state(port, SRC_READY,
1493 					       cc_req == SINK_TX_OK ?
1494 					       PD_T_SINK_TX : 0);
1495 			else
1496 				tcpm_set_state(port, SRC_READY, 0);
1497 			break;
1498 		}
1499 	} else {
1500 		if (port->negotiated_rev >= PD_REV30 &&
1501 		    !tcpm_sink_tx_ok(port) &&
1502 		    ams != SOFT_RESET_AMS &&
1503 		    ams != HARD_RESET) {
1504 			port->upcoming_state = INVALID_STATE;
1505 			tcpm_log(port, "Sink TX No Go");
1506 			return -EAGAIN;
1507 		}
1508 
1509 		port->ams = ams;
1510 
1511 		if (ams == HARD_RESET) {
1512 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1513 			tcpm_set_state(port, HARD_RESET_START, 0);
1514 			return ret;
1515 		} else if (tcpm_vdm_ams(port)) {
1516 			return ret;
1517 		}
1518 
1519 		if (port->state == SNK_READY ||
1520 		    port->state == SNK_SOFT_RESET)
1521 			tcpm_set_state(port, AMS_START, 0);
1522 		else
1523 			tcpm_set_state(port, SNK_READY, 0);
1524 	}
1525 
1526 	return ret;
1527 }
1528 
1529 /*
1530  * VDM/VDO handling functions
1531  */
1532 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1533 			   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1534 {
1535 	u32 vdo_hdr = port->vdo_data[0];
1536 
1537 	WARN_ON(!mutex_is_locked(&port->lock));
1538 
1539 	/* If a discover_identity is being sent, handle the received message first */
1540 	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1541 		if (tx_sop_type == TCPC_TX_SOP_PRIME)
1542 			port->send_discover_prime = true;
1543 		else
1544 			port->send_discover = true;
1545 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1546 	} else {
1547 		/* Make sure we are not still processing a previous VDM packet */
1548 		WARN_ON(port->vdm_state > VDM_STATE_DONE);
1549 	}
1550 
1551 	port->vdo_count = cnt + 1;
1552 	port->vdo_data[0] = header;
1553 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1554 	/* Set ready, vdm state machine will actually send */
1555 	port->vdm_retries = 0;
1556 	port->vdm_state = VDM_STATE_READY;
1557 	port->vdm_sm_running = true;
1558 
1559 	port->tx_sop_type = tx_sop_type;
1560 
1561 	mod_vdm_delayed_work(port, 0);
1562 }
1563 
1564 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1565 				    const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1566 {
1567 	if (port->state != SRC_READY && port->state != SNK_READY &&
1568 	    port->state != SRC_VDM_IDENTITY_REQUEST)
1569 		return;
1570 
1571 	mutex_lock(&port->lock);
1572 	tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
1573 	mutex_unlock(&port->lock);
1574 }
1575 
1576 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1577 {
1578 	u32 vdo = p[VDO_INDEX_IDH];
1579 	u32 product = p[VDO_INDEX_PRODUCT];
1580 
1581 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1582 
1583 	port->partner_ident.id_header = vdo;
1584 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1585 	port->partner_ident.product = product;
1586 
1587 	if (port->partner)
1588 		typec_partner_set_identity(port->partner);
1589 
1590 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1591 		 PD_IDH_VID(vdo),
1592 		 PD_PRODUCT_PID(product), product & 0xffff);
1593 }
1594 
1595 static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
1596 {
1597 	u32 idh = p[VDO_INDEX_IDH];
1598 	u32 product = p[VDO_INDEX_PRODUCT];
1599 	int svdm_version;
1600 
1601 	/*
1602 	 * Attempt to consume identity only if the cable is not currently registered
1603 	 */
1604 	if (!IS_ERR_OR_NULL(port->cable))
1605 		goto register_plug;
1606 
1607 	/* Reset cable identity */
1608 	memset(&port->cable_ident, 0, sizeof(port->cable_ident));
1609 
1610 	/* Fill out id header, cert, product, cable VDO 1 */
1611 	port->cable_ident.id_header = idh;
1612 	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
1613 	port->cable_ident.product = product;
1614 	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
1615 
1616 	/* Fill out cable desc, infer svdm_version from pd revision */
1617 	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
1618 							USB_PLUG_TYPE_A);
1619 	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
1620 	/* Log PD Revision and additional cable VDO from negotiated revision */
1621 	switch (port->negotiated_rev_prime) {
1622 	case PD_REV30:
1623 		port->cable_desc.pd_revision = 0x0300;
1624 		if (port->cable_desc.active)
1625 			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
1626 		break;
1627 	case PD_REV20:
1628 		port->cable_desc.pd_revision = 0x0200;
1629 		break;
1630 	default:
1631 		port->cable_desc.pd_revision = 0x0200;
1632 		break;
1633 	}
1634 	port->cable_desc.identity = &port->cable_ident;
1635 	/* Register Cable, set identity and svdm_version */
1636 	port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
1637 	if (IS_ERR_OR_NULL(port->cable))
1638 		return;
1639 	typec_cable_set_identity(port->cable);
1640 	/* Get SVDM version */
1641 	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
1642 	typec_cable_set_svdm_version(port->cable, svdm_version);
1643 
1644 register_plug:
1645 	if (IS_ERR_OR_NULL(port->plug_prime)) {
1646 		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
1647 		port->plug_prime = typec_register_plug(port->cable,
1648 						       &port->plug_prime_desc);
1649 	}
1650 }
1651 
1652 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1653 			       enum tcpm_transmit_type rx_sop_type)
1654 {
1655 	struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1656 				      &port->mode_data_prime : &port->mode_data;
1657 	int i;
1658 
1659 	for (i = 1; i < cnt; i++) {
1660 		u16 svid;
1661 
1662 		svid = (p[i] >> 16) & 0xffff;
1663 		if (!svid)
1664 			return false;
1665 
1666 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1667 			goto abort;
1668 
1669 		pmdata->svids[pmdata->nsvids++] = svid;
1670 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1671 
1672 		svid = p[i] & 0xffff;
1673 		if (!svid)
1674 			return false;
1675 
1676 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1677 			goto abort;
1678 
1679 		pmdata->svids[pmdata->nsvids++] = svid;
1680 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1681 	}
1682 
1683 	/*
1684 	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1685 	 * 6-43), and a maximum of 6 VDOs can be returned per response (see Figure
1686 	 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1687 	 * SVIDs Command Shall be executed multiple times until a Discover
1688 	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1689 	 * the last part of the last VDO or with a VDO containing two SVIDs
1690 	 * with values of 0x0000.
1691 	 *
1692 	 * However, some odd docks support fewer than 12 SVIDs but without
1693 	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1694 	 * request and return false here.
1695 	 */
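	/*
	 * Worked example (illustrative): a full response (cnt == 7, i.e. VDM
	 * header plus 6 VDOs) with all twelve 16-bit halves non-zero returns
	 * true so Discover SVIDs is sent again; a VDO such as 0xff010000 ends
	 * the list after SVID 0xff01 because the zero half above returns false.
	 */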
1696 	return cnt == 7;
1697 abort:
1698 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1699 	return false;
1700 }
1701 
1702 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1703 			       enum tcpm_transmit_type rx_sop_type)
1704 {
1705 	struct pd_mode_data *pmdata = &port->mode_data;
1706 	struct typec_altmode_desc *paltmode;
1707 	int i;
1708 
1709 	switch (rx_sop_type) {
1710 	case TCPC_TX_SOP_PRIME:
1711 		pmdata = &port->mode_data_prime;
1712 		if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1713 			/* Already logged in svdm_consume_svids() */
1714 			return;
1715 		}
1716 		break;
1717 	case TCPC_TX_SOP:
1718 		pmdata = &port->mode_data;
1719 		if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1720 			/* Already logged in svdm_consume_svids() */
1721 			return;
1722 		}
1723 		break;
1724 	default:
1725 		return;
1726 	}
1727 
1728 	for (i = 1; i < cnt; i++) {
1729 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1730 		memset(paltmode, 0, sizeof(*paltmode));
1731 
1732 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1733 		paltmode->mode = i;
1734 		paltmode->vdo = p[i];
1735 
1736 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1737 			 pmdata->altmodes, paltmode->svid,
1738 			 paltmode->mode, paltmode->vdo);
1739 
1740 		pmdata->altmodes++;
1741 	}
1742 }
1743 
1744 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1745 {
1746 	struct pd_mode_data *modep = &port->mode_data;
1747 	struct typec_altmode *altmode;
1748 	int i;
1749 
1750 	if (!port->partner)
1751 		return;
1752 
1753 	for (i = 0; i < modep->altmodes; i++) {
1754 		altmode = typec_partner_register_altmode(port->partner,
1755 						&modep->altmode_desc[i]);
1756 		if (IS_ERR(altmode)) {
1757 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1758 				 modep->altmode_desc[i].svid);
1759 			altmode = NULL;
1760 		}
1761 		port->partner_altmode[i] = altmode;
1762 	}
1763 }
1764 
1765 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1766 {
1767 	struct pd_mode_data *modep = &port->mode_data_prime;
1768 	struct typec_altmode *altmode;
1769 	int i;
1770 
1771 	typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1772 
1773 	for (i = 0; i < modep->altmodes; i++) {
1774 		altmode = typec_plug_register_altmode(port->plug_prime,
1775 						&modep->altmode_desc[i]);
1776 		if (IS_ERR(altmode)) {
1777 			tcpm_log(port, "Failed to register plug SVID 0x%04x",
1778 				 modep->altmode_desc[i].svid);
1779 			altmode = NULL;
1780 		}
1781 		port->plug_prime_altmode[i] = altmode;
1782 	}
1783 }
1784 
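/*
 * Shorthand checks against the ID Header VDOs captured during Discover
 * Identity: whether the partner/cable advertises Modal Operation and whether
 * the partner can operate as a USB Host (bit definitions in
 * <linux/usb/pd_vdo.h>).
 */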
1785 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1786 #define supports_modal_cable(port)     PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
1787 #define supports_host(port)    PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
1788 
1789 /*
1790  * Helper to determine whether the port is capable of SOP' communication at the
1791  * current point in time.
1792  */
1793 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1794 {
1795 	/* Check to see if tcpc supports SOP' communication */
1796 	if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1797 		return false;
1798 	/*
1799 	 * Power Delivery 2.0 Section 6.3.11
1800 	 * Before communicating with a Cable Plug a Port Should ensure that it
1801 	 * is the Vconn Source and that the Cable Plugs are powered by
1802 	 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1803 	 * that the present Vconn Source is supplying Vconn, the only means to
1804 	 * ensure that the Cable Plugs are powered is for a Port wishing to
1805 	 * communicate with a Cable Plug is to become the Vconn Source.
1806 	 *
1807 	 * Power Delivery 3.0 Section 6.3.11
1808 	 * Before communicating with a Cable Plug a Port Shall ensure that it
1809 	 * is the Vconn source.
1810 	 */
1811 	if (port->vconn_role != TYPEC_SOURCE)
1812 		return false;
1813 	/*
1814 	 * Power Delivery 2.0 Section 2.4.4
1815 	 * When no Contract or an Implicit Contract is in place the Source can
1816 	 * communicate with a Cable Plug using SOP' packets in order to discover
1817 	 * its characteristics.
1818 	 *
1819 	 * Power Delivery 3.0 Section 2.4.4
1820 	 * When no Contract or an Implicit Contract is in place only the Source
1821 	 * port that is supplying Vconn is allowed to send packets to a Cable
1822 	 * Plug and is allowed to respond to packets from the Cable Plug.
1823 	 */
1824 	if (!port->explicit_contract)
1825 		return port->pwr_role == TYPEC_SOURCE;
1826 	if (port->negotiated_rev == PD_REV30)
1827 		return true;
1828 	/*
1829 	 * Power Delivery 2.0 Section 2.4.4
1830 	 *
1831 	 * When an Explicit Contract is in place the DFP (either the Source or
1832 	 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1833 	 * Packets (see Figure 2-3).
1834 	 */
1835 	if (port->negotiated_rev == PD_REV20)
1836 		return port->data_role == TYPEC_HOST;
1837 	return false;
1838 }
1839 
1840 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1841 {
1842 	if (!port->tcpc->attempt_vconn_swap_discovery)
1843 		return false;
1844 
1845 	/* Port is already source, no need to perform swap */
1846 	if (port->vconn_role == TYPEC_SOURCE)
1847 		return false;
1848 
1849 	/*
1850 	 * Partner needs to support Alternate Modes with modal support. If
1851 	 * partner is also capable of being a USB Host, it could be a device
1852 	 * that supports Alternate Modes as the DFP.
1853 	 */
1854 	if (!supports_modal(port) || supports_host(port))
1855 		return false;
1856 
1857 	if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1858 	    port->negotiated_rev == PD_REV30)
1859 		return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1860 
1861 	return false;
1862 }
1863 
1864 
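/*
 * Cable (SOP') alternate mode discovery is only attempted for active cables
 * that advertise Modal Operation, and only while the port is currently
 * allowed to communicate with the cable plug.
 */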
1865 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1866 {
1867 	return !IS_ERR_OR_NULL(port->cable) &&
1868 	       typec_cable_is_active(port->cable) &&
1869 	       supports_modal_cable(port) &&
1870 	       tcpm_can_communicate_sop_prime(port);
1871 }
1872 
1873 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1874 			const u32 *p, int cnt, u32 *response,
1875 			enum adev_actions *adev_action,
1876 			enum tcpm_transmit_type rx_sop_type,
1877 			enum tcpm_transmit_type *response_tx_sop_type)
1878 {
1879 	struct typec_port *typec = port->typec_port;
1880 	struct typec_altmode *pdev, *pdev_prime;
1881 	struct pd_mode_data *modep, *modep_prime;
1882 	int svdm_version;
1883 	int rlen = 0;
1884 	int cmd_type;
1885 	int cmd;
1886 	int i;
1887 	int ret;
1888 
1889 	cmd_type = PD_VDO_CMDT(p[0]);
1890 	cmd = PD_VDO_CMD(p[0]);
1891 
1892 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1893 		 p[0], cmd_type, cmd, cnt);
1894 
1895 	switch (rx_sop_type) {
1896 	case TCPC_TX_SOP_PRIME:
1897 		modep_prime = &port->mode_data_prime;
1898 		pdev_prime = typec_match_altmode(port->plug_prime_altmode,
1899 						 ALTMODE_DISCOVERY_MAX,
1900 						 PD_VDO_VID(p[0]),
1901 						 PD_VDO_OPOS(p[0]));
1902 		svdm_version = typec_get_cable_svdm_version(typec);
1903 		/*
1904 		 * Update SVDM version if cable was discovered before port partner.
1905 		 */
1906 		if (!IS_ERR_OR_NULL(port->cable) &&
1907 		    PD_VDO_SVDM_VER(p[0]) < svdm_version)
1908 			typec_cable_set_svdm_version(port->cable, svdm_version);
1909 		break;
1910 	case TCPC_TX_SOP:
1911 		modep = &port->mode_data;
1912 		pdev = typec_match_altmode(port->partner_altmode,
1913 					   ALTMODE_DISCOVERY_MAX,
1914 					   PD_VDO_VID(p[0]),
1915 					   PD_VDO_OPOS(p[0]));
1916 		svdm_version = typec_get_negotiated_svdm_version(typec);
1917 		if (svdm_version < 0)
1918 			return 0;
1919 		break;
1920 	default:
1921 		modep = &port->mode_data;
1922 		pdev = typec_match_altmode(port->partner_altmode,
1923 					   ALTMODE_DISCOVERY_MAX,
1924 					   PD_VDO_VID(p[0]),
1925 					   PD_VDO_OPOS(p[0]));
1926 		svdm_version = typec_get_negotiated_svdm_version(typec);
1927 		if (svdm_version < 0)
1928 			return 0;
1929 		break;
1930 	}
1931 
1932 	switch (cmd_type) {
1933 	case CMDT_INIT:
1934 		/*
1935 		 * Only the port or port partner is allowed to initiate SVDM
1936 		 * commands over SOP'. In case the port partner initiates a
1937 		 * sequence when it is not allowed to send SOP' messages, drop
1938 		 * the message if the TCPM port ends up trying to process it.
1939 		 */
1940 		if (rx_sop_type == TCPC_TX_SOP_PRIME)
1941 			return 0;
1942 
1943 		switch (cmd) {
1944 		case CMD_DISCOVER_IDENT:
1945 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
1946 				break;
1947 
1948 			if (IS_ERR_OR_NULL(port->partner))
1949 				break;
1950 
1951 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1952 				typec_partner_set_svdm_version(port->partner,
1953 							       PD_VDO_SVDM_VER(p[0]));
1954 				svdm_version = PD_VDO_SVDM_VER(p[0]);
1955 			}
1956 
1957 			port->ams = DISCOVER_IDENTITY;
1958 			/*
1959 			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
1960 			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
1961 			 * "wrong configuration" or "Unrecognized"
1962 			 */
1963 			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
1964 			    port->nr_snk_vdo) {
1965 				if (svdm_version < SVDM_VER_2_0) {
1966 					for (i = 0; i < port->nr_snk_vdo_v1; i++)
1967 						response[i + 1] = port->snk_vdo_v1[i];
1968 					rlen = port->nr_snk_vdo_v1 + 1;
1969 
1970 				} else {
1971 					for (i = 0; i < port->nr_snk_vdo; i++)
1972 						response[i + 1] = port->snk_vdo[i];
1973 					rlen = port->nr_snk_vdo + 1;
1974 				}
1975 			}
1976 			break;
1977 		case CMD_DISCOVER_SVID:
1978 			port->ams = DISCOVER_SVIDS;
1979 			break;
1980 		case CMD_DISCOVER_MODES:
1981 			port->ams = DISCOVER_MODES;
1982 			break;
1983 		case CMD_ENTER_MODE:
1984 			port->ams = DFP_TO_UFP_ENTER_MODE;
1985 			break;
1986 		case CMD_EXIT_MODE:
1987 			port->ams = DFP_TO_UFP_EXIT_MODE;
1988 			break;
1989 		case CMD_ATTENTION:
1990 			/* Attention command does not have response */
1991 			*adev_action = ADEV_ATTENTION;
1992 			return 0;
1993 		default:
1994 			break;
1995 		}
1996 		if (rlen >= 1) {
1997 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
1998 		} else if (rlen == 0) {
1999 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2000 			rlen = 1;
2001 		} else {
2002 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2003 			rlen = 1;
2004 		}
2005 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2006 			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2007 		break;
2008 	case CMDT_RSP_ACK:
2009 		/*
2010 		 * Silently drop the message if we are not connected, except for
2011 		 * an SOP' Discover Identity received prior to an explicit contract.
2012 		 */
2013 		if (IS_ERR_OR_NULL(port->partner) &&
2014 		    !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2015 			break;
2016 
2017 		tcpm_ams_finish(port);
2018 
2019 		switch (cmd) {
2020 		/*
2021 		 * SVDM Command Flow for SOP and SOP':
2022 		 * SOP		Discover Identity
2023 		 * SOP'		Discover Identity
2024 		 * SOP		Discover SVIDs
2025 		 *		Discover Modes
2026 		 * (Active Cables)
2027 		 * SOP'		Discover SVIDs
2028 		 *		Discover Modes
2029 		 *
2030 		 * Perform Discover SOP' if the port can communicate with cable
2031 		 * plug.
2032 		 */
2033 		case CMD_DISCOVER_IDENT:
2034 			switch (rx_sop_type) {
2035 			case TCPC_TX_SOP:
2036 				if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2037 					typec_partner_set_svdm_version(port->partner,
2038 								       PD_VDO_SVDM_VER(p[0]));
2039 					/* If cable is discovered before partner, downgrade svdm */
2040 					if (!IS_ERR_OR_NULL(port->cable) &&
2041 					    (typec_get_cable_svdm_version(port->typec_port) >
2042 					    svdm_version))
2043 						typec_cable_set_svdm_version(port->cable,
2044 									     svdm_version);
2045 				}
2046 				/* 6.4.4.3.1 */
2047 				svdm_consume_identity(port, p, cnt);
2048 				/* Attempt Vconn swap, delay SOP' discovery if necessary */
2049 				if (tcpm_attempt_vconn_swap_discovery(port)) {
2050 					port->send_discover_prime = true;
2051 					port->upcoming_state = VCONN_SWAP_SEND;
2052 					ret = tcpm_ams_start(port, VCONN_SWAP);
2053 					if (!ret)
2054 						return 0;
2055 					/* Cannot perform Vconn swap */
2056 					port->upcoming_state = INVALID_STATE;
2057 					port->send_discover_prime = false;
2058 				}
2059 
2060 				/*
2061 				 * Attempt Discover Identity on SOP' if the
2062 				 * cable was not discovered previously, and use
2063 				 * the SVDM version of the partner to probe.
2064 				 */
2065 				if (IS_ERR_OR_NULL(port->cable) &&
2066 				    tcpm_can_communicate_sop_prime(port)) {
2067 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2068 					port->send_discover_prime = true;
2069 					response[0] = VDO(USB_SID_PD, 1,
2070 							  typec_get_negotiated_svdm_version(typec),
2071 							  CMD_DISCOVER_IDENT);
2072 					rlen = 1;
2073 				} else {
2074 					*response_tx_sop_type = TCPC_TX_SOP;
2075 					response[0] = VDO(USB_SID_PD, 1,
2076 							  typec_get_negotiated_svdm_version(typec),
2077 							  CMD_DISCOVER_SVID);
2078 					rlen = 1;
2079 				}
2080 				break;
2081 			case TCPC_TX_SOP_PRIME:
2082 				/*
2083 				 * svdm_consume_identity_sop_prime will determine
2084 				 * the svdm_version for the cable moving forward.
2085 				 */
2086 				svdm_consume_identity_sop_prime(port, p, cnt);
2087 
2088 				/*
2089 				 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2090 				 * to SRC_SEND_CAPABILITIES
2091 				 */
2092 				if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2093 					tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2094 					return 0;
2095 				}
2096 
2097 				*response_tx_sop_type = TCPC_TX_SOP;
2098 				response[0] = VDO(USB_SID_PD, 1,
2099 						  typec_get_negotiated_svdm_version(typec),
2100 						  CMD_DISCOVER_SVID);
2101 				rlen = 1;
2102 				break;
2103 			default:
2104 				return 0;
2105 			}
2106 			break;
2107 		case CMD_DISCOVER_SVID:
2108 			*response_tx_sop_type = rx_sop_type;
2109 			/* 6.4.4.3.2 */
2110 			if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2111 				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2112 				rlen = 1;
2113 			} else {
2114 				if (rx_sop_type == TCPC_TX_SOP) {
2115 					if (modep->nsvids && supports_modal(port)) {
2116 						response[0] = VDO(modep->svids[0], 1, svdm_version,
2117 								CMD_DISCOVER_MODES);
2118 						rlen = 1;
2119 					}
2120 				} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2121 					if (modep_prime->nsvids) {
2122 						response[0] = VDO(modep_prime->svids[0], 1,
2123 								  svdm_version, CMD_DISCOVER_MODES);
2124 						rlen = 1;
2125 					}
2126 				}
2127 			}
2128 			break;
2129 		case CMD_DISCOVER_MODES:
2130 			if (rx_sop_type == TCPC_TX_SOP) {
2131 				/* 6.4.4.3.3 */
2132 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2133 				modep->svid_index++;
2134 				if (modep->svid_index < modep->nsvids) {
2135 					u16 svid = modep->svids[modep->svid_index];
2136 					*response_tx_sop_type = TCPC_TX_SOP;
2137 					response[0] = VDO(svid, 1, svdm_version,
2138 							  CMD_DISCOVER_MODES);
2139 					rlen = 1;
2140 				} else if (tcpm_cable_vdm_supported(port)) {
2141 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2142 					response[0] = VDO(USB_SID_PD, 1,
2143 							  typec_get_cable_svdm_version(typec),
2144 							  CMD_DISCOVER_SVID);
2145 					rlen = 1;
2146 				} else {
2147 					tcpm_register_partner_altmodes(port);
2148 				}
2149 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2150 				/* 6.4.4.3.3 */
2151 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2152 				modep_prime->svid_index++;
2153 				if (modep_prime->svid_index < modep_prime->nsvids) {
2154 					u16 svid = modep_prime->svids[modep_prime->svid_index];
2155 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2156 					response[0] = VDO(svid, 1,
2157 							  typec_get_cable_svdm_version(typec),
2158 							  CMD_DISCOVER_MODES);
2159 					rlen = 1;
2160 				} else {
2161 					tcpm_register_plug_altmodes(port);
2162 					tcpm_register_partner_altmodes(port);
2163 				}
2164 			}
2165 			break;
2166 		case CMD_ENTER_MODE:
2167 			*response_tx_sop_type = rx_sop_type;
2168 			if (rx_sop_type == TCPC_TX_SOP) {
2169 				if (adev && pdev) {
2170 					typec_altmode_update_active(pdev, true);
2171 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2172 				}
2173 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2174 				if (adev && pdev_prime) {
2175 					typec_altmode_update_active(pdev_prime, true);
2176 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2177 				}
2178 			}
2179 			return 0;
2180 		case CMD_EXIT_MODE:
2181 			*response_tx_sop_type = rx_sop_type;
2182 			if (rx_sop_type == TCPC_TX_SOP) {
2183 				if (adev && pdev) {
2184 					typec_altmode_update_active(pdev, false);
2185 					/* Back to USB Operation */
2186 					*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2187 					return 0;
2188 				}
2189 			}
2190 			break;
2191 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2192 			break;
2193 		default:
2194 			/* Unrecognized SVDM */
2195 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2196 			rlen = 1;
2197 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2198 				      (VDO_SVDM_VERS(svdm_version));
2199 			break;
2200 		}
2201 		break;
2202 	case CMDT_RSP_NAK:
2203 		tcpm_ams_finish(port);
2204 		switch (cmd) {
2205 		case CMD_DISCOVER_IDENT:
2206 		case CMD_DISCOVER_SVID:
2207 		case CMD_DISCOVER_MODES:
2208 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2209 			break;
2210 		case CMD_ENTER_MODE:
2211 			/* Back to USB Operation */
2212 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2213 			return 0;
2214 		default:
2215 			/* Unrecognized SVDM */
2216 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2217 			rlen = 1;
2218 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2219 				      (VDO_SVDM_VERS(svdm_version));
2220 			break;
2221 		}
2222 		break;
2223 	default:
2224 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2225 		rlen = 1;
2226 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2227 			      (VDO_SVDM_VERS(svdm_version));
2228 		break;
2229 	}
2230 
2231 	/* Informing the alternate mode drivers about everything */
2232 	*adev_action = ADEV_QUEUE_VDM;
2233 	return rlen;
2234 }
2235 
2236 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2237 			       enum pd_msg_request message,
2238 			       enum tcpm_ams ams);
2239 
2240 static void tcpm_handle_vdm_request(struct tcpm_port *port,
2241 				    const __le32 *payload, int cnt,
2242 				    enum tcpm_transmit_type rx_sop_type)
2243 {
2244 	enum adev_actions adev_action = ADEV_NONE;
2245 	struct typec_altmode *adev;
2246 	u32 p[PD_MAX_PAYLOAD];
2247 	u32 response[8] = { };
2248 	int i, rlen = 0;
2249 	enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
2250 
2251 	for (i = 0; i < cnt; i++)
2252 		p[i] = le32_to_cpu(payload[i]);
2253 
2254 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
2255 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
2256 
2257 	if (port->vdm_state == VDM_STATE_BUSY) {
2258 		/* If UFP responded busy retry after timeout */
2259 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
2260 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
2261 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
2262 				CMDT_INIT;
2263 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
2264 			return;
2265 		}
2266 		port->vdm_state = VDM_STATE_DONE;
2267 	}
2268 
2269 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
2270 		/*
2271 		 * Here an SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
2272 		 * advance because we are dropping the lock but may send VDMs soon.
2273 		 * For the cases of INIT received:
2274 		 *  - If no response to send, it will be cleared later in this function.
2275 		 *  - If there are responses to send, it will be cleared in the state machine.
2276 		 * For the cases of RSP received:
2277 		 *  - If no further INIT to send, it will be cleared later in this function.
2278 		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
2279 		 *    back here until no further INIT to send.
2280 		 * For the cases of unknown type received:
2281 		 *  - We will send NAK and the flag will be cleared in the state machine.
2282 		 */
2283 		port->vdm_sm_running = true;
2284 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
2285 				    rx_sop_type, &response_tx_sop_type);
2286 	} else {
2287 		if (port->negotiated_rev >= PD_REV30)
2288 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2289 	}
2290 
2291 	/*
2292 	 * We are done with any state stored in the port struct now, except
2293 	 * for any port struct changes done by the tcpm_queue_vdm() call
2294 	 * below, which is a separate operation.
2295 	 *
2296 	 * So we can safely release the lock here; and we MUST release the
2297 	 * lock here to avoid an AB BA lock inversion:
2298 	 *
2299 	 * If we keep the lock here then the lock ordering in this path is:
2300 	 * 1. tcpm_pd_rx_handler takes the tcpm port lock
2301 	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
2302 	 *
2303 	 * And we also have this ordering:
2304 	 * 1. alt-mode driver takes the alt-mode's lock
2305 	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
2306 	 *    tcpm port lock
2307 	 *
2308 	 * Dropping our lock here avoids this.
2309 	 */
2310 	mutex_unlock(&port->lock);
2311 
2312 	if (adev) {
2313 		switch (adev_action) {
2314 		case ADEV_NONE:
2315 			break;
2316 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
2317 			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
2318 			typec_altmode_vdm(adev, p[0], &p[1], cnt);
2319 			break;
2320 		case ADEV_QUEUE_VDM:
2321 			if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
2322 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2323 			else
2324 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2325 			break;
2326 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
2327 			if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
2328 				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
2329 							    p[0], &p[1], cnt)) {
2330 					int svdm_version = typec_get_cable_svdm_version(
2331 										port->typec_port);
2332 					if (svdm_version < 0)
2333 						break;
2334 
2335 					response[0] = VDO(adev->svid, 1, svdm_version,
2336 							CMD_EXIT_MODE);
2337 					response[0] |= VDO_OPOS(adev->mode);
2338 					rlen = 1;
2339 				}
2340 			} else {
2341 				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
2342 					int svdm_version = typec_get_negotiated_svdm_version(
2343 										port->typec_port);
2344 					if (svdm_version < 0)
2345 						break;
2346 
2347 					response[0] = VDO(adev->svid, 1, svdm_version,
2348 							CMD_EXIT_MODE);
2349 					response[0] |= VDO_OPOS(adev->mode);
2350 					rlen = 1;
2351 				}
2352 			}
2353 			break;
2354 		case ADEV_ATTENTION:
2355 			if (typec_altmode_attention(adev, p[1]))
2356 				tcpm_log(port, "typec_altmode_attention no port partner altmode");
2357 			break;
2358 		}
2359 	}
2360 
2361 	/*
2362 	 * We must re-take the lock here to balance the unlock in
2363 	 * tcpm_pd_rx_handler; note that no changes, other than the
2364 	 * tcpm_queue_vdm call, are made while the lock is held again.
2365 	 * All that is done after the call is unwinding the call stack until
2366 	 * we return to tcpm_pd_rx_handler and do the unlock there.
2367 	 */
2368 	mutex_lock(&port->lock);
2369 
2370 	if (rlen > 0)
2371 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
2372 	else
2373 		port->vdm_sm_running = false;
2374 }
2375 
2376 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
2377 			  const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
2378 {
2379 	int svdm_version;
2380 	u32 header;
2381 
2382 	switch (tx_sop_type) {
2383 	case TCPC_TX_SOP_PRIME:
2384 		/*
2385 		 * If the port partner is discovered, then the port partner's
2386 		 * SVDM Version will be returned
2387 		 */
2388 		svdm_version = typec_get_cable_svdm_version(port->typec_port);
2389 		if (svdm_version < 0)
2390 			svdm_version = SVDM_VER_MAX;
2391 		break;
2392 	case TCPC_TX_SOP:
2393 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2394 		if (svdm_version < 0)
2395 			return;
2396 		break;
2397 	default:
2398 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2399 		if (svdm_version < 0)
2400 			return;
2401 		break;
2402 	}
2403 
2404 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
2405 		count = VDO_MAX_SIZE - 1;
2406 
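	/*
	 * One reading of the header construction below: the second VDO()
	 * argument is the Structured VDM bit. It is set when the SVID is in
	 * the standard 0xffXX range or when the command is one of the
	 * standard SVDM commands (Discover Identity through Attention);
	 * otherwise the VDM is sent unstructured.
	 */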
2407 	/* set VDM header with VID & CMD */
2408 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
2409 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
2410 			svdm_version, cmd);
2411 	tcpm_queue_vdm(port, header, data, count, tx_sop_type);
2412 }
2413 
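/*
 * Pick the timeout armed after a VDM has been transmitted: Enter/Exit Mode
 * commands use the longer mode entry/exit timers, other structured commands
 * the generic sender/receiver response timers, and unstructured VDMs a
 * fixed value.
 */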
2414 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
2415 {
2416 	unsigned int timeout;
2417 	int cmd = PD_VDO_CMD(vdm_hdr);
2418 
2419 	/* its not a structured VDM command */
2420 	/* it's not a structured VDM command */
2421 		return PD_T_VDM_UNSTRUCTURED;
2422 
2423 	switch (PD_VDO_CMDT(vdm_hdr)) {
2424 	case CMDT_INIT:
2425 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2426 			timeout = PD_T_VDM_WAIT_MODE_E;
2427 		else
2428 			timeout = PD_T_VDM_SNDR_RSP;
2429 		break;
2430 	default:
2431 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2432 			timeout = PD_T_VDM_E_MODE;
2433 		else
2434 			timeout = PD_T_VDM_RCVR_RSP;
2435 		break;
2436 	}
2437 	return timeout;
2438 }
2439 
2440 static void vdm_run_state_machine(struct tcpm_port *port)
2441 {
2442 	struct pd_message msg;
2443 	int i, res = 0;
2444 	u32 vdo_hdr = port->vdo_data[0];
2445 	u32 response[8] = { };
2446 
2447 	switch (port->vdm_state) {
2448 	case VDM_STATE_READY:
2449 		/* Only transmit VDM if attached */
2450 		if (!port->attached) {
2451 			port->vdm_state = VDM_STATE_ERR_BUSY;
2452 			break;
2453 		}
2454 
2455 		/*
2456 		 * If there's traffic or we're not in a PDO-ready state, don't
2457 		 * send a VDM.
2458 		 */
2459 		if (port->state != SRC_READY && port->state != SNK_READY &&
2460 		    port->state != SRC_VDM_IDENTITY_REQUEST) {
2461 			port->vdm_sm_running = false;
2462 			break;
2463 		}
2464 
2465 		/* TODO: AMS operation for Unstructured VDM */
2466 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
2467 			switch (PD_VDO_CMD(vdo_hdr)) {
2468 			case CMD_DISCOVER_IDENT:
2469 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
2470 				if (res == 0) {
2471 					switch (port->tx_sop_type) {
2472 					case TCPC_TX_SOP_PRIME:
2473 						port->send_discover_prime = false;
2474 						break;
2475 					case TCPC_TX_SOP:
2476 						port->send_discover = false;
2477 						break;
2478 					default:
2479 						port->send_discover = false;
2480 						break;
2481 					}
2482 				} else if (res == -EAGAIN) {
2483 					port->vdo_data[0] = 0;
2484 					mod_send_discover_delayed_work(port,
2485 								       SEND_DISCOVER_RETRY_MS);
2486 				}
2487 				break;
2488 			case CMD_DISCOVER_SVID:
2489 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
2490 				break;
2491 			case CMD_DISCOVER_MODES:
2492 				res = tcpm_ams_start(port, DISCOVER_MODES);
2493 				break;
2494 			case CMD_ENTER_MODE:
2495 				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2496 				break;
2497 			case CMD_EXIT_MODE:
2498 				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2499 				break;
2500 			case CMD_ATTENTION:
2501 				res = tcpm_ams_start(port, ATTENTION);
2502 				break;
2503 			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2504 				res = tcpm_ams_start(port, STRUCTURED_VDMS);
2505 				break;
2506 			default:
2507 				res = -EOPNOTSUPP;
2508 				break;
2509 			}
2510 
2511 			if (res < 0) {
2512 				port->vdm_state = VDM_STATE_ERR_BUSY;
2513 				return;
2514 			}
2515 		}
2516 
2517 		port->vdm_state = VDM_STATE_SEND_MESSAGE;
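		/*
		 * Per the PD 3.0 collision-avoidance rules, a Source
		 * initiating an SVDM AMS waits tSinkTx before transmitting so
		 * that a Sink which already started sending a Message while
		 * Rp still indicated SinkTxOk can complete it.
		 */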
2518 		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2519 					    port->pwr_role == TYPEC_SOURCE &&
2520 					    PD_VDO_SVDM(vdo_hdr) &&
2521 					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2522 					   PD_T_SINK_TX : 0);
2523 		break;
2524 	case VDM_STATE_WAIT_RSP_BUSY:
2525 		port->vdo_data[0] = port->vdo_retry;
2526 		port->vdo_count = 1;
2527 		port->vdm_state = VDM_STATE_READY;
2528 		tcpm_ams_finish(port);
2529 		break;
2530 	case VDM_STATE_BUSY:
2531 		port->vdm_state = VDM_STATE_ERR_TMOUT;
2532 		if (port->ams != NONE_AMS)
2533 			tcpm_ams_finish(port);
2534 		break;
2535 	case VDM_STATE_ERR_SEND:
2536 		/*
2537 		 * When sending Discover Identity to SOP' before establishing an
2538 		 * explicit contract, do not retry. Instead, weave sending
2539 		 * Source_Capabilities over SOP and Discover Identity over SOP'.
2540 		 */
2541 		if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2542 			tcpm_ams_finish(port);
2543 			port->vdm_state = VDM_STATE_DONE;
2544 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2545 		/*
2546 		 * A partner which does not support USB PD will not reply,
2547 		 * so this is not a fatal error. At the same time, some
2548 		 * devices may not return GoodCRC under some circumstances,
2549 		 * so we need to retry.
2550 		 */
2551 		} else if (port->vdm_retries < 3) {
2552 			tcpm_log(port, "VDM Tx error, retry");
2553 			port->vdm_retries++;
2554 			port->vdm_state = VDM_STATE_READY;
2555 			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2556 				tcpm_ams_finish(port);
2557 		} else {
2558 			tcpm_ams_finish(port);
2559 			if (port->tx_sop_type == TCPC_TX_SOP)
2560 				break;
2561 			/* Handle SOP' Transmission Errors */
2562 			switch (PD_VDO_CMD(vdo_hdr)) {
2563 			/*
2564 			 * If Discover Identity fails on SOP', then resume
2565 			 * discovery process on SOP only.
2566 			 */
2567 			case CMD_DISCOVER_IDENT:
2568 				port->vdo_data[0] = 0;
2569 				response[0] = VDO(USB_SID_PD, 1,
2570 						  typec_get_negotiated_svdm_version(
2571 									port->typec_port),
2572 						  CMD_DISCOVER_SVID);
2573 				tcpm_queue_vdm(port, response[0], &response[1],
2574 					       0, TCPC_TX_SOP);
2575 				break;
2576 			/*
2577 			 * If Discover SVIDs or Discover Modes fail, then
2578 			 * proceed with Alt Mode discovery process on SOP.
2579 			 */
2580 			case CMD_DISCOVER_SVID:
2581 				tcpm_register_partner_altmodes(port);
2582 				break;
2583 			case CMD_DISCOVER_MODES:
2584 				tcpm_register_partner_altmodes(port);
2585 				break;
2586 			default:
2587 				break;
2588 			}
2589 		}
2590 		break;
2591 	case VDM_STATE_SEND_MESSAGE:
2592 		/* Prepare and send VDM */
2593 		memset(&msg, 0, sizeof(msg));
2594 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
2595 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2596 						  0,	/* Cable Plug Indicator for DFP/UFP */
2597 						  0,	/* Reserved */
2598 						  port->negotiated_rev_prime,
2599 						  port->message_id_prime,
2600 						  port->vdo_count);
2601 		} else {
2602 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2603 						  port->pwr_role,
2604 						  port->data_role,
2605 						  port->negotiated_rev,
2606 						  port->message_id,
2607 						  port->vdo_count);
2608 		}
2609 		for (i = 0; i < port->vdo_count; i++)
2610 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2611 		res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
2612 		if (res < 0) {
2613 			port->vdm_state = VDM_STATE_ERR_SEND;
2614 		} else {
2615 			unsigned long timeout;
2616 
2617 			port->vdm_retries = 0;
2618 			port->vdo_data[0] = 0;
2619 			port->vdm_state = VDM_STATE_BUSY;
2620 			timeout = vdm_ready_timeout(vdo_hdr);
2621 			mod_vdm_delayed_work(port, timeout);
2622 		}
2623 		break;
2624 	default:
2625 		break;
2626 	}
2627 }
2628 
2629 static void vdm_state_machine_work(struct kthread_work *work)
2630 {
2631 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2632 	enum vdm_states prev_state;
2633 
2634 	mutex_lock(&port->lock);
2635 
2636 	/*
2637 	 * Continue running as long as the port is not busy and there was
2638 	 * a state change.
2639 	 */
2640 	do {
2641 		prev_state = port->vdm_state;
2642 		vdm_run_state_machine(port);
2643 	} while (port->vdm_state != prev_state &&
2644 		 port->vdm_state != VDM_STATE_BUSY &&
2645 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2646 
2647 	if (port->vdm_state < VDM_STATE_READY)
2648 		port->vdm_sm_running = false;
2649 
2650 	mutex_unlock(&port->lock);
2651 }
2652 
2653 enum pdo_err {
2654 	PDO_NO_ERR,
2655 	PDO_ERR_NO_VSAFE5V,
2656 	PDO_ERR_VSAFE5V_NOT_FIRST,
2657 	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2658 	PDO_ERR_FIXED_NOT_SORTED,
2659 	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2660 	PDO_ERR_DUPE_PDO,
2661 	PDO_ERR_PPS_APDO_NOT_SORTED,
2662 	PDO_ERR_DUPE_PPS_APDO,
2663 };
2664 
2665 static const char * const pdo_err_msg[] = {
2666 	[PDO_ERR_NO_VSAFE5V] =
2667 	" err: source/sink caps should at least have vSafe5V",
2668 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
2669 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
2670 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2671 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
2672 	[PDO_ERR_FIXED_NOT_SORTED] =
2673 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
2674 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2675 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2676 	[PDO_ERR_DUPE_PDO] =
2677 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
2678 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
2679 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2680 	[PDO_ERR_DUPE_PPS_APDO] =
2681 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
2682 };
2683 
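/*
 * Example of a capability list that passes the checks below (hypothetical
 * values): Fixed 5V, Fixed 9V, Battery 5-12V, Variable 5-12V, PPS APDO
 * 3.3-11V. Fixed PDOs are ordered by voltage, Battery/Variable PDOs by
 * minimum voltage, PPS APDOs by maximum voltage, and vSafe5V comes first.
 */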
2684 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2685 				  unsigned int nr_pdo)
2686 {
2687 	unsigned int i;
2688 
2689 	/* Should at least contain vSafe5V */
2690 	if (nr_pdo < 1)
2691 		return PDO_ERR_NO_VSAFE5V;
2692 
2693 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
2694 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2695 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2696 		return PDO_ERR_VSAFE5V_NOT_FIRST;
2697 
2698 	for (i = 1; i < nr_pdo; i++) {
2699 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2700 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2701 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2702 			enum pd_pdo_type type = pdo_type(pdo[i]);
2703 
2704 			switch (type) {
2705 			/*
2706 			 * The remaining Fixed Supply Objects, if
2707 			 * present, shall be sent in voltage order;
2708 			 * lowest to highest.
2709 			 */
2710 			case PDO_TYPE_FIXED:
2711 				if (pdo_fixed_voltage(pdo[i]) <=
2712 				    pdo_fixed_voltage(pdo[i - 1]))
2713 					return PDO_ERR_FIXED_NOT_SORTED;
2714 				break;
2715 			/*
2716 			 * The Battery Supply Objects and Variable
2717 			 * supply, if present shall be sent in Minimum
2718 			 * Voltage order; lowest to highest.
2719 			 */
2720 			case PDO_TYPE_VAR:
2721 			case PDO_TYPE_BATT:
2722 				if (pdo_min_voltage(pdo[i]) <
2723 				    pdo_min_voltage(pdo[i - 1]))
2724 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2725 				else if ((pdo_min_voltage(pdo[i]) ==
2726 					  pdo_min_voltage(pdo[i - 1])) &&
2727 					 (pdo_max_voltage(pdo[i]) ==
2728 					  pdo_max_voltage(pdo[i - 1])))
2729 					return PDO_ERR_DUPE_PDO;
2730 				break;
2731 			/*
2732 			 * The Programmable Power Supply APDOs, if present,
2733 			 * shall be sent in Maximum Voltage order;
2734 			 * lowest to highest.
2735 			 */
2736 			case PDO_TYPE_APDO:
2737 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2738 					break;
2739 
2740 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
2741 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
2742 					return PDO_ERR_PPS_APDO_NOT_SORTED;
2743 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2744 					  pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2745 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
2746 					  pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2747 					 pdo_pps_apdo_max_current(pdo[i]) ==
2748 					  pdo_pps_apdo_max_current(pdo[i - 1]))
2749 					return PDO_ERR_DUPE_PPS_APDO;
2750 				break;
2751 			default:
2752 				tcpm_log_force(port, " Unknown pdo type");
2753 			}
2754 		}
2755 	}
2756 
2757 	return PDO_NO_ERR;
2758 }
2759 
2760 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2761 			      unsigned int nr_pdo)
2762 {
2763 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2764 
2765 	if (err_index != PDO_NO_ERR) {
2766 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2767 		return -EINVAL;
2768 	}
2769 
2770 	return 0;
2771 }
2772 
2773 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2774 {
2775 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2776 	int svdm_version;
2777 	u32 header;
2778 
2779 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2780 	if (svdm_version < 0)
2781 		return svdm_version;
2782 
2783 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2784 	header |= VDO_OPOS(altmode->mode);
2785 
2786 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
2787 	return 0;
2788 }
2789 
2790 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2791 {
2792 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2793 	int svdm_version;
2794 	u32 header;
2795 
2796 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2797 	if (svdm_version < 0)
2798 		return svdm_version;
2799 
2800 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2801 	header |= VDO_OPOS(altmode->mode);
2802 
2803 	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2804 	return 0;
2805 }
2806 
2807 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2808 			    u32 header, const u32 *data, int count)
2809 {
2810 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2811 
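	/*
	 * The .vdm() callback's count appears to include the VDM header word,
	 * hence only count - 1 data VDOs are queued here.
	 */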
2812 	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
2813 
2814 	return 0;
2815 }
2816 
2817 static const struct typec_altmode_ops tcpm_altmode_ops = {
2818 	.enter = tcpm_altmode_enter,
2819 	.exit = tcpm_altmode_exit,
2820 	.vdm = tcpm_altmode_vdm,
2821 };
2822 
2823 
2824 static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
2825 				    u32 *vdo)
2826 {
2827 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2828 	int svdm_version;
2829 	u32 header;
2830 
2831 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2832 	if (svdm_version < 0)
2833 		return svdm_version;
2834 
2835 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2836 	header |= VDO_OPOS(altmode->mode);
2837 
2838 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
2839 	return 0;
2840 }
2841 
2842 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2843 {
2844 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2845 	int svdm_version;
2846 	u32 header;
2847 
2848 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2849 	if (svdm_version < 0)
2850 		return svdm_version;
2851 
2852 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2853 	header |= VDO_OPOS(altmode->mode);
2854 
2855 	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2856 	return 0;
2857 }
2858 
2859 static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
2860 				  u32 header, const u32 *data, int count)
2861 {
2862 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2863 
2864 	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
2865 
2866 	return 0;
2867 }
2868 
2869 static const struct typec_cable_ops tcpm_cable_ops = {
2870 	.enter = tcpm_cable_altmode_enter,
2871 	.exit = tcpm_cable_altmode_exit,
2872 	.vdm = tcpm_cable_altmode_vdm,
2873 };
2874 
2875 /*
2876  * PD (data, control) command handling functions
2877  */
2878 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2879 {
2880 	if (port->pwr_role == TYPEC_SOURCE)
2881 		return SRC_READY;
2882 	else
2883 		return SNK_READY;
2884 }
2885 
2886 static int tcpm_pd_send_control(struct tcpm_port *port,
2887 				enum pd_ctrl_msg_type type,
2888 				enum tcpm_transmit_type tx_sop_type);
2889 
2890 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2891 			      int cnt)
2892 {
2893 	u32 p0 = le32_to_cpu(payload[0]);
2894 	unsigned int type = usb_pd_ado_type(p0);
2895 
2896 	if (!type) {
2897 		tcpm_log(port, "Alert message received with no type");
2898 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2899 		return;
2900 	}
2901 
2902 	/* Just handling non-battery alerts for now */
2903 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2904 		if (port->pwr_role == TYPEC_SOURCE) {
2905 			port->upcoming_state = GET_STATUS_SEND;
2906 			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2907 		} else {
2908 			/*
2909 			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2910 			 * SinkTxOk in time.
2911 			 */
2912 			port->ams = GETTING_SOURCE_SINK_STATUS;
2913 			tcpm_set_state(port, GET_STATUS_SEND, 0);
2914 		}
2915 	} else {
2916 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2917 	}
2918 }
2919 
2920 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2921 						  enum typec_pwr_opmode mode, bool pps_active,
2922 						  u32 requested_vbus_voltage)
2923 {
2924 	int ret;
2925 
2926 	if (!port->tcpc->set_auto_vbus_discharge_threshold)
2927 		return 0;
2928 
2929 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2930 							    requested_vbus_voltage);
2931 	tcpm_log_force(port,
2932 		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
2933 		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
2934 
2935 	return ret;
2936 }
2937 
2938 static void tcpm_pd_handle_state(struct tcpm_port *port,
2939 				 enum tcpm_state state,
2940 				 enum tcpm_ams ams,
2941 				 unsigned int delay_ms)
2942 {
2943 	switch (port->state) {
2944 	case SRC_READY:
2945 	case SNK_READY:
2946 		port->ams = ams;
2947 		tcpm_set_state(port, state, delay_ms);
2948 		break;
2949 	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2950 	case SNK_TRANSITION_SINK:
2951 	case SNK_TRANSITION_SINK_VBUS:
2952 	case SRC_TRANSITION_SUPPLY:
2953 		tcpm_set_state(port, HARD_RESET_SEND, 0);
2954 		break;
2955 	default:
2956 		if (!tcpm_ams_interruptible(port)) {
2957 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2958 				       SRC_SOFT_RESET_WAIT_SNK_TX :
2959 				       SNK_SOFT_RESET,
2960 				       0);
2961 		} else {
2962 			/* process the Message 6.8.1 */
2963 			port->upcoming_state = state;
2964 			port->next_ams = ams;
2965 			tcpm_set_state(port, ready_state(port), delay_ms);
2966 		}
2967 		break;
2968 	}
2969 }
2970 
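/*
 * Like tcpm_pd_handle_state(), but queues a Message to transmit once the
 * port is (back) in a ready state instead of entering a new state directly.
 */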
2971 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2972 			       enum pd_msg_request message,
2973 			       enum tcpm_ams ams)
2974 {
2975 	switch (port->state) {
2976 	case SRC_READY:
2977 	case SNK_READY:
2978 		port->ams = ams;
2979 		tcpm_queue_message(port, message);
2980 		break;
2981 	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
2982 	case SNK_TRANSITION_SINK:
2983 	case SNK_TRANSITION_SINK_VBUS:
2984 	case SRC_TRANSITION_SUPPLY:
2985 		tcpm_set_state(port, HARD_RESET_SEND, 0);
2986 		break;
2987 	default:
2988 		if (!tcpm_ams_interruptible(port)) {
2989 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2990 				       SRC_SOFT_RESET_WAIT_SNK_TX :
2991 				       SNK_SOFT_RESET,
2992 				       0);
2993 		} else {
2994 			port->next_ams = ams;
2995 			tcpm_set_state(port, ready_state(port), 0);
2996 			/* 6.8.1 process the Message */
2997 			tcpm_queue_message(port, message);
2998 		}
2999 		break;
3000 	}
3001 }
3002 
3003 static int tcpm_register_source_caps(struct tcpm_port *port)
3004 {
3005 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3006 	struct usb_power_delivery_capabilities_desc caps = { };
3007 	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3008 
3009 	if (!port->partner_pd)
3010 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3011 	if (IS_ERR(port->partner_pd))
3012 		return PTR_ERR(port->partner_pd);
3013 
3014 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3015 	caps.role = TYPEC_SOURCE;
3016 
3017 	if (cap)
3018 		usb_power_delivery_unregister_capabilities(cap);
3019 
3020 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3021 	if (IS_ERR(cap))
3022 		return PTR_ERR(cap);
3023 
3024 	port->partner_source_caps = cap;
3025 
3026 	return 0;
3027 }
3028 
3029 static int tcpm_register_sink_caps(struct tcpm_port *port)
3030 {
3031 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3032 	struct usb_power_delivery_capabilities_desc caps = { };
3033 	struct usb_power_delivery_capabilities *cap;
3034 
3035 	if (!port->partner_pd)
3036 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3037 	if (IS_ERR(port->partner_pd))
3038 		return PTR_ERR(port->partner_pd);
3039 
3040 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3041 	caps.role = TYPEC_SINK;
3042 
3043 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3044 	if (IS_ERR(cap))
3045 		return PTR_ERR(cap);
3046 
3047 	port->partner_sink_caps = cap;
3048 
3049 	return 0;
3050 }
3051 
3052 static void tcpm_pd_data_request(struct tcpm_port *port,
3053 				 const struct pd_message *msg,
3054 				 enum tcpm_transmit_type rx_sop_type)
3055 {
3056 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
3057 	unsigned int cnt = pd_header_cnt_le(msg->header);
3058 	unsigned int rev = pd_header_rev_le(msg->header);
3059 	unsigned int i;
3060 	enum frs_typec_current partner_frs_current;
3061 	bool frs_enable;
3062 	int ret;
3063 
3064 	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
3065 		port->vdm_state = VDM_STATE_ERR_BUSY;
3066 		tcpm_ams_finish(port);
3067 		mod_vdm_delayed_work(port, 0);
3068 	}
3069 
3070 	switch (type) {
3071 	case PD_DATA_SOURCE_CAP:
3072 		for (i = 0; i < cnt; i++)
3073 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
3074 
3075 		port->nr_source_caps = cnt;
3076 
3077 		tcpm_log_source_caps(port);
3078 
3079 		tcpm_validate_caps(port, port->source_caps,
3080 				   port->nr_source_caps);
3081 
3082 		tcpm_register_source_caps(port);
3083 
3084 		/*
3085 		 * Adjust revision in subsequent message headers, as required,
3086 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3087 		 * support Rev 1.0 so just do nothing in that scenario.
3088 		 */
3089 		if (rev == PD_REV10) {
3090 			if (port->ams == GET_SOURCE_CAPABILITIES)
3091 				tcpm_ams_finish(port);
3092 			break;
3093 		}
3094 
3095 		if (rev < PD_MAX_REV) {
3096 			port->negotiated_rev = rev;
3097 			if (port->negotiated_rev_prime > port->negotiated_rev)
3098 				port->negotiated_rev_prime = port->negotiated_rev;
3099 		}
3100 
3101 		if (port->pwr_role == TYPEC_SOURCE) {
3102 			if (port->ams == GET_SOURCE_CAPABILITIES)
3103 				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
3104 			/* Unexpected Source Capabilities */
3105 			else
3106 				tcpm_pd_handle_msg(port,
3107 						   port->negotiated_rev < PD_REV30 ?
3108 						   PD_MSG_CTRL_REJECT :
3109 						   PD_MSG_CTRL_NOT_SUPP,
3110 						   NONE_AMS);
3111 		} else if (port->state == SNK_WAIT_CAPABILITIES) {
3112 		/*
3113 		 * This message may be received even if VBUS is not
3114 		 * present. This is quite unexpected; see USB PD
3115 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
3116 		 * However, at the same time, we must be ready to
3117 		 * receive this message and respond to it 15ms after
3118 		 * receiving PS_RDY during power swap operations, no matter
3119 		 * if VBUS is available or not (USB PD specification,
3120 		 * section 6.5.9.2).
3121 		 * So we need to accept the message either way,
3122 		 * but be prepared to keep waiting for VBUS after it was
3123 		 * handled.
3124 		 */
3125 			port->ams = POWER_NEGOTIATION;
3126 			port->in_ams = true;
3127 			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3128 		} else {
3129 			if (port->ams == GET_SOURCE_CAPABILITIES)
3130 				tcpm_ams_finish(port);
3131 			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
3132 					     POWER_NEGOTIATION, 0);
3133 		}
3134 		break;
3135 	case PD_DATA_REQUEST:
3136 		/*
3137 		 * Adjust revision in subsequent message headers, as required,
3138 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3139 		 * support Rev 1.0 so just reject in that scenario.
3140 		 */
3141 		if (rev == PD_REV10) {
3142 			tcpm_pd_handle_msg(port,
3143 					   port->negotiated_rev < PD_REV30 ?
3144 					   PD_MSG_CTRL_REJECT :
3145 					   PD_MSG_CTRL_NOT_SUPP,
3146 					   NONE_AMS);
3147 			break;
3148 		}
3149 
3150 		if (rev < PD_MAX_REV) {
3151 			port->negotiated_rev = rev;
3152 			if (port->negotiated_rev_prime > port->negotiated_rev)
3153 				port->negotiated_rev_prime = port->negotiated_rev;
3154 		}
3155 
3156 		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
3157 			tcpm_pd_handle_msg(port,
3158 					   port->negotiated_rev < PD_REV30 ?
3159 					   PD_MSG_CTRL_REJECT :
3160 					   PD_MSG_CTRL_NOT_SUPP,
3161 					   NONE_AMS);
3162 			break;
3163 		}
3164 
3165 		port->sink_request = le32_to_cpu(msg->payload[0]);
3166 
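		/*
		 * If a VDM AMS is still running under an explicit contract,
		 * answer the Request with Wait so the partner retries the
		 * power negotiation once the VDM exchange has finished.
		 */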
3167 		if (port->vdm_sm_running && port->explicit_contract) {
3168 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
3169 			break;
3170 		}
3171 
3172 		if (port->state == SRC_SEND_CAPABILITIES)
3173 			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
3174 		else
3175 			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
3176 					     POWER_NEGOTIATION, 0);
3177 		break;
3178 	case PD_DATA_SINK_CAP:
3179 		/* We don't do anything with this at the moment... */
3180 		for (i = 0; i < cnt; i++)
3181 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
3182 
3183 		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
3184 			PDO_FIXED_FRS_CURR_SHIFT;
3185 		frs_enable = partner_frs_current && (partner_frs_current <=
3186 						     port->new_source_frs_current);
3187 		tcpm_log(port,
3188 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
3189 			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
3190 		if (frs_enable) {
3191 			ret  = port->tcpc->enable_frs(port->tcpc, true);
3192 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
3193 		}
3194 
3195 		port->nr_sink_caps = cnt;
3196 		port->sink_cap_done = true;
3197 		tcpm_register_sink_caps(port);
3198 
3199 		if (port->ams == GET_SINK_CAPABILITIES)
3200 			tcpm_set_state(port, ready_state(port), 0);
3201 		/* Unexpected Sink Capabilities */
3202 		else
3203 			tcpm_pd_handle_msg(port,
3204 					   port->negotiated_rev < PD_REV30 ?
3205 					   PD_MSG_CTRL_REJECT :
3206 					   PD_MSG_CTRL_NOT_SUPP,
3207 					   NONE_AMS);
3208 		break;
3209 	case PD_DATA_VENDOR_DEF:
3210 		tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
3211 		break;
3212 	case PD_DATA_BIST:
3213 		port->bist_request = le32_to_cpu(msg->payload[0]);
3214 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
3215 		break;
3216 	case PD_DATA_ALERT:
3217 		if (port->state != SRC_READY && port->state != SNK_READY)
3218 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3219 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3220 					     NONE_AMS, 0);
3221 		else
3222 			tcpm_handle_alert(port, msg->payload, cnt);
3223 		break;
3224 	case PD_DATA_BATT_STATUS:
3225 	case PD_DATA_GET_COUNTRY_INFO:
3226 		/* Currently unsupported */
3227 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3228 				   PD_MSG_CTRL_REJECT :
3229 				   PD_MSG_CTRL_NOT_SUPP,
3230 				   NONE_AMS);
3231 		break;
3232 	default:
3233 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3234 				   PD_MSG_CTRL_REJECT :
3235 				   PD_MSG_CTRL_NOT_SUPP,
3236 				   NONE_AMS);
3237 		tcpm_log(port, "Unrecognized data message type %#x", type);
3238 		break;
3239 	}
3240 }
3241 
3242 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3243 {
3244 	if (port->pps_pending) {
3245 		port->pps_status = result;
3246 		port->pps_pending = false;
3247 		complete(&port->pps_complete);
3248 	}
3249 }
3250 
3251 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
3252 				 const struct pd_message *msg,
3253 				 enum tcpm_transmit_type rx_sop_type)
3254 {
3255 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3256 	enum tcpm_state next_state;
3257 	unsigned int rev = pd_header_rev_le(msg->header);
3258 
3259 	/*
3260 	 * Stop the VDM state machine if it is interrupted by other Messages. NOT_SUPP is
3261 	 * allowed while a VDM AMS is waiting for VDM responses and will be handled later.
3262 	 */
3263 	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
3264 		port->vdm_state = VDM_STATE_ERR_BUSY;
3265 		tcpm_ams_finish(port);
3266 		mod_vdm_delayed_work(port, 0);
3267 	}
3268 
3269 	switch (type) {
3270 	case PD_CTRL_GOOD_CRC:
3271 	case PD_CTRL_PING:
3272 		break;
3273 	case PD_CTRL_GET_SOURCE_CAP:
3274 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
3275 		break;
3276 	case PD_CTRL_GET_SINK_CAP:
3277 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
3278 		break;
3279 	case PD_CTRL_GOTO_MIN:
3280 		break;
3281 	case PD_CTRL_PS_RDY:
3282 		switch (port->state) {
3283 		case SNK_TRANSITION_SINK:
3284 			if (port->vbus_present) {
3285 				tcpm_set_current_limit(port,
3286 						       port->req_current_limit,
3287 						       port->req_supply_voltage);
3288 				port->explicit_contract = true;
3289 				tcpm_set_auto_vbus_discharge_threshold(port,
3290 								       TYPEC_PWR_MODE_PD,
3291 								       port->pps_data.active,
3292 								       port->supply_voltage);
3293 				tcpm_set_state(port, SNK_READY, 0);
3294 			} else {
3295 				/*
3296 				 * Seen after power swap. Keep waiting for VBUS
3297 				 * in a transitional state.
3298 				 */
3299 				tcpm_set_state(port,
3300 					       SNK_TRANSITION_SINK_VBUS, 0);
3301 			}
3302 			break;
3303 		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3304 			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
3305 			break;
3306 		case PR_SWAP_SNK_SRC_SINK_OFF:
3307 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
3308 			break;
3309 		case VCONN_SWAP_WAIT_FOR_VCONN:
3310 			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
3311 			break;
3312 		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3313 			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
3314 			break;
3315 		default:
3316 			tcpm_pd_handle_state(port,
3317 					     port->pwr_role == TYPEC_SOURCE ?
3318 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3319 					     SNK_SOFT_RESET,
3320 					     NONE_AMS, 0);
3321 			break;
3322 		}
3323 		break;
3324 	case PD_CTRL_REJECT:
3325 	case PD_CTRL_WAIT:
3326 	case PD_CTRL_NOT_SUPP:
3327 		switch (port->state) {
3328 		case SNK_NEGOTIATE_CAPABILITIES:
3329 			/* USB PD specification, Figure 8-43 */
3330 			if (port->explicit_contract)
3331 				next_state = SNK_READY;
3332 			else
3333 				next_state = SNK_WAIT_CAPABILITIES;
3334 
3335 			/* Threshold was relaxed before sending Request. Restore it back. */
3336 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3337 							       port->pps_data.active,
3338 							       port->supply_voltage);
3339 			tcpm_set_state(port, next_state, 0);
3340 			break;
3341 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3342 			/* Revert data back from any requested PPS updates */
3343 			port->pps_data.req_out_volt = port->supply_voltage;
3344 			port->pps_data.req_op_curr = port->current_limit;
3345 			port->pps_status = (type == PD_CTRL_WAIT ?
3346 					    -EAGAIN : -EOPNOTSUPP);
3347 
3348 			/* Threshold was relaxed before sending Request. Restore it back. */
3349 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3350 							       port->pps_data.active,
3351 							       port->supply_voltage);
3352 
3353 			tcpm_set_state(port, SNK_READY, 0);
3354 			break;
3355 		case DR_SWAP_SEND:
3356 			port->swap_status = (type == PD_CTRL_WAIT ?
3357 					     -EAGAIN : -EOPNOTSUPP);
3358 			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
3359 			break;
3360 		case PR_SWAP_SEND:
3361 			port->swap_status = (type == PD_CTRL_WAIT ?
3362 					     -EAGAIN : -EOPNOTSUPP);
3363 			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
3364 			break;
3365 		case VCONN_SWAP_SEND:
3366 			port->swap_status = (type == PD_CTRL_WAIT ?
3367 					     -EAGAIN : -EOPNOTSUPP);
3368 			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
3369 			break;
3370 		case FR_SWAP_SEND:
3371 			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
3372 			break;
3373 		case GET_SINK_CAP:
3374 			port->sink_cap_done = true;
3375 			tcpm_set_state(port, ready_state(port), 0);
3376 			break;
3377 		/*
3378 		 * Some port partners do not support GET_STATUS; avoid soft resetting the link
3379 		 * to prevent redundant power re-negotiation.
3380 		 */
3381 		case GET_STATUS_SEND:
3382 			tcpm_set_state(port, ready_state(port), 0);
3383 			break;
3384 		case SRC_READY:
3385 		case SNK_READY:
3386 			if (port->vdm_state > VDM_STATE_READY) {
3387 				port->vdm_state = VDM_STATE_DONE;
3388 				if (tcpm_vdm_ams(port))
3389 					tcpm_ams_finish(port);
3390 				mod_vdm_delayed_work(port, 0);
3391 				break;
3392 			}
3393 			fallthrough;
3394 		default:
3395 			tcpm_pd_handle_state(port,
3396 					     port->pwr_role == TYPEC_SOURCE ?
3397 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3398 					     SNK_SOFT_RESET,
3399 					     NONE_AMS, 0);
3400 			break;
3401 		}
3402 		break;
3403 	case PD_CTRL_ACCEPT:
3404 		switch (port->state) {
3405 		case SNK_NEGOTIATE_CAPABILITIES:
3406 			port->pps_data.active = false;
3407 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3408 			break;
3409 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3410 			port->pps_data.active = true;
3411 			port->pps_data.min_volt = port->pps_data.req_min_volt;
3412 			port->pps_data.max_volt = port->pps_data.req_max_volt;
3413 			port->pps_data.max_curr = port->pps_data.req_max_curr;
3414 			port->req_supply_voltage = port->pps_data.req_out_volt;
3415 			port->req_current_limit = port->pps_data.req_op_curr;
3416 			power_supply_changed(port->psy);
3417 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3418 			break;
3419 		case SOFT_RESET_SEND:
3420 			if (port->ams == SOFT_RESET_AMS)
3421 				tcpm_ams_finish(port);
3422 			/*
3423 			 * A SOP' Soft Reset is sent after a Vconn Swap;
3424 			 * once accepted, return to the ready state.
3425 			 */
3426 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
3427 				if (rev < port->negotiated_rev_prime)
3428 					port->negotiated_rev_prime = rev;
3429 				tcpm_set_state(port, ready_state(port), 0);
3430 				break;
3431 			}
3432 			if (port->pwr_role == TYPEC_SOURCE) {
3433 				port->upcoming_state = SRC_SEND_CAPABILITIES;
3434 				tcpm_ams_start(port, POWER_NEGOTIATION);
3435 			} else {
3436 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3437 			}
3438 			break;
3439 		case DR_SWAP_SEND:
3440 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
3441 			break;
3442 		case PR_SWAP_SEND:
3443 			tcpm_set_state(port, PR_SWAP_START, 0);
3444 			break;
3445 		case VCONN_SWAP_SEND:
3446 			tcpm_set_state(port, VCONN_SWAP_START, 0);
3447 			break;
3448 		case FR_SWAP_SEND:
3449 			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
3450 			break;
3451 		default:
3452 			tcpm_pd_handle_state(port,
3453 					     port->pwr_role == TYPEC_SOURCE ?
3454 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3455 					     SNK_SOFT_RESET,
3456 					     NONE_AMS, 0);
3457 			break;
3458 		}
3459 		break;
3460 	case PD_CTRL_SOFT_RESET:
3461 		port->ams = SOFT_RESET_AMS;
3462 		tcpm_set_state(port, SOFT_RESET, 0);
3463 		break;
3464 	case PD_CTRL_DR_SWAP:
3465 		/*
3466 		 * XXX
3467 		 * 6.3.9: If an alternate mode is active, a request to swap
3468 		 * alternate modes shall trigger a port reset.
3469 		 */
3470 		if (port->typec_caps.data != TYPEC_PORT_DRD) {
3471 			tcpm_pd_handle_msg(port,
3472 					   port->negotiated_rev < PD_REV30 ?
3473 					   PD_MSG_CTRL_REJECT :
3474 					   PD_MSG_CTRL_NOT_SUPP,
3475 					   NONE_AMS);
3476 		} else {
3477 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3478 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3479 				break;
3480 			}
3481 
3482 			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
3483 		}
3484 		break;
3485 	case PD_CTRL_PR_SWAP:
3486 		if (port->port_type != TYPEC_PORT_DRP) {
3487 			tcpm_pd_handle_msg(port,
3488 					   port->negotiated_rev < PD_REV30 ?
3489 					   PD_MSG_CTRL_REJECT :
3490 					   PD_MSG_CTRL_NOT_SUPP,
3491 					   NONE_AMS);
3492 		} else {
3493 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3494 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3495 				break;
3496 			}
3497 
3498 			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
3499 		}
3500 		break;
3501 	case PD_CTRL_VCONN_SWAP:
3502 		if (port->send_discover && port->negotiated_rev < PD_REV30) {
3503 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3504 			break;
3505 		}
3506 
3507 		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
3508 		break;
3509 	case PD_CTRL_GET_SOURCE_CAP_EXT:
3510 	case PD_CTRL_GET_STATUS:
3511 	case PD_CTRL_FR_SWAP:
3512 	case PD_CTRL_GET_PPS_STATUS:
3513 	case PD_CTRL_GET_COUNTRY_CODES:
3514 		/* Currently not supported */
3515 		tcpm_pd_handle_msg(port,
3516 				   port->negotiated_rev < PD_REV30 ?
3517 				   PD_MSG_CTRL_REJECT :
3518 				   PD_MSG_CTRL_NOT_SUPP,
3519 				   NONE_AMS);
3520 		break;
3521 	default:
3522 		tcpm_pd_handle_msg(port,
3523 				   port->negotiated_rev < PD_REV30 ?
3524 				   PD_MSG_CTRL_REJECT :
3525 				   PD_MSG_CTRL_NOT_SUPP,
3526 				   NONE_AMS);
3527 		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
3528 		break;
3529 	}
3530 }
3531 
3532 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
3533 				    const struct pd_message *msg)
3534 {
3535 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
3536 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
3537 
3538 	/* Stop the VDM state machine if it is interrupted by other Messages */
3539 	if (tcpm_vdm_ams(port)) {
3540 		port->vdm_state = VDM_STATE_ERR_BUSY;
3541 		tcpm_ams_finish(port);
3542 		mod_vdm_delayed_work(port, 0);
3543 	}
3544 
3545 	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
3546 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3547 		tcpm_log(port, "Unchunked extended messages unsupported");
3548 		return;
3549 	}
3550 
3551 	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
3552 		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
3553 		tcpm_log(port, "Chunk handling not yet supported");
3554 		return;
3555 	}
3556 
3557 	switch (type) {
3558 	case PD_EXT_STATUS:
3559 	case PD_EXT_PPS_STATUS:
3560 		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
3561 			tcpm_ams_finish(port);
3562 			tcpm_set_state(port, ready_state(port), 0);
3563 		} else {
3564 			/* unexpected Status or PPS_Status Message */
3565 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3566 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3567 					     NONE_AMS, 0);
3568 		}
3569 		break;
3570 	case PD_EXT_SOURCE_CAP_EXT:
3571 	case PD_EXT_GET_BATT_CAP:
3572 	case PD_EXT_GET_BATT_STATUS:
3573 	case PD_EXT_BATT_CAP:
3574 	case PD_EXT_GET_MANUFACTURER_INFO:
3575 	case PD_EXT_MANUFACTURER_INFO:
3576 	case PD_EXT_SECURITY_REQUEST:
3577 	case PD_EXT_SECURITY_RESPONSE:
3578 	case PD_EXT_FW_UPDATE_REQUEST:
3579 	case PD_EXT_FW_UPDATE_RESPONSE:
3580 	case PD_EXT_COUNTRY_INFO:
3581 	case PD_EXT_COUNTRY_CODES:
3582 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3583 		break;
3584 	default:
3585 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3586 		tcpm_log(port, "Unrecognized extended message type %#x", type);
3587 		break;
3588 	}
3589 }
3590 
3591 static void tcpm_pd_rx_handler(struct kthread_work *work)
3592 {
3593 	struct pd_rx_event *event = container_of(work,
3594 						 struct pd_rx_event, work);
3595 	const struct pd_message *msg = &event->msg;
3596 	unsigned int cnt = pd_header_cnt_le(msg->header);
3597 	struct tcpm_port *port = event->port;
3598 	enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
3599 
3600 	mutex_lock(&port->lock);
3601 
3602 	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
3603 		 port->attached);
3604 
3605 	if (port->attached) {
3606 		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3607 		unsigned int msgid = pd_header_msgid_le(msg->header);
3608 
3609 		/*
3610 		 * Drop SOP' messages if we cannot receive them, as reported by
3611 		 * tcpm_can_communicate_sop_prime().
3612 		 */
3613 		if (rx_sop_type == TCPC_TX_SOP_PRIME &&
3614 		    !tcpm_can_communicate_sop_prime(port))
3615 			goto done;
3616 
3617 		/*
3618 		 * USB PD standard, 6.6.1.2:
3619 		 * "... if MessageID value in a received Message is the
3620 		 * same as the stored value, the receiver shall return a
3621 		 * GoodCRC Message with that MessageID value and drop
3622 		 * the Message (this is a retry of an already received
3623 		 * Message). Note: this shall not apply to the Soft_Reset
3624 		 * Message which always has a MessageID value of zero."
3625 		 */
3626 		switch (rx_sop_type) {
3627 		case TCPC_TX_SOP_PRIME:
3628 			if (msgid == port->rx_msgid_prime)
3629 				goto done;
3630 			port->rx_msgid_prime = msgid;
3631 			break;
3632 		case TCPC_TX_SOP:
3633 		default:
3634 			if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3635 				goto done;
3636 			port->rx_msgid = msgid;
3637 			break;
3638 		}
3639 
3640 		/*
3641 		 * If both ends believe to be DFP/host, we have a data role
3642 		 * mismatch.
3643 		 */
3644 		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3645 		    (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
3646 			tcpm_log(port,
3647 				 "Data role mismatch, initiating error recovery");
3648 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3649 		} else {
3650 			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3651 				tcpm_pd_ext_msg_request(port, msg);
3652 			else if (cnt)
3653 				tcpm_pd_data_request(port, msg, rx_sop_type);
3654 			else
3655 				tcpm_pd_ctrl_request(port, msg, rx_sop_type);
3656 		}
3657 	}
3658 
3659 done:
3660 	mutex_unlock(&port->lock);
3661 	kfree(event);
3662 }
3663 
3664 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3665 		     enum tcpm_transmit_type rx_sop_type)
3666 {
3667 	struct pd_rx_event *event;
3668 
3669 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
3670 	if (!event)
3671 		return;
3672 
3673 	kthread_init_work(&event->work, tcpm_pd_rx_handler);
3674 	event->port = port;
3675 	event->rx_sop_type = rx_sop_type;
3676 	memcpy(&event->msg, msg, sizeof(*msg));
3677 	kthread_queue_work(port->wq, &event->work);
3678 }
3679 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
3680 
3681 static int tcpm_pd_send_control(struct tcpm_port *port,
3682 				enum pd_ctrl_msg_type type,
3683 				enum tcpm_transmit_type tx_sop_type)
3684 {
3685 	struct pd_message msg;
3686 
3687 	memset(&msg, 0, sizeof(msg));
3688 	switch (tx_sop_type) {
3689 	case TCPC_TX_SOP_PRIME:
3690 		msg.header = PD_HEADER_LE(type,
3691 					  0,	/* Cable Plug Indicator for DFP/UFP */
3692 					  0,	/* Reserved */
3693 					  port->negotiated_rev,
3694 					  port->message_id_prime,
3695 					  0);
3696 		break;
3697 	case TCPC_TX_SOP:
3698 		msg.header = PD_HEADER_LE(type,
3699 					  port->pwr_role,
3700 					  port->data_role,
3701 					  port->negotiated_rev,
3702 					  port->message_id,
3703 					  0);
3704 		break;
3705 	default:
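		/* Any other SOP* type currently reuses the same header layout as SOP. */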
3706 		msg.header = PD_HEADER_LE(type,
3707 					  port->pwr_role,
3708 					  port->data_role,
3709 					  port->negotiated_rev,
3710 					  port->message_id,
3711 					  0);
3712 		break;
3713 	}
3714 
3715 	return tcpm_pd_transmit(port, tx_sop_type, &msg);
3716 }
3717 
3718 /*
3719  * Send queued message without affecting state.
3720  * Return true if state machine should go back to sleep,
3721  * false otherwise.
3722  */
3723 static bool tcpm_send_queued_message(struct tcpm_port *port)
3724 {
3725 	enum pd_msg_request queued_message;
3726 	int ret;
3727 
3728 	do {
3729 		queued_message = port->queued_message;
3730 		port->queued_message = PD_MSG_NONE;
3731 
3732 		switch (queued_message) {
3733 		case PD_MSG_CTRL_WAIT:
3734 			tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
3735 			break;
3736 		case PD_MSG_CTRL_REJECT:
3737 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
3738 			break;
3739 		case PD_MSG_CTRL_NOT_SUPP:
3740 			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
3741 			break;
3742 		case PD_MSG_DATA_SINK_CAP:
3743 			ret = tcpm_pd_send_sink_caps(port);
3744 			if (ret < 0) {
3745 				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3746 				tcpm_set_state(port, SNK_SOFT_RESET, 0);
3747 			}
3748 			tcpm_ams_finish(port);
3749 			break;
3750 		case PD_MSG_DATA_SOURCE_CAP:
3751 			ret = tcpm_pd_send_source_caps(port);
3752 			if (ret < 0) {
3753 				tcpm_log(port,
3754 					 "Unable to send src caps, ret=%d",
3755 					 ret);
3756 				tcpm_set_state(port, SOFT_RESET_SEND, 0);
3757 			} else if (port->pwr_role == TYPEC_SOURCE) {
3758 				tcpm_ams_finish(port);
3759 				tcpm_set_state(port, HARD_RESET_SEND,
3760 					       PD_T_SENDER_RESPONSE);
3761 			} else {
3762 				tcpm_ams_finish(port);
3763 			}
3764 			break;
3765 		default:
3766 			break;
3767 		}
3768 	} while (port->queued_message != PD_MSG_NONE);
3769 
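	/*
	 * A delayed state transition may be pending; if its deadline has not
	 * passed yet, re-arm the delayed work and tell the caller to sleep.
	 */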
3770 	if (port->delayed_state != INVALID_STATE) {
3771 		if (ktime_after(port->delayed_runtime, ktime_get())) {
3772 			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3773 									  ktime_get())));
3774 			return true;
3775 		}
3776 		port->delayed_state = INVALID_STATE;
3777 	}
3778 	return false;
3779 }
3780 
3781 static int tcpm_pd_check_request(struct tcpm_port *port)
3782 {
3783 	u32 pdo, rdo = port->sink_request;
3784 	unsigned int max, op, pdo_max, index;
3785 	enum pd_pdo_type type;
3786 
3787 	index = rdo_index(rdo);
3788 	if (!index || index > port->nr_src_pdo)
3789 		return -EINVAL;
3790 
3791 	pdo = port->src_pdo[index - 1];
3792 	type = pdo_type(pdo);
3793 	switch (type) {
3794 	case PDO_TYPE_FIXED:
3795 	case PDO_TYPE_VAR:
3796 		max = rdo_max_current(rdo);
3797 		op = rdo_op_current(rdo);
3798 		pdo_max = pdo_max_current(pdo);
3799 
3800 		if (op > pdo_max)
3801 			return -EINVAL;
3802 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3803 			return -EINVAL;
3804 
3805 		if (type == PDO_TYPE_FIXED)
3806 			tcpm_log(port,
3807 				 "Requested %u mV, %u mA for %u / %u mA",
3808 				 pdo_fixed_voltage(pdo), pdo_max, op, max);
3809 		else
3810 			tcpm_log(port,
3811 				 "Requested %u -> %u mV, %u mA for %u / %u mA",
3812 				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3813 				 pdo_max, op, max);
3814 		break;
3815 	case PDO_TYPE_BATT:
3816 		max = rdo_max_power(rdo);
3817 		op = rdo_op_power(rdo);
3818 		pdo_max = pdo_max_power(pdo);
3819 
3820 		if (op > pdo_max)
3821 			return -EINVAL;
3822 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3823 			return -EINVAL;
3824 		tcpm_log(port,
3825 			 "Requested %u -> %u mV, %u mW for %u / %u mW",
3826 			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3827 			 pdo_max, op, max);
3828 		break;
3829 	default:
3830 		return -EINVAL;
3831 	}
3832 
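	/* Object position 1 is the mandatory vSafe5V fixed PDO. */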
3833 	port->op_vsafe5v = index == 1;
3834 
3835 	return 0;
3836 }
3837 
3838 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3839 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3840 
3841 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3842 			      int *src_pdo)
3843 {
3844 	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3845 		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3846 		     min_snk_mv = 0;
3847 	int ret = -EINVAL;
3848 
3849 	port->pps_data.supported = false;
3850 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3851 	power_supply_changed(port->psy);
3852 
3853 	/*
3854 	 * Select the source PDO providing the most power which has a
3855 	 * matching sink cap.
3856 	 */
3857 	for (i = 0; i < port->nr_source_caps; i++) {
3858 		u32 pdo = port->source_caps[i];
3859 		enum pd_pdo_type type = pdo_type(pdo);
3860 
3861 		switch (type) {
3862 		case PDO_TYPE_FIXED:
3863 			max_src_mv = pdo_fixed_voltage(pdo);
3864 			min_src_mv = max_src_mv;
3865 			break;
3866 		case PDO_TYPE_BATT:
3867 		case PDO_TYPE_VAR:
3868 			max_src_mv = pdo_max_voltage(pdo);
3869 			min_src_mv = pdo_min_voltage(pdo);
3870 			break;
3871 		case PDO_TYPE_APDO:
3872 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3873 				port->pps_data.supported = true;
3874 				port->usb_type =
3875 					POWER_SUPPLY_USB_TYPE_PD_PPS;
3876 				power_supply_changed(port->psy);
3877 			}
3878 			continue;
3879 		default:
3880 			tcpm_log(port, "Invalid source PDO type, ignoring");
3881 			continue;
3882 		}
3883 
3884 		switch (type) {
3885 		case PDO_TYPE_FIXED:
3886 		case PDO_TYPE_VAR:
3887 			src_ma = pdo_max_current(pdo);
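			/* Guaranteed power in mW: maximum current times the PDO's minimum voltage */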
3888 			src_mw = src_ma * min_src_mv / 1000;
3889 			break;
3890 		case PDO_TYPE_BATT:
3891 			src_mw = pdo_max_power(pdo);
3892 			break;
3893 		case PDO_TYPE_APDO:
3894 			continue;
3895 		default:
3896 			tcpm_log(port, "Invalid source PDO type, ignoring");
3897 			continue;
3898 		}
3899 
3900 		for (j = 0; j < port->nr_snk_pdo; j++) {
3901 			pdo = port->snk_pdo[j];
3902 
3903 			switch (pdo_type(pdo)) {
3904 			case PDO_TYPE_FIXED:
3905 				max_snk_mv = pdo_fixed_voltage(pdo);
3906 				min_snk_mv = max_snk_mv;
3907 				break;
3908 			case PDO_TYPE_BATT:
3909 			case PDO_TYPE_VAR:
3910 				max_snk_mv = pdo_max_voltage(pdo);
3911 				min_snk_mv = pdo_min_voltage(pdo);
3912 				break;
3913 			case PDO_TYPE_APDO:
3914 				continue;
3915 			default:
3916 				tcpm_log(port, "Invalid sink PDO type, ignoring");
3917 				continue;
3918 			}
3919 
3920 			if (max_src_mv <= max_snk_mv &&
3921 				min_src_mv >= min_snk_mv) {
3922 				/* Prefer higher voltages if available */
3923 				if ((src_mw == max_mw && min_src_mv > max_mv) ||
3924 							src_mw > max_mw) {
3925 					*src_pdo = i;
3926 					*sink_pdo = j;
3927 					max_mw = src_mw;
3928 					max_mv = min_src_mv;
3929 					ret = 0;
3930 				}
3931 			}
3932 		}
3933 	}
3934 
3935 	return ret;
3936 }
3937 
3938 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
3939 {
3940 	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
3941 	unsigned int src_pdo = 0;
3942 	u32 pdo, src;
3943 
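	/* PDO index 0 is the mandatory vSafe5V fixed supply, so start scanning for APDOs at 1. */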
3944 	for (i = 1; i < port->nr_source_caps; ++i) {
3945 		pdo = port->source_caps[i];
3946 
3947 		switch (pdo_type(pdo)) {
3948 		case PDO_TYPE_APDO:
3949 			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
3950 				tcpm_log(port, "Not PPS APDO (source), ignoring");
3951 				continue;
3952 			}
3953 
3954 			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
3955 			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
3956 				continue;
3957 
3958 			src_ma = pdo_pps_apdo_max_current(pdo);
3959 			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
3960 			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
3961 			if (op_mw > max_temp_mw) {
3962 				src_pdo = i;
3963 				max_temp_mw = op_mw;
3964 			}
3965 			break;
3966 		default:
3967 			tcpm_log(port, "Not APDO type (source), ignoring");
3968 			continue;
3969 		}
3970 	}
3971 
3972 	if (src_pdo) {
3973 		src = port->source_caps[src_pdo];
3974 
3975 		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
3976 		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
3977 		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
3978 		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
3979 						 port->pps_data.req_op_curr);
3980 	}
3981 
3982 	return src_pdo;
3983 }
3984 
3985 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
3986 {
3987 	unsigned int mv, ma, mw, flags;
3988 	unsigned int max_ma, max_mw;
3989 	enum pd_pdo_type type;
3990 	u32 pdo, matching_snk_pdo;
3991 	int src_pdo_index = 0;
3992 	int snk_pdo_index = 0;
3993 	int ret;
3994 
3995 	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
3996 	if (ret < 0)
3997 		return ret;
3998 
3999 	pdo = port->source_caps[src_pdo_index];
4000 	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
4001 	type = pdo_type(pdo);
4002 
4003 	switch (type) {
4004 	case PDO_TYPE_FIXED:
4005 		mv = pdo_fixed_voltage(pdo);
4006 		break;
4007 	case PDO_TYPE_BATT:
4008 	case PDO_TYPE_VAR:
4009 		mv = pdo_min_voltage(pdo);
4010 		break;
4011 	default:
4012 		tcpm_log(port, "Invalid PDO selected!");
4013 		return -EINVAL;
4014 	}
4015 
4016 	/* Select maximum available current within the sink pdo's limit */
4017 	if (type == PDO_TYPE_BATT) {
4018 		mw = min_power(pdo, matching_snk_pdo);
4019 		ma = 1000 * mw / mv;
4020 	} else {
4021 		ma = min_current(pdo, matching_snk_pdo);
4022 		mw = ma * mv / 1000;
4023 	}
4024 
4025 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4026 
4027 	/* Set mismatch bit if offered power is less than operating power */
4028 	max_ma = ma;
4029 	max_mw = mw;
4030 	if (mw < port->operating_snk_mw) {
4031 		flags |= RDO_CAP_MISMATCH;
4032 		if (type == PDO_TYPE_BATT &&
4033 		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
4034 			max_mw = pdo_max_power(matching_snk_pdo);
4035 		else if (pdo_max_current(matching_snk_pdo) >
4036 			 pdo_max_current(pdo))
4037 			max_ma = pdo_max_current(matching_snk_pdo);
4038 	}
4039 
4040 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4041 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4042 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4043 		 port->polarity);
4044 
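	/* RDO object positions are 1-based, hence src_pdo_index + 1 below. */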
4045 	if (type == PDO_TYPE_BATT) {
4046 		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
4047 
4048 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
4049 			 src_pdo_index, mv, mw,
4050 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4051 	} else {
4052 		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
4053 
4054 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
4055 			 src_pdo_index, mv, ma,
4056 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4057 	}
4058 
4059 	port->req_current_limit = ma;
4060 	port->req_supply_voltage = mv;
4061 
4062 	return 0;
4063 }
4064 
4065 static int tcpm_pd_send_request(struct tcpm_port *port)
4066 {
4067 	struct pd_message msg;
4068 	int ret;
4069 	u32 rdo;
4070 
4071 	ret = tcpm_pd_build_request(port, &rdo);
4072 	if (ret < 0)
4073 		return ret;
4074 
4075 	/*
4076 	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
4077 	 * It is safer to modify the threshold here.
4078 	 */
4079 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4080 
4081 	memset(&msg, 0, sizeof(msg));
4082 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4083 				  port->pwr_role,
4084 				  port->data_role,
4085 				  port->negotiated_rev,
4086 				  port->message_id, 1);
4087 	msg.payload[0] = cpu_to_le32(rdo);
4088 
4089 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4090 }
4091 
4092 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
4093 {
4094 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
4095 	unsigned int src_pdo_index;
4096 
4097 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
4098 	if (!src_pdo_index)
4099 		return -EOPNOTSUPP;
4100 
4101 	max_mv = port->pps_data.req_max_volt;
4102 	max_ma = port->pps_data.req_max_curr;
4103 	out_mv = port->pps_data.req_out_volt;
4104 	op_ma = port->pps_data.req_op_curr;
4105 
4106 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4107 
4108 	op_mw = (op_ma * out_mv) / 1000;
4109 	if (op_mw < port->operating_snk_mw) {
4110 		/*
4111 		 * Try raising current to meet power needs. If that's not enough
4112 		 * then try upping the voltage. If that's still not enough
4113 		 * then we've obviously chosen a PPS APDO which really isn't
4114 		 * suitable so abandon ship.
4115 		 */
4116 		op_ma = (port->operating_snk_mw * 1000) / out_mv;
4117 		if ((port->operating_snk_mw * 1000) % out_mv)
4118 			++op_ma;
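		/* Round the requested current up to the next RDO_PROG_CURR_MA_STEP boundary. */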
4119 		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
4120 
4121 		if (op_ma > max_ma) {
4122 			op_ma = max_ma;
4123 			out_mv = (port->operating_snk_mw * 1000) / op_ma;
4124 			if ((port->operating_snk_mw * 1000) % op_ma)
4125 				++out_mv;
4126 			out_mv += RDO_PROG_VOLT_MV_STEP -
4127 				  (out_mv % RDO_PROG_VOLT_MV_STEP);
4128 
4129 			if (out_mv > max_mv) {
4130 				tcpm_log(port, "Invalid PPS APDO selected!");
4131 				return -EINVAL;
4132 			}
4133 		}
4134 	}
4135 
4136 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4137 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4138 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4139 		 port->polarity);
4140 
4141 	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
4142 
4143 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
4144 		 src_pdo_index, out_mv, op_ma);
4145 
4146 	port->pps_data.req_op_curr = op_ma;
4147 	port->pps_data.req_out_volt = out_mv;
4148 
4149 	return 0;
4150 }
4151 
4152 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
4153 {
4154 	struct pd_message msg;
4155 	int ret;
4156 	u32 rdo;
4157 
4158 	ret = tcpm_pd_build_pps_request(port, &rdo);
4159 	if (ret < 0)
4160 		return ret;
4161 
4162 	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
4163 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4164 
4165 	memset(&msg, 0, sizeof(msg));
4166 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4167 				  port->pwr_role,
4168 				  port->data_role,
4169 				  port->negotiated_rev,
4170 				  port->message_id, 1);
4171 	msg.payload[0] = cpu_to_le32(rdo);
4172 
4173 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4174 }
4175 
4176 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
4177 {
4178 	int ret;
4179 
4180 	if (enable && port->vbus_charge)
4181 		return -EINVAL;
4182 
4183 	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
4184 
4185 	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
4186 	if (ret < 0)
4187 		return ret;
4188 
4189 	port->vbus_source = enable;
4190 	return 0;
4191 }
4192 
4193 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
4194 {
4195 	int ret;
4196 
4197 	if (charge && port->vbus_source)
4198 		return -EINVAL;
4199 
4200 	if (charge != port->vbus_charge) {
4201 		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
4202 		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
4203 					   charge);
4204 		if (ret < 0)
4205 			return ret;
4206 	}
4207 	port->vbus_charge = charge;
4208 	power_supply_changed(port->psy);
4209 	return 0;
4210 }
4211 
4212 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4213 {
4214 	int ret;
4215 
4216 	if (!port->tcpc->start_toggling)
4217 		return false;
4218 
4219 	tcpm_log_force(port, "Start toggling");
4220 	ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4221 	return ret == 0;
4222 }
4223 
4224 static int tcpm_init_vbus(struct tcpm_port *port)
4225 {
4226 	int ret;
4227 
4228 	ret = port->tcpc->set_vbus(port->tcpc, false, false);
4229 	port->vbus_source = false;
4230 	port->vbus_charge = false;
4231 	return ret;
4232 }
4233 
4234 static int tcpm_init_vconn(struct tcpm_port *port)
4235 {
4236 	int ret;
4237 
4238 	ret = port->tcpc->set_vconn(port->tcpc, false);
4239 	port->vconn_role = TYPEC_SINK;
4240 	return ret;
4241 }
4242 
4243 static void tcpm_typec_connect(struct tcpm_port *port)
4244 {
4245 	struct typec_partner *partner;
4246 
4247 	if (!port->connected) {
4248 		port->connected = true;
4249 		/* Make sure we don't report stale identity information */
4250 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4251 		port->partner_desc.usb_pd = port->pd_capable;
4252 		if (tcpm_port_is_debug(port))
4253 			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4254 		else if (tcpm_port_is_audio(port))
4255 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4256 		else
4257 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4258 		partner = typec_register_partner(port->typec_port, &port->partner_desc);
4259 		if (IS_ERR(partner)) {
4260 			dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4261 			return;
4262 		}
4263 
4264 		port->partner = partner;
4265 		typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4266 	}
4267 }
4268 
4269 static int tcpm_src_attach(struct tcpm_port *port)
4270 {
4271 	enum typec_cc_polarity polarity =
4272 				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
4273 							 : TYPEC_POLARITY_CC1;
4274 	int ret;
4275 
4276 	if (port->attached)
4277 		return 0;
4278 
4279 	ret = tcpm_set_polarity(port, polarity);
4280 	if (ret < 0)
4281 		return ret;
4282 
4283 	tcpm_enable_auto_vbus_discharge(port, true);
4284 
4285 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
4286 	if (ret < 0)
4287 		return ret;
4288 
4289 	if (port->pd_supported) {
4290 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
4291 		if (ret < 0)
4292 			goto out_disable_mux;
4293 	}
4294 
4295 	/*
4296 	 * USB Type-C specification, version 1.2,
4297 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
4298 	 * Enable VCONN only if the non-RD port is set to RA.
4299 	 */
4300 	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
4301 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
4302 		ret = tcpm_set_vconn(port, true);
4303 		if (ret < 0)
4304 			goto out_disable_pd;
4305 	}
4306 
4307 	ret = tcpm_set_vbus(port, true);
4308 	if (ret < 0)
4309 		goto out_disable_vconn;
4310 
4311 	port->pd_capable = false;
4312 
4313 	port->partner = NULL;
4314 
4315 	port->attached = true;
4316 	port->send_discover = true;
4317 	port->send_discover_prime = false;
4318 
4319 	return 0;
4320 
4321 out_disable_vconn:
4322 	tcpm_set_vconn(port, false);
4323 out_disable_pd:
4324 	if (port->pd_supported)
4325 		port->tcpc->set_pd_rx(port->tcpc, false);
4326 out_disable_mux:
4327 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4328 		     TYPEC_ORIENTATION_NONE);
4329 	return ret;
4330 }
4331 
4332 static void tcpm_typec_disconnect(struct tcpm_port *port)
4333 {
4334 	/*
4335 	 * Unregister plug/cable outside of port->connected because cable can
4336 	 * be discovered before SRC_READY/SNK_READY states where port->connected
4337 	 * is set.
4338 	 */
4339 	typec_unregister_plug(port->plug_prime);
4340 	typec_unregister_cable(port->cable);
4341 	port->plug_prime = NULL;
4342 	port->cable = NULL;
4343 	if (port->connected) {
4344 		if (port->partner) {
4345 			typec_partner_set_usb_power_delivery(port->partner, NULL);
4346 			typec_unregister_partner(port->partner);
4347 			port->partner = NULL;
4348 		}
4349 		port->connected = false;
4350 	}
4351 }
4352 
4353 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4354 {
4355 	struct pd_mode_data *modep = &port->mode_data;
4356 	struct pd_mode_data *modep_prime = &port->mode_data_prime;
4357 	int i;
4358 
4359 	for (i = 0; i < modep->altmodes; i++) {
4360 		typec_unregister_altmode(port->partner_altmode[i]);
4361 		port->partner_altmode[i] = NULL;
4362 	}
4363 	for (i = 0; i < modep_prime->altmodes; i++) {
4364 		typec_unregister_altmode(port->plug_prime_altmode[i]);
4365 		port->plug_prime_altmode[i] = NULL;
4366 	}
4367 
4368 	memset(modep, 0, sizeof(*modep));
4369 	memset(modep_prime, 0, sizeof(*modep_prime));
4370 }
4371 
4372 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
4373 {
4374 	tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
4375 
4376 	if (port->tcpc->set_partner_usb_comm_capable)
4377 		port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
4378 }
4379 
4380 static void tcpm_reset_port(struct tcpm_port *port)
4381 {
4382 	tcpm_enable_auto_vbus_discharge(port, false);
4383 	port->in_ams = false;
4384 	port->ams = NONE_AMS;
4385 	port->vdm_sm_running = false;
4386 	tcpm_unregister_altmodes(port);
4387 	tcpm_typec_disconnect(port);
4388 	port->attached = false;
4389 	port->pd_capable = false;
4390 	port->pps_data.supported = false;
4391 	tcpm_set_partner_usb_comm_capable(port, false);
4392 
4393 	/*
4394 	 * First Rx ID should be 0; set this to a sentinel of -1 so that
4395 	 * tcpm_pd_rx_handler() can check whether it has been seen before.
4396 	 */
4397 	port->rx_msgid = -1;
4398 	port->rx_msgid_prime = -1;
4399 
4400 	port->tcpc->set_pd_rx(port->tcpc, false);
4401 	tcpm_init_vbus(port);	/* also disables charging */
4402 	tcpm_init_vconn(port);
4403 	tcpm_set_current_limit(port, 0, 0);
4404 	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
4405 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4406 		     TYPEC_ORIENTATION_NONE);
4407 	tcpm_set_attached_state(port, false);
4408 	port->try_src_count = 0;
4409 	port->try_snk_count = 0;
4410 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
4411 	power_supply_changed(port->psy);
4412 	port->nr_sink_caps = 0;
4413 	port->sink_cap_done = false;
4414 	if (port->tcpc->enable_frs)
4415 		port->tcpc->enable_frs(port->tcpc, false);
4416 
4417 	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
4418 	port->partner_sink_caps = NULL;
4419 	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4420 	port->partner_source_caps = NULL;
4421 	usb_power_delivery_unregister(port->partner_pd);
4422 	port->partner_pd = NULL;
4423 }
4424 
4425 static void tcpm_detach(struct tcpm_port *port)
4426 {
4427 	if (tcpm_port_is_disconnected(port))
4428 		port->hard_reset_count = 0;
4429 
4430 	if (!port->attached)
4431 		return;
4432 
4433 	if (port->tcpc->set_bist_data) {
4434 		tcpm_log(port, "disable BIST MODE TESTDATA");
4435 		port->tcpc->set_bist_data(port->tcpc, false);
4436 	}
4437 
4438 	tcpm_reset_port(port);
4439 }
4440 
4441 static void tcpm_src_detach(struct tcpm_port *port)
4442 {
4443 	tcpm_detach(port);
4444 }
4445 
4446 static int tcpm_snk_attach(struct tcpm_port *port)
4447 {
4448 	int ret;
4449 
4450 	if (port->attached)
4451 		return 0;
4452 
4453 	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
4454 				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
4455 	if (ret < 0)
4456 		return ret;
4457 
4458 	tcpm_enable_auto_vbus_discharge(port, true);
4459 
4460 	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
4461 	if (ret < 0)
4462 		return ret;
4463 
4464 	port->pd_capable = false;
4465 
4466 	port->partner = NULL;
4467 
4468 	port->attached = true;
4469 	port->send_discover = true;
4470 	port->send_discover_prime = false;
4471 
4472 	return 0;
4473 }
4474 
4475 static void tcpm_snk_detach(struct tcpm_port *port)
4476 {
4477 	tcpm_detach(port);
4478 }
4479 
4480 static int tcpm_acc_attach(struct tcpm_port *port)
4481 {
4482 	int ret;
4483 
4484 	if (port->attached)
4485 		return 0;
4486 
4487 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
4488 			     tcpm_data_role_for_source(port));
4489 	if (ret < 0)
4490 		return ret;
4491 
4492 	port->partner = NULL;
4493 
4494 	tcpm_typec_connect(port);
4495 
4496 	port->attached = true;
4497 
4498 	return 0;
4499 }
4500 
4501 static void tcpm_acc_detach(struct tcpm_port *port)
4502 {
4503 	tcpm_detach(port);
4504 }
4505 
4506 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
4507 {
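	/* Retry the Hard Reset until the retry count is exhausted, then fall back. */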
4508 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
4509 		return HARD_RESET_SEND;
4510 	if (port->pd_capable)
4511 		return ERROR_RECOVERY;
4512 	if (port->pwr_role == TYPEC_SOURCE)
4513 		return SRC_UNATTACHED;
4514 	if (port->state == SNK_WAIT_CAPABILITIES)
4515 		return SNK_READY;
4516 	return SNK_UNATTACHED;
4517 }
4518 
4519 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4520 {
4521 	if (port->port_type == TYPEC_PORT_DRP) {
4522 		if (port->pwr_role == TYPEC_SOURCE)
4523 			return SRC_UNATTACHED;
4524 		else
4525 			return SNK_UNATTACHED;
4526 	} else if (port->port_type == TYPEC_PORT_SRC) {
4527 		return SRC_UNATTACHED;
4528 	}
4529 
4530 	return SNK_UNATTACHED;
4531 }
4532 
4533 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4534 {
4535 	if (port->swap_pending) {
4536 		port->swap_status = result;
4537 		port->swap_pending = false;
4538 		port->non_pd_role_swap = false;
4539 		complete(&port->swap_complete);
4540 	}
4541 }
4542 
4543 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4544 {
4545 	switch (cc) {
4546 	case TYPEC_CC_RP_1_5:
4547 		return TYPEC_PWR_MODE_1_5A;
4548 	case TYPEC_CC_RP_3_0:
4549 		return TYPEC_PWR_MODE_3_0A;
4550 	case TYPEC_CC_RP_DEF:
4551 	default:
4552 		return TYPEC_PWR_MODE_USB;
4553 	}
4554 }
4555 
4556 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4557 {
4558 	switch (opmode) {
4559 	case TYPEC_PWR_MODE_USB:
4560 		return TYPEC_CC_RP_DEF;
4561 	case TYPEC_PWR_MODE_1_5A:
4562 		return TYPEC_CC_RP_1_5;
4563 	case TYPEC_PWR_MODE_3_0A:
4564 	case TYPEC_PWR_MODE_PD:
4565 	default:
4566 		return TYPEC_CC_RP_3_0;
4567 	}
4568 }
4569 
4570 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4571 {
4572 	if (!port->partner)
4573 		return;
4574 
4575 	switch (port->negotiated_rev) {
4576 	case PD_REV30:
4577 		break;
4578 	/*
4579 	 * 6.4.4.2.3 Structured VDM Version
4580 	 * 2.0 states "At this time, there is only one version (1.0) defined.
4581 	 * This field Shall be set to zero to indicate Version 1.0."
4582 	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4583 	 * To ensure that we follow the Power Delivery revision we are currently
4584 	 * operating on, downgrade the SVDM version to the highest one supported
4585 	 * by the Power Delivery revision.
4586 	 */
4587 	case PD_REV20:
4588 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4589 		break;
4590 	default:
4591 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4592 		break;
4593 	}
4594 }
4595 
4596 static void run_state_machine(struct tcpm_port *port)
4597 {
4598 	int ret;
4599 	enum typec_pwr_opmode opmode;
4600 	unsigned int msecs;
4601 	enum tcpm_state upcoming_state;
4602 
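	/*
	 * A fall back from an attach-wait/debounce state to unattached may
	 * indicate contamination (e.g. moisture) on CC, so flag a check.
	 */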
4603 	if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4604 		port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4605 						port->state == SRC_UNATTACHED) ||
4606 					       (port->enter_state == SNK_ATTACH_WAIT &&
4607 						port->state == SNK_UNATTACHED) ||
4608 					       (port->enter_state == SNK_DEBOUNCED &&
4609 						port->state == SNK_UNATTACHED));
4610 
4611 	port->enter_state = port->state;
4612 	switch (port->state) {
4613 	case TOGGLING:
4614 		break;
4615 	case CHECK_CONTAMINANT:
4616 		port->tcpc->check_contaminant(port->tcpc);
4617 		break;
4618 	/* SRC states */
4619 	case SRC_UNATTACHED:
4620 		if (!port->non_pd_role_swap)
4621 			tcpm_swap_complete(port, -ENOTCONN);
4622 		tcpm_src_detach(port);
4623 		if (port->potential_contaminant) {
4624 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4625 			break;
4626 		}
4627 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4628 			tcpm_set_state(port, TOGGLING, 0);
4629 			break;
4630 		}
4631 		tcpm_set_cc(port, tcpm_rp_cc(port));
4632 		if (port->port_type == TYPEC_PORT_DRP)
4633 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4634 		break;
4635 	case SRC_ATTACH_WAIT:
4636 		if (tcpm_port_is_debug(port))
4637 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4638 				       PD_T_CC_DEBOUNCE);
4639 		else if (tcpm_port_is_audio(port))
4640 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4641 				       PD_T_CC_DEBOUNCE);
4642 		else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4643 			tcpm_set_state(port,
4644 				       tcpm_try_snk(port) ? SNK_TRY
4645 							  : SRC_ATTACHED,
4646 				       PD_T_CC_DEBOUNCE);
4647 		break;
4648 
4649 	case SNK_TRY:
4650 		port->try_snk_count++;
4651 		/*
4652 		 * Requirements:
4653 		 * - Do not drive vconn or vbus
4654 		 * - Terminate CC pins (both) to Rd
4655 		 * Action:
4656 		 * - Wait for tDRPTry (PD_T_DRP_TRY).
4657 		 *   Until then, ignore any state changes.
4658 		 */
4659 		tcpm_set_cc(port, TYPEC_CC_RD);
4660 		tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4661 		break;
4662 	case SNK_TRY_WAIT:
4663 		if (tcpm_port_is_sink(port)) {
4664 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4665 		} else {
4666 			tcpm_set_state(port, SRC_TRYWAIT, 0);
4667 			port->max_wait = 0;
4668 		}
4669 		break;
4670 	case SNK_TRY_WAIT_DEBOUNCE:
4671 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4672 			       PD_T_TRY_CC_DEBOUNCE);
4673 		break;
4674 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4675 		if (port->vbus_present && tcpm_port_is_sink(port))
4676 			tcpm_set_state(port, SNK_ATTACHED, 0);
4677 		else
4678 			port->max_wait = 0;
4679 		break;
4680 	case SRC_TRYWAIT:
4681 		tcpm_set_cc(port, tcpm_rp_cc(port));
4682 		if (port->max_wait == 0) {
4683 			port->max_wait = jiffies +
4684 					 msecs_to_jiffies(PD_T_DRP_TRY);
4685 			tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4686 				       PD_T_DRP_TRY);
4687 		} else {
4688 			if (time_is_after_jiffies(port->max_wait))
4689 				tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4690 					       jiffies_to_msecs(port->max_wait -
4691 								jiffies));
4692 			else
4693 				tcpm_set_state(port, SNK_UNATTACHED, 0);
4694 		}
4695 		break;
4696 	case SRC_TRYWAIT_DEBOUNCE:
4697 		tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
4698 		break;
4699 	case SRC_TRYWAIT_UNATTACHED:
4700 		tcpm_set_state(port, SNK_UNATTACHED, 0);
4701 		break;
4702 
4703 	case SRC_ATTACHED:
4704 		ret = tcpm_src_attach(port);
4705 		tcpm_set_state(port, SRC_UNATTACHED,
4706 			       ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4707 		break;
4708 	case SRC_STARTUP:
4709 		opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4710 		typec_set_pwr_opmode(port->typec_port, opmode);
4711 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
4712 		port->caps_count = 0;
4713 		port->negotiated_rev = PD_MAX_REV;
4714 		port->negotiated_rev_prime = PD_MAX_REV;
4715 		port->message_id = 0;
4716 		port->message_id_prime = 0;
4717 		port->rx_msgid = -1;
4718 		port->rx_msgid_prime = -1;
4719 		port->explicit_contract = false;
4720 		/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4721 		if (port->ams == POWER_ROLE_SWAP ||
4722 		    port->ams == FAST_ROLE_SWAP)
4723 			tcpm_ams_finish(port);
4724 		if (!port->pd_supported) {
4725 			tcpm_set_state(port, SRC_READY, 0);
4726 			break;
4727 		}
4728 		port->upcoming_state = SRC_SEND_CAPABILITIES;
4729 		tcpm_ams_start(port, POWER_NEGOTIATION);
4730 		break;
4731 	case SRC_SEND_CAPABILITIES:
4732 		port->caps_count++;
4733 		if (port->caps_count > PD_N_CAPS_COUNT) {
4734 			tcpm_set_state(port, SRC_READY, 0);
4735 			break;
4736 		}
4737 		ret = tcpm_pd_send_source_caps(port);
4738 		if (ret < 0) {
4739 			if (tcpm_can_communicate_sop_prime(port) &&
4740 			    IS_ERR_OR_NULL(port->cable))
4741 				tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4742 			else
4743 				tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4744 					       PD_T_SEND_SOURCE_CAP);
4745 		} else {
4746 			/*
4747 			 * Per standard, we should clear the reset counter here.
4748 			 * However, that can result in state machine hang-ups.
4749 			 * Reset it only in READY state to improve stability.
4750 			 */
4751 			/* port->hard_reset_count = 0; */
4752 			port->caps_count = 0;
4753 			port->pd_capable = true;
4754 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4755 					    PD_T_SEND_SOURCE_CAP);
4756 		}
4757 		break;
4758 	case SRC_SEND_CAPABILITIES_TIMEOUT:
4759 		/*
4760 		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4761 		 *
4762 		 * PD 2.0 sinks are supposed to accept src-capabilities with a
4763 		 * 3.0 header and simply ignore any src PDOs which the sink does
4764 		 * not understand such as PPS but some 2.0 sinks instead ignore
4765 		 * the entire PD_DATA_SOURCE_CAP message, causing contract
4766 		 * negotiation to fail.
4767 		 *
4768 		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4769 		 * sending src-capabilities with a lower PD revision to
4770 		 * make these broken sinks work.
4771 		 */
4772 		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4773 			tcpm_set_state(port, HARD_RESET_SEND, 0);
4774 		} else if (port->negotiated_rev > PD_REV20) {
4775 			port->negotiated_rev--;
4776 			port->hard_reset_count = 0;
4777 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4778 		} else {
4779 			tcpm_set_state(port, hard_reset_state(port), 0);
4780 		}
4781 		break;
4782 	case SRC_NEGOTIATE_CAPABILITIES:
4783 		ret = tcpm_pd_check_request(port);
4784 		if (ret < 0) {
4785 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4786 			if (!port->explicit_contract) {
4787 				tcpm_set_state(port,
4788 					       SRC_WAIT_NEW_CAPABILITIES, 0);
4789 			} else {
4790 				tcpm_set_state(port, SRC_READY, 0);
4791 			}
4792 		} else {
4793 			tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4794 			tcpm_set_partner_usb_comm_capable(port,
4795 							  !!(port->sink_request & RDO_USB_COMM));
4796 			tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4797 				       PD_T_SRC_TRANSITION);
4798 		}
4799 		break;
4800 	case SRC_TRANSITION_SUPPLY:
4801 		/* XXX: regulator_set_voltage(vbus, ...) */
4802 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4803 		port->explicit_contract = true;
4804 		typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4805 		port->pwr_opmode = TYPEC_PWR_MODE_PD;
4806 		tcpm_set_state_cond(port, SRC_READY, 0);
4807 		break;
4808 	case SRC_READY:
4809 #if 1
4810 		port->hard_reset_count = 0;
4811 #endif
4812 		port->try_src_count = 0;
4813 
4814 		tcpm_swap_complete(port, 0);
4815 		tcpm_typec_connect(port);
4816 
4817 		if (port->ams != NONE_AMS)
4818 			tcpm_ams_finish(port);
4819 		if (port->next_ams != NONE_AMS) {
4820 			port->ams = port->next_ams;
4821 			port->next_ams = NONE_AMS;
4822 		}
4823 
4824 		/*
4825 		 * If previous AMS is interrupted, switch to the upcoming
4826 		 * state.
4827 		 */
4828 		if (port->upcoming_state != INVALID_STATE) {
4829 			upcoming_state = port->upcoming_state;
4830 			port->upcoming_state = INVALID_STATE;
4831 			tcpm_set_state(port, upcoming_state, 0);
4832 			break;
4833 		}
4834 
4835 		/*
4836 		 * 6.4.4.3.1 Discover Identity
4837 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
4838 		 * Explicit Contract."
4839 		 *
4840 		 * Discover Identity on SOP' should be discovered prior to the
4841 		 * ready state, but if done after a Vconn Swap following Discover
4842 		 * Identity on SOP then the discovery process can be run here
4843 		 * as well.
4844 		 */
4845 		if (port->explicit_contract) {
4846 			if (port->send_discover_prime) {
4847 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
4848 			} else {
4849 				port->tx_sop_type = TCPC_TX_SOP;
4850 				tcpm_set_initial_svdm_version(port);
4851 			}
4852 			mod_send_discover_delayed_work(port, 0);
4853 		} else {
4854 			port->send_discover = false;
4855 			port->send_discover_prime = false;
4856 		}
4857 
4858 		/*
4859 		 * 6.3.5
4860 		 * Sending ping messages is not necessary if
4861 		 * - the source operates at vSafe5V
4862 		 * or
4863 		 * - The system is not operating in PD mode
4864 		 * or
4865 		 * - Both partners are connected using a Type-C connector
4866 		 *
4867 		 * There is no actual need to send Ping messages since the local
4868 		 * port is Type-C, and the spec does not clearly say whether PD is
4869 		 * possible when Type-C is connected to Type-A/B.
4870 		 */
4871 		break;
4872 	case SRC_WAIT_NEW_CAPABILITIES:
4873 		/* Nothing to do... */
4874 		break;
4875 
4876 	/* SNK states */
4877 	case SNK_UNATTACHED:
4878 		if (!port->non_pd_role_swap)
4879 			tcpm_swap_complete(port, -ENOTCONN);
4880 		tcpm_pps_complete(port, -ENOTCONN);
4881 		tcpm_snk_detach(port);
4882 		if (port->potential_contaminant) {
4883 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4884 			break;
4885 		}
4886 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4887 			tcpm_set_state(port, TOGGLING, 0);
4888 			break;
4889 		}
4890 		tcpm_set_cc(port, TYPEC_CC_RD);
4891 		if (port->port_type == TYPEC_PORT_DRP)
4892 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4893 		break;
4894 	case SNK_ATTACH_WAIT:
4895 		if ((port->cc1 == TYPEC_CC_OPEN &&
4896 		     port->cc2 != TYPEC_CC_OPEN) ||
4897 		    (port->cc1 != TYPEC_CC_OPEN &&
4898 		     port->cc2 == TYPEC_CC_OPEN))
4899 			tcpm_set_state(port, SNK_DEBOUNCED,
4900 				       PD_T_CC_DEBOUNCE);
4901 		else if (tcpm_port_is_disconnected(port))
4902 			tcpm_set_state(port, SNK_UNATTACHED,
4903 				       PD_T_PD_DEBOUNCE);
4904 		break;
4905 	case SNK_DEBOUNCED:
4906 		if (tcpm_port_is_disconnected(port))
4907 			tcpm_set_state(port, SNK_UNATTACHED,
4908 				       PD_T_PD_DEBOUNCE);
4909 		else if (port->vbus_present)
4910 			tcpm_set_state(port,
4911 				       tcpm_try_src(port) ? SRC_TRY
4912 							  : SNK_ATTACHED,
4913 				       0);
4914 		break;
4915 	case SRC_TRY:
4916 		port->try_src_count++;
4917 		tcpm_set_cc(port, tcpm_rp_cc(port));
4918 		port->max_wait = 0;
4919 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
4920 		break;
4921 	case SRC_TRY_WAIT:
4922 		if (port->max_wait == 0) {
4923 			port->max_wait = jiffies +
4924 					 msecs_to_jiffies(PD_T_DRP_TRY);
4925 			msecs = PD_T_DRP_TRY;
4926 		} else {
4927 			if (time_is_after_jiffies(port->max_wait))
4928 				msecs = jiffies_to_msecs(port->max_wait -
4929 							 jiffies);
4930 			else
4931 				msecs = 0;
4932 		}
4933 		tcpm_set_state(port, SNK_TRYWAIT, msecs);
4934 		break;
4935 	case SRC_TRY_DEBOUNCE:
4936 		tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4937 		break;
4938 	case SNK_TRYWAIT:
4939 		tcpm_set_cc(port, TYPEC_CC_RD);
4940 		tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
4941 		break;
4942 	case SNK_TRYWAIT_VBUS:
4943 		/*
4944 		 * TCPM stays in this state indefinitely waiting for VBUS,
4945 		 * as long as Rp does not stay undetected for longer
4946 		 * than tPDDebounce.
4947 		 */
4948 		if (port->vbus_present && tcpm_port_is_sink(port)) {
4949 			tcpm_set_state(port, SNK_ATTACHED, 0);
4950 			break;
4951 		}
4952 		if (!tcpm_port_is_sink(port))
4953 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4954 		break;
4955 	case SNK_TRYWAIT_DEBOUNCE:
4956 		tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4957 		break;
4958 	case SNK_ATTACHED:
4959 		ret = tcpm_snk_attach(port);
4960 		if (ret < 0)
4961 			tcpm_set_state(port, SNK_UNATTACHED, 0);
4962 		else
4963 			tcpm_set_state(port, SNK_STARTUP, 0);
4964 		break;
4965 	case SNK_STARTUP:
4966 		opmode = tcpm_get_pwr_opmode(port->polarity ?
4967 					      port->cc2 : port->cc1);
4968 		typec_set_pwr_opmode(port->typec_port, opmode);
4969 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
4970 		port->negotiated_rev = PD_MAX_REV;
4971 		port->negotiated_rev_prime = PD_MAX_REV;
4972 		port->message_id = 0;
4973 		port->message_id_prime = 0;
4974 		port->rx_msgid = -1;
4975 		port->rx_msgid_prime = -1;
4976 		port->explicit_contract = false;
4977 
4978 		if (port->ams == POWER_ROLE_SWAP ||
4979 		    port->ams == FAST_ROLE_SWAP)
4980 			/* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
4981 			tcpm_ams_finish(port);
4982 
4983 		tcpm_set_state(port, SNK_DISCOVERY, 0);
4984 		break;
4985 	case SNK_DISCOVERY:
4986 		if (port->vbus_present) {
4987 			u32 current_lim = tcpm_get_current_limit(port);
4988 
4989 			if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
4990 				current_lim = PD_P_SNK_STDBY_MW / 5;
4991 			tcpm_set_current_limit(port, current_lim, 5000);
4992 			/* Do not sink VBUS if the operational current is 0 mA */
4993 			tcpm_set_charge(port, !port->pd_supported ||
4994 					pdo_max_current(port->snk_pdo[0]));
4995 
4996 			if (!port->pd_supported)
4997 				tcpm_set_state(port, SNK_READY, 0);
4998 			else
4999 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5000 			break;
5001 		}
5002 		/*
5003 		 * For DRP, timeouts differ. Also, handling is supposed to be
5004 		 * different and much more complex (dead battery detection;
5005 		 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5006 		 */
5007 		tcpm_set_state(port, hard_reset_state(port),
5008 			       port->port_type == TYPEC_PORT_DRP ?
5009 					PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5010 		break;
5011 	case SNK_DISCOVERY_DEBOUNCE:
5012 		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5013 			       PD_T_CC_DEBOUNCE);
5014 		break;
5015 	case SNK_DISCOVERY_DEBOUNCE_DONE:
5016 		if (!tcpm_port_is_disconnected(port) &&
5017 		    tcpm_port_is_sink(port) &&
5018 		    ktime_after(port->delayed_runtime, ktime_get())) {
5019 			tcpm_set_state(port, SNK_DISCOVERY,
5020 				       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5021 			break;
5022 		}
5023 		tcpm_set_state(port, unattached_state(port), 0);
5024 		break;
5025 	case SNK_WAIT_CAPABILITIES:
5026 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
5027 		if (ret < 0) {
5028 			tcpm_set_state(port, SNK_READY, 0);
5029 			break;
5030 		}
5031 		/*
5032 		 * If VBUS has never been low, and we time out waiting
5033 		 * for source cap, try a soft reset first, in case we
5034 		 * were already in a stable contract before this boot.
5035 		 * Do this only once.
5036 		 */
5037 		if (port->vbus_never_low) {
5038 			port->vbus_never_low = false;
5039 			tcpm_set_state(port, SNK_SOFT_RESET,
5040 				       PD_T_SINK_WAIT_CAP);
5041 		} else {
5042 			tcpm_set_state(port, hard_reset_state(port),
5043 				       PD_T_SINK_WAIT_CAP);
5044 		}
5045 		break;
5046 	case SNK_NEGOTIATE_CAPABILITIES:
5047 		port->pd_capable = true;
5048 		tcpm_set_partner_usb_comm_capable(port,
5049 						  !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5050 		port->hard_reset_count = 0;
5051 		ret = tcpm_pd_send_request(port);
5052 		if (ret < 0) {
5053 			/* Restore the threshold to its original value */
5054 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5055 							       port->pps_data.active,
5056 							       port->supply_voltage);
5057 			/* Let the Source send capabilities again. */
5058 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5059 		} else {
5060 			tcpm_set_state_cond(port, hard_reset_state(port),
5061 					    PD_T_SENDER_RESPONSE);
5062 		}
5063 		break;
5064 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
5065 		ret = tcpm_pd_send_pps_request(port);
5066 		if (ret < 0) {
5067 			/* Restore the threshold to its original value */
5068 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5069 							       port->pps_data.active,
5070 							       port->supply_voltage);
5071 			port->pps_status = ret;
5072 			/*
5073 			 * If this was called due to updates to sink
5074 			 * capabilities, and pps is no longer valid, we should
5075 			 * safely fall back to a standard PDO.
5076 			 */
5077 			if (port->update_sink_caps)
5078 				tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5079 			else
5080 				tcpm_set_state(port, SNK_READY, 0);
5081 		} else {
5082 			tcpm_set_state_cond(port, hard_reset_state(port),
5083 					    PD_T_SENDER_RESPONSE);
5084 		}
5085 		break;
5086 	case SNK_TRANSITION_SINK:
5087 		/* From the USB PD spec:
5088 		 * "The Sink Shall transition to Sink Standby before a positive or
5089 		 * negative voltage transition of VBUS. During Sink Standby
5090 		 * the Sink Shall reduce its power draw to pSnkStdby."
5091 		 *
5092 		 * This is not applicable to PPS though as the port can continue
5093 		 * to draw negotiated power without switching to standby.
5094 		 */
5095 		if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5096 		    port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5097 			u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5098 
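			/* pSnkStdby (mW) converted to mA at the current supply voltage (mV) */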
5099 			tcpm_log(port, "Setting standby current %u mV @ %u mA",
5100 				 port->supply_voltage, stdby_ma);
5101 			tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
5102 		}
5103 		fallthrough;
5104 	case SNK_TRANSITION_SINK_VBUS:
5105 		tcpm_set_state(port, hard_reset_state(port),
5106 			       PD_T_PS_TRANSITION);
5107 		break;
5108 	case SNK_READY:
5109 		port->try_snk_count = 0;
5110 		port->update_sink_caps = false;
5111 		if (port->explicit_contract) {
5112 			typec_set_pwr_opmode(port->typec_port,
5113 					     TYPEC_PWR_MODE_PD);
5114 			port->pwr_opmode = TYPEC_PWR_MODE_PD;
5115 		}
5116 
5117 		if (!port->pd_capable && port->slow_charger_loop)
5118 			tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5119 		tcpm_swap_complete(port, 0);
5120 		tcpm_typec_connect(port);
5121 		if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5122 			mod_enable_frs_delayed_work(port, 0);
5123 		tcpm_pps_complete(port, port->pps_status);
5124 
5125 		if (port->ams != NONE_AMS)
5126 			tcpm_ams_finish(port);
5127 		if (port->next_ams != NONE_AMS) {
5128 			port->ams = port->next_ams;
5129 			port->next_ams = NONE_AMS;
5130 		}
5131 
5132 		/*
5133 		 * If the previous AMS was interrupted, switch to the upcoming
5134 		 * state.
5135 		 */
5136 		if (port->upcoming_state != INVALID_STATE) {
5137 			upcoming_state = port->upcoming_state;
5138 			port->upcoming_state = INVALID_STATE;
5139 			tcpm_set_state(port, upcoming_state, 0);
5140 			break;
5141 		}
5142 
5143 		/*
5144 		 * 6.4.4.3.1 Discover Identity
5145 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
5146 		 * Explicit Contract."
5147 		 *
5148 		 * Discover Identity on SOP' should be discovered prior to the
5149 		 * ready state, but if done after a Vconn Swap following Discover
5150 		 * Identity on SOP then the discovery process can be run here
5151 		 * as well.
5152 		 */
5153 		if (port->explicit_contract) {
5154 			if (port->send_discover_prime) {
5155 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
5156 			} else {
5157 				port->tx_sop_type = TCPC_TX_SOP;
5158 				tcpm_set_initial_svdm_version(port);
5159 			}
5160 			mod_send_discover_delayed_work(port, 0);
5161 		} else {
5162 			port->send_discover = false;
5163 			port->send_discover_prime = false;
5164 		}
5165 
5166 		power_supply_changed(port->psy);
5167 		break;
5168 
5169 	/* Accessory states */
5170 	case ACC_UNATTACHED:
5171 		tcpm_acc_detach(port);
5172 		tcpm_set_state(port, SRC_UNATTACHED, 0);
5173 		break;
5174 	case DEBUG_ACC_ATTACHED:
5175 	case AUDIO_ACC_ATTACHED:
5176 		ret = tcpm_acc_attach(port);
5177 		if (ret < 0)
5178 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5179 		break;
5180 	case AUDIO_ACC_DEBOUNCE:
5181 		tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
5182 		break;
5183 
5184 	/* Hard_Reset states */
5185 	case HARD_RESET_SEND:
5186 		if (port->ams != NONE_AMS)
5187 			tcpm_ams_finish(port);
5188 		/*
5189 		 * State machine will be directed to HARD_RESET_START,
5190 		 * thus set upcoming_state to INVALID_STATE.
5191 		 */
5192 		port->upcoming_state = INVALID_STATE;
5193 		tcpm_ams_start(port, HARD_RESET);
5194 		break;
5195 	case HARD_RESET_START:
5196 		port->sink_cap_done = false;
5197 		if (port->tcpc->enable_frs)
5198 			port->tcpc->enable_frs(port->tcpc, false);
5199 		port->hard_reset_count++;
5200 		port->tcpc->set_pd_rx(port->tcpc, false);
5201 		tcpm_unregister_altmodes(port);
5202 		port->nr_sink_caps = 0;
5203 		port->send_discover = true;
5204 		port->send_discover_prime = false;
5205 		if (port->pwr_role == TYPEC_SOURCE)
5206 			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5207 				       PD_T_PS_HARD_RESET);
5208 		else
5209 			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5210 		break;
5211 	case SRC_HARD_RESET_VBUS_OFF:
5212 		/*
5213 		 * 7.1.5 Response to Hard Resets
5214 		 * Hard Reset Signaling indicates a communication failure has occurred and the
5215 		 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5216 		 * drive VBUS to vSafe0V as shown in Figure 7-9.
5217 		 */
5218 		tcpm_set_vconn(port, false);
5219 		tcpm_set_vbus(port, false);
5220 		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
5221 			       tcpm_data_role_for_source(port));
5222 		/*
5223 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5224 		 * PD_T_SRC_RECOVER before turning vbus back on.
5225 		 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5226 		 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5227 		 * tells the Device Policy Manager to instruct the power supply to perform a
5228 		 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5229 		 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5230 		 * re-establish communication with the Sink and resume USB Default Operation.
5231 		 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5232 		 */
5233 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
5234 		break;
5235 	case SRC_HARD_RESET_VBUS_ON:
5236 		tcpm_set_vconn(port, true);
5237 		tcpm_set_vbus(port, true);
5238 		if (port->ams == HARD_RESET)
5239 			tcpm_ams_finish(port);
5240 		if (port->pd_supported)
5241 			port->tcpc->set_pd_rx(port->tcpc, true);
5242 		tcpm_set_attached_state(port, true);
5243 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5244 		break;
5245 	case SNK_HARD_RESET_SINK_OFF:
5246 		/* Do not discharge/disconnect during hard reset */
5247 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5248 		memset(&port->pps_data, 0, sizeof(port->pps_data));
5249 		tcpm_set_vconn(port, false);
5250 		if (port->pd_capable)
5251 			tcpm_set_charge(port, false);
5252 		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
5253 			       tcpm_data_role_for_sink(port));
5254 		/*
5255 		 * VBUS may or may not toggle, depending on the adapter.
5256 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5257 		 * directly after timeout.
5258 		 */
5259 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5260 		break;
5261 	case SNK_HARD_RESET_WAIT_VBUS:
5262 		if (port->ams == HARD_RESET)
5263 			tcpm_ams_finish(port);
5264 		/* Assume we're disconnected if VBUS doesn't come back. */
5265 		tcpm_set_state(port, SNK_UNATTACHED,
5266 			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5267 		break;
5268 	case SNK_HARD_RESET_SINK_ON:
5269 		/* Note: There is no guarantee that VBUS is on in this state */
5270 		/*
5271 		 * XXX:
5272 		 * The specification suggests that dual mode ports in sink
5273 		 * mode should transition to state PE_SRC_Transition_to_default.
5274 		 * See USB power delivery specification chapter 8.3.3.6.1.3.
5275 		 * This would mean to
5276 		 * - turn off VCONN, reset power supply
5277 		 * - request hardware reset
5278 		 * - turn on VCONN
5279 		 * - Transition to state PE_Src_Startup
5280 		 * SNK only ports shall transition to state Snk_Startup
5281 		 * (see chapter 8.3.3.3.8).
5282 		 * Similarly, dual-mode ports in source mode should transition
5283 		 * to PE_SNK_Transition_to_default.
5284 		 */
5285 		if (port->pd_capable) {
5286 			tcpm_set_current_limit(port,
5287 					       tcpm_get_current_limit(port),
5288 					       5000);
5289 			/* Do not sink VBUS if the operational current is 0 mA */
5290 			tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5291 		}
5292 		if (port->ams == HARD_RESET)
5293 			tcpm_ams_finish(port);
5294 		tcpm_set_attached_state(port, true);
5295 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5296 		tcpm_set_state(port, SNK_STARTUP, 0);
5297 		break;
5298 
5299 	/* Soft_Reset states */
5300 	case SOFT_RESET:
5301 		port->message_id = 0;
5302 		port->rx_msgid = -1;
5303 		/* remove existing capabilities */
5304 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5305 		port->partner_source_caps = NULL;
5306 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5307 		tcpm_ams_finish(port);
5308 		if (port->pwr_role == TYPEC_SOURCE) {
5309 			port->upcoming_state = SRC_SEND_CAPABILITIES;
5310 			tcpm_ams_start(port, POWER_NEGOTIATION);
5311 		} else {
5312 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5313 		}
5314 		break;
5315 	case SRC_SOFT_RESET_WAIT_SNK_TX:
5316 	case SNK_SOFT_RESET:
5317 		if (port->ams != NONE_AMS)
5318 			tcpm_ams_finish(port);
5319 		port->upcoming_state = SOFT_RESET_SEND;
5320 		tcpm_ams_start(port, SOFT_RESET_AMS);
5321 		break;
5322 	case SOFT_RESET_SEND:
5323 		/*
5324 		 * Power Delivery 3.0 Section 6.3.13
5325 		 *
5326 		 * A Soft_Reset Message Shall be targeted at a specific entity
5327 		 * depending on the type of SOP* packet used.
5328 		 */
5329 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5330 			port->message_id_prime = 0;
5331 			port->rx_msgid_prime = -1;
5332 			tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5333 			tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5334 		} else {
5335 			port->message_id = 0;
5336 			port->rx_msgid = -1;
5337 			/* remove existing capabilities */
5338 			usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5339 			port->partner_source_caps = NULL;
5340 			if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5341 				tcpm_set_state_cond(port, hard_reset_state(port), 0);
5342 			else
5343 				tcpm_set_state_cond(port, hard_reset_state(port),
5344 						    PD_T_SENDER_RESPONSE);
5345 		}
5346 		break;
5347 
5348 	/* DR_Swap states */
5349 	case DR_SWAP_SEND:
5350 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
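		/*
		 * Re-run Discover Identity after the swap: under PD r2.0 only
		 * the DFP may initiate it (hence only when we are currently
		 * the UFP/device), while PD r3.x allows either data role to.
		 */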
5351 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5352 			port->send_discover = true;
5353 			port->send_discover_prime = false;
5354 		}
5355 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5356 				    PD_T_SENDER_RESPONSE);
5357 		break;
5358 	case DR_SWAP_ACCEPT:
5359 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5360 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5361 			port->send_discover = true;
5362 			port->send_discover_prime = false;
5363 		}
5364 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5365 		break;
5366 	case DR_SWAP_SEND_TIMEOUT:
5367 		tcpm_swap_complete(port, -ETIMEDOUT);
5368 		port->send_discover = false;
5369 		port->send_discover_prime = false;
5370 		tcpm_ams_finish(port);
5371 		tcpm_set_state(port, ready_state(port), 0);
5372 		break;
5373 	case DR_SWAP_CHANGE_DR:
5374 		tcpm_unregister_altmodes(port);
5375 		if (port->data_role == TYPEC_HOST)
5376 			tcpm_set_roles(port, true, port->pwr_role,
5377 				       TYPEC_DEVICE);
5378 		else
5379 			tcpm_set_roles(port, true, port->pwr_role,
5380 				       TYPEC_HOST);
5381 		tcpm_ams_finish(port);
5382 		tcpm_set_state(port, ready_state(port), 0);
5383 		break;
5384 
5385 	case FR_SWAP_SEND:
5386 		if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5387 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5388 			break;
5389 		}
5390 		tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5391 		break;
5392 	case FR_SWAP_SEND_TIMEOUT:
5393 		tcpm_set_state(port, ERROR_RECOVERY, 0);
5394 		break;
5395 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5396 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF);
5397 		break;
5398 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5399 		if (port->vbus_source)
5400 			tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5401 		else
5402 			tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5403 		break;
5404 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5405 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5406 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5407 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5408 			break;
5409 		}
5410 		tcpm_set_cc(port, tcpm_rp_cc(port));
5411 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5412 		break;
5413 
5414 	/* PR_Swap states */
5415 	case PR_SWAP_ACCEPT:
5416 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5417 		tcpm_set_state(port, PR_SWAP_START, 0);
5418 		break;
5419 	case PR_SWAP_SEND:
5420 		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5421 		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5422 				    PD_T_SENDER_RESPONSE);
5423 		break;
5424 	case PR_SWAP_SEND_TIMEOUT:
5425 		tcpm_swap_complete(port, -ETIMEDOUT);
5426 		tcpm_set_state(port, ready_state(port), 0);
5427 		break;
5428 	case PR_SWAP_START:
5429 		tcpm_apply_rc(port);
5430 		if (port->pwr_role == TYPEC_SOURCE)
5431 			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5432 				       PD_T_SRC_TRANSITION);
5433 		else
5434 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5435 		break;
5436 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5437 		/*
5438 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5439 		 * as this is not a disconnect.
5440 		 */
5441 		tcpm_set_vbus(port, false);
5442 		port->explicit_contract = false;
5443 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
5444 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5445 			       PD_T_SRCSWAPSTDBY);
5446 		break;
5447 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5448 		tcpm_set_cc(port, TYPEC_CC_RD);
5449 		/* allow CC debounce */
5450 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5451 			       PD_T_CC_DEBOUNCE);
5452 		break;
5453 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5454 		/*
5455 		 * USB-PD standard, 6.2.1.4, Port Power Role:
5456 		 * "During the Power Role Swap Sequence, for the initial Source
5457 		 * Port, the Port Power Role field shall be set to Sink in the
5458 		 * PS_RDY Message indicating that the initial Source’s power
5459 		 * supply is turned off"
5460 		 */
5461 		tcpm_set_pwr_role(port, TYPEC_SINK);
5462 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5463 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5464 			break;
5465 		}
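		/*
		 * The delayed ERROR_RECOVERY below acts as a tPSSourceOn(PRS)
		 * timeout; it is superseded when the partner's PS_RDY arrives
		 * and moves the port to PR_SWAP_SRC_SNK_SINK_ON instead.
		 */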
5466 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5467 		break;
5468 	case PR_SWAP_SRC_SNK_SINK_ON:
5469 		tcpm_enable_auto_vbus_discharge(port, true);
5470 		/* Set the vbus disconnect threshold for implicit contract */
5471 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5472 		tcpm_set_state(port, SNK_STARTUP, 0);
5473 		break;
5474 	case PR_SWAP_SNK_SRC_SINK_OFF:
5475 		/* will be source, remove existing capabilities */
5476 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5477 		port->partner_source_caps = NULL;
5478 		/*
5479 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5480 		 * as this is not a disconnect.
5481 		 */
5482 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5483 						       port->pps_data.active, 0);
5484 		tcpm_set_charge(port, false);
5485 		tcpm_set_state(port, hard_reset_state(port),
5486 			       PD_T_PS_SOURCE_OFF);
5487 		break;
5488 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5489 		tcpm_enable_auto_vbus_discharge(port, true);
5490 		tcpm_set_cc(port, tcpm_rp_cc(port));
5491 		tcpm_set_vbus(port, true);
5492 		/*
5493 		 * Allow time for VBUS to ramp up; must be < tNewSrc.
5494 		 * This window also overlaps with CC debounce, so wait for
5495 		 * the larger of the two, which is PD_T_NEWSRC.
5496 		 */
5497 		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5498 			       PD_T_NEWSRC);
5499 		break;
5500 	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5501 		/*
5502 		 * USB PD standard, 6.2.1.4:
5503 		 * "Subsequent Messages initiated by the Policy Engine,
5504 		 * such as the PS_RDY Message sent to indicate that Vbus
5505 		 * is ready, will have the Port Power Role field set to
5506 		 * Source."
5507 		 */
5508 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5509 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5510 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5511 		break;
5512 
5513 	case VCONN_SWAP_ACCEPT:
5514 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5515 		tcpm_ams_finish(port);
5516 		tcpm_set_state(port, VCONN_SWAP_START, 0);
5517 		break;
5518 	case VCONN_SWAP_SEND:
5519 		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5520 		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5521 			       PD_T_SENDER_RESPONSE);
5522 		break;
5523 	case VCONN_SWAP_SEND_TIMEOUT:
5524 		tcpm_swap_complete(port, -ETIMEDOUT);
5525 		tcpm_set_state(port, ready_state(port), 0);
5526 		break;
5527 	case VCONN_SWAP_START:
5528 		if (port->vconn_role == TYPEC_SOURCE)
5529 			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5530 		else
5531 			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5532 		break;
5533 	case VCONN_SWAP_WAIT_FOR_VCONN:
5534 		tcpm_set_state(port, hard_reset_state(port),
5535 			       PD_T_VCONN_SOURCE_ON);
5536 		break;
5537 	case VCONN_SWAP_TURN_ON_VCONN:
5538 		ret = tcpm_set_vconn(port, true);
5539 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5540 		/*
5541 		 * USB PD 3.0 Section 6.4.4.3.1
5542 		 *
5543 		 * Note that a Cable Plug or VPD will not be ready for PD
5544 		 * Communication until tVCONNStable after VCONN has been applied
5545 		 */
5546 		if (!ret)
5547 			tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5548 				       PD_T_VCONN_STABLE);
5549 		else
5550 			tcpm_set_state(port, ready_state(port), 0);
5551 		break;
5552 	case VCONN_SWAP_TURN_OFF_VCONN:
5553 		tcpm_set_vconn(port, false);
5554 		tcpm_set_state(port, ready_state(port), 0);
5555 		break;
5556 	case VCONN_SWAP_SEND_SOFT_RESET:
5557 		tcpm_swap_complete(port, port->swap_status);
5558 		if (tcpm_can_communicate_sop_prime(port)) {
5559 			port->tx_sop_type = TCPC_TX_SOP_PRIME;
5560 			port->upcoming_state = SOFT_RESET_SEND;
5561 			tcpm_ams_start(port, SOFT_RESET_AMS);
5562 		} else {
5563 			tcpm_set_state(port, ready_state(port), 0);
5564 		}
5565 		break;
5566 
5567 	case DR_SWAP_CANCEL:
5568 	case PR_SWAP_CANCEL:
5569 	case VCONN_SWAP_CANCEL:
5570 		tcpm_swap_complete(port, port->swap_status);
5571 		if (port->pwr_role == TYPEC_SOURCE)
5572 			tcpm_set_state(port, SRC_READY, 0);
5573 		else
5574 			tcpm_set_state(port, SNK_READY, 0);
5575 		break;
5576 	case FR_SWAP_CANCEL:
5577 		if (port->pwr_role == TYPEC_SOURCE)
5578 			tcpm_set_state(port, SRC_READY, 0);
5579 		else
5580 			tcpm_set_state(port, SNK_READY, 0);
5581 		break;
5582 
5583 	case BIST_RX:
5584 		switch (BDO_MODE_MASK(port->bist_request)) {
5585 		case BDO_MODE_CARRIER2:
5586 			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5587 			tcpm_set_state(port, unattached_state(port),
5588 				       PD_T_BIST_CONT_MODE);
5589 			break;
5590 		case BDO_MODE_TESTDATA:
5591 			if (port->tcpc->set_bist_data) {
5592 				tcpm_log(port, "Enable BIST MODE TESTDATA");
5593 				port->tcpc->set_bist_data(port->tcpc, true);
5594 			}
5595 			break;
5596 		default:
5597 			break;
5598 		}
5599 		break;
5600 	case GET_STATUS_SEND:
5601 		tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5602 		tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5603 			       PD_T_SENDER_RESPONSE);
5604 		break;
5605 	case GET_STATUS_SEND_TIMEOUT:
5606 		tcpm_set_state(port, ready_state(port), 0);
5607 		break;
5608 	case GET_PPS_STATUS_SEND:
5609 		tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5610 		tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5611 			       PD_T_SENDER_RESPONSE);
5612 		break;
5613 	case GET_PPS_STATUS_SEND_TIMEOUT:
5614 		tcpm_set_state(port, ready_state(port), 0);
5615 		break;
5616 	case GET_SINK_CAP:
5617 		tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5618 		tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5619 		break;
5620 	case GET_SINK_CAP_TIMEOUT:
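		/* Mark the query done so the FRS worker does not retry it */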
5621 		port->sink_cap_done = true;
5622 		tcpm_set_state(port, ready_state(port), 0);
5623 		break;
5624 	case ERROR_RECOVERY:
5625 		tcpm_swap_complete(port, -EPROTO);
5626 		tcpm_pps_complete(port, -EPROTO);
5627 		tcpm_set_state(port, PORT_RESET, 0);
5628 		break;
5629 	case PORT_RESET:
5630 		tcpm_reset_port(port);
5631 		port->pd_events = 0;
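		/*
		 * Emulate a disconnect for tErrorRecovery: a self-powered port
		 * can present open CC, otherwise keep the default unattached
		 * termination (Rd or Rp) to avoid losing VBUS on a bus-powered
		 * port.
		 */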
5632 		if (port->self_powered)
5633 			tcpm_set_cc(port, TYPEC_CC_OPEN);
5634 		else
5635 			tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5636 				    TYPEC_CC_RD : tcpm_rp_cc(port));
5637 		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5638 			       PD_T_ERROR_RECOVERY);
5639 		break;
5640 	case PORT_RESET_WAIT_OFF:
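		/*
		 * If VBUS is still present, give it tPSSourceOff to decay
		 * before returning to the default unattached state.
		 */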
5641 		tcpm_set_state(port,
5642 			       tcpm_default_state(port),
5643 			       port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
5644 		break;
5645 
5646 	/* AMS intermediate state */
5647 	case AMS_START:
5648 		if (port->upcoming_state == INVALID_STATE) {
5649 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5650 				       SRC_READY : SNK_READY, 0);
5651 			break;
5652 		}
5653 
5654 		upcoming_state = port->upcoming_state;
5655 		port->upcoming_state = INVALID_STATE;
5656 		tcpm_set_state(port, upcoming_state, 0);
5657 		break;
5658 
5659 	/* Chunk state */
5660 	case CHUNK_NOT_SUPP:
5661 		tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5662 		tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5663 		break;
5664 
5665 	/* Cable states */
5666 	case SRC_VDM_IDENTITY_REQUEST:
5667 		port->send_discover_prime = true;
5668 		port->tx_sop_type = TCPC_TX_SOP_PRIME;
5669 		mod_send_discover_delayed_work(port, 0);
5670 		port->upcoming_state = SRC_SEND_CAPABILITIES;
5671 		break;
5672 
5673 	default:
5674 		WARN(1, "Unexpected port state %d\n", port->state);
5675 		break;
5676 	}
5677 }
5678 
5679 static void tcpm_state_machine_work(struct kthread_work *work)
5680 {
5681 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5682 	enum tcpm_state prev_state;
5683 
5684 	mutex_lock(&port->lock);
5685 	port->state_machine_running = true;
5686 
5687 	if (port->queued_message && tcpm_send_queued_message(port))
5688 		goto done;
5689 
5690 	/* If we were queued due to a delayed state change, update it now */
5691 	if (port->delayed_state) {
5692 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5693 			 tcpm_states[port->state],
5694 			 tcpm_states[port->delayed_state], port->delay_ms);
5695 		port->prev_state = port->state;
5696 		port->state = port->delayed_state;
5697 		port->delayed_state = INVALID_STATE;
5698 	}
5699 
5700 	/*
5701 	 * Continue running as long as we have (non-delayed) state changes
5702 	 * to make.
5703 	 */
5704 	do {
5705 		prev_state = port->state;
5706 		run_state_machine(port);
5707 		if (port->queued_message)
5708 			tcpm_send_queued_message(port);
5709 	} while (port->state != prev_state && !port->delayed_state);
5710 
5711 done:
5712 	port->state_machine_running = false;
5713 	mutex_unlock(&port->lock);
5714 }
5715 
5716 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
5717 			    enum typec_cc_status cc2)
5718 {
5719 	enum typec_cc_status old_cc1, old_cc2;
5720 	enum tcpm_state new_state;
5721 
5722 	old_cc1 = port->cc1;
5723 	old_cc2 = port->cc2;
5724 	port->cc1 = cc1;
5725 	port->cc2 = cc2;
5726 
5727 	tcpm_log_force(port,
5728 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5729 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5730 		       port->polarity,
5731 		       tcpm_port_is_disconnected(port) ? "disconnected"
5732 						       : "connected");
5733 
5734 	switch (port->state) {
5735 	case TOGGLING:
5736 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5737 		    tcpm_port_is_source(port))
5738 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5739 		else if (tcpm_port_is_sink(port))
5740 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5741 		break;
5742 	case CHECK_CONTAMINANT:
5743 		/* Wait for Toggling to be resumed */
5744 		break;
5745 	case SRC_UNATTACHED:
5746 	case ACC_UNATTACHED:
5747 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5748 		    tcpm_port_is_source(port))
5749 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5750 		break;
5751 	case SRC_ATTACH_WAIT:
5752 		if (tcpm_port_is_disconnected(port) ||
5753 		    tcpm_port_is_audio_detached(port))
5754 			tcpm_set_state(port, SRC_UNATTACHED, 0);
5755 		else if (cc1 != old_cc1 || cc2 != old_cc2)
5756 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5757 		break;
5758 	case SRC_ATTACHED:
5759 	case SRC_STARTUP:
5760 	case SRC_SEND_CAPABILITIES:
5761 	case SRC_READY:
5762 		if (tcpm_port_is_disconnected(port) ||
5763 		    !tcpm_port_is_source(port)) {
5764 			if (port->port_type == TYPEC_PORT_SRC)
5765 				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5766 			else
5767 				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5768 		}
5769 		break;
5770 	case SNK_UNATTACHED:
5771 		if (tcpm_port_is_sink(port))
5772 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5773 		break;
5774 	case SNK_ATTACH_WAIT:
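		/*
		 * Exactly one open CC pin means the partner presents on a
		 * single CC line; debounce toward SNK_DEBOUNCED, and restart
		 * the debounce whenever the pending (delayed) target changes.
		 */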
5775 		if ((port->cc1 == TYPEC_CC_OPEN &&
5776 		     port->cc2 != TYPEC_CC_OPEN) ||
5777 		    (port->cc1 != TYPEC_CC_OPEN &&
5778 		     port->cc2 == TYPEC_CC_OPEN))
5779 			new_state = SNK_DEBOUNCED;
5780 		else if (tcpm_port_is_disconnected(port))
5781 			new_state = SNK_UNATTACHED;
5782 		else
5783 			break;
5784 		if (new_state != port->delayed_state)
5785 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5786 		break;
5787 	case SNK_DEBOUNCED:
5788 		if (tcpm_port_is_disconnected(port))
5789 			new_state = SNK_UNATTACHED;
5790 		else if (port->vbus_present)
5791 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5792 		else
5793 			new_state = SNK_UNATTACHED;
5794 		if (new_state != port->delayed_state)
5795 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
5796 		break;
5797 	case SNK_READY:
5798 		/*
5799 		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5800 		 * "A port that has entered into USB PD communications with the Source and
5801 		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5802 		 * cable disconnect in addition to monitoring VBUS.
5803 		 *
5804 		 * A port that is monitoring the CC voltage for disconnect (but is not in
5805 		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5806 		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5807 		 * vRd-USB for tPDDebounce."
5808 		 *
5809 		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5810 		 * away before vbus decays to disconnect threshold. Allow
5811 		 * disconnect to be driven by vbus disconnect when auto vbus
5812 		 * discharge is enabled.
5813 		 */
5814 		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5815 			tcpm_set_state(port, unattached_state(port), 0);
5816 		else if (!port->pd_capable &&
5817 			 (cc1 != old_cc1 || cc2 != old_cc2))
5818 			tcpm_set_current_limit(port,
5819 					       tcpm_get_current_limit(port),
5820 					       5000);
5821 		break;
5822 
5823 	case AUDIO_ACC_ATTACHED:
5824 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5825 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5826 		break;
5827 	case AUDIO_ACC_DEBOUNCE:
5828 		if (tcpm_port_is_audio(port))
5829 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5830 		break;
5831 
5832 	case DEBUG_ACC_ATTACHED:
5833 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5834 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5835 		break;
5836 
5837 	case SNK_TRY:
5838 		/* Do nothing, waiting for timeout */
5839 		break;
5840 
5841 	case SNK_DISCOVERY:
5842 		/* CC line is unstable, wait for debounce */
5843 		if (tcpm_port_is_disconnected(port))
5844 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5845 		break;
5846 	case SNK_DISCOVERY_DEBOUNCE:
5847 		break;
5848 
5849 	case SRC_TRYWAIT:
5850 		/* Hand over to state machine if needed */
5851 		if (!port->vbus_present && tcpm_port_is_source(port))
5852 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5853 		break;
5854 	case SRC_TRYWAIT_DEBOUNCE:
5855 		if (port->vbus_present || !tcpm_port_is_source(port))
5856 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5857 		break;
5858 	case SNK_TRY_WAIT_DEBOUNCE:
5859 		if (!tcpm_port_is_sink(port)) {
5860 			port->max_wait = 0;
5861 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5862 		}
5863 		break;
5864 	case SRC_TRY_WAIT:
5865 		if (tcpm_port_is_source(port))
5866 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5867 		break;
5868 	case SRC_TRY_DEBOUNCE:
5869 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
5870 		break;
5871 	case SNK_TRYWAIT_DEBOUNCE:
5872 		if (tcpm_port_is_sink(port))
5873 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5874 		break;
5875 	case SNK_TRYWAIT_VBUS:
5876 		if (!tcpm_port_is_sink(port))
5877 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5878 		break;
5879 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5880 		if (!tcpm_port_is_sink(port))
5881 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5882 		else
5883 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5884 		break;
5885 	case SNK_TRYWAIT:
5886 		/* Do nothing, waiting for tCCDebounce */
5887 		break;
5888 	case PR_SWAP_SNK_SRC_SINK_OFF:
5889 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5890 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5891 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5892 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5893 		/*
5894 		 * CC state change is expected in PR_SWAP
5895 		 * Ignore it.
5896 		 */
5897 		break;
5898 	case FR_SWAP_SEND:
5899 	case FR_SWAP_SEND_TIMEOUT:
5900 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5901 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5902 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5903 		/* Do nothing, CC change expected */
5904 		break;
5905 
5906 	case PORT_RESET:
5907 	case PORT_RESET_WAIT_OFF:
5908 		/*
5909 		 * State set back to default mode once the timer completes.
5910 		 * Ignore CC changes here.
5911 		 */
5912 		break;
5913 	default:
5914 		/*
5915 		 * While acting as sink with auto vbus discharge enabled, allow disconnect
5916 		 * to be driven by vbus disconnect.
5917 		 */
5918 		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
5919 							 port->auto_vbus_discharge_enabled))
5920 			tcpm_set_state(port, unattached_state(port), 0);
5921 		break;
5922 	}
5923 }
5924 
5925 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
5926 {
5927 	tcpm_log_force(port, "VBUS on");
5928 	port->vbus_present = true;
5929 	/*
5930 	 * When vbus_present is true, i.e. the voltage at VBUS is greater than VSAFE5V,
5931 	 * vbus is implicitly not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
5932 	 */
5933 	port->vbus_vsafe0v = false;
5934 
5935 	switch (port->state) {
5936 	case SNK_TRANSITION_SINK_VBUS:
5937 		port->explicit_contract = true;
5938 		tcpm_set_state(port, SNK_READY, 0);
5939 		break;
5940 	case SNK_DISCOVERY:
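		/* Re-enter SNK_DISCOVERY so it is re-evaluated with vbus_present now set */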
5941 		tcpm_set_state(port, SNK_DISCOVERY, 0);
5942 		break;
5943 
5944 	case SNK_DEBOUNCED:
5945 		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
5946 							: SNK_ATTACHED,
5947 				       0);
5948 		break;
5949 	case SNK_HARD_RESET_WAIT_VBUS:
5950 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
5951 		break;
5952 	case SRC_ATTACHED:
5953 		tcpm_set_state(port, SRC_STARTUP, 0);
5954 		break;
5955 	case SRC_HARD_RESET_VBUS_ON:
5956 		tcpm_set_state(port, SRC_STARTUP, 0);
5957 		break;
5958 
5959 	case SNK_TRY:
5960 		/* Do nothing, waiting for timeout */
5961 		break;
5962 	case SRC_TRYWAIT:
5963 		/* Do nothing, waiting for Rd to be detected */
5964 		break;
5965 	case SRC_TRYWAIT_DEBOUNCE:
5966 		tcpm_set_state(port, SRC_TRYWAIT, 0);
5967 		break;
5968 	case SNK_TRY_WAIT_DEBOUNCE:
5969 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
5970 		break;
5971 	case SNK_TRYWAIT:
5972 		/* Do nothing, waiting for tCCDebounce */
5973 		break;
5974 	case SNK_TRYWAIT_VBUS:
5975 		if (tcpm_port_is_sink(port))
5976 			tcpm_set_state(port, SNK_ATTACHED, 0);
5977 		break;
5978 	case SNK_TRYWAIT_DEBOUNCE:
5979 		/* Do nothing, waiting for Rp */
5980 		break;
5981 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5982 		if (port->vbus_present && tcpm_port_is_sink(port))
5983 			tcpm_set_state(port, SNK_ATTACHED, 0);
5984 		break;
5985 	case SRC_TRY_WAIT:
5986 	case SRC_TRY_DEBOUNCE:
5987 		/* Do nothing, waiting for sink detection */
5988 		break;
5989 	case FR_SWAP_SEND:
5990 	case FR_SWAP_SEND_TIMEOUT:
5991 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5992 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5993 		if (port->tcpc->frs_sourcing_vbus)
5994 			port->tcpc->frs_sourcing_vbus(port->tcpc);
5995 		break;
5996 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5997 		if (port->tcpc->frs_sourcing_vbus)
5998 			port->tcpc->frs_sourcing_vbus(port->tcpc);
5999 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
6000 		break;
6001 
6002 	case PORT_RESET:
6003 	case PORT_RESET_WAIT_OFF:
6004 		/*
6005 		 * State set back to default mode once the timer completes.
6006 		 * Ignore vbus changes here.
6007 		 */
6008 		break;
6009 
6010 	default:
6011 		break;
6012 	}
6013 }
6014 
6015 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
6016 {
6017 	tcpm_log_force(port, "VBUS off");
6018 	port->vbus_present = false;
6019 	port->vbus_never_low = false;
6020 	switch (port->state) {
6021 	case SNK_HARD_RESET_SINK_OFF:
6022 		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
6023 		break;
6024 	case HARD_RESET_SEND:
6025 		break;
6026 	case SNK_TRY:
6027 		/* Do nothing, waiting for timeout */
6028 		break;
6029 	case SRC_TRYWAIT:
6030 		/* Hand over to state machine if needed */
6031 		if (tcpm_port_is_source(port))
6032 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6033 		break;
6034 	case SNK_TRY_WAIT_DEBOUNCE:
6035 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6036 		break;
6037 	case SNK_TRYWAIT:
6038 	case SNK_TRYWAIT_VBUS:
6039 	case SNK_TRYWAIT_DEBOUNCE:
6040 		break;
6041 	case SNK_ATTACH_WAIT:
6042 	case SNK_DEBOUNCED:
6043 		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
6044 		break;
6045 
6046 	case SNK_NEGOTIATE_CAPABILITIES:
6047 		break;
6048 
6049 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6050 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
6051 		break;
6052 
6053 	case PR_SWAP_SNK_SRC_SINK_OFF:
6054 		/* Do nothing, expected */
6055 		break;
6056 
6057 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6058 		/*
6059 		 * Do nothing when vbus off notification is received.
6060 		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
6061 		 * for the vbus source to ramp up.
6062 		 */
6063 		break;
6064 
6065 	case PORT_RESET_WAIT_OFF:
6066 		tcpm_set_state(port, tcpm_default_state(port), 0);
6067 		break;
6068 
6069 	case SRC_TRY_WAIT:
6070 	case SRC_TRY_DEBOUNCE:
6071 		/* Do nothing, waiting for sink detection */
6072 		break;
6073 
6074 	case SRC_STARTUP:
6075 	case SRC_SEND_CAPABILITIES:
6076 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6077 	case SRC_NEGOTIATE_CAPABILITIES:
6078 	case SRC_TRANSITION_SUPPLY:
6079 	case SRC_READY:
6080 	case SRC_WAIT_NEW_CAPABILITIES:
6081 		/*
6082 		 * Force to unattached state to re-initiate connection.
6083 		 * A DRP port should move to Unattached.SNK instead of Unattached.SRC if
6084 		 * the sink is removed. Although sink removal here is due to the source's vbus collapse,
6085 		 * treat it the same way for consistency.
6086 		 */
6087 		if (port->port_type == TYPEC_PORT_SRC)
6088 			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
6089 		else
6090 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6091 		break;
6092 
6093 	case PORT_RESET:
6094 		/*
6095 		 * State set back to default mode once the timer completes.
6096 		 * Ignore vbus changes here.
6097 		 */
6098 		break;
6099 
6100 	case FR_SWAP_SEND:
6101 	case FR_SWAP_SEND_TIMEOUT:
6102 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6103 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6104 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6105 		/* Do nothing, vbus drop expected */
6106 		break;
6107 
6108 	case SNK_HARD_RESET_WAIT_VBUS:
6109 		/* Do nothing, it's OK to receive vbus off events */
6110 		break;
6111 
6112 	default:
6113 		if (port->pwr_role == TYPEC_SINK && port->attached)
6114 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6115 		break;
6116 	}
6117 }
6118 
6119 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
6120 {
6121 	tcpm_log_force(port, "VBUS VSAFE0V");
6122 	port->vbus_vsafe0v = true;
6123 	switch (port->state) {
6124 	case SRC_HARD_RESET_VBUS_OFF:
6125 		/*
6126 		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
6127 		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
6128 		 */
6129 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
6130 		break;
6131 	case SRC_ATTACH_WAIT:
6132 		if (tcpm_port_is_source(port))
6133 			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
6134 				       PD_T_CC_DEBOUNCE);
6135 		break;
6136 	case SRC_STARTUP:
6137 	case SRC_SEND_CAPABILITIES:
6138 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6139 	case SRC_NEGOTIATE_CAPABILITIES:
6140 	case SRC_TRANSITION_SUPPLY:
6141 	case SRC_READY:
6142 	case SRC_WAIT_NEW_CAPABILITIES:
6143 		if (port->auto_vbus_discharge_enabled) {
6144 			if (port->port_type == TYPEC_PORT_SRC)
6145 				tcpm_set_state(port, SRC_UNATTACHED, 0);
6146 			else
6147 				tcpm_set_state(port, SNK_UNATTACHED, 0);
6148 		}
6149 		break;
6150 	case PR_SWAP_SNK_SRC_SINK_OFF:
6151 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6152 		/* Do nothing, vsafe0v is expected during transition */
6153 		break;
6154 	case SNK_ATTACH_WAIT:
6155 	case SNK_DEBOUNCED:
6156 		/* Do nothing, still waiting for VSAFE5V to connect */
6157 		break;
6158 	case SNK_HARD_RESET_WAIT_VBUS:
6159 		/* Do nothing, it's OK to receive vbus off events */
6160 		break;
6161 	default:
6162 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
6163 			tcpm_set_state(port, SNK_UNATTACHED, 0);
6164 		break;
6165 	}
6166 }
6167 
6168 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6169 {
6170 	tcpm_log_force(port, "Received hard reset");
6171 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6172 		port->tcpc->set_bist_data(port->tcpc, false);
6173 
6174 	switch (port->state) {
6175 	case ERROR_RECOVERY:
6176 	case PORT_RESET:
6177 	case PORT_RESET_WAIT_OFF:
6178 		return;
6179 	default:
6180 		break;
6181 	}
6182 
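	/* Abort any AMS in progress; treat this as a Hard Reset AMS while retries remain */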
6183 	if (port->ams != NONE_AMS)
6184 		port->ams = NONE_AMS;
6185 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6186 		port->ams = HARD_RESET;
6187 	/*
6188 	 * If we keep receiving hard reset requests, executing the hard reset
6189 	 * must have failed. Revert to error recovery if that happens.
6190 	 */
6191 	tcpm_set_state(port,
6192 		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6193 				HARD_RESET_START : ERROR_RECOVERY,
6194 		       0);
6195 }
6196 
6197 static void tcpm_pd_event_handler(struct kthread_work *work)
6198 {
6199 	struct tcpm_port *port = container_of(work, struct tcpm_port,
6200 					      event_work);
6201 	u32 events;
6202 
6203 	mutex_lock(&port->lock);
6204 
6205 	spin_lock(&port->pd_event_lock);
6206 	while (port->pd_events) {
6207 		events = port->pd_events;
6208 		port->pd_events = 0;
6209 		spin_unlock(&port->pd_event_lock);
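		/*
		 * Handle this snapshot of events with only the port mutex
		 * held; the spinlock is dropped so the tcpm_*_change()
		 * notifiers can queue new events, which are picked up on the
		 * next pass of the loop.
		 */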
6210 		if (events & TCPM_RESET_EVENT)
6211 			_tcpm_pd_hard_reset(port);
6212 		if (events & TCPM_VBUS_EVENT) {
6213 			bool vbus;
6214 
6215 			vbus = port->tcpc->get_vbus(port->tcpc);
6216 			if (vbus) {
6217 				_tcpm_pd_vbus_on(port);
6218 			} else {
6219 				_tcpm_pd_vbus_off(port);
6220 				/*
6221 				 * When TCPC does not support detecting vsafe0v voltage level,
6222 				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
6223 				 * to see if vbus has discharged to VSAFE0V.
6224 				 */
6225 				if (!port->tcpc->is_vbus_vsafe0v ||
6226 				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
6227 					_tcpm_pd_vbus_vsafe0v(port);
6228 			}
6229 		}
6230 		if (events & TCPM_CC_EVENT) {
6231 			enum typec_cc_status cc1, cc2;
6232 
6233 			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6234 				_tcpm_cc_change(port, cc1, cc2);
6235 		}
6236 		if (events & TCPM_FRS_EVENT) {
6237 			if (port->state == SNK_READY) {
6238 				int ret;
6239 
6240 				port->upcoming_state = FR_SWAP_SEND;
6241 				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
6242 				if (ret == -EAGAIN)
6243 					port->upcoming_state = INVALID_STATE;
6244 			} else {
6245 				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
6246 			}
6247 		}
6248 		if (events & TCPM_SOURCING_VBUS) {
6249 			tcpm_log(port, "sourcing vbus");
6250 			/*
6251 			 * In the fast role swap case the TCPC autonomously sources vbus. Set vbus_source
6252 			 * true as TCPM wouldn't have called tcpm_set_vbus.
6253 			 *
6254 			 * When vbus is sourced at the command of TCPM, i.e. TCPM called
6255 			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
6256 			 */
6257 			port->vbus_source = true;
6258 			_tcpm_pd_vbus_on(port);
6259 		}
6260 		if (events & TCPM_PORT_CLEAN) {
6261 			tcpm_log(port, "port clean");
6262 			if (port->state == CHECK_CONTAMINANT) {
6263 				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
6264 					tcpm_set_state(port, TOGGLING, 0);
6265 				else
6266 					tcpm_set_state(port, tcpm_default_state(port), 0);
6267 			}
6268 		}
6269 		if (events & TCPM_PORT_ERROR) {
6270 			tcpm_log(port, "port triggering error recovery");
6271 			tcpm_set_state(port, ERROR_RECOVERY, 0);
6272 		}
6273 
6274 		spin_lock(&port->pd_event_lock);
6275 	}
6276 	spin_unlock(&port->pd_event_lock);
6277 	mutex_unlock(&port->lock);
6278 }
6279 
6280 void tcpm_cc_change(struct tcpm_port *port)
6281 {
6282 	spin_lock(&port->pd_event_lock);
6283 	port->pd_events |= TCPM_CC_EVENT;
6284 	spin_unlock(&port->pd_event_lock);
6285 	kthread_queue_work(port->wq, &port->event_work);
6286 }
6287 EXPORT_SYMBOL_GPL(tcpm_cc_change);
6288 
6289 void tcpm_vbus_change(struct tcpm_port *port)
6290 {
6291 	spin_lock(&port->pd_event_lock);
6292 	port->pd_events |= TCPM_VBUS_EVENT;
6293 	spin_unlock(&port->pd_event_lock);
6294 	kthread_queue_work(port->wq, &port->event_work);
6295 }
6296 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6297 
6298 void tcpm_pd_hard_reset(struct tcpm_port *port)
6299 {
6300 	spin_lock(&port->pd_event_lock);
6301 	port->pd_events = TCPM_RESET_EVENT;
6302 	spin_unlock(&port->pd_event_lock);
6303 	kthread_queue_work(port->wq, &port->event_work);
6304 }
6305 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6306 
6307 void tcpm_sink_frs(struct tcpm_port *port)
6308 {
6309 	spin_lock(&port->pd_event_lock);
6310 	port->pd_events |= TCPM_FRS_EVENT;
6311 	spin_unlock(&port->pd_event_lock);
6312 	kthread_queue_work(port->wq, &port->event_work);
6313 }
6314 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6315 
6316 void tcpm_sourcing_vbus(struct tcpm_port *port)
6317 {
6318 	spin_lock(&port->pd_event_lock);
6319 	port->pd_events |= TCPM_SOURCING_VBUS;
6320 	spin_unlock(&port->pd_event_lock);
6321 	kthread_queue_work(port->wq, &port->event_work);
6322 }
6323 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6324 
6325 void tcpm_port_clean(struct tcpm_port *port)
6326 {
6327 	spin_lock(&port->pd_event_lock);
6328 	port->pd_events |= TCPM_PORT_CLEAN;
6329 	spin_unlock(&port->pd_event_lock);
6330 	kthread_queue_work(port->wq, &port->event_work);
6331 }
6332 EXPORT_SYMBOL_GPL(tcpm_port_clean);
6333 
6334 bool tcpm_port_is_toggling(struct tcpm_port *port)
6335 {
6336 	return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6337 }
6338 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6339 
6340 void tcpm_port_error_recovery(struct tcpm_port *port)
6341 {
6342 	spin_lock(&port->pd_event_lock);
6343 	port->pd_events |= TCPM_PORT_ERROR;
6344 	spin_unlock(&port->pd_event_lock);
6345 	kthread_queue_work(port->wq, &port->event_work);
6346 }
6347 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
6348 
6349 static void tcpm_enable_frs_work(struct kthread_work *work)
6350 {
6351 	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
6352 	int ret;
6353 
6354 	mutex_lock(&port->lock);
6355 	/* Not FRS capable */
6356 	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
6357 	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
6358 	    !port->tcpc->enable_frs ||
6359 	    /* Sink caps queried */
6360 	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
6361 		goto unlock;
6362 
6363 	/* Send when the state machine is idle */
6364 	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
6365 	    port->send_discover_prime)
6366 		goto resched;
6367 
6368 	port->upcoming_state = GET_SINK_CAP;
6369 	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
6370 	if (ret == -EAGAIN) {
6371 		port->upcoming_state = INVALID_STATE;
6372 	} else {
6373 		port->sink_cap_done = true;
6374 		goto unlock;
6375 	}
6376 resched:
6377 	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
6378 unlock:
6379 	mutex_unlock(&port->lock);
6380 }
6381 
6382 static void tcpm_send_discover_work(struct kthread_work *work)
6383 {
6384 	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
6385 
6386 	mutex_lock(&port->lock);
6387 	/* No need to send DISCOVER_IDENTITY anymore */
6388 	if (!port->send_discover && !port->send_discover_prime)
6389 		goto unlock;
6390 
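	/* Under PD r2.0 only the DFP may initiate Discover Identity, so a UFP drops the request */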
6391 	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
6392 		port->send_discover = false;
6393 		port->send_discover_prime = false;
6394 		goto unlock;
6395 	}
6396 
6397 	/* Retry if the port is not idle */
6398 	if ((port->state != SRC_READY && port->state != SNK_READY &&
6399 	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
6400 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
6401 		goto unlock;
6402 	}
6403 
6404 	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
6405 
6406 unlock:
6407 	mutex_unlock(&port->lock);
6408 }
6409 
6410 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
6411 {
6412 	struct tcpm_port *port = typec_get_drvdata(p);
6413 	int ret;
6414 
6415 	mutex_lock(&port->swap_lock);
6416 	mutex_lock(&port->lock);
6417 
6418 	if (port->typec_caps.data != TYPEC_PORT_DRD) {
6419 		ret = -EINVAL;
6420 		goto port_unlock;
6421 	}
6422 	if (port->state != SRC_READY && port->state != SNK_READY) {
6423 		ret = -EAGAIN;
6424 		goto port_unlock;
6425 	}
6426 
6427 	if (port->data_role == data) {
6428 		ret = 0;
6429 		goto port_unlock;
6430 	}
6431 
6432 	/*
6433 	 * XXX
6434 	 * 6.3.9: If an alternate mode is active, a request to swap
6435 	 * alternate modes shall trigger a port reset.
6436 	 * Reject data role swap request in this case.
6437 	 */
6438 
6439 	if (!port->pd_capable) {
6440 		/*
6441 		 * If the partner is not PD capable, reset the port to
6442 		 * trigger a role change. This can only work if a preferred
6443 		 * role is configured, and if it matches the requested role.
6444 		 */
6445 		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
6446 		    port->try_role == port->pwr_role) {
6447 			ret = -EINVAL;
6448 			goto port_unlock;
6449 		}
6450 		port->non_pd_role_swap = true;
6451 		tcpm_set_state(port, PORT_RESET, 0);
6452 	} else {
6453 		port->upcoming_state = DR_SWAP_SEND;
6454 		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
6455 		if (ret == -EAGAIN) {
6456 			port->upcoming_state = INVALID_STATE;
6457 			goto port_unlock;
6458 		}
6459 	}
6460 
6461 	port->swap_status = 0;
6462 	port->swap_pending = true;
6463 	reinit_completion(&port->swap_complete);
6464 	mutex_unlock(&port->lock);
6465 
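	/*
	 * Drop port->lock while waiting so the state machine can run and
	 * complete the swap; swap_lock still serializes swap requests.
	 */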
6466 	if (!wait_for_completion_timeout(&port->swap_complete,
6467 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6468 		ret = -ETIMEDOUT;
6469 	else
6470 		ret = port->swap_status;
6471 
6472 	port->non_pd_role_swap = false;
6473 	goto swap_unlock;
6474 
6475 port_unlock:
6476 	mutex_unlock(&port->lock);
6477 swap_unlock:
6478 	mutex_unlock(&port->swap_lock);
6479 	return ret;
6480 }
6481 
6482 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
6483 {
6484 	struct tcpm_port *port = typec_get_drvdata(p);
6485 	int ret;
6486 
6487 	mutex_lock(&port->swap_lock);
6488 	mutex_lock(&port->lock);
6489 
6490 	if (port->port_type != TYPEC_PORT_DRP) {
6491 		ret = -EINVAL;
6492 		goto port_unlock;
6493 	}
6494 	if (port->state != SRC_READY && port->state != SNK_READY) {
6495 		ret = -EAGAIN;
6496 		goto port_unlock;
6497 	}
6498 
6499 	if (role == port->pwr_role) {
6500 		ret = 0;
6501 		goto port_unlock;
6502 	}
6503 
6504 	port->upcoming_state = PR_SWAP_SEND;
6505 	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
6506 	if (ret == -EAGAIN) {
6507 		port->upcoming_state = INVALID_STATE;
6508 		goto port_unlock;
6509 	}
6510 
6511 	port->swap_status = 0;
6512 	port->swap_pending = true;
6513 	reinit_completion(&port->swap_complete);
6514 	mutex_unlock(&port->lock);
6515 
6516 	if (!wait_for_completion_timeout(&port->swap_complete,
6517 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6518 		ret = -ETIMEDOUT;
6519 	else
6520 		ret = port->swap_status;
6521 
6522 	goto swap_unlock;
6523 
6524 port_unlock:
6525 	mutex_unlock(&port->lock);
6526 swap_unlock:
6527 	mutex_unlock(&port->swap_lock);
6528 	return ret;
6529 }
6530 
6531 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
6532 {
6533 	struct tcpm_port *port = typec_get_drvdata(p);
6534 	int ret;
6535 
6536 	mutex_lock(&port->swap_lock);
6537 	mutex_lock(&port->lock);
6538 
6539 	if (port->state != SRC_READY && port->state != SNK_READY) {
6540 		ret = -EAGAIN;
6541 		goto port_unlock;
6542 	}
6543 
6544 	if (role == port->vconn_role) {
6545 		ret = 0;
6546 		goto port_unlock;
6547 	}
6548 
6549 	port->upcoming_state = VCONN_SWAP_SEND;
6550 	ret = tcpm_ams_start(port, VCONN_SWAP);
6551 	if (ret == -EAGAIN) {
6552 		port->upcoming_state = INVALID_STATE;
6553 		goto port_unlock;
6554 	}
6555 
6556 	port->swap_status = 0;
6557 	port->swap_pending = true;
6558 	reinit_completion(&port->swap_complete);
6559 	mutex_unlock(&port->lock);
6560 
6561 	if (!wait_for_completion_timeout(&port->swap_complete,
6562 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6563 		ret = -ETIMEDOUT;
6564 	else
6565 		ret = port->swap_status;
6566 
6567 	goto swap_unlock;
6568 
6569 port_unlock:
6570 	mutex_unlock(&port->lock);
6571 swap_unlock:
6572 	mutex_unlock(&port->swap_lock);
6573 	return ret;
6574 }
6575 
6576 static int tcpm_try_role(struct typec_port *p, int role)
6577 {
6578 	struct tcpm_port *port = typec_get_drvdata(p);
6579 	struct tcpc_dev	*tcpc = port->tcpc;
6580 	int ret = 0;
6581 
6582 	mutex_lock(&port->lock);
6583 	if (tcpc->try_role)
6584 		ret = tcpc->try_role(tcpc, role);
6585 	if (!ret)
6586 		port->try_role = role;
6587 	port->try_src_count = 0;
6588 	port->try_snk_count = 0;
6589 	mutex_unlock(&port->lock);
6590 
6591 	return ret;
6592 }
6593 
6594 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
6595 {
6596 	unsigned int target_mw;
6597 	int ret;
6598 
6599 	mutex_lock(&port->swap_lock);
6600 	mutex_lock(&port->lock);
6601 
6602 	if (!port->pps_data.active) {
6603 		ret = -EOPNOTSUPP;
6604 		goto port_unlock;
6605 	}
6606 
6607 	if (port->state != SNK_READY) {
6608 		ret = -EAGAIN;
6609 		goto port_unlock;
6610 	}
6611 
6612 	if (req_op_curr > port->pps_data.max_curr) {
6613 		ret = -EINVAL;
6614 		goto port_unlock;
6615 	}
6616 
6617 	target_mw = (req_op_curr * port->supply_voltage) / 1000;
6618 	if (target_mw < port->operating_snk_mw) {
6619 		ret = -EINVAL;
6620 		goto port_unlock;
6621 	}
6622 
6623 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6624 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6625 	if (ret == -EAGAIN) {
6626 		port->upcoming_state = INVALID_STATE;
6627 		goto port_unlock;
6628 	}
6629 
6630 	/* Round down operating current to align with PPS valid steps */
6631 	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
6632 
6633 	reinit_completion(&port->pps_complete);
6634 	port->pps_data.req_op_curr = req_op_curr;
6635 	port->pps_status = 0;
6636 	port->pps_pending = true;
6637 	mutex_unlock(&port->lock);
6638 
6639 	if (!wait_for_completion_timeout(&port->pps_complete,
6640 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6641 		ret = -ETIMEDOUT;
6642 	else
6643 		ret = port->pps_status;
6644 
6645 	goto swap_unlock;
6646 
6647 port_unlock:
6648 	mutex_unlock(&port->lock);
6649 swap_unlock:
6650 	mutex_unlock(&port->swap_lock);
6651 
6652 	return ret;
6653 }
6654 
6655 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
6656 {
6657 	unsigned int target_mw;
6658 	int ret;
6659 
6660 	mutex_lock(&port->swap_lock);
6661 	mutex_lock(&port->lock);
6662 
6663 	if (!port->pps_data.active) {
6664 		ret = -EOPNOTSUPP;
6665 		goto port_unlock;
6666 	}
6667 
6668 	if (port->state != SNK_READY) {
6669 		ret = -EAGAIN;
6670 		goto port_unlock;
6671 	}
6672 
6673 	target_mw = (port->current_limit * req_out_volt) / 1000;
6674 	if (target_mw < port->operating_snk_mw) {
6675 		ret = -EINVAL;
6676 		goto port_unlock;
6677 	}
6678 
6679 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6680 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6681 	if (ret == -EAGAIN) {
6682 		port->upcoming_state = INVALID_STATE;
6683 		goto port_unlock;
6684 	}
6685 
6686 	/* Round down output voltage to align with PPS valid steps */
6687 	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
6688 
6689 	reinit_completion(&port->pps_complete);
6690 	port->pps_data.req_out_volt = req_out_volt;
6691 	port->pps_status = 0;
6692 	port->pps_pending = true;
6693 	mutex_unlock(&port->lock);
6694 
6695 	if (!wait_for_completion_timeout(&port->pps_complete,
6696 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6697 		ret = -ETIMEDOUT;
6698 	else
6699 		ret = port->pps_status;
6700 
6701 	goto swap_unlock;
6702 
6703 port_unlock:
6704 	mutex_unlock(&port->lock);
6705 swap_unlock:
6706 	mutex_unlock(&port->swap_lock);
6707 
6708 	return ret;
6709 }
6710 
6711 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
6712 {
6713 	int ret = 0;
6714 
6715 	mutex_lock(&port->swap_lock);
6716 	mutex_lock(&port->lock);
6717 
6718 	if (!port->pps_data.supported) {
6719 		ret = -EOPNOTSUPP;
6720 		goto port_unlock;
6721 	}
6722 
6723 	/* Trying to deactivate PPS when already deactivated so just bail */
6724 	if (!port->pps_data.active && !activate)
6725 		goto port_unlock;
6726 
6727 	if (port->state != SNK_READY) {
6728 		ret = -EAGAIN;
6729 		goto port_unlock;
6730 	}
6731 
6732 	if (activate)
6733 		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6734 	else
6735 		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6736 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6737 	if (ret == -EAGAIN) {
6738 		port->upcoming_state = INVALID_STATE;
6739 		goto port_unlock;
6740 	}
6741 
6742 	reinit_completion(&port->pps_complete);
6743 	port->pps_status = 0;
6744 	port->pps_pending = true;
6745 
6746 	/* Trigger PPS request or move back to standard PDO contract */
6747 	if (activate) {
6748 		port->pps_data.req_out_volt = port->supply_voltage;
6749 		port->pps_data.req_op_curr = port->current_limit;
6750 	}
6751 	mutex_unlock(&port->lock);
6752 
6753 	if (!wait_for_completion_timeout(&port->pps_complete,
6754 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6755 		ret = -ETIMEDOUT;
6756 	else
6757 		ret = port->pps_status;
6758 
6759 	goto swap_unlock;
6760 
6761 port_unlock:
6762 	mutex_unlock(&port->lock);
6763 swap_unlock:
6764 	mutex_unlock(&port->swap_lock);
6765 
6766 	return ret;
6767 }
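
/*
 * Editorial note with an illustrative summary (not part of the driver):
 * tcpm_pps_set_op_curr(), tcpm_pps_set_out_volt() and tcpm_pps_activate()
 * are only reached through the power supply class callbacks further below
 * (tcpm_psy_set_prop()/tcpm_psy_set_online()). All three follow the same
 * pattern: validate the request against the active PPS contract under
 * port->lock, start a POWER_NEGOTIATION AMS targeting
 * SNK_NEGOTIATE_PPS_CAPABILITIES, then drop the lock and wait on
 * pps_complete for the state machine to report the result in pps_status.
 */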
6768 
6769 static void tcpm_init(struct tcpm_port *port)
6770 {
6771 	enum typec_cc_status cc1, cc2;
6772 
6773 	port->tcpc->init(port->tcpc);
6774 
6775 	tcpm_reset_port(port);
6776 
6777 	/*
6778 	 * XXX
6779 	 * Should possibly wait for VBUS to settle if it was enabled locally
6780 	 * since tcpm_reset_port() will disable VBUS.
6781 	 */
6782 	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
6783 	if (port->vbus_present)
6784 		port->vbus_never_low = true;
6785 
6786 	/*
6787 	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
6788 	 * So implicitly vbus_vsafe0v = false.
6789 	 *
6790 	 * 2. When vbus_present is false and TCPC does NOT support querying
6791 	 * vsafe0v status, it's best to assume vbus is at VSAFE0V, i.e.
6792 	 * vbus_vsafe0v is true.
6793 	 *
6794 	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
6795 	 * query the tcpc for the vsafe0v status.
6796 	 */
6797 	if (port->vbus_present)
6798 		port->vbus_vsafe0v = false;
6799 	else if (!port->tcpc->is_vbus_vsafe0v)
6800 		port->vbus_vsafe0v = true;
6801 	else
6802 		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
6803 
6804 	tcpm_set_state(port, tcpm_default_state(port), 0);
6805 
6806 	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6807 		_tcpm_cc_change(port, cc1, cc2);
6808 
6809 	/*
6810 	 * Some adapters need a clean slate at startup, and won't recover
6811 	 * otherwise. So do not try to be fancy: force a clean disconnect.
6812 	 */
6813 	tcpm_set_state(port, PORT_RESET, 0);
6814 }
6815 
6816 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6817 {
6818 	struct tcpm_port *port = typec_get_drvdata(p);
6819 
6820 	mutex_lock(&port->lock);
6821 	if (type == port->port_type)
6822 		goto port_unlock;
6823 
6824 	port->port_type = type;
6825 
6826 	if (!port->connected) {
6827 		tcpm_set_state(port, PORT_RESET, 0);
6828 	} else if (type == TYPEC_PORT_SNK) {
6829 		if (!(port->pwr_role == TYPEC_SINK &&
6830 		      port->data_role == TYPEC_DEVICE))
6831 			tcpm_set_state(port, PORT_RESET, 0);
6832 	} else if (type == TYPEC_PORT_SRC) {
6833 		if (!(port->pwr_role == TYPEC_SOURCE &&
6834 		      port->data_role == TYPEC_HOST))
6835 			tcpm_set_state(port, PORT_RESET, 0);
6836 	}
6837 
6838 port_unlock:
6839 	mutex_unlock(&port->lock);
6840 	return 0;
6841 }
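
/*
 * Illustrative usage (assumes the standard typec class sysfs layout and a
 * hypothetical port name "port0"): tcpm_port_type_set() is invoked when
 * userspace fixes or releases the port type, e.g.
 *
 *	echo sink > /sys/class/typec/port0/port_type
 *
 * If the port is disconnected it is simply reset; if it is connected and
 * already operating in a role matching the requested type, nothing happens,
 * otherwise the port is reset so that it re-attaches with the new port type.
 */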
6842 
6843 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6844 {
6845 	int i;
6846 
6847 	for (i = 0; port->pd_list[i]; i++) {
6848 		if (port->pd_list[i]->pd == pd)
6849 			return port->pd_list[i];
6850 	}
6851 
6852 	return ERR_PTR(-ENODATA);
6853 }
6854 
6855 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
6856 {
6857 	struct tcpm_port *port = typec_get_drvdata(p);
6858 
6859 	return port->pds;
6860 }
6861 
6862 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6863 {
6864 	struct tcpm_port *port = typec_get_drvdata(p);
6865 	struct pd_data *data;
6866 	int i, ret = 0;
6867 
6868 	mutex_lock(&port->lock);
6869 
6870 	if (port->selected_pd == pd)
6871 		goto unlock;
6872 
6873 	data = tcpm_find_pd_data(port, pd);
6874 	if (IS_ERR(data)) {
6875 		ret = PTR_ERR(data);
6876 		goto unlock;
6877 	}
6878 
6879 	if (data->sink_desc.pdo[0]) {
6880 		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6881 			port->snk_pdo[i] = data->sink_desc.pdo[i];
6882 		port->nr_snk_pdo = i;
6883 		port->operating_snk_mw = data->operating_snk_mw;
6884 	}
6885 
6886 	if (data->source_desc.pdo[0]) {
6887 		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
6888 			port->src_pdo[i] = data->source_desc.pdo[i];
6889 		port->nr_src_pdo = i;
6890 	}
6891 
6892 	switch (port->state) {
6893 	case SRC_UNATTACHED:
6894 	case SRC_ATTACH_WAIT:
6895 	case SRC_TRYWAIT:
6896 		tcpm_set_cc(port, tcpm_rp_cc(port));
6897 		break;
6898 	case SRC_SEND_CAPABILITIES:
6899 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6900 	case SRC_NEGOTIATE_CAPABILITIES:
6901 	case SRC_READY:
6902 	case SRC_WAIT_NEW_CAPABILITIES:
6903 		port->caps_count = 0;
6904 		port->upcoming_state = SRC_SEND_CAPABILITIES;
6905 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6906 		if (ret == -EAGAIN) {
6907 			port->upcoming_state = INVALID_STATE;
6908 			goto unlock;
6909 		}
6910 		break;
6911 	case SNK_NEGOTIATE_CAPABILITIES:
6912 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
6913 	case SNK_READY:
6914 	case SNK_TRANSITION_SINK:
6915 	case SNK_TRANSITION_SINK_VBUS:
6916 		if (port->pps_data.active)
6917 			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6918 		else if (port->pd_capable)
6919 			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6920 		else
6921 			break;
6922 
6923 		port->update_sink_caps = true;
6924 
6925 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6926 		if (ret == -EAGAIN) {
6927 			port->upcoming_state = INVALID_STATE;
6928 			goto unlock;
6929 		}
6930 		break;
6931 	default:
6932 		break;
6933 	}
6934 
6935 	port->port_source_caps = data->source_cap;
6936 	port->port_sink_caps = data->sink_cap;
6937 	typec_port_set_usb_power_delivery(p, NULL);
6938 	port->selected_pd = pd;
6939 	typec_port_set_usb_power_delivery(p, port->selected_pd);
6940 unlock:
6941 	mutex_unlock(&port->lock);
6942 	return ret;
6943 }
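
/*
 * Editorial note: tcpm_pd_set() is the typec class .pd_set() callback. It is
 * called when one of the usb_power_delivery objects registered in
 * tcpm_port_register_pd() below is selected for the port. Depending on the
 * current state it either just updates the advertised Rp, re-sends source
 * capabilities, or renegotiates the sink contract; the selected PD object is
 * then published again via typec_port_set_usb_power_delivery().
 */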
6944 
6945 static const struct typec_operations tcpm_ops = {
6946 	.try_role = tcpm_try_role,
6947 	.dr_set = tcpm_dr_set,
6948 	.pr_set = tcpm_pr_set,
6949 	.vconn_set = tcpm_vconn_set,
6950 	.port_type_set = tcpm_port_type_set,
6951 	.pd_get = tcpm_pd_get,
6952 	.pd_set = tcpm_pd_set
6953 };
6954 
6955 void tcpm_tcpc_reset(struct tcpm_port *port)
6956 {
6957 	mutex_lock(&port->lock);
6958 	/* XXX: Maintain PD connection if possible? */
6959 	tcpm_init(port);
6960 	mutex_unlock(&port->lock);
6961 }
6962 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
6963 
6964 static void tcpm_port_unregister_pd(struct tcpm_port *port)
6965 {
6966 	int i;
6967 
6968 	port->port_sink_caps = NULL;
6969 	port->port_source_caps = NULL;
6970 	for (i = 0; i < port->pd_count; i++) {
6971 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
6972 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
6973 		devm_kfree(port->dev, port->pd_list[i]);
6974 		port->pd_list[i] = NULL;
6975 		usb_power_delivery_unregister(port->pds[i]);
6976 		port->pds[i] = NULL;
6977 	}
6978 }
6979 
6980 static int tcpm_port_register_pd(struct tcpm_port *port)
6981 {
6982 	struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
6983 	struct usb_power_delivery_capabilities *cap;
6984 	int ret, i;
6985 
6986 	if (!port->nr_src_pdo && !port->nr_snk_pdo)
6987 		return 0;
6988 
6989 	for (i = 0; i < port->pd_count; i++) {
6990 		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
6991 		if (IS_ERR(port->pds[i])) {
6992 			ret = PTR_ERR(port->pds[i]);
6993 			goto err_unregister;
6994 		}
6995 		port->pd_list[i]->pd = port->pds[i];
6996 
6997 		if (port->pd_list[i]->source_desc.pdo[0]) {
6998 			cap = usb_power_delivery_register_capabilities(port->pds[i],
6999 								&port->pd_list[i]->source_desc);
7000 			if (IS_ERR(cap)) {
7001 				ret = PTR_ERR(cap);
7002 				goto err_unregister;
7003 			}
7004 			port->pd_list[i]->source_cap = cap;
7005 		}
7006 
7007 		if (port->pd_list[i]->sink_desc.pdo[0]) {
7008 			cap = usb_power_delivery_register_capabilities(port->pds[i],
7009 								&port->pd_list[i]->sink_desc);
7010 			if (IS_ERR(cap)) {
7011 				ret = PTR_ERR(cap);
7012 				goto err_unregister;
7013 			}
7014 			port->pd_list[i]->sink_cap = cap;
7015 		}
7016 	}
7017 
7018 	port->port_source_caps = port->pd_list[0]->source_cap;
7019 	port->port_sink_caps = port->pd_list[0]->sink_cap;
7020 	port->selected_pd = port->pds[0];
7021 	return 0;
7022 
7023 err_unregister:
7024 	tcpm_port_unregister_pd(port);
7025 
7026 	return ret;
7027 }
7028 
7029 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
7030 {
7031 	struct fwnode_handle *capabilities, *child, *caps = NULL;
7032 	unsigned int nr_src_pdo, nr_snk_pdo;
7033 	const char *opmode_str;
7034 	u32 *src_pdo, *snk_pdo;
7035 	u32 uw, frs_current;
7036 	int ret = 0, i;
7037 	int mode;
7038 
7039 	if (!fwnode)
7040 		return -EINVAL;
7041 
7042 	/*
7043 	 * This fwnode has a "compatible" property, but is never populated as a
7044 	 * struct device. Instead we simply parse it to read the properties.
7045 	 * This breaks fw_devlink=on. To maintain backward compatibility
7046 	 * with existing DT files, we work around this by deleting any
7047 	 * fwnode_links to/from this fwnode.
7048 	 */
7049 	fw_devlink_purge_absent_suppliers(fwnode);
7050 
7051 	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
7052 	if (ret < 0)
7053 		return ret;
7054 
7055 	mode = 0;
7056 
7057 	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
7058 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
7059 
7060 	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
7061 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
7062 
7063 	port->port_type = port->typec_caps.type;
7064 	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
7065 	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
7066 	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
7067 
7068 	if (!port->pd_supported) {
7069 		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
7070 		if (ret)
7071 			return ret;
7072 		ret = typec_find_pwr_opmode(opmode_str);
7073 		if (ret < 0)
7074 			return ret;
7075 		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
7076 		return 0;
7077 	}
7078 
7079 	/* The following code is applicable to pd-capable ports, i.e. pd_supported is true. */
7080 
7081 	/* FRS can only be supported by DRP ports */
7082 	if (port->port_type == TYPEC_PORT_DRP) {
7083 		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
7084 					       &frs_current);
7085 		if (!ret && frs_current <= FRS_5V_3A)
7086 			port->new_source_frs_current = frs_current;
7087 
7088 		if (ret)
7089 			ret = 0;
7090 	}
7091 
7092 	/* For backward compatibility, the "capabilities" node is optional. */
7093 	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
7094 	if (!capabilities) {
7095 		port->pd_count = 1;
7096 	} else {
7097 		fwnode_for_each_child_node(capabilities, child)
7098 			port->pd_count++;
7099 
7100 		if (!port->pd_count) {
7101 			ret = -ENODATA;
7102 			goto put_capabilities;
7103 		}
7104 	}
7105 
7106 	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
7107 				 GFP_KERNEL);
7108 	if (!port->pds) {
7109 		ret = -ENOMEM;
7110 		goto put_capabilities;
7111 	}
7112 
7113 	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
7114 				     GFP_KERNEL);
7115 	if (!port->pd_list) {
7116 		ret = -ENOMEM;
7117 		goto put_capabilities;
7118 	}
7119 
7120 	for (i = 0; i < port->pd_count; i++) {
7121 		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
7122 		if (!port->pd_list[i]) {
7123 			ret = -ENOMEM;
7124 			goto put_capabilities;
7125 		}
7126 
7127 		src_pdo = port->pd_list[i]->source_desc.pdo;
7128 		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
7129 		snk_pdo = port->pd_list[i]->sink_desc.pdo;
7130 		port->pd_list[i]->sink_desc.role = TYPEC_SINK;
7131 
7132 		/* If "capabilities" is NULL, fall back to single pd cap population. */
7133 		if (!capabilities)
7134 			caps = fwnode;
7135 		else
7136 			caps = fwnode_get_next_child_node(capabilities, caps);
7137 
7138 		if (port->port_type != TYPEC_PORT_SNK) {
7139 			ret = fwnode_property_count_u32(caps, "source-pdos");
7140 			if (ret == 0) {
7141 				ret = -EINVAL;
7142 				goto put_caps;
7143 			}
7144 			if (ret < 0)
7145 				goto put_caps;
7146 
7147 			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
7148 			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
7149 							     nr_src_pdo);
7150 			if (ret)
7151 				goto put_caps;
7152 
7153 			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
7154 			if (ret)
7155 				goto put_caps;
7156 
7157 			if (i == 0) {
7158 				port->nr_src_pdo = nr_src_pdo;
7159 				memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7160 					       port->pd_list[0]->source_desc.pdo,
7161 					       sizeof(u32) * nr_src_pdo,
7162 					       0);
7163 			}
7164 		}
7165 
7166 		if (port->port_type != TYPEC_PORT_SRC) {
7167 			ret = fwnode_property_count_u32(caps, "sink-pdos");
7168 			if (ret == 0) {
7169 				ret = -EINVAL;
7170 				goto put_caps;
7171 			}
7172 
7173 			if (ret < 0)
7174 				goto put_caps;
7175 
7176 			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
7177 			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
7178 							     nr_snk_pdo);
7179 			if (ret)
7180 				goto put_caps;
7181 
7182 			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
7183 			if (ret)
7184 				goto put_caps;
7185 
7186 			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
7187 				ret = -EINVAL;
7188 				goto put_caps;
7189 			}
7190 
7191 			port->pd_list[i]->operating_snk_mw = uw / 1000;
7192 
7193 			if (i == 0) {
7194 				port->nr_snk_pdo = nr_snk_pdo;
7195 				memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7196 					       port->pd_list[0]->sink_desc.pdo,
7197 					       sizeof(u32) * nr_snk_pdo,
7198 					       0);
7199 				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
7200 			}
7201 		}
7202 	}
7203 
7204 put_caps:
7205 	if (caps != fwnode)
7206 		fwnode_handle_put(caps);
7207 put_capabilities:
7208 	fwnode_handle_put(capabilities);
7209 	return ret;
7210 }
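
/*
 * Illustrative firmware description for the properties parsed above (a
 * minimal sketch following the usb-c-connector binding; PDO values are
 * example numbers only, not recommendations):
 *
 *	connector {
 *		compatible = "usb-c-connector";
 *		power-role = "dual";
 *		try-power-role = "sink";
 *		source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
 *		sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
 *			     PDO_PPS_APDO(5000, 12000, 3000)>;
 *		op-sink-microwatt = <10000000>;
 *	};
 *
 * "power-role" and "try-power-role" are consumed by typec_get_fw_cap(); the
 * PDO arrays and "op-sink-microwatt" are read either directly from the
 * connector node or from each child of an optional "capabilities" node.
 */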
7211 
7212 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
7213 {
7214 	int ret;
7215 
7216 	/* sink-vdos is optional */
7217 	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
7218 	if (ret < 0)
7219 		return 0;
7220 
7221 	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
7222 	if (port->nr_snk_vdo) {
7223 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
7224 						     port->snk_vdo,
7225 						     port->nr_snk_vdo);
7226 		if (ret < 0)
7227 			return ret;
7228 	}
7229 
7230 	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
7231 	if (port->nr_snk_vdo) {
7232 		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
7233 		if (ret < 0)
7234 			return ret;
7235 		else if (ret == 0)
7236 			return -ENODATA;
7237 
7238 		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
7239 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
7240 						     port->snk_vdo_v1,
7241 						     port->nr_snk_vdo_v1);
7242 		if (ret < 0)
7243 			return ret;
7244 	}
7245 
7246 	return 0;
7247 }
7248 
7249 /* Power Supply access to expose source power information */
7250 enum tcpm_psy_online_states {
7251 	TCPM_PSY_OFFLINE = 0,
7252 	TCPM_PSY_FIXED_ONLINE,
7253 	TCPM_PSY_PROG_ONLINE,
7254 };
7255 
7256 static enum power_supply_property tcpm_psy_props[] = {
7257 	POWER_SUPPLY_PROP_USB_TYPE,
7258 	POWER_SUPPLY_PROP_ONLINE,
7259 	POWER_SUPPLY_PROP_VOLTAGE_MIN,
7260 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
7261 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
7262 	POWER_SUPPLY_PROP_CURRENT_MAX,
7263 	POWER_SUPPLY_PROP_CURRENT_NOW,
7264 };
7265 
7266 static int tcpm_psy_get_online(struct tcpm_port *port,
7267 			       union power_supply_propval *val)
7268 {
7269 	if (port->vbus_charge) {
7270 		if (port->pps_data.active)
7271 			val->intval = TCPM_PSY_PROG_ONLINE;
7272 		else
7273 			val->intval = TCPM_PSY_FIXED_ONLINE;
7274 	} else {
7275 		val->intval = TCPM_PSY_OFFLINE;
7276 	}
7277 
7278 	return 0;
7279 }
7280 
7281 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
7282 				    union power_supply_propval *val)
7283 {
7284 	if (port->pps_data.active)
7285 		val->intval = port->pps_data.min_volt * 1000;
7286 	else
7287 		val->intval = port->supply_voltage * 1000;
7288 
7289 	return 0;
7290 }
7291 
7292 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
7293 				    union power_supply_propval *val)
7294 {
7295 	if (port->pps_data.active)
7296 		val->intval = port->pps_data.max_volt * 1000;
7297 	else
7298 		val->intval = port->supply_voltage * 1000;
7299 
7300 	return 0;
7301 }
7302 
7303 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
7304 				    union power_supply_propval *val)
7305 {
7306 	val->intval = port->supply_voltage * 1000;
7307 
7308 	return 0;
7309 }
7310 
7311 static int tcpm_psy_get_current_max(struct tcpm_port *port,
7312 				    union power_supply_propval *val)
7313 {
7314 	if (port->pps_data.active)
7315 		val->intval = port->pps_data.max_curr * 1000;
7316 	else
7317 		val->intval = port->current_limit * 1000;
7318 
7319 	return 0;
7320 }
7321 
7322 static int tcpm_psy_get_current_now(struct tcpm_port *port,
7323 				    union power_supply_propval *val)
7324 {
7325 	val->intval = port->current_limit * 1000;
7326 
7327 	return 0;
7328 }
7329 
7330 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
7331 					  union power_supply_propval *val)
7332 {
7333 	unsigned int src_mv, src_ma, max_src_uw = 0;
7334 	unsigned int i, tmp;
7335 
7336 	for (i = 0; i < port->nr_source_caps; i++) {
7337 		u32 pdo = port->source_caps[i];
7338 
7339 		if (pdo_type(pdo) == PDO_TYPE_FIXED) {
7340 			src_mv = pdo_fixed_voltage(pdo);
7341 			src_ma = pdo_max_current(pdo);
7342 			tmp = src_mv * src_ma;
7343 			max_src_uw = tmp > max_src_uw ? tmp : max_src_uw;
7344 		}
7345 	}
7346 
7347 	val->intval = max_src_uw;
7348 	return 0;
7349 }
7350 
7351 static int tcpm_psy_get_prop(struct power_supply *psy,
7352 			     enum power_supply_property psp,
7353 			     union power_supply_propval *val)
7354 {
7355 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7356 	int ret = 0;
7357 
7358 	switch (psp) {
7359 	case POWER_SUPPLY_PROP_USB_TYPE:
7360 		val->intval = port->usb_type;
7361 		break;
7362 	case POWER_SUPPLY_PROP_ONLINE:
7363 		ret = tcpm_psy_get_online(port, val);
7364 		break;
7365 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
7366 		ret = tcpm_psy_get_voltage_min(port, val);
7367 		break;
7368 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
7369 		ret = tcpm_psy_get_voltage_max(port, val);
7370 		break;
7371 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7372 		ret = tcpm_psy_get_voltage_now(port, val);
7373 		break;
7374 	case POWER_SUPPLY_PROP_CURRENT_MAX:
7375 		ret = tcpm_psy_get_current_max(port, val);
7376 		break;
7377 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7378 		ret = tcpm_psy_get_current_now(port, val);
7379 		break;
7380 	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
7381 		tcpm_psy_get_input_power_limit(port, val);
7382 		break;
7383 	default:
7384 		ret = -EINVAL;
7385 		break;
7386 	}
7387 
7388 	return ret;
7389 }
7390 
7391 static int tcpm_psy_set_online(struct tcpm_port *port,
7392 			       const union power_supply_propval *val)
7393 {
7394 	int ret;
7395 
7396 	switch (val->intval) {
7397 	case TCPM_PSY_FIXED_ONLINE:
7398 		ret = tcpm_pps_activate(port, false);
7399 		break;
7400 	case TCPM_PSY_PROG_ONLINE:
7401 		ret = tcpm_pps_activate(port, true);
7402 		break;
7403 	default:
7404 		ret = -EINVAL;
7405 		break;
7406 	}
7407 
7408 	return ret;
7409 }
7410 
7411 static int tcpm_psy_set_prop(struct power_supply *psy,
7412 			     enum power_supply_property psp,
7413 			     const union power_supply_propval *val)
7414 {
7415 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7416 	int ret;
7417 
7418 	/*
7419 	 * All the properties below are related to USB PD. This check will need
7420 	 * to become property specific once a non-PD-related property is added.
7421 	 */
7422 	if (!port->pd_supported)
7423 		return -EOPNOTSUPP;
7424 
7425 	switch (psp) {
7426 	case POWER_SUPPLY_PROP_ONLINE:
7427 		ret = tcpm_psy_set_online(port, val);
7428 		break;
7429 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7430 		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
7431 		break;
7432 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7433 		if (val->intval > port->pps_data.max_curr * 1000)
7434 			ret = -EINVAL;
7435 		else
7436 			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
7437 		break;
7438 	default:
7439 		ret = -EINVAL;
7440 		break;
7441 	}
7442 	power_supply_changed(port->psy);
7443 	return ret;
7444 }
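
/*
 * Illustrative example (assumes the power supply registered by
 * devm_tcpm_psy_register() below, with a hypothetical device name): a PPS
 * contract can be controlled from userspace with standard power supply
 * properties, values in microvolts/microamps as usual for the class:
 *
 *	# switch to a programmable (PPS) contract
 *	echo 2 > /sys/class/power_supply/tcpm-source-psy-example/online
 *	# request 9 V / 2 A within the negotiated APDO limits
 *	echo 9000000 > /sys/class/power_supply/tcpm-source-psy-example/voltage_now
 *	echo 2000000 > /sys/class/power_supply/tcpm-source-psy-example/current_now
 *
 * tcpm_psy_set_prop() converts the uV/uA values to mV/mA before handing
 * them to the tcpm_pps_* helpers.
 */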
7445 
7446 static int tcpm_psy_prop_writeable(struct power_supply *psy,
7447 				   enum power_supply_property psp)
7448 {
7449 	switch (psp) {
7450 	case POWER_SUPPLY_PROP_ONLINE:
7451 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7452 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7453 		return 1;
7454 	default:
7455 		return 0;
7456 	}
7457 }
7458 
7459 static enum power_supply_usb_type tcpm_psy_usb_types[] = {
7460 	POWER_SUPPLY_USB_TYPE_C,
7461 	POWER_SUPPLY_USB_TYPE_PD,
7462 	POWER_SUPPLY_USB_TYPE_PD_PPS,
7463 };
7464 
7465 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
7466 
7467 static int devm_tcpm_psy_register(struct tcpm_port *port)
7468 {
7469 	struct power_supply_config psy_cfg = {};
7470 	const char *port_dev_name = dev_name(port->dev);
7471 	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
7472 				     strlen(port_dev_name) + 1;
7473 	char *psy_name;
7474 
7475 	psy_cfg.drv_data = port;
7476 	psy_cfg.fwnode = dev_fwnode(port->dev);
7477 	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
7478 	if (!psy_name)
7479 		return -ENOMEM;
7480 
7481 	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
7482 		 port_dev_name);
7483 	port->psy_desc.name = psy_name;
7484 	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
7485 	port->psy_desc.usb_types = tcpm_psy_usb_types;
7486 	port->psy_desc.num_usb_types = ARRAY_SIZE(tcpm_psy_usb_types);
7487 	port->psy_desc.properties = tcpm_psy_props;
7488 	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
7489 	port->psy_desc.get_property = tcpm_psy_get_prop;
7490 	port->psy_desc.set_property = tcpm_psy_set_prop;
7491 	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
7492 
7493 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
7494 
7495 	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
7496 					       &psy_cfg);
7497 
7498 	return PTR_ERR_OR_ZERO(port->psy);
7499 }
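
/*
 * Editorial note: the registered supply describes the power available from
 * the attached source (i.e. what this port can draw), not power delivered by
 * this port. Its name is the "tcpm-source-psy-" prefix followed by the parent
 * device name, so for a hypothetical parent device "2-0022" it would appear
 * as "tcpm-source-psy-2-0022".
 */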
7500 
7501 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
7502 {
7503 	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
7504 
7505 	if (port->registered)
7506 		kthread_queue_work(port->wq, &port->state_machine);
7507 	return HRTIMER_NORESTART;
7508 }
7509 
7510 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
7511 {
7512 	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
7513 
7514 	if (port->registered)
7515 		kthread_queue_work(port->wq, &port->vdm_state_machine);
7516 	return HRTIMER_NORESTART;
7517 }
7518 
7519 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
7520 {
7521 	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
7522 
7523 	if (port->registered)
7524 		kthread_queue_work(port->wq, &port->enable_frs);
7525 	return HRTIMER_NORESTART;
7526 }
7527 
7528 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
7529 {
7530 	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
7531 
7532 	if (port->registered)
7533 		kthread_queue_work(port->wq, &port->send_discover_work);
7534 	return HRTIMER_NORESTART;
7535 }
7536 
7537 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
7538 {
7539 	struct tcpm_port *port;
7540 	int err;
7541 
7542 	if (!dev || !tcpc ||
7543 	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
7544 	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
7545 	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
7546 		return ERR_PTR(-EINVAL);
7547 
7548 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
7549 	if (!port)
7550 		return ERR_PTR(-ENOMEM);
7551 
7552 	port->dev = dev;
7553 	port->tcpc = tcpc;
7554 
7555 	mutex_init(&port->lock);
7556 	mutex_init(&port->swap_lock);
7557 
7558 	port->wq = kthread_create_worker(0, dev_name(dev));
7559 	if (IS_ERR(port->wq))
7560 		return ERR_CAST(port->wq);
7561 	sched_set_fifo(port->wq->task);
7562 
7563 	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
7564 	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
7565 	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
7566 	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
7567 	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
7568 	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7569 	port->state_machine_timer.function = state_machine_timer_handler;
7570 	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7571 	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
7572 	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7573 	port->enable_frs_timer.function = enable_frs_timer_handler;
7574 	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7575 	port->send_discover_timer.function = send_discover_timer_handler;
7576 
7577 	spin_lock_init(&port->pd_event_lock);
7578 
7579 	init_completion(&port->tx_complete);
7580 	init_completion(&port->swap_complete);
7581 	init_completion(&port->pps_complete);
7582 	tcpm_debugfs_init(port);
7583 
7584 	err = tcpm_fw_get_caps(port, tcpc->fwnode);
7585 	if (err < 0)
7586 		goto out_destroy_wq;
7587 	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
7588 	if (err < 0)
7589 		goto out_destroy_wq;
7590 
7591 	port->try_role = port->typec_caps.prefer_role;
7592 
7593 	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
7594 	port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
7595 	port->typec_caps.svdm_version = SVDM_VER_2_0;
7596 	port->typec_caps.driver_data = port;
7597 	port->typec_caps.ops = &tcpm_ops;
7598 	port->typec_caps.orientation_aware = 1;
7599 
7600 	port->partner_desc.identity = &port->partner_ident;
7601 
7602 	port->role_sw = usb_role_switch_get(port->dev);
7603 	if (!port->role_sw)
7604 		port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
7605 	if (IS_ERR(port->role_sw)) {
7606 		err = PTR_ERR(port->role_sw);
7607 		goto out_destroy_wq;
7608 	}
7609 
7610 	err = devm_tcpm_psy_register(port);
7611 	if (err)
7612 		goto out_role_sw_put;
7613 	power_supply_changed(port->psy);
7614 
7615 	err = tcpm_port_register_pd(port);
7616 	if (err)
7617 		goto out_role_sw_put;
7618 
7619 	if (port->pds)
7620 		port->typec_caps.pd = port->pds[0];
7621 
7622 	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
7623 	if (IS_ERR(port->typec_port)) {
7624 		err = PTR_ERR(port->typec_port);
7625 		goto out_unregister_pd;
7626 	}
7627 
7628 	typec_port_register_altmodes(port->typec_port,
7629 				     &tcpm_altmode_ops, port,
7630 				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
7631 	typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
7632 				      &tcpm_cable_ops);
7633 	port->registered = true;
7634 
7635 	mutex_lock(&port->lock);
7636 	tcpm_init(port);
7637 	mutex_unlock(&port->lock);
7638 
7639 	tcpm_log(port, "%s: registered", dev_name(dev));
7640 	return port;
7641 
7642 out_unregister_pd:
7643 	tcpm_port_unregister_pd(port);
7644 out_role_sw_put:
7645 	usb_role_switch_put(port->role_sw);
7646 out_destroy_wq:
7647 	tcpm_debugfs_exit(port);
7648 	kthread_destroy_worker(port->wq);
7649 	return ERR_PTR(err);
7650 }
7651 EXPORT_SYMBOL_GPL(tcpm_register_port);
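
/*
 * Illustrative caller sketch (assumes a hypothetical low-level TCPC driver
 * "foo"; shown are the callbacks checked as mandatory above, plus .init,
 * which tcpm_init() invokes unconditionally):
 *
 *	foo->tcpc.fwnode = device_get_named_child_node(dev, "connector");
 *	foo->tcpc.init = foo_init;
 *	foo->tcpc.get_vbus = foo_get_vbus;
 *	foo->tcpc.set_cc = foo_set_cc;
 *	foo->tcpc.get_cc = foo_get_cc;
 *	foo->tcpc.set_polarity = foo_set_polarity;
 *	foo->tcpc.set_vconn = foo_set_vconn;
 *	foo->tcpc.set_vbus = foo_set_vbus;
 *	foo->tcpc.set_pd_rx = foo_set_pd_rx;
 *	foo->tcpc.set_roles = foo_set_roles;
 *	foo->tcpc.pd_transmit = foo_pd_transmit;
 *
 *	foo->tcpm_port = tcpm_register_port(dev, &foo->tcpc);
 *	if (IS_ERR(foo->tcpm_port))
 *		return PTR_ERR(foo->tcpm_port);
 *
 * The driver then forwards CC/VBUS/PD events with helpers such as
 * tcpm_cc_change() and tcpm_vbus_change(), and calls tcpm_unregister_port()
 * on removal.
 */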
7652 
7653 void tcpm_unregister_port(struct tcpm_port *port)
7654 {
7655 	int i;
7656 
7657 	port->registered = false;
7658 	kthread_destroy_worker(port->wq);
7659 
7660 	hrtimer_cancel(&port->send_discover_timer);
7661 	hrtimer_cancel(&port->enable_frs_timer);
7662 	hrtimer_cancel(&port->vdm_state_machine_timer);
7663 	hrtimer_cancel(&port->state_machine_timer);
7664 
7665 	tcpm_reset_port(port);
7666 
7667 	tcpm_port_unregister_pd(port);
7668 
7669 	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
7670 		typec_unregister_altmode(port->port_altmode[i]);
7671 	typec_unregister_port(port->typec_port);
7672 	usb_role_switch_put(port->role_sw);
7673 	tcpm_debugfs_exit(port);
7674 }
7675 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7676 
7677 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
7678 MODULE_DESCRIPTION("USB Type-C Port Manager");
7679 MODULE_LICENSE("GPL");
7680