xref: /linux/drivers/usb/typec/tcpm/tcpm.c (revision f5e9d31e79c1ce8ba948ecac74d75e9c8d2f0c87)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2015-2017 Google, Inc
4  *
5  * USB Power Delivery protocol stack.
6  */
7 
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/string_choices.h>
25 #include <linux/usb.h>
26 #include <linux/usb/pd.h>
27 #include <linux/usb/pd_ado.h>
28 #include <linux/usb/pd_bdo.h>
29 #include <linux/usb/pd_ext_sdb.h>
30 #include <linux/usb/pd_vdo.h>
31 #include <linux/usb/role.h>
32 #include <linux/usb/tcpm.h>
33 #include <linux/usb/typec_altmode.h>
34 
35 #include <uapi/linux/sched/types.h>
36 
37 #define FOREACH_STATE(S)			\
38 	S(INVALID_STATE),			\
39 	S(TOGGLING),			\
40 	S(CHECK_CONTAMINANT),			\
41 	S(SRC_UNATTACHED),			\
42 	S(SRC_ATTACH_WAIT),			\
43 	S(SRC_ATTACHED),			\
44 	S(SRC_STARTUP),				\
45 	S(SRC_SEND_CAPABILITIES),		\
46 	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
47 	S(SRC_NEGOTIATE_CAPABILITIES),		\
48 	S(SRC_TRANSITION_SUPPLY),		\
49 	S(SRC_READY),				\
50 	S(SRC_WAIT_NEW_CAPABILITIES),		\
51 						\
52 	S(SNK_UNATTACHED),			\
53 	S(SNK_ATTACH_WAIT),			\
54 	S(SNK_DEBOUNCED),			\
55 	S(SNK_ATTACHED),			\
56 	S(SNK_STARTUP),				\
57 	S(SNK_DISCOVERY),			\
58 	S(SNK_DISCOVERY_DEBOUNCE),		\
59 	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
60 	S(SNK_WAIT_CAPABILITIES),		\
61 	S(SNK_WAIT_CAPABILITIES_TIMEOUT),	\
62 	S(SNK_NEGOTIATE_CAPABILITIES),		\
63 	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
64 	S(SNK_TRANSITION_SINK),			\
65 	S(SNK_TRANSITION_SINK_VBUS),		\
66 	S(SNK_READY),				\
67 						\
68 	S(ACC_UNATTACHED),			\
69 	S(DEBUG_ACC_ATTACHED),			\
70 	S(DEBUG_ACC_DEBOUNCE),			\
71 	S(AUDIO_ACC_ATTACHED),			\
72 	S(AUDIO_ACC_DEBOUNCE),			\
73 						\
74 	S(HARD_RESET_SEND),			\
75 	S(HARD_RESET_START),			\
76 	S(SRC_HARD_RESET_VBUS_OFF),		\
77 	S(SRC_HARD_RESET_VBUS_ON),		\
78 	S(SNK_HARD_RESET_SINK_OFF),		\
79 	S(SNK_HARD_RESET_WAIT_VBUS),		\
80 	S(SNK_HARD_RESET_SINK_ON),		\
81 						\
82 	S(SOFT_RESET),				\
83 	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
84 	S(SNK_SOFT_RESET),			\
85 	S(SOFT_RESET_SEND),			\
86 						\
87 	S(DR_SWAP_ACCEPT),			\
88 	S(DR_SWAP_SEND),			\
89 	S(DR_SWAP_SEND_TIMEOUT),		\
90 	S(DR_SWAP_CANCEL),			\
91 	S(DR_SWAP_CHANGE_DR),			\
92 						\
93 	S(PR_SWAP_ACCEPT),			\
94 	S(PR_SWAP_SEND),			\
95 	S(PR_SWAP_SEND_TIMEOUT),		\
96 	S(PR_SWAP_CANCEL),			\
97 	S(PR_SWAP_START),			\
98 	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
99 	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
100 	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
101 	S(PR_SWAP_SRC_SNK_SINK_ON),		\
102 	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
103 	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
104 	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
105 						\
106 	S(VCONN_SWAP_ACCEPT),			\
107 	S(VCONN_SWAP_SEND),			\
108 	S(VCONN_SWAP_SEND_TIMEOUT),		\
109 	S(VCONN_SWAP_CANCEL),			\
110 	S(VCONN_SWAP_START),			\
111 	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
112 	S(VCONN_SWAP_TURN_ON_VCONN),		\
113 	S(VCONN_SWAP_TURN_OFF_VCONN),		\
114 	S(VCONN_SWAP_SEND_SOFT_RESET),		\
115 						\
116 	S(FR_SWAP_SEND),			\
117 	S(FR_SWAP_SEND_TIMEOUT),		\
118 	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
119 	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
120 	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
121 	S(FR_SWAP_CANCEL),			\
122 						\
123 	S(SNK_TRY),				\
124 	S(SNK_TRY_WAIT),			\
125 	S(SNK_TRY_WAIT_DEBOUNCE),               \
126 	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
127 	S(SRC_TRYWAIT),				\
128 	S(SRC_TRYWAIT_DEBOUNCE),		\
129 	S(SRC_TRYWAIT_UNATTACHED),		\
130 						\
131 	S(SRC_TRY),				\
132 	S(SRC_TRY_WAIT),                        \
133 	S(SRC_TRY_DEBOUNCE),			\
134 	S(SNK_TRYWAIT),				\
135 	S(SNK_TRYWAIT_DEBOUNCE),		\
136 	S(SNK_TRYWAIT_VBUS),			\
137 	S(BIST_RX),				\
138 						\
139 	S(GET_STATUS_SEND),			\
140 	S(GET_STATUS_SEND_TIMEOUT),		\
141 	S(GET_PPS_STATUS_SEND),			\
142 	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
143 						\
144 	S(GET_SINK_CAP),			\
145 	S(GET_SINK_CAP_TIMEOUT),		\
146 						\
147 	S(ERROR_RECOVERY),			\
148 	S(PORT_RESET),				\
149 	S(PORT_RESET_WAIT_OFF),			\
150 						\
151 	S(AMS_START),				\
152 	S(CHUNK_NOT_SUPP),			\
153 						\
154 	S(SRC_VDM_IDENTITY_REQUEST)
155 
156 #define FOREACH_AMS(S)				\
157 	S(NONE_AMS),				\
158 	S(POWER_NEGOTIATION),			\
159 	S(GOTOMIN),				\
160 	S(SOFT_RESET_AMS),			\
161 	S(HARD_RESET),				\
162 	S(CABLE_RESET),				\
163 	S(GET_SOURCE_CAPABILITIES),		\
164 	S(GET_SINK_CAPABILITIES),		\
165 	S(POWER_ROLE_SWAP),			\
166 	S(FAST_ROLE_SWAP),			\
167 	S(DATA_ROLE_SWAP),			\
168 	S(VCONN_SWAP),				\
169 	S(SOURCE_ALERT),			\
170 	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
171 	S(GETTING_SOURCE_SINK_STATUS),		\
172 	S(GETTING_BATTERY_CAPABILITIES),	\
173 	S(GETTING_BATTERY_STATUS),		\
174 	S(GETTING_MANUFACTURER_INFORMATION),	\
175 	S(SECURITY),				\
176 	S(FIRMWARE_UPDATE),			\
177 	S(DISCOVER_IDENTITY),			\
178 	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
179 	S(DISCOVER_SVIDS),			\
180 	S(DISCOVER_MODES),			\
181 	S(DFP_TO_UFP_ENTER_MODE),		\
182 	S(DFP_TO_UFP_EXIT_MODE),		\
183 	S(DFP_TO_CABLE_PLUG_ENTER_MODE),	\
184 	S(DFP_TO_CABLE_PLUG_EXIT_MODE),		\
185 	S(ATTENTION),				\
186 	S(BIST),				\
187 	S(UNSTRUCTURED_VDMS),			\
188 	S(STRUCTURED_VDMS),			\
189 	S(COUNTRY_INFO),			\
190 	S(COUNTRY_CODES),			\
191 	S(REVISION_INFORMATION)
192 
193 #define GENERATE_ENUM(e)	e
194 #define GENERATE_STRING(s)	#s
195 
196 enum tcpm_state {
197 	FOREACH_STATE(GENERATE_ENUM)
198 };
199 
200 static const char * const tcpm_states[] = {
201 	FOREACH_STATE(GENERATE_STRING)
202 };
203 
204 enum tcpm_ams {
205 	FOREACH_AMS(GENERATE_ENUM)
206 };
207 
208 static const char * const tcpm_ams_str[] = {
209 	FOREACH_AMS(GENERATE_STRING)
210 };
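
/*
 * The X-macro lists above generate both the enums and their string tables
 * from a single definition. For example, FOREACH_AMS(GENERATE_ENUM) expands to
 *	NONE_AMS, POWER_NEGOTIATION, GOTOMIN, ...
 * while FOREACH_AMS(GENERATE_STRING) expands to
 *	"NONE_AMS", "POWER_NEGOTIATION", "GOTOMIN", ...
 * so every enum value doubles as an index into the matching string table
 * (e.g. tcpm_ams_str[port->ams]) and the two cannot drift out of sync.
 */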
211 
212 enum vdm_states {
213 	VDM_STATE_ERR_BUSY = -3,
214 	VDM_STATE_ERR_SEND = -2,
215 	VDM_STATE_ERR_TMOUT = -1,
216 	VDM_STATE_DONE = 0,
217 	/* Anything >0 represents an active state */
218 	VDM_STATE_READY = 1,
219 	VDM_STATE_BUSY = 2,
220 	VDM_STATE_WAIT_RSP_BUSY = 3,
221 	VDM_STATE_SEND_MESSAGE = 4,
222 };
223 
224 enum pd_msg_request {
225 	PD_MSG_NONE = 0,
226 	PD_MSG_CTRL_REJECT,
227 	PD_MSG_CTRL_WAIT,
228 	PD_MSG_CTRL_NOT_SUPP,
229 	PD_MSG_DATA_SINK_CAP,
230 	PD_MSG_DATA_SOURCE_CAP,
231 	PD_MSG_DATA_REV,
232 };
233 
234 enum adev_actions {
235 	ADEV_NONE = 0,
236 	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
237 	ADEV_QUEUE_VDM,
238 	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
239 	ADEV_ATTENTION,
240 };
241 
242 /*
243  * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
244  * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
245  * Version 1.2"
246  */
247 enum frs_typec_current {
248 	FRS_NOT_SUPPORTED,
249 	FRS_DEFAULT_POWER,
250 	FRS_5V_1P5A,
251 	FRS_5V_3A,
252 };
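
/*
 * These values track the Fast Role Swap required current field of the Fixed
 * Supply Sink PDO (masked out via PDO_FIXED_FRS_CURR_MASK when forging a
 * PD 2.0 PDO below); e.g. FRS_5V_3A indicates the new source must supply
 * 3 A at 5 V immediately after the swap.
 */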
253 
254 /* Events from low level driver */
255 
256 #define TCPM_CC_EVENT		BIT(0)
257 #define TCPM_VBUS_EVENT		BIT(1)
258 #define TCPM_RESET_EVENT	BIT(2)
259 #define TCPM_FRS_EVENT		BIT(3)
260 #define TCPM_SOURCING_VBUS	BIT(4)
261 #define TCPM_PORT_CLEAN		BIT(5)
262 #define TCPM_PORT_ERROR		BIT(6)
263 
264 #define LOG_BUFFER_ENTRIES	1024
265 #define LOG_BUFFER_ENTRY_SIZE	128
266 
267 /* Alternate mode support */
268 
269 #define SVID_DISCOVERY_MAX	16
270 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
271 
272 #define GET_SINK_CAP_RETRY_MS	100
273 #define SEND_DISCOVER_RETRY_MS	100
274 
275 struct pd_mode_data {
276 	int svid_index;		/* current SVID index		*/
277 	int nsvids;
278 	u16 svids[SVID_DISCOVERY_MAX];
279 	int altmodes;		/* number of alternate modes	*/
280 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
281 };
282 
283 /*
284  * @min_volt: Actual min voltage at the local port
285  * @req_min_volt: Requested min voltage to the port partner
286  * @max_volt: Actual max voltage at the local port
287  * @req_max_volt: Requested max voltage to the port partner
288  * @max_curr: Actual max current at the local port
289  * @req_max_curr: Requested max current of the port partner
290  * @req_out_volt: Requested output voltage to the port partner
291  * @req_op_curr: Requested operating current to the port partner
292  * @supported: Partner has at least one APDO, hence supports PPS
293  * @active: PPS mode is active
294  */
295 struct pd_pps_data {
296 	u32 min_volt;
297 	u32 req_min_volt;
298 	u32 max_volt;
299 	u32 req_max_volt;
300 	u32 max_curr;
301 	u32 req_max_curr;
302 	u32 req_out_volt;
303 	u32 req_op_curr;
304 	bool supported;
305 	bool active;
306 };
307 
308 struct pd_data {
309 	struct usb_power_delivery *pd;
310 	struct usb_power_delivery_capabilities *source_cap;
311 	struct usb_power_delivery_capabilities_desc source_desc;
312 	struct usb_power_delivery_capabilities *sink_cap;
313 	struct usb_power_delivery_capabilities_desc sink_desc;
314 	unsigned int operating_snk_mw;
315 };
316 
317 #define PD_CAP_REV10	0x1
318 #define PD_CAP_REV20	0x2
319 #define PD_CAP_REV30	0x3
320 
321 struct pd_revision_info {
322 	u8 rev_major;
323 	u8 rev_minor;
324 	u8 ver_major;
325 	u8 ver_minor;
326 };
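
/*
 * Example: a port reporting PD Revision 3.1, Version 1.8 would set
 * rev_major = 3, rev_minor = 1, ver_major = 1, ver_minor = 8. These fields
 * are packed into an RMDO by tcpm_pd_send_revision(), typically in response
 * to a Get_Revision request from the partner.
 */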
327 
328 /*
329  * @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
330  * @ps_src_off_time: Deadline (in ms) for tPSSourceOff timer
331  * @cc_debounce_time: Deadline (in ms) for tCCDebounce timer
 * @snk_bc12_cmpletion_time: Deadline (in ms) for BC1.2 charger detection to complete
332  */
333 struct pd_timings {
334 	u32 sink_wait_cap_time;
335 	u32 ps_src_off_time;
336 	u32 cc_debounce_time;
337 	u32 snk_bc12_cmpletion_time;
338 };
339 
340 struct tcpm_port {
341 	struct device *dev;
342 
343 	struct mutex lock;		/* tcpm state machine lock */
344 	struct kthread_worker *wq;
345 
346 	struct typec_capability typec_caps;
347 	struct typec_port *typec_port;
348 
349 	struct tcpc_dev	*tcpc;
350 	struct usb_role_switch *role_sw;
351 
352 	enum typec_role vconn_role;
353 	enum typec_role pwr_role;
354 	enum typec_data_role data_role;
355 	enum typec_pwr_opmode pwr_opmode;
356 
357 	struct usb_pd_identity partner_ident;
358 	struct typec_partner_desc partner_desc;
359 	struct typec_partner *partner;
360 
361 	struct usb_pd_identity cable_ident;
362 	struct typec_cable_desc cable_desc;
363 	struct typec_cable *cable;
364 	struct typec_plug_desc plug_prime_desc;
365 	struct typec_plug *plug_prime;
366 
367 	enum typec_cc_status cc_req;
368 	enum typec_cc_status src_rp;	/* works only if pd_supported == false */
369 
370 	enum typec_cc_status cc1;
371 	enum typec_cc_status cc2;
372 	enum typec_cc_polarity polarity;
373 
374 	bool attached;
375 	bool connected;
376 	bool registered;
377 	bool pd_supported;
378 	enum typec_port_type port_type;
379 
380 	/*
381 	 * Set to true when vbus is greater than VSAFE5V min.
382 	 * Set to false when vbus falls below vSinkDisconnect max threshold.
383 	 */
384 	bool vbus_present;
385 
386 	/*
387 	 * Set to true when vbus is less than VSAFE0V max.
388 	 * Set to false when vbus is greater than VSAFE0V max.
389 	 */
390 	bool vbus_vsafe0v;
391 
392 	bool vbus_never_low;
393 	bool vbus_source;
394 	bool vbus_charge;
395 
396 	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
397 	bool send_discover;
398 	bool op_vsafe5v;
399 
400 	int try_role;
401 	int try_snk_count;
402 	int try_src_count;
403 
404 	enum pd_msg_request queued_message;
405 
406 	enum tcpm_state enter_state;
407 	enum tcpm_state prev_state;
408 	enum tcpm_state state;
409 	enum tcpm_state delayed_state;
410 	ktime_t delayed_runtime;
411 	unsigned long delay_ms;
412 
413 	spinlock_t pd_event_lock;
414 	u32 pd_events;
415 
416 	struct kthread_work event_work;
417 	struct hrtimer state_machine_timer;
418 	struct kthread_work state_machine;
419 	struct hrtimer vdm_state_machine_timer;
420 	struct kthread_work vdm_state_machine;
421 	struct hrtimer enable_frs_timer;
422 	struct kthread_work enable_frs;
423 	struct hrtimer send_discover_timer;
424 	struct kthread_work send_discover_work;
425 	bool state_machine_running;
426 	/* Set to true when the VDM state machine has pending actions to run. */
427 	bool vdm_sm_running;
428 
429 	struct completion tx_complete;
430 	enum tcpm_transmit_status tx_status;
431 
432 	struct mutex swap_lock;		/* swap command lock */
433 	bool swap_pending;
434 	bool non_pd_role_swap;
435 	struct completion swap_complete;
436 	int swap_status;
437 
438 	unsigned int negotiated_rev;
439 	unsigned int message_id;
440 	unsigned int caps_count;
441 	unsigned int hard_reset_count;
442 	bool pd_capable;
443 	bool explicit_contract;
444 	unsigned int rx_msgid;
445 
446 	/* USB PD objects */
447 	struct usb_power_delivery **pds;
448 	struct pd_data **pd_list;
449 	struct usb_power_delivery_capabilities *port_source_caps;
450 	struct usb_power_delivery_capabilities *port_sink_caps;
451 	struct usb_power_delivery *partner_pd;
452 	struct usb_power_delivery_capabilities *partner_source_caps;
453 	struct usb_power_delivery_capabilities *partner_sink_caps;
454 	struct usb_power_delivery *selected_pd;
455 
456 	/* Partner capabilities/requests */
457 	u32 sink_request;
458 	u32 source_caps[PDO_MAX_OBJECTS];
459 	unsigned int nr_source_caps;
460 	u32 sink_caps[PDO_MAX_OBJECTS];
461 	unsigned int nr_sink_caps;
462 
463 	/* Local capabilities */
464 	unsigned int pd_count;
465 	u32 src_pdo[PDO_MAX_OBJECTS];
466 	unsigned int nr_src_pdo;
467 	u32 snk_pdo[PDO_MAX_OBJECTS];
468 	unsigned int nr_snk_pdo;
469 	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
470 	unsigned int nr_snk_vdo_v1;
471 	u32 snk_vdo[VDO_MAX_OBJECTS];
472 	unsigned int nr_snk_vdo;
473 
474 	unsigned int operating_snk_mw;
475 	bool update_sink_caps;
476 
477 	/* Requested current / voltage to the port partner */
478 	u32 req_current_limit;
479 	u32 req_supply_voltage;
480 	/* Actual current / voltage limit of the local port */
481 	u32 current_limit;
482 	u32 supply_voltage;
483 
484 	/* Used to export TA voltage and current */
485 	struct power_supply *psy;
486 	struct power_supply_desc psy_desc;
487 	enum power_supply_usb_type usb_type;
488 
489 	u32 bist_request;
490 
491 	/* PD state for Vendor Defined Messages */
492 	enum vdm_states vdm_state;
493 	u32 vdm_retries;
494 	/* next Vendor Defined Message to send */
495 	u32 vdo_data[VDO_MAX_SIZE];
496 	u8 vdo_count;
497 	/* VDO to retry if UFP responder replied busy */
498 	u32 vdo_retry;
499 
500 	/* PPS */
501 	struct pd_pps_data pps_data;
502 	struct completion pps_complete;
503 	bool pps_pending;
504 	int pps_status;
505 
506 	/* Alternate mode data */
507 	struct pd_mode_data mode_data;
508 	struct pd_mode_data mode_data_prime;
509 	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
510 	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
511 	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
512 
513 	/* Deadline in jiffies to exit src_try_wait state */
514 	unsigned long max_wait;
515 
516 	/* port belongs to a self powered device */
517 	bool self_powered;
518 
519 	/* Sink FRS */
520 	enum frs_typec_current new_source_frs_current;
521 
522 	/* Sink caps have been queried */
523 	bool sink_cap_done;
524 
525 	/* Collision Avoidance and Atomic Message Sequence */
526 	enum tcpm_state upcoming_state;
527 	enum tcpm_ams ams;
528 	enum tcpm_ams next_ams;
529 	bool in_ams;
530 
531 	/* Auto vbus discharge status */
532 	bool auto_vbus_discharge_enabled;
533 
534 	/*
535 	 * When set, the port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY, and
536 	 * the actual current limit after receiving PD_CTRL_PSRDY for a PD link or upon
537 	 * reaching SNK_READY for a non-PD link.
538 	 */
539 	bool slow_charger_loop;
540 
541 	/*
542 	 * When true, the lower level driver has indicated potential presence of
543 	 * contaminant in the connector pins based on the tcpm state machine
544 	 * transitions.
545 	 */
546 	bool potential_contaminant;
547 
548 	/* SOP* Related Fields */
549 	/*
550 	 * Flag to determine if SOP' Discover Identity is available. The flag
551 	 * is set if Discover Identity on SOP' does not immediately follow
552 	 * Discover Identity on SOP.
553 	 */
554 	bool send_discover_prime;
555 	/*
556 	 * tx_sop_type determines which SOP* a message is being sent on.
557 	 * For messages that are queued and not sent immediately such as in
558 	 * tcpm_queue_message, or messages that are sent after state changes,
559 	 * the tx_sop_type is set accordingly.
560 	 */
561 	enum tcpm_transmit_type tx_sop_type;
562 	/*
563 	 * Prior to discovering the port partner's Specification Revision, the
564 	 * Vconn source and cable plug will use the lower of their two revisions.
565 	 *
566 	 * When the port partner's Specification Revision is discovered, the following
567 	 * rules are put in place.
568 	 *	1. If the cable revision (1) is lower than the revision negotiated
569 	 * between the port and partner (2), the port and partner will communicate
570 	 * on revision (2), but the port and cable will communicate on revision (1).
571 	 *	2. If the cable revision (1) is higher than the revision negotiated
572 	 * between the port and partner (2), the port and partner will communicate
573 	 * on revision (2), and the port and cable will communicate on revision (2)
574 	 * as well.
575 	 */
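	/*
	 * Worked example: if the cable reports PD 2.0 while the port and
	 * partner negotiate PD 3.0, SOP traffic uses negotiated_rev (3.0)
	 * and SOP' traffic uses negotiated_rev_prime (2.0). If the cable
	 * reports 3.0 or higher, both SOP and SOP' run at the negotiated 3.0.
	 */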
576 	unsigned int negotiated_rev_prime;
577 	/*
578 	 * Each SOP* type must maintain its own tx and rx message IDs.
579 	 */
580 	unsigned int message_id_prime;
581 	unsigned int rx_msgid_prime;
582 
583 	/* Timer deadline values configured at runtime */
584 	struct pd_timings timings;
585 
586 	/* Indicates maximum (revision, version) supported */
587 	struct pd_revision_info pd_rev;
588 #ifdef CONFIG_DEBUG_FS
589 	struct dentry *dentry;
590 	struct mutex logbuffer_lock;	/* log buffer access lock */
591 	int logbuffer_head;
592 	int logbuffer_tail;
593 	u8 *logbuffer[LOG_BUFFER_ENTRIES];
594 #endif
595 };
596 
597 struct pd_rx_event {
598 	struct kthread_work work;
599 	struct tcpm_port *port;
600 	struct pd_message msg;
601 	enum tcpm_transmit_type rx_sop_type;
602 };
603 
604 struct altmode_vdm_event {
605 	struct kthread_work work;
606 	struct tcpm_port *port;
607 	u32 header;
608 	u32 *data;
609 	int cnt;
610 	enum tcpm_transmit_type tx_sop_type;
611 };
612 
613 static const char * const pd_rev[] = {
614 	[PD_REV10]		= "rev1",
615 	[PD_REV20]		= "rev2",
616 	[PD_REV30]		= "rev3",
617 };
618 
619 #define tcpm_cc_is_sink(cc) \
620 	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
621 	 (cc) == TYPEC_CC_RP_3_0)
622 
623 /* As long as cc is pulled up (Rp is seen), the port can be considered a sink. */
624 #define tcpm_port_is_sink(port) \
625 	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
626 
627 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
628 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
629 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
630 
631 #define tcpm_port_is_source(port) \
632 	((tcpm_cc_is_source((port)->cc1) && \
633 	 !tcpm_cc_is_source((port)->cc2)) || \
634 	 (tcpm_cc_is_source((port)->cc2) && \
635 	  !tcpm_cc_is_source((port)->cc1)))
636 
637 #define tcpm_port_is_debug(port) \
638 	((tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2)) || \
639 	 (tcpm_cc_is_sink((port)->cc1) && tcpm_cc_is_sink((port)->cc2)))
640 
641 #define tcpm_port_is_audio(port) \
642 	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
643 
644 #define tcpm_port_is_audio_detached(port) \
645 	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
646 	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
647 
648 #define tcpm_try_snk(port) \
649 	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
650 	(port)->port_type == TYPEC_PORT_DRP)
651 
652 #define tcpm_try_src(port) \
653 	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
654 	(port)->port_type == TYPEC_PORT_DRP)
655 
656 #define tcpm_data_role_for_source(port) \
657 	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
658 	TYPEC_DEVICE : TYPEC_HOST)
659 
660 #define tcpm_data_role_for_sink(port) \
661 	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
662 	TYPEC_HOST : TYPEC_DEVICE)
663 
664 #define tcpm_sink_tx_ok(port) \
665 	(tcpm_port_is_sink(port) && \
666 	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
667 
668 #define tcpm_wait_for_discharge(port) \
669 	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
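
/*
 * Example CC readings and how the helpers above classify them:
 *   cc1 = TYPEC_CC_RP_3_0, cc2 = TYPEC_CC_OPEN -> tcpm_port_is_sink() (partner presents Rp)
 *   cc1 = TYPEC_CC_RD,     cc2 = TYPEC_CC_OPEN -> tcpm_port_is_source() (partner presents Rd)
 *   cc1 = cc2 = TYPEC_CC_RD                    -> tcpm_port_is_debug() (debug accessory)
 *   cc1 = cc2 = TYPEC_CC_RA                    -> tcpm_port_is_audio() (audio adapter accessory)
 * An Rp advertising 3.0 A additionally satisfies tcpm_sink_tx_ok(), i.e. a
 * PD 3.0 source is signalling SinkTxOk for collision avoidance.
 */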
670 
671 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
672 {
673 	if (port->port_type == TYPEC_PORT_DRP) {
674 		if (port->try_role == TYPEC_SINK)
675 			return SNK_UNATTACHED;
676 		else if (port->try_role == TYPEC_SOURCE)
677 			return SRC_UNATTACHED;
678 		/* Fall through to return SRC_UNATTACHED */
679 	} else if (port->port_type == TYPEC_PORT_SNK) {
680 		return SNK_UNATTACHED;
681 	}
682 	return SRC_UNATTACHED;
683 }
684 
685 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
686 {
687 	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
688 		port->cc2 == TYPEC_CC_OPEN) ||
689 	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
690 				    port->cc1 == TYPEC_CC_OPEN) ||
691 				   (port->polarity == TYPEC_POLARITY_CC2 &&
692 				    port->cc2 == TYPEC_CC_OPEN)));
693 }
694 
695 /*
696  * Logging
697  */
698 
699 #ifdef CONFIG_DEBUG_FS
700 
701 static bool tcpm_log_full(struct tcpm_port *port)
702 {
703 	return port->logbuffer_tail ==
704 		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
705 }
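
/*
 * The log is a ring of LOG_BUFFER_ENTRIES lazily allocated entries:
 * logbuffer_head is the next slot to be written, logbuffer_tail the next
 * slot to be read by tcpm_debug_show(), and the ring counts as full when
 * advancing head would catch up with tail.
 */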
706 
707 __printf(2, 0)
708 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
709 {
710 	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
711 	u64 ts_nsec = local_clock();
712 	unsigned long rem_nsec;
713 
714 	mutex_lock(&port->logbuffer_lock);
715 	if (!port->logbuffer[port->logbuffer_head]) {
716 		port->logbuffer[port->logbuffer_head] =
717 				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
718 		if (!port->logbuffer[port->logbuffer_head]) {
719 			mutex_unlock(&port->logbuffer_lock);
720 			return;
721 		}
722 	}
723 
724 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
725 
726 	if (tcpm_log_full(port)) {
727 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
728 		strcpy(tmpbuffer, "overflow");
729 	}
730 
731 	if (port->logbuffer_head < 0 ||
732 	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
733 		dev_warn(port->dev,
734 			 "Bad log buffer index %d\n", port->logbuffer_head);
735 		goto abort;
736 	}
737 
738 	if (!port->logbuffer[port->logbuffer_head]) {
739 		dev_warn(port->dev,
740 			 "Log buffer index %d is NULL\n", port->logbuffer_head);
741 		goto abort;
742 	}
743 
744 	rem_nsec = do_div(ts_nsec, 1000000000);
745 	scnprintf(port->logbuffer[port->logbuffer_head],
746 		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
747 		  (unsigned long)ts_nsec, rem_nsec / 1000,
748 		  tmpbuffer);
749 	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
750 
751 abort:
752 	mutex_unlock(&port->logbuffer_lock);
753 }
754 
755 __printf(2, 3)
756 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
757 {
758 	va_list args;
759 
760 	/* Do not log while disconnected and unattached */
761 	if (tcpm_port_is_disconnected(port) &&
762 	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
763 	     port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
764 		return;
765 
766 	va_start(args, fmt);
767 	_tcpm_log(port, fmt, args);
768 	va_end(args);
769 }
770 
771 __printf(2, 3)
772 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
773 {
774 	va_list args;
775 
776 	va_start(args, fmt);
777 	_tcpm_log(port, fmt, args);
778 	va_end(args);
779 }
780 
781 static void tcpm_log_source_caps(struct tcpm_port *port)
782 {
783 	int i;
784 
785 	for (i = 0; i < port->nr_source_caps; i++) {
786 		u32 pdo = port->source_caps[i];
787 		enum pd_pdo_type type = pdo_type(pdo);
788 		char msg[64];
789 
790 		switch (type) {
791 		case PDO_TYPE_FIXED:
792 			scnprintf(msg, sizeof(msg),
793 				  "%u mV, %u mA [%s%s%s%s%s%s]",
794 				  pdo_fixed_voltage(pdo),
795 				  pdo_max_current(pdo),
796 				  (pdo & PDO_FIXED_DUAL_ROLE) ?
797 							"R" : "",
798 				  (pdo & PDO_FIXED_SUSPEND) ?
799 							"S" : "",
800 				  (pdo & PDO_FIXED_HIGHER_CAP) ?
801 							"H" : "",
802 				  (pdo & PDO_FIXED_USB_COMM) ?
803 							"U" : "",
804 				  (pdo & PDO_FIXED_DATA_SWAP) ?
805 							"D" : "",
806 				  (pdo & PDO_FIXED_EXTPOWER) ?
807 							"E" : "");
808 			break;
809 		case PDO_TYPE_VAR:
810 			scnprintf(msg, sizeof(msg),
811 				  "%u-%u mV, %u mA",
812 				  pdo_min_voltage(pdo),
813 				  pdo_max_voltage(pdo),
814 				  pdo_max_current(pdo));
815 			break;
816 		case PDO_TYPE_BATT:
817 			scnprintf(msg, sizeof(msg),
818 				  "%u-%u mV, %u mW",
819 				  pdo_min_voltage(pdo),
820 				  pdo_max_voltage(pdo),
821 				  pdo_max_power(pdo));
822 			break;
823 		case PDO_TYPE_APDO:
824 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
825 				scnprintf(msg, sizeof(msg),
826 					  "PPS %u-%u mV, %u mA",
827 					  pdo_pps_apdo_min_voltage(pdo),
828 					  pdo_pps_apdo_max_voltage(pdo),
829 					  pdo_pps_apdo_max_current(pdo));
830 			else if (pdo_apdo_type(pdo) == APDO_TYPE_EPR_AVS)
831 				scnprintf(msg, sizeof(msg),
832 					  "EPR AVS %u-%u mV %u W peak_current: %u",
833 					  pdo_epr_avs_apdo_min_voltage_mv(pdo),
834 					  pdo_epr_avs_apdo_max_voltage_mv(pdo),
835 					  pdo_epr_avs_apdo_pdp_w(pdo),
836 					  pdo_epr_avs_apdo_src_peak_current(pdo));
837 			else if (pdo_apdo_type(pdo) == APDO_TYPE_SPR_AVS)
838 				scnprintf(msg, sizeof(msg),
839 					  "SPR AVS 9-15 V: %u mA 15-20 V: %u mA peak_current: %u",
840 					  pdo_spr_avs_apdo_9v_to_15v_max_current_ma(pdo),
841 					  pdo_spr_avs_apdo_15v_to_20v_max_current_ma(pdo),
842 					  pdo_spr_avs_apdo_src_peak_current(pdo));
843 			else
844 				strcpy(msg, "undefined APDO");
845 			break;
846 		default:
847 			strcpy(msg, "undefined");
848 			break;
849 		}
850 		tcpm_log(port, " PDO %d: type %d, %s",
851 			 i, type, msg);
852 	}
853 }
854 
855 static int tcpm_debug_show(struct seq_file *s, void *v)
856 {
857 	struct tcpm_port *port = s->private;
858 	int tail;
859 
860 	mutex_lock(&port->logbuffer_lock);
861 	tail = port->logbuffer_tail;
862 	while (tail != port->logbuffer_head) {
863 		seq_printf(s, "%s\n", port->logbuffer[tail]);
864 		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
865 	}
866 	if (!seq_has_overflowed(s))
867 		port->logbuffer_tail = tail;
868 	mutex_unlock(&port->logbuffer_lock);
869 
870 	return 0;
871 }
872 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
873 
874 static void tcpm_debugfs_init(struct tcpm_port *port)
875 {
876 	char name[NAME_MAX];
877 
878 	mutex_init(&port->logbuffer_lock);
879 	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
880 	port->dentry = debugfs_create_dir(name, usb_debug_root);
881 	debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
882 			    &tcpm_debug_fops);
883 }
884 
885 static void tcpm_debugfs_exit(struct tcpm_port *port)
886 {
887 	int i;
888 
889 	mutex_lock(&port->logbuffer_lock);
890 	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
891 		kfree(port->logbuffer[i]);
892 		port->logbuffer[i] = NULL;
893 	}
894 	mutex_unlock(&port->logbuffer_lock);
895 
896 	debugfs_remove(port->dentry);
897 }
898 
899 #else
900 
901 __printf(2, 3)
902 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
903 __printf(2, 3)
904 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
905 static void tcpm_log_source_caps(struct tcpm_port *port) { }
906 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
907 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
908 
909 #endif
910 
911 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
912 {
913 	tcpm_log(port, "cc:=%d", cc);
914 	port->cc_req = cc;
915 	port->tcpc->set_cc(port->tcpc, cc);
916 }
917 
918 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
919 {
920 	int ret = 0;
921 
922 	if (port->tcpc->enable_auto_vbus_discharge) {
923 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
924 		tcpm_log_force(port, "%s vbus discharge ret:%d",
925 			       str_enable_disable(enable), ret);
926 		if (!ret)
927 			port->auto_vbus_discharge_enabled = enable;
928 	}
929 
930 	return ret;
931 }
932 
933 static void tcpm_apply_rc(struct tcpm_port *port)
934 {
935 	/*
936 	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
937 	 * when Vbus auto discharge on disconnect is enabled.
938 	 */
939 	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
940 		tcpm_log(port, "Apply_RC");
941 		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
942 		tcpm_enable_auto_vbus_discharge(port, false);
943 	}
944 }
945 
946 /*
947  * Determine RP value to set based on maximum current supported
948  * by a port if configured as source.
949  * Returns CC value to report to link partner.
950  */
951 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
952 {
953 	const u32 *src_pdo = port->src_pdo;
954 	int nr_pdo = port->nr_src_pdo;
955 	int i;
956 
957 	if (!port->pd_supported)
958 		return port->src_rp;
959 
960 	/*
961 	 * Search for first entry with matching voltage.
962 	 * It should report the maximum supported current.
963 	 */
964 	for (i = 0; i < nr_pdo; i++) {
965 		const u32 pdo = src_pdo[i];
966 
967 		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
968 		    pdo_fixed_voltage(pdo) == 5000) {
969 			unsigned int curr = pdo_max_current(pdo);
970 
971 			if (curr >= 3000)
972 				return TYPEC_CC_RP_3_0;
973 			else if (curr >= 1500)
974 				return TYPEC_CC_RP_1_5;
975 			return TYPEC_CC_RP_DEF;
976 		}
977 	}
978 
979 	return TYPEC_CC_RP_DEF;
980 }
981 
982 static void tcpm_ams_finish(struct tcpm_port *port)
983 {
984 	tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
985 
986 	if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
987 		if (port->negotiated_rev >= PD_REV30)
988 			tcpm_set_cc(port, SINK_TX_OK);
989 		else
990 			tcpm_set_cc(port, SINK_TX_NG);
991 	} else if (port->pwr_role == TYPEC_SOURCE) {
992 		tcpm_set_cc(port, tcpm_rp_cc(port));
993 	}
994 
995 	port->in_ams = false;
996 	port->ams = NONE_AMS;
997 }
998 
999 static int tcpm_pd_transmit(struct tcpm_port *port,
1000 			    enum tcpm_transmit_type tx_sop_type,
1001 			    const struct pd_message *msg)
1002 {
1003 	unsigned long time_left;
1004 	int ret;
1005 	unsigned int negotiated_rev;
1006 
1007 	switch (tx_sop_type) {
1008 	case TCPC_TX_SOP_PRIME:
1009 		negotiated_rev = port->negotiated_rev_prime;
1010 		break;
1011 	case TCPC_TX_SOP:
1012 	default:
1013 		negotiated_rev = port->negotiated_rev;
1014 		break;
1015 	}
1016 
1017 	if (msg)
1018 		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
1019 	else
1020 		tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
1021 
1022 	reinit_completion(&port->tx_complete);
1023 	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
1024 	if (ret < 0)
1025 		return ret;
1026 
1027 	mutex_unlock(&port->lock);
1028 	time_left = wait_for_completion_timeout(&port->tx_complete,
1029 						msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
1030 	mutex_lock(&port->lock);
1031 	if (!time_left)
1032 		return -ETIMEDOUT;
1033 
1034 	switch (port->tx_status) {
1035 	case TCPC_TX_SUCCESS:
1036 		switch (tx_sop_type) {
1037 		case TCPC_TX_SOP_PRIME:
1038 			port->message_id_prime = (port->message_id_prime + 1) &
1039 						 PD_HEADER_ID_MASK;
1040 			break;
1041 		case TCPC_TX_SOP:
1042 		default:
1043 			port->message_id = (port->message_id + 1) &
1044 					   PD_HEADER_ID_MASK;
1045 			break;
1046 		}
1047 		/*
1048 		 * USB PD rev 2.0, 8.3.2.2.1:
1049 		 * USB PD rev 3.0, 8.3.2.1.3:
1050 		 * "... Note that every AMS is Interruptible until the first
1051 		 * Message in the sequence has been successfully sent (GoodCRC
1052 		 * Message received)."
1053 		 */
1054 		if (port->ams != NONE_AMS)
1055 			port->in_ams = true;
1056 		break;
1057 	case TCPC_TX_DISCARDED:
1058 		ret = -EAGAIN;
1059 		break;
1060 	case TCPC_TX_FAILED:
1061 	default:
1062 		ret = -EIO;
1063 		break;
1064 	}
1065 
1066 	/* Some AMS don't expect responses. Finish them here. */
1067 	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
1068 		tcpm_ams_finish(port);
1069 
1070 	return ret;
1071 }
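
/*
 * Note on tcpm_pd_transmit() return values: -ETIMEDOUT if the TCPC never
 * signalled completion, -EAGAIN if the TCPC discarded the transmission
 * (typically because an incoming message took precedence), and -EIO for
 * any other transmission failure.
 */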
1072 
1073 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1074 			       enum tcpm_transmit_status status)
1075 {
1076 	tcpm_log(port, "PD TX complete, status: %u", status);
1077 	port->tx_status = status;
1078 	complete(&port->tx_complete);
1079 }
1080 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1081 
1082 static int tcpm_mux_set(struct tcpm_port *port, int state,
1083 			enum usb_role usb_role,
1084 			enum typec_orientation orientation)
1085 {
1086 	int ret;
1087 
1088 	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1089 		 state, usb_role, orientation);
1090 
1091 	ret = typec_set_orientation(port->typec_port, orientation);
1092 	if (ret)
1093 		return ret;
1094 
1095 	if (port->role_sw) {
1096 		ret = usb_role_switch_set_role(port->role_sw, usb_role);
1097 		if (ret)
1098 			return ret;
1099 	}
1100 
1101 	return typec_set_mode(port->typec_port, state);
1102 }
1103 
1104 static int tcpm_set_polarity(struct tcpm_port *port,
1105 			     enum typec_cc_polarity polarity)
1106 {
1107 	int ret;
1108 
1109 	tcpm_log(port, "polarity %d", polarity);
1110 
1111 	ret = port->tcpc->set_polarity(port->tcpc, polarity);
1112 	if (ret < 0)
1113 		return ret;
1114 
1115 	port->polarity = polarity;
1116 
1117 	return 0;
1118 }
1119 
1120 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
1121 {
1122 	int ret;
1123 
1124 	tcpm_log(port, "vconn:=%d", enable);
1125 
1126 	ret = port->tcpc->set_vconn(port->tcpc, enable);
1127 	if (!ret) {
1128 		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1129 		typec_set_vconn_role(port->typec_port, port->vconn_role);
1130 	}
1131 
1132 	return ret;
1133 }
1134 
1135 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1136 {
1137 	enum typec_cc_status cc;
1138 	u32 limit;
1139 
1140 	cc = port->polarity ? port->cc2 : port->cc1;
1141 	switch (cc) {
1142 	case TYPEC_CC_RP_1_5:
1143 		limit = 1500;
1144 		break;
1145 	case TYPEC_CC_RP_3_0:
1146 		limit = 3000;
1147 		break;
1148 	case TYPEC_CC_RP_DEF:
1149 	default:
1150 		if (port->tcpc->get_current_limit)
1151 			limit = port->tcpc->get_current_limit(port->tcpc);
1152 		else
1153 			limit = 0;
1154 		break;
1155 	}
1156 
1157 	return limit;
1158 }
1159 
1160 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1161 {
1162 	int ret = -EOPNOTSUPP;
1163 
1164 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1165 
1166 	port->supply_voltage = mv;
1167 	port->current_limit = max_ma;
1168 	power_supply_changed(port->psy);
1169 
1170 	if (port->tcpc->set_current_limit)
1171 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1172 
1173 	return ret;
1174 }
1175 
1176 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1177 {
1178 	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1179 				     port->data_role);
1180 }
1181 
1182 static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
1183 			  enum typec_role role, enum typec_data_role data)
1184 {
1185 	enum typec_orientation orientation;
1186 	enum usb_role usb_role;
1187 	int ret;
1188 
1189 	if (port->polarity == TYPEC_POLARITY_CC1)
1190 		orientation = TYPEC_ORIENTATION_NORMAL;
1191 	else
1192 		orientation = TYPEC_ORIENTATION_REVERSE;
1193 
1194 	if (port->typec_caps.data == TYPEC_PORT_DRD) {
1195 		if (data == TYPEC_HOST)
1196 			usb_role = USB_ROLE_HOST;
1197 		else
1198 			usb_role = USB_ROLE_DEVICE;
1199 	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1200 		if (data == TYPEC_HOST) {
1201 			if (role == TYPEC_SOURCE)
1202 				usb_role = USB_ROLE_HOST;
1203 			else
1204 				usb_role = USB_ROLE_NONE;
1205 		} else {
1206 			return -ENOTSUPP;
1207 		}
1208 	} else {
1209 		if (data == TYPEC_DEVICE) {
1210 			if (role == TYPEC_SINK)
1211 				usb_role = USB_ROLE_DEVICE;
1212 			else
1213 				usb_role = USB_ROLE_NONE;
1214 		} else {
1215 			return -ENOTSUPP;
1216 		}
1217 	}
1218 
1219 	ret = tcpm_mux_set(port, state, usb_role, orientation);
1220 	if (ret < 0)
1221 		return ret;
1222 
1223 	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1224 	if (ret < 0)
1225 		return ret;
1226 
1227 	if (port->tcpc->set_orientation) {
1228 		ret = port->tcpc->set_orientation(port->tcpc, orientation);
1229 		if (ret < 0)
1230 			return ret;
1231 	}
1232 
1233 	port->pwr_role = role;
1234 	port->data_role = data;
1235 	typec_set_data_role(port->typec_port, data);
1236 	typec_set_pwr_role(port->typec_port, role);
1237 
1238 	return 0;
1239 }
1240 
1241 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1242 {
1243 	int ret;
1244 
1245 	ret = port->tcpc->set_roles(port->tcpc, true, role,
1246 				    port->data_role);
1247 	if (ret < 0)
1248 		return ret;
1249 
1250 	port->pwr_role = role;
1251 	typec_set_pwr_role(port->typec_port, role);
1252 
1253 	return 0;
1254 }
1255 
1256 /*
1257  * Transform the PDO to be compliant to PD rev2.0.
1258  * Return 0 if the PDO type is not defined in PD rev2.0.
1259  * Otherwise, return the converted PDO.
1260  */
1261 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1262 {
1263 	switch (pdo_type(pdo)) {
1264 	case PDO_TYPE_FIXED:
1265 		if (role == TYPEC_SINK)
1266 			return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1267 		else
1268 			return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1269 	case PDO_TYPE_VAR:
1270 	case PDO_TYPE_BATT:
1271 		return pdo;
1272 	case PDO_TYPE_APDO:
1273 	default:
1274 		return 0;
1275 	}
1276 }
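
/*
 * For example, with a PD 2.0 partner tcpm_pd_send_source_caps() and
 * tcpm_pd_send_sink_caps() below run each local PDO through
 * tcpm_forge_legacy_pdo(): APDOs are dropped entirely and the PD 3.0-only
 * bits (unchunked extended messages for sources, FRS current for sinks)
 * are cleared so the advertised capabilities remain rev 2.0 compliant.
 */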
1277 
1278 static int tcpm_pd_send_revision(struct tcpm_port *port)
1279 {
1280 	struct pd_message msg;
1281 	u32 rmdo;
1282 
1283 	memset(&msg, 0, sizeof(msg));
1284 	rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
1285 		    port->pd_rev.ver_major, port->pd_rev.ver_minor);
1286 	msg.payload[0] = cpu_to_le32(rmdo);
1287 	msg.header = PD_HEADER_LE(PD_DATA_REVISION,
1288 				  port->pwr_role,
1289 				  port->data_role,
1290 				  port->negotiated_rev,
1291 				  port->message_id,
1292 				  1);
1293 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1294 }
1295 
1296 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1297 {
1298 	struct pd_message msg;
1299 	u32 pdo;
1300 	unsigned int i, nr_pdo = 0;
1301 
1302 	memset(&msg, 0, sizeof(msg));
1303 
1304 	for (i = 0; i < port->nr_src_pdo; i++) {
1305 		if (port->negotiated_rev >= PD_REV30) {
1306 			msg.payload[nr_pdo++] =	cpu_to_le32(port->src_pdo[i]);
1307 		} else {
1308 			pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1309 			if (pdo)
1310 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1311 		}
1312 	}
1313 
1314 	if (!nr_pdo) {
1315 		/* No source capabilities defined, sink only */
1316 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1317 					  port->pwr_role,
1318 					  port->data_role,
1319 					  port->negotiated_rev,
1320 					  port->message_id, 0);
1321 	} else {
1322 		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1323 					  port->pwr_role,
1324 					  port->data_role,
1325 					  port->negotiated_rev,
1326 					  port->message_id,
1327 					  nr_pdo);
1328 	}
1329 
1330 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1331 }
1332 
1333 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1334 {
1335 	struct pd_message msg;
1336 	u32 pdo;
1337 	unsigned int i, nr_pdo = 0;
1338 
1339 	memset(&msg, 0, sizeof(msg));
1340 
1341 	for (i = 0; i < port->nr_snk_pdo; i++) {
1342 		if (port->negotiated_rev >= PD_REV30) {
1343 			msg.payload[nr_pdo++] =	cpu_to_le32(port->snk_pdo[i]);
1344 		} else {
1345 			pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1346 			if (pdo)
1347 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1348 		}
1349 	}
1350 
1351 	if (!nr_pdo) {
1352 		/* No sink capabilities defined, source only */
1353 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1354 					  port->pwr_role,
1355 					  port->data_role,
1356 					  port->negotiated_rev,
1357 					  port->message_id, 0);
1358 	} else {
1359 		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1360 					  port->pwr_role,
1361 					  port->data_role,
1362 					  port->negotiated_rev,
1363 					  port->message_id,
1364 					  nr_pdo);
1365 	}
1366 
1367 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1368 }
1369 
1370 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1371 {
1372 	if (delay_ms) {
1373 		hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1374 	} else {
1375 		hrtimer_cancel(&port->state_machine_timer);
1376 		kthread_queue_work(port->wq, &port->state_machine);
1377 	}
1378 }
1379 
1380 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1381 {
1382 	if (delay_ms) {
1383 		hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1384 			      HRTIMER_MODE_REL);
1385 	} else {
1386 		hrtimer_cancel(&port->vdm_state_machine_timer);
1387 		kthread_queue_work(port->wq, &port->vdm_state_machine);
1388 	}
1389 }
1390 
1391 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1392 {
1393 	if (delay_ms) {
1394 		hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1395 	} else {
1396 		hrtimer_cancel(&port->enable_frs_timer);
1397 		kthread_queue_work(port->wq, &port->enable_frs);
1398 	}
1399 }
1400 
1401 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1402 {
1403 	if (delay_ms) {
1404 		hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1405 	} else {
1406 		hrtimer_cancel(&port->send_discover_timer);
1407 		kthread_queue_work(port->wq, &port->send_discover_work);
1408 	}
1409 }
1410 
1411 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1412 			   unsigned int delay_ms)
1413 {
1414 	if (delay_ms) {
1415 		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1416 			 tcpm_states[port->state], tcpm_states[state], delay_ms,
1417 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1418 		port->delayed_state = state;
1419 		mod_tcpm_delayed_work(port, delay_ms);
1420 		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1421 		port->delay_ms = delay_ms;
1422 	} else {
1423 		tcpm_log(port, "state change %s -> %s [%s %s]",
1424 			 tcpm_states[port->state], tcpm_states[state],
1425 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1426 		port->delayed_state = INVALID_STATE;
1427 		port->prev_state = port->state;
1428 		port->state = state;
1429 		/*
1430 		 * Don't re-queue the state machine work item if we're currently
1431 		 * in the state machine and we're immediately changing states.
1432 		 * tcpm_state_machine_work() will continue running the state
1433 		 * machine.
1434 		 */
1435 		if (!port->state_machine_running)
1436 			mod_tcpm_delayed_work(port, 0);
1437 	}
1438 }
1439 
1440 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1441 				unsigned int delay_ms)
1442 {
1443 	if (port->enter_state == port->state)
1444 		tcpm_set_state(port, state, delay_ms);
1445 	else
1446 		tcpm_log(port,
1447 			 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1448 			 delay_ms ? "delayed " : "",
1449 			 tcpm_states[port->state], tcpm_states[state],
1450 			 delay_ms, tcpm_states[port->enter_state],
1451 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1452 }
1453 
1454 static void tcpm_queue_message(struct tcpm_port *port,
1455 			       enum pd_msg_request message)
1456 {
1457 	port->queued_message = message;
1458 	mod_tcpm_delayed_work(port, 0);
1459 }
1460 
1461 static bool tcpm_vdm_ams(struct tcpm_port *port)
1462 {
1463 	switch (port->ams) {
1464 	case DISCOVER_IDENTITY:
1465 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1466 	case DISCOVER_SVIDS:
1467 	case DISCOVER_MODES:
1468 	case DFP_TO_UFP_ENTER_MODE:
1469 	case DFP_TO_UFP_EXIT_MODE:
1470 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1471 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1472 	case ATTENTION:
1473 	case UNSTRUCTURED_VDMS:
1474 	case STRUCTURED_VDMS:
1475 		break;
1476 	default:
1477 		return false;
1478 	}
1479 
1480 	return true;
1481 }
1482 
1483 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1484 {
1485 	switch (port->ams) {
1486 	/* Interruptible AMS */
1487 	case NONE_AMS:
1488 	case SECURITY:
1489 	case FIRMWARE_UPDATE:
1490 	case DISCOVER_IDENTITY:
1491 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1492 	case DISCOVER_SVIDS:
1493 	case DISCOVER_MODES:
1494 	case DFP_TO_UFP_ENTER_MODE:
1495 	case DFP_TO_UFP_EXIT_MODE:
1496 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1497 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1498 	case UNSTRUCTURED_VDMS:
1499 	case STRUCTURED_VDMS:
1500 	case COUNTRY_INFO:
1501 	case COUNTRY_CODES:
1502 		break;
1503 	/* Non-Interruptible AMS */
1504 	default:
1505 		if (port->in_ams)
1506 			return false;
1507 		break;
1508 	}
1509 
1510 	return true;
1511 }
1512 
1513 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1514 {
1515 	int ret = 0;
1516 
1517 	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1518 
1519 	if (!tcpm_ams_interruptible(port) &&
1520 	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1521 		port->upcoming_state = INVALID_STATE;
1522 		tcpm_log(port, "AMS %s not interruptible, aborting",
1523 			 tcpm_ams_str[port->ams]);
1524 		return -EAGAIN;
1525 	}
1526 
1527 	if (port->pwr_role == TYPEC_SOURCE) {
1528 		enum typec_cc_status cc_req = port->cc_req;
1529 
1530 		port->ams = ams;
1531 
1532 		if (ams == HARD_RESET) {
1533 			tcpm_set_cc(port, tcpm_rp_cc(port));
1534 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1535 			tcpm_set_state(port, HARD_RESET_START, 0);
1536 			return ret;
1537 		} else if (ams == SOFT_RESET_AMS) {
1538 			if (!port->explicit_contract)
1539 				tcpm_set_cc(port, tcpm_rp_cc(port));
1540 			tcpm_set_state(port, SOFT_RESET_SEND, 0);
1541 			return ret;
1542 		} else if (tcpm_vdm_ams(port)) {
1543 			/* tSinkTx is enforced in vdm_run_state_machine */
1544 			if (port->negotiated_rev >= PD_REV30)
1545 				tcpm_set_cc(port, SINK_TX_NG);
1546 			return ret;
1547 		}
1548 
1549 		if (port->negotiated_rev >= PD_REV30)
1550 			tcpm_set_cc(port, SINK_TX_NG);
1551 
1552 		switch (port->state) {
1553 		case SRC_READY:
1554 		case SRC_STARTUP:
1555 		case SRC_SOFT_RESET_WAIT_SNK_TX:
1556 		case SOFT_RESET:
1557 		case SOFT_RESET_SEND:
1558 			if (port->negotiated_rev >= PD_REV30)
1559 				tcpm_set_state(port, AMS_START,
1560 					       cc_req == SINK_TX_OK ?
1561 					       PD_T_SINK_TX : 0);
1562 			else
1563 				tcpm_set_state(port, AMS_START, 0);
1564 			break;
1565 		default:
1566 			if (port->negotiated_rev >= PD_REV30)
1567 				tcpm_set_state(port, SRC_READY,
1568 					       cc_req == SINK_TX_OK ?
1569 					       PD_T_SINK_TX : 0);
1570 			else
1571 				tcpm_set_state(port, SRC_READY, 0);
1572 			break;
1573 		}
1574 	} else {
1575 		if (port->negotiated_rev >= PD_REV30 &&
1576 		    !tcpm_sink_tx_ok(port) &&
1577 		    ams != SOFT_RESET_AMS &&
1578 		    ams != HARD_RESET) {
1579 			port->upcoming_state = INVALID_STATE;
1580 			tcpm_log(port, "Sink TX No Go");
1581 			return -EAGAIN;
1582 		}
1583 
1584 		port->ams = ams;
1585 
1586 		if (ams == HARD_RESET) {
1587 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1588 			tcpm_set_state(port, HARD_RESET_START, 0);
1589 			return ret;
1590 		} else if (tcpm_vdm_ams(port)) {
1591 			return ret;
1592 		}
1593 
1594 		if (port->state == SNK_READY ||
1595 		    port->state == SNK_SOFT_RESET)
1596 			tcpm_set_state(port, AMS_START, 0);
1597 		else
1598 			tcpm_set_state(port, SNK_READY, 0);
1599 	}
1600 
1601 	return ret;
1602 }
1603 
1604 /*
1605  * VDM/VDO handling functions
1606  */
1607 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1608 			   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1609 {
1610 	u32 vdo_hdr = port->vdo_data[0];
1611 
1612 	WARN_ON(!mutex_is_locked(&port->lock));
1613 
1614 	/* If a discover_identity is still being sent, handle the received message first */
1615 	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1616 		if (tx_sop_type == TCPC_TX_SOP_PRIME)
1617 			port->send_discover_prime = true;
1618 		else
1619 			port->send_discover = true;
1620 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1621 	} else {
1622 		/* Make sure we are not still processing a previous VDM packet */
1623 		WARN_ON(port->vdm_state > VDM_STATE_DONE);
1624 	}
1625 
1626 	port->vdo_count = cnt + 1;
1627 	port->vdo_data[0] = header;
1628 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1629 	/* Set ready, vdm state machine will actually send */
1630 	port->vdm_retries = 0;
1631 	port->vdm_state = VDM_STATE_READY;
1632 	port->vdm_sm_running = true;
1633 
1634 	port->tx_sop_type = tx_sop_type;
1635 
1636 	mod_vdm_delayed_work(port, 0);
1637 }
1638 
1639 static void tcpm_queue_vdm_work(struct kthread_work *work)
1640 {
1641 	struct altmode_vdm_event *event = container_of(work,
1642 						       struct altmode_vdm_event,
1643 						       work);
1644 	struct tcpm_port *port = event->port;
1645 
1646 	mutex_lock(&port->lock);
1647 	if (port->state != SRC_READY && port->state != SNK_READY &&
1648 	    port->state != SRC_VDM_IDENTITY_REQUEST) {
1649 		tcpm_log_force(port, "dropping altmode_vdm_event");
1650 		goto port_unlock;
1651 	}
1652 
1653 	tcpm_queue_vdm(port, event->header, event->data, event->cnt, event->tx_sop_type);
1654 
1655 port_unlock:
1656 	kfree(event->data);
1657 	kfree(event);
1658 	mutex_unlock(&port->lock);
1659 }
1660 
1661 static int tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1662 				   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1663 {
1664 	struct altmode_vdm_event *event;
1665 	u32 *data_cpy;
1666 	int ret = -ENOMEM;
1667 
1668 	event = kzalloc(sizeof(*event), GFP_KERNEL);
1669 	if (!event)
1670 		goto err_event;
1671 
1672 	data_cpy = kcalloc(cnt, sizeof(u32), GFP_KERNEL);
1673 	if (!data_cpy)
1674 		goto err_data;
1675 
1676 	kthread_init_work(&event->work, tcpm_queue_vdm_work);
1677 	event->port = port;
1678 	event->header = header;
1679 	memcpy(data_cpy, data, sizeof(u32) * cnt);
1680 	event->data = data_cpy;
1681 	event->cnt = cnt;
1682 	event->tx_sop_type = tx_sop_type;
1683 
1684 	ret = kthread_queue_work(port->wq, &event->work);
1685 	if (!ret) {
1686 		ret = -EBUSY;
1687 		goto err_queue;
1688 	}
1689 
1690 	return 0;
1691 
1692 err_queue:
1693 	kfree(data_cpy);
1694 err_data:
1695 	kfree(event);
1696 err_event:
1697 	tcpm_log_force(port, "failed to queue altmode vdm, err:%d", ret);
1698 	return ret;
1699 }
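
/*
 * tcpm_queue_vdm() must be called with port->lock held; callers that do not
 * hold the lock (e.g. altmode drivers) go through tcpm_queue_vdm_unlocked(),
 * which defers to tcpm_queue_vdm_work() on the port's kthread worker so the
 * VDM is queued under the lock and only in a Ready state.
 */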
1700 
1701 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1702 {
1703 	u32 vdo = p[VDO_INDEX_IDH];
1704 	u32 product = p[VDO_INDEX_PRODUCT];
1705 
1706 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1707 
1708 	port->partner_ident.id_header = vdo;
1709 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1710 	port->partner_ident.product = product;
1711 
1712 	if (port->partner)
1713 		typec_partner_set_identity(port->partner);
1714 
1715 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1716 		 PD_IDH_VID(vdo),
1717 		 PD_PRODUCT_PID(product), product & 0xffff);
1718 }
1719 
1720 static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
1721 {
1722 	u32 idh = p[VDO_INDEX_IDH];
1723 	u32 product = p[VDO_INDEX_PRODUCT];
1724 	int svdm_version;
1725 
1726 	/*
1727 	 * Attempt to consume the identity only if the cable is not already registered.
1728 	 */
1729 	if (!IS_ERR_OR_NULL(port->cable))
1730 		goto register_plug;
1731 
1732 	/* Reset cable identity */
1733 	memset(&port->cable_ident, 0, sizeof(port->cable_ident));
1734 
1735 	/* Fill out id header, cert, product, cable VDO 1 */
1736 	port->cable_ident.id_header = idh;
1737 	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
1738 	port->cable_ident.product = product;
1739 	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
1740 
1741 	/* Fill out cable desc, infer svdm_version from pd revision */
1742 	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
1743 							USB_PLUG_TYPE_A);
1744 	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
1745 	/* Set the cable PD revision and, for PD 3.0 active cables, the additional cable VDO */
1746 	switch (port->negotiated_rev_prime) {
1747 	case PD_REV30:
1748 		port->cable_desc.pd_revision = 0x0300;
1749 		if (port->cable_desc.active)
1750 			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
1751 		break;
1752 	case PD_REV20:
1753 		port->cable_desc.pd_revision = 0x0200;
1754 		break;
1755 	default:
1756 		port->cable_desc.pd_revision = 0x0200;
1757 		break;
1758 	}
1759 	port->cable_desc.identity = &port->cable_ident;
1760 	/* Register Cable, set identity and svdm_version */
1761 	port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
1762 	if (IS_ERR_OR_NULL(port->cable))
1763 		return;
1764 	typec_cable_set_identity(port->cable);
1765 	/* Get SVDM version */
1766 	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
1767 	typec_cable_set_svdm_version(port->cable, svdm_version);
1768 
1769 register_plug:
1770 	if (IS_ERR_OR_NULL(port->plug_prime)) {
1771 		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
1772 		port->plug_prime = typec_register_plug(port->cable,
1773 						       &port->plug_prime_desc);
1774 	}
1775 }
1776 
1777 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1778 			       enum tcpm_transmit_type rx_sop_type)
1779 {
1780 	struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1781 				      &port->mode_data_prime : &port->mode_data;
1782 	int i;
1783 
1784 	for (i = 1; i < cnt; i++) {
1785 		u16 svid;
1786 
1787 		svid = (p[i] >> 16) & 0xffff;
1788 		if (!svid)
1789 			return false;
1790 
1791 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1792 			goto abort;
1793 
1794 		pmdata->svids[pmdata->nsvids++] = svid;
1795 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1796 
1797 		svid = p[i] & 0xffff;
1798 		if (!svid)
1799 			return false;
1800 
1801 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1802 			goto abort;
1803 
1804 		pmdata->svids[pmdata->nsvids++] = svid;
1805 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1806 	}
1807 
1808 	/*
1809 	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1810 	 * 6-43), and a maximum of 6 VDOs can be returned per response (see
1811 	 * Figure 6-19). If the Responder supports 12 or more SVIDs, the Discover
1812 	 * SVIDs Command Shall be executed multiple times until a Discover
1813 	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1814 	 * the last part of the last VDO or with a VDO containing two SVIDs
1815 	 * with values of 0x0000.
1816 	 *
1817 	 * However, some odd docks return fewer than 12 SVIDs but without
1818 	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1819 	 * request and return false here.
1820 	 */
1821 	return cnt == 7;
1822 abort:
1823 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1824 	return false;
1825 }
1826 
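/*
 * Editor's sketch (not part of the driver): how two 16-bit SVIDs are packed
 * into one Discover SVIDs VDO, mirroring the unpacking loop in
 * svdm_consume_svids() above. A VDO of 0x8087ff01 would yield SVID 0x8087
 * (upper half, listed first) and SVID 0xff01 (lower half).
 */
static inline void example_unpack_svid_vdo(u32 vdo, u16 *first, u16 *second)
{
	*first = (vdo >> 16) & 0xffff;	/* first SVID of the pair */
	*second = vdo & 0xffff;		/* second SVID, 0x0000 if unused */
}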
1827 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1828 			       enum tcpm_transmit_type rx_sop_type)
1829 {
1830 	struct pd_mode_data *pmdata = &port->mode_data;
1831 	struct typec_altmode_desc *paltmode;
1832 	int i;
1833 
1834 	switch (rx_sop_type) {
1835 	case TCPC_TX_SOP_PRIME:
1836 		pmdata = &port->mode_data_prime;
1837 		if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1838 			/* Already logged in svdm_consume_svids() */
1839 			return;
1840 		}
1841 		break;
1842 	case TCPC_TX_SOP:
1843 		pmdata = &port->mode_data;
1844 		if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1845 			/* Already logged in svdm_consume_svids() */
1846 			return;
1847 		}
1848 		break;
1849 	default:
1850 		return;
1851 	}
1852 
1853 	for (i = 1; i < cnt; i++) {
1854 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1855 		memset(paltmode, 0, sizeof(*paltmode));
1856 
1857 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1858 		paltmode->mode = i;
1859 		paltmode->vdo = p[i];
1860 
1861 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1862 			 pmdata->altmodes, paltmode->svid,
1863 			 paltmode->mode, paltmode->vdo);
1864 
1865 		pmdata->altmodes++;
1866 	}
1867 }
1868 
1869 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1870 {
1871 	struct pd_mode_data *modep = &port->mode_data;
1872 	struct typec_altmode *altmode;
1873 	int i;
1874 
1875 	if (!port->partner)
1876 		return;
1877 
1878 	for (i = 0; i < modep->altmodes; i++) {
1879 		altmode = typec_partner_register_altmode(port->partner,
1880 						&modep->altmode_desc[i]);
1881 		if (IS_ERR(altmode)) {
1882 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1883 				 modep->altmode_desc[i].svid);
1884 			altmode = NULL;
1885 		}
1886 		port->partner_altmode[i] = altmode;
1887 	}
1888 }
1889 
1890 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1891 {
1892 	struct pd_mode_data *modep = &port->mode_data_prime;
1893 	struct typec_altmode *altmode;
1894 	int i;
1895 
1896 	typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1897 
1898 	for (i = 0; i < modep->altmodes; i++) {
1899 		altmode = typec_plug_register_altmode(port->plug_prime,
1900 						&modep->altmode_desc[i]);
1901 		if (IS_ERR(altmode)) {
1902 			tcpm_log(port, "Failed to register plug SVID 0x%04x",
1903 				 modep->altmode_desc[i].svid);
1904 			altmode = NULL;
1905 		}
1906 		port->plug_prime_altmode[i] = altmode;
1907 	}
1908 }
1909 
1910 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1911 #define supports_modal_cable(port)     PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
1912 #define supports_host(port)	PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
1913 
1914 /*
1915  * Helper to determine whether the port is capable of SOP' communication at the
1916  * current point in time.
1917  */
1918 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1919 {
1920 	/* Check to see if tcpc supports SOP' communication */
1921 	if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1922 		return false;
1923 	/*
1924 	 * Power Delivery 2.0 Section 6.3.11
1925 	 * Before communicating with a Cable Plug a Port Should ensure that it
1926 	 * is the Vconn Source and that the Cable Plugs are powered by
1927 	 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1928 	 * that the present Vconn Source is supplying Vconn, the only means to
1929 	 * ensure that the Cable Plugs are powered is for a Port wishing to
1930 	 * communicate with a Cable Plug is to become the Vconn Source.
1931 	 *
1932 	 * Power Delivery 3.0 Section 6.3.11
1933 	 * Before communicating with a Cable Plug a Port Shall ensure that it
1934 	 * is the Vconn source.
1935 	 */
1936 	if (port->vconn_role != TYPEC_SOURCE)
1937 		return false;
1938 	/*
1939 	 * Power Delivery 2.0 Section 2.4.4
1940 	 * When no Contract or an Implicit Contract is in place the Source can
1941 	 * communicate with a Cable Plug using SOP' packets in order to discover
1942 	 * its characteristics.
1943 	 *
1944 	 * Power Delivery 3.0 Section 2.4.4
1945 	 * When no Contract or an Implicit Contract is in place only the Source
1946 	 * port that is supplying Vconn is allowed to send packets to a Cable
1947 	 * Plug and is allowed to respond to packets from the Cable Plug.
1948 	 */
1949 	if (!port->explicit_contract)
1950 		return port->pwr_role == TYPEC_SOURCE;
1951 	if (port->negotiated_rev == PD_REV30)
1952 		return true;
1953 	/*
1954 	 * Power Delivery 2.0 Section 2.4.4
1955 	 *
1956 	 * When an Explicit Contract is in place the DFP (either the Source or
1957 	 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1958 	 * Packets (see Figure 2-3).
1959 	 */
1960 	if (port->negotiated_rev == PD_REV20)
1961 		return port->data_role == TYPEC_HOST;
1962 	return false;
1963 }
1964 
1965 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1966 {
1967 	if (!port->tcpc->attempt_vconn_swap_discovery)
1968 		return false;
1969 
1970 	/* Port is already source, no need to perform swap */
1971 	if (port->vconn_role == TYPEC_SOURCE)
1972 		return false;
1973 
1974 	/*
1975 	 * Partner needs to support Alternate Modes with modal support. If
1976 	 * partner is also capable of being a USB Host, it could be a device
1977 	 * that supports Alternate Modes as the DFP.
1978 	 */
1979 	if (!supports_modal(port) || supports_host(port))
1980 		return false;
1981 
1982 	if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1983 	    port->negotiated_rev == PD_REV30)
1984 		return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1985 
1986 	return false;
1987 }
1988 
1989 
1990 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1991 {
1992 	return !IS_ERR_OR_NULL(port->cable) &&
1993 	       typec_cable_is_active(port->cable) &&
1994 	       supports_modal_cable(port) &&
1995 	       tcpm_can_communicate_sop_prime(port);
1996 }
1997 
1998 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1999 			const u32 *p, int cnt, u32 *response,
2000 			enum adev_actions *adev_action,
2001 			enum tcpm_transmit_type rx_sop_type,
2002 			enum tcpm_transmit_type *response_tx_sop_type)
2003 {
2004 	struct typec_port *typec = port->typec_port;
2005 	struct typec_altmode *pdev, *pdev_prime;
2006 	struct pd_mode_data *modep, *modep_prime;
2007 	int svdm_version;
2008 	int rlen = 0;
2009 	int cmd_type;
2010 	int cmd;
2011 	int i;
2012 	int ret;
2013 
2014 	cmd_type = PD_VDO_CMDT(p[0]);
2015 	cmd = PD_VDO_CMD(p[0]);
2016 
2017 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
2018 		 p[0], cmd_type, cmd, cnt);
2019 
2020 	switch (rx_sop_type) {
2021 	case TCPC_TX_SOP_PRIME:
2022 		modep_prime = &port->mode_data_prime;
2023 		pdev_prime = typec_match_altmode(port->plug_prime_altmode,
2024 						 ALTMODE_DISCOVERY_MAX,
2025 						 PD_VDO_VID(p[0]),
2026 						 PD_VDO_OPOS(p[0]));
2027 		svdm_version = typec_get_cable_svdm_version(typec);
2028 		/*
2029 		 * Update SVDM version if cable was discovered before port partner.
2030 		 */
2031 		if (!IS_ERR_OR_NULL(port->cable) &&
2032 		    PD_VDO_SVDM_VER(p[0]) < svdm_version)
2033 			typec_cable_set_svdm_version(port->cable, svdm_version);
2034 		break;
2035 	case TCPC_TX_SOP:
2036 		modep = &port->mode_data;
2037 		pdev = typec_match_altmode(port->partner_altmode,
2038 					   ALTMODE_DISCOVERY_MAX,
2039 					   PD_VDO_VID(p[0]),
2040 					   PD_VDO_OPOS(p[0]));
2041 		svdm_version = typec_get_negotiated_svdm_version(typec);
2042 		if (svdm_version < 0)
2043 			return 0;
2044 		break;
2045 	default:
2046 		modep = &port->mode_data;
2047 		pdev = typec_match_altmode(port->partner_altmode,
2048 					   ALTMODE_DISCOVERY_MAX,
2049 					   PD_VDO_VID(p[0]),
2050 					   PD_VDO_OPOS(p[0]));
2051 		svdm_version = typec_get_negotiated_svdm_version(typec);
2052 		if (svdm_version < 0)
2053 			return 0;
2054 		break;
2055 	}
2056 
2057 	switch (cmd_type) {
2058 	case CMDT_INIT:
2059 		/*
2060 		 * Only the port or the port partner is allowed to initiate SVDM
2061 		 * commands over SOP'. If the port partner initiates a sequence
2062 		 * when it is not allowed to send SOP' messages, drop the message
2063 		 * rather than let the TCPM port process it.
2064 		 */
2065 		if (rx_sop_type == TCPC_TX_SOP_PRIME)
2066 			return 0;
2067 
2068 		switch (cmd) {
2069 		case CMD_DISCOVER_IDENT:
2070 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
2071 				break;
2072 
2073 			if (IS_ERR_OR_NULL(port->partner))
2074 				break;
2075 
2076 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2077 				typec_partner_set_svdm_version(port->partner,
2078 							       PD_VDO_SVDM_VER(p[0]));
2079 				svdm_version = PD_VDO_SVDM_VER(p[0]);
2080 			}
2081 
2082 			port->ams = DISCOVER_IDENTITY;
2083 			/*
2084 			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
2085 			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
2086 			 * "wrong configuration" or "Unrecognized"
2087 			 */
2088 			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
2089 			    port->nr_snk_vdo) {
2090 				if (svdm_version < SVDM_VER_2_0) {
2091 					for (i = 0; i < port->nr_snk_vdo_v1; i++)
2092 						response[i + 1] = port->snk_vdo_v1[i];
2093 					rlen = port->nr_snk_vdo_v1 + 1;
2094 
2095 				} else {
2096 					for (i = 0; i < port->nr_snk_vdo; i++)
2097 						response[i + 1] = port->snk_vdo[i];
2098 					rlen = port->nr_snk_vdo + 1;
2099 				}
2100 			}
2101 			break;
2102 		case CMD_DISCOVER_SVID:
2103 			port->ams = DISCOVER_SVIDS;
2104 			break;
2105 		case CMD_DISCOVER_MODES:
2106 			port->ams = DISCOVER_MODES;
2107 			break;
2108 		case CMD_ENTER_MODE:
2109 			port->ams = DFP_TO_UFP_ENTER_MODE;
2110 			break;
2111 		case CMD_EXIT_MODE:
2112 			port->ams = DFP_TO_UFP_EXIT_MODE;
2113 			break;
2114 		case CMD_ATTENTION:
2115 			/* The Attention command does not have a response */
2116 			*adev_action = ADEV_ATTENTION;
2117 			return 0;
2118 		default:
2119 			break;
2120 		}
2121 		if (rlen >= 1) {
2122 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
2123 		} else if (rlen == 0) {
2124 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2125 			rlen = 1;
2126 		} else {
2127 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2128 			rlen = 1;
2129 		}
2130 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2131 			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2132 		break;
2133 	case CMDT_RSP_ACK:
2134 		/*
2135 		 * Silently drop the message if we are not connected, but still process
2136 		 * an SOP' Discover Identity received prior to an explicit contract.
2137 		 */
2138 		if (IS_ERR_OR_NULL(port->partner) &&
2139 		    !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2140 			break;
2141 
2142 		tcpm_ams_finish(port);
2143 
2144 		switch (cmd) {
2145 		/*
2146 		 * SVDM Command Flow for SOP and SOP':
2147 		 * SOP		Discover Identity
2148 		 * SOP'		Discover Identity
2149 		 * SOP		Discover SVIDs
2150 		 *		Discover Modes
2151 		 * (Active Cables)
2152 		 * SOP'		Discover SVIDs
2153 		 *		Discover Modes
2154 		 *
2155 		 * Perform Discover SOP' if the port can communicate with cable
2156 		 * plug.
2157 		 */
2158 		case CMD_DISCOVER_IDENT:
2159 			switch (rx_sop_type) {
2160 			case TCPC_TX_SOP:
2161 				if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2162 					typec_partner_set_svdm_version(port->partner,
2163 								       PD_VDO_SVDM_VER(p[0]));
2164 					/* If cable is discovered before partner, downgrade svdm */
2165 					if (!IS_ERR_OR_NULL(port->cable) &&
2166 					    (typec_get_cable_svdm_version(port->typec_port) >
2167 					    svdm_version))
2168 						typec_cable_set_svdm_version(port->cable,
2169 									     svdm_version);
2170 				}
2171 				/* 6.4.4.3.1 */
2172 				svdm_consume_identity(port, p, cnt);
2173 				/* Attempt Vconn swap, delay SOP' discovery if necessary */
2174 				if (tcpm_attempt_vconn_swap_discovery(port)) {
2175 					port->send_discover_prime = true;
2176 					port->upcoming_state = VCONN_SWAP_SEND;
2177 					ret = tcpm_ams_start(port, VCONN_SWAP);
2178 					if (!ret)
2179 						return 0;
2180 					/* Cannot perform Vconn swap */
2181 					port->upcoming_state = INVALID_STATE;
2182 					port->send_discover_prime = false;
2183 				}
2184 
2185 				/*
2186 				 * Attempt Discover Identity on SOP' if the
2187 				 * cable was not discovered previously, and use
2188 				 * the SVDM version of the partner to probe.
2189 				 */
2190 				if (IS_ERR_OR_NULL(port->cable) &&
2191 				    tcpm_can_communicate_sop_prime(port)) {
2192 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2193 					port->send_discover_prime = true;
2194 					response[0] = VDO(USB_SID_PD, 1,
2195 							  typec_get_negotiated_svdm_version(typec),
2196 							  CMD_DISCOVER_IDENT);
2197 					rlen = 1;
2198 				} else {
2199 					*response_tx_sop_type = TCPC_TX_SOP;
2200 					response[0] = VDO(USB_SID_PD, 1,
2201 							  typec_get_negotiated_svdm_version(typec),
2202 							  CMD_DISCOVER_SVID);
2203 					rlen = 1;
2204 				}
2205 				break;
2206 			case TCPC_TX_SOP_PRIME:
2207 				/*
2208 				 * svdm_consume_identity_sop_prime will determine
2209 				 * the svdm_version for the cable moving forward.
2210 				 */
2211 				svdm_consume_identity_sop_prime(port, p, cnt);
2212 
2213 				/*
2214 				 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2215 				 * to SRC_SEND_CAPABILITIES
2216 				 */
2217 				if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2218 					tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2219 					return 0;
2220 				}
2221 
2222 				*response_tx_sop_type = TCPC_TX_SOP;
2223 				response[0] = VDO(USB_SID_PD, 1,
2224 						  typec_get_negotiated_svdm_version(typec),
2225 						  CMD_DISCOVER_SVID);
2226 				rlen = 1;
2227 				break;
2228 			default:
2229 				return 0;
2230 			}
2231 			break;
2232 		case CMD_DISCOVER_SVID:
2233 			*response_tx_sop_type = rx_sop_type;
2234 			/* 6.4.4.3.2 */
2235 			if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2236 				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2237 				rlen = 1;
2238 			} else {
2239 				if (rx_sop_type == TCPC_TX_SOP) {
2240 					if (modep->nsvids && supports_modal(port)) {
2241 						response[0] = VDO(modep->svids[0], 1, svdm_version,
2242 								CMD_DISCOVER_MODES);
2243 						rlen = 1;
2244 					}
2245 				} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2246 					if (modep_prime->nsvids) {
2247 						response[0] = VDO(modep_prime->svids[0], 1,
2248 								  svdm_version, CMD_DISCOVER_MODES);
2249 						rlen = 1;
2250 					}
2251 				}
2252 			}
2253 			break;
2254 		case CMD_DISCOVER_MODES:
2255 			if (rx_sop_type == TCPC_TX_SOP) {
2256 				/* 6.4.4.3.3 */
2257 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2258 				modep->svid_index++;
2259 				if (modep->svid_index < modep->nsvids) {
2260 					u16 svid = modep->svids[modep->svid_index];
2261 					*response_tx_sop_type = TCPC_TX_SOP;
2262 					response[0] = VDO(svid, 1, svdm_version,
2263 							  CMD_DISCOVER_MODES);
2264 					rlen = 1;
2265 				} else if (tcpm_cable_vdm_supported(port)) {
2266 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2267 					response[0] = VDO(USB_SID_PD, 1,
2268 							  typec_get_cable_svdm_version(typec),
2269 							  CMD_DISCOVER_SVID);
2270 					rlen = 1;
2271 				} else {
2272 					tcpm_register_partner_altmodes(port);
2273 				}
2274 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2275 				/* 6.4.4.3.3 */
2276 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2277 				modep_prime->svid_index++;
2278 				if (modep_prime->svid_index < modep_prime->nsvids) {
2279 					u16 svid = modep_prime->svids[modep_prime->svid_index];
2280 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2281 					response[0] = VDO(svid, 1,
2282 							  typec_get_cable_svdm_version(typec),
2283 							  CMD_DISCOVER_MODES);
2284 					rlen = 1;
2285 				} else {
2286 					tcpm_register_plug_altmodes(port);
2287 					tcpm_register_partner_altmodes(port);
2288 				}
2289 			}
2290 			break;
2291 		case CMD_ENTER_MODE:
2292 			*response_tx_sop_type = rx_sop_type;
2293 			if (rx_sop_type == TCPC_TX_SOP) {
2294 				if (adev && pdev) {
2295 					typec_altmode_update_active(pdev, true);
2296 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2297 				}
2298 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2299 				if (adev && pdev_prime) {
2300 					typec_altmode_update_active(pdev_prime, true);
2301 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2302 				}
2303 			}
2304 			return 0;
2305 		case CMD_EXIT_MODE:
2306 			*response_tx_sop_type = rx_sop_type;
2307 			if (rx_sop_type == TCPC_TX_SOP) {
2308 				if (adev && pdev) {
2309 					typec_altmode_update_active(pdev, false);
2310 					/* Back to USB Operation */
2311 					*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2312 					return 0;
2313 				}
2314 			}
2315 			break;
2316 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2317 			break;
2318 		default:
2319 			/* Unrecognized SVDM */
2320 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2321 			rlen = 1;
2322 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2323 				      (VDO_SVDM_VERS(svdm_version));
2324 			break;
2325 		}
2326 		break;
2327 	case CMDT_RSP_NAK:
2328 		tcpm_ams_finish(port);
2329 		switch (cmd) {
2330 		case CMD_DISCOVER_IDENT:
2331 		case CMD_DISCOVER_SVID:
2332 		case CMD_DISCOVER_MODES:
2333 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2334 			break;
2335 		case CMD_ENTER_MODE:
2336 			/* Back to USB Operation */
2337 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2338 			return 0;
2339 		default:
2340 			/* Unrecognized SVDM */
2341 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2342 			rlen = 1;
2343 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2344 				      (VDO_SVDM_VERS(svdm_version));
2345 			break;
2346 		}
2347 		break;
2348 	default:
2349 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2350 		rlen = 1;
2351 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2352 			      (VDO_SVDM_VERS(svdm_version));
2353 		break;
2354 	}
2355 
2356 	/* Informing the alternate mode drivers about everything */
2357 	*adev_action = ADEV_QUEUE_VDM;
2358 	return rlen;
2359 }
2360 
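/*
 * Editor's sketch (not part of the driver): the SVDM header fields that
 * tcpm_pd_svdm() above extracts from the first VDO of a received VDM, using
 * the PD_VDO_* accessors from <linux/usb/pd_vdo.h>. The helper name is
 * hypothetical.
 */
static inline void example_log_svdm_header(struct tcpm_port *port, u32 hdr)
{
	tcpm_log(port, "VID 0x%04x svdm_ver %u cmd_type %u cmd %u opos %u",
		 PD_VDO_VID(hdr), PD_VDO_SVDM_VER(hdr), PD_VDO_CMDT(hdr),
		 PD_VDO_CMD(hdr), PD_VDO_OPOS(hdr));
}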
2361 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2362 			       enum pd_msg_request message,
2363 			       enum tcpm_ams ams);
2364 
2365 static void tcpm_handle_vdm_request(struct tcpm_port *port,
2366 				    const __le32 *payload, int cnt,
2367 				    enum tcpm_transmit_type rx_sop_type)
2368 {
2369 	enum adev_actions adev_action = ADEV_NONE;
2370 	struct typec_altmode *adev;
2371 	u32 p[PD_MAX_PAYLOAD];
2372 	u32 response[8] = { };
2373 	int i, rlen = 0;
2374 	enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
2375 
2376 	for (i = 0; i < cnt; i++)
2377 		p[i] = le32_to_cpu(payload[i]);
2378 
2379 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
2380 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
2381 
2382 	if (port->vdm_state == VDM_STATE_BUSY) {
2383 		/* If UFP responded busy retry after timeout */
2384 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
2385 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
2386 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
2387 				CMDT_INIT;
2388 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
2389 			return;
2390 		}
2391 		port->vdm_state = VDM_STATE_DONE;
2392 	}
2393 
2394 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
2395 		/*
2396 		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
2397 		 * advance because we are dropping the lock but may send VDMs soon.
2398 		 * For the cases of INIT received:
2399 		 *  - If no response to send, it will be cleared later in this function.
2400 		 *  - If there are responses to send, it will be cleared in the state machine.
2401 		 * For the cases of RSP received:
2402 		 *  - If no further INIT to send, it will be cleared later in this function.
2403 		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
2404 		 *    back here until no further INIT to send.
2405 		 * For the cases of unknown type received:
2406 		 *  - We will send NAK and the flag will be cleared in the state machine.
2407 		 */
2408 		port->vdm_sm_running = true;
2409 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
2410 				    rx_sop_type, &response_tx_sop_type);
2411 	} else {
2412 		if (port->negotiated_rev >= PD_REV30)
2413 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2414 	}
2415 
2416 	/*
2417 	 * We are done with any state stored in the port struct now, except
2418 	 * for any port struct changes done by the tcpm_queue_vdm() call
2419 	 * below, which is a separate operation.
2420 	 *
2421 	 * So we can safely release the lock here; and we MUST release the
2422 	 * lock here to avoid an AB BA lock inversion:
2423 	 *
2424 	 * If we keep the lock here then the lock ordering in this path is:
2425 	 * 1. tcpm_pd_rx_handler takes the tcpm port lock
2426 	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
2427 	 *
2428 	 * And we also have this ordering:
2429 	 * 1. alt-mode driver takes the alt-mode's lock
2430 	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
2431 	 *    tcpm port lock
2432 	 *
2433 	 * Dropping our lock here avoids this.
2434 	 */
2435 	mutex_unlock(&port->lock);
2436 
2437 	if (adev) {
2438 		switch (adev_action) {
2439 		case ADEV_NONE:
2440 			break;
2441 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
2442 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2443 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2444 			} else {
2445 				WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
2446 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2447 			}
2448 			break;
2449 		case ADEV_QUEUE_VDM:
2450 			if (rx_sop_type == TCPC_TX_SOP_PRIME)
2451 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2452 			else
2453 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2454 			break;
2455 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
2456 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2457 				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
2458 							    p[0], &p[1], cnt)) {
2459 					int svdm_version = typec_get_cable_svdm_version(
2460 										port->typec_port);
2461 					if (svdm_version < 0)
2462 						break;
2463 
2464 					response[0] = VDO(adev->svid, 1, svdm_version,
2465 							CMD_EXIT_MODE);
2466 					response[0] |= VDO_OPOS(adev->mode);
2467 					rlen = 1;
2468 				}
2469 			} else {
2470 				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
2471 					int svdm_version = typec_get_negotiated_svdm_version(
2472 										port->typec_port);
2473 					if (svdm_version < 0)
2474 						break;
2475 
2476 					response[0] = VDO(adev->svid, 1, svdm_version,
2477 							CMD_EXIT_MODE);
2478 					response[0] |= VDO_OPOS(adev->mode);
2479 					rlen = 1;
2480 				}
2481 			}
2482 			break;
2483 		case ADEV_ATTENTION:
2484 			if (typec_altmode_attention(adev, p[1]))
2485 				tcpm_log(port, "typec_altmode_attention no port partner altmode");
2486 			break;
2487 		}
2488 	}
2489 
2490 	/*
2491 	 * We must re-take the lock here to balance the unlock in
2492 	 * tcpm_pd_rx_handler; note that no changes, other than the
2493 	 * tcpm_queue_vdm call, are made while the lock is held again.
2494 	 * All that is done after the call is unwinding the call stack until
2495 	 * we return to tcpm_pd_rx_handler and do the unlock there.
2496 	 */
2497 	mutex_lock(&port->lock);
2498 
2499 	if (rlen > 0)
2500 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
2501 	else
2502 		port->vdm_sm_running = false;
2503 }
2504 
2505 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
2506 			  const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
2507 {
2508 	int svdm_version;
2509 	u32 header;
2510 
2511 	switch (tx_sop_type) {
2512 	case TCPC_TX_SOP_PRIME:
2513 		/*
2514 		 * If the port partner is discovered, then the port partner's
2515 		 * SVDM Version will be returned
2516 		 */
2517 		svdm_version = typec_get_cable_svdm_version(port->typec_port);
2518 		if (svdm_version < 0)
2519 			svdm_version = SVDM_VER_MAX;
2520 		break;
2521 	case TCPC_TX_SOP:
2522 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2523 		if (svdm_version < 0)
2524 			return;
2525 		break;
2526 	default:
2527 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2528 		if (svdm_version < 0)
2529 			return;
2530 		break;
2531 	}
2532 
2533 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
2534 		count = VDO_MAX_SIZE - 1;
2535 
2536 	/* set VDM header with VID & CMD */
2537 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
2538 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
2539 			svdm_version, cmd);
2540 	tcpm_queue_vdm(port, header, data, count, tx_sop_type);
2541 }
2542 
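/*
 * Editor's sketch (not part of the driver): gating an SOP' Discover SVIDs
 * request on tcpm_cable_vdm_supported() before handing it to tcpm_send_vdm()
 * above. The helper name is hypothetical; the called functions are the ones
 * defined earlier in this file.
 */
static void example_discover_cable_svids(struct tcpm_port *port)
{
	if (!tcpm_cable_vdm_supported(port))
		return;

	/* No extra data objects: just the Discover SVIDs command header */
	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_SVID, NULL, 0,
		      TCPC_TX_SOP_PRIME);
}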
2543 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
2544 {
2545 	unsigned int timeout;
2546 	int cmd = PD_VDO_CMD(vdm_hdr);
2547 
2548 	/* it's not a structured VDM command */
2549 	if (!PD_VDO_SVDM(vdm_hdr))
2550 		return PD_T_VDM_UNSTRUCTURED;
2551 
2552 	switch (PD_VDO_CMDT(vdm_hdr)) {
2553 	case CMDT_INIT:
2554 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2555 			timeout = PD_T_VDM_WAIT_MODE_E;
2556 		else
2557 			timeout = PD_T_VDM_SNDR_RSP;
2558 		break;
2559 	default:
2560 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2561 			timeout = PD_T_VDM_E_MODE;
2562 		else
2563 			timeout = PD_T_VDM_RCVR_RSP;
2564 		break;
2565 	}
2566 	return timeout;
2567 }
2568 
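/*
 * Editor's sketch (not part of the driver): for an initiator (CMDT_INIT)
 * Discover Identity header, vdm_ready_timeout() above resolves to
 * PD_T_VDM_SNDR_RSP; an initiator Enter Mode or Exit Mode header would
 * resolve to PD_T_VDM_WAIT_MODE_E instead. The helper name is hypothetical.
 */
static inline unsigned int example_discover_ident_timeout(void)
{
	u32 hdr = VDO(USB_SID_PD, 1, SVDM_VER_2_0, CMD_DISCOVER_IDENT);

	return vdm_ready_timeout(hdr);
}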
2569 static void vdm_run_state_machine(struct tcpm_port *port)
2570 {
2571 	struct pd_message msg;
2572 	int i, res = 0;
2573 	u32 vdo_hdr = port->vdo_data[0];
2574 	u32 response[8] = { };
2575 
2576 	switch (port->vdm_state) {
2577 	case VDM_STATE_READY:
2578 		/* Only transmit VDM if attached */
2579 		if (!port->attached) {
2580 			port->vdm_state = VDM_STATE_ERR_BUSY;
2581 			break;
2582 		}
2583 
2584 		/*
2585 		 * If there's traffic or we're not in a ready state, don't send
2586 		 * a VDM.
2587 		 */
2588 		if (port->state != SRC_READY && port->state != SNK_READY &&
2589 		    port->state != SRC_VDM_IDENTITY_REQUEST) {
2590 			port->vdm_sm_running = false;
2591 			break;
2592 		}
2593 
2594 		/* TODO: AMS operation for Unstructured VDM */
2595 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
2596 			switch (PD_VDO_CMD(vdo_hdr)) {
2597 			case CMD_DISCOVER_IDENT:
2598 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
2599 				if (res == 0) {
2600 					switch (port->tx_sop_type) {
2601 					case TCPC_TX_SOP_PRIME:
2602 						port->send_discover_prime = false;
2603 						break;
2604 					case TCPC_TX_SOP:
2605 						port->send_discover = false;
2606 						break;
2607 					default:
2608 						port->send_discover = false;
2609 						break;
2610 					}
2611 				} else if (res == -EAGAIN) {
2612 					port->vdo_data[0] = 0;
2613 					mod_send_discover_delayed_work(port,
2614 								       SEND_DISCOVER_RETRY_MS);
2615 				}
2616 				break;
2617 			case CMD_DISCOVER_SVID:
2618 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
2619 				break;
2620 			case CMD_DISCOVER_MODES:
2621 				res = tcpm_ams_start(port, DISCOVER_MODES);
2622 				break;
2623 			case CMD_ENTER_MODE:
2624 				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2625 				break;
2626 			case CMD_EXIT_MODE:
2627 				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2628 				break;
2629 			case CMD_ATTENTION:
2630 				res = tcpm_ams_start(port, ATTENTION);
2631 				break;
2632 			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2633 				res = tcpm_ams_start(port, STRUCTURED_VDMS);
2634 				break;
2635 			default:
2636 				res = -EOPNOTSUPP;
2637 				break;
2638 			}
2639 
2640 			if (res < 0) {
2641 				port->vdm_state = VDM_STATE_ERR_BUSY;
2642 				return;
2643 			}
2644 		}
2645 
2646 		port->vdm_state = VDM_STATE_SEND_MESSAGE;
2647 		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2648 					    port->pwr_role == TYPEC_SOURCE &&
2649 					    PD_VDO_SVDM(vdo_hdr) &&
2650 					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2651 					   PD_T_SINK_TX : 0);
2652 		break;
2653 	case VDM_STATE_WAIT_RSP_BUSY:
2654 		port->vdo_data[0] = port->vdo_retry;
2655 		port->vdo_count = 1;
2656 		port->vdm_state = VDM_STATE_READY;
2657 		tcpm_ams_finish(port);
2658 		break;
2659 	case VDM_STATE_BUSY:
2660 		port->vdm_state = VDM_STATE_ERR_TMOUT;
2661 		if (port->ams != NONE_AMS)
2662 			tcpm_ams_finish(port);
2663 		break;
2664 	case VDM_STATE_ERR_SEND:
2665 		/*
2666 		 * When sending Discover Identity to SOP' before establishing an
2667 		 * explicit contract, do not retry. Instead, interleave sending
2668 		 * Source_Capabilities over SOP with Discover Identity over SOP'.
2669 		 */
2670 		if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2671 			tcpm_ams_finish(port);
2672 			port->vdm_state = VDM_STATE_DONE;
2673 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2674 		/*
2675 		 * A partner which does not support USB PD will not reply,
2676 		 * so this is not a fatal error. At the same time, some
2677 		 * devices may not return GoodCRC under some circumstances,
2678 		 * so we need to retry.
2679 		 */
2680 		} else if (port->vdm_retries < 3) {
2681 			tcpm_log(port, "VDM Tx error, retry");
2682 			port->vdm_retries++;
2683 			port->vdm_state = VDM_STATE_READY;
2684 			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2685 				tcpm_ams_finish(port);
2686 		} else {
2687 			tcpm_ams_finish(port);
2688 			if (port->tx_sop_type == TCPC_TX_SOP)
2689 				break;
2690 			/* Handle SOP' Transmission Errors */
2691 			switch (PD_VDO_CMD(vdo_hdr)) {
2692 			/*
2693 			 * If Discover Identity fails on SOP', then resume
2694 			 * discovery process on SOP only.
2695 			 */
2696 			case CMD_DISCOVER_IDENT:
2697 				port->vdo_data[0] = 0;
2698 				response[0] = VDO(USB_SID_PD, 1,
2699 						  typec_get_negotiated_svdm_version(
2700 									port->typec_port),
2701 						  CMD_DISCOVER_SVID);
2702 				tcpm_queue_vdm(port, response[0], &response[1],
2703 					       0, TCPC_TX_SOP);
2704 				break;
2705 			/*
2706 			 * If Discover SVIDs or Discover Modes fail, then
2707 			 * proceed with Alt Mode discovery process on SOP.
2708 			 */
2709 			case CMD_DISCOVER_SVID:
2710 				tcpm_register_partner_altmodes(port);
2711 				break;
2712 			case CMD_DISCOVER_MODES:
2713 				tcpm_register_partner_altmodes(port);
2714 				break;
2715 			default:
2716 				break;
2717 			}
2718 		}
2719 		break;
2720 	case VDM_STATE_SEND_MESSAGE:
2721 		/* Prepare and send VDM */
2722 		memset(&msg, 0, sizeof(msg));
2723 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
2724 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2725 						  0,	/* Cable Plug Indicator for DFP/UFP */
2726 						  0,	/* Reserved */
2727 						  port->negotiated_rev_prime,
2728 						  port->message_id_prime,
2729 						  port->vdo_count);
2730 		} else {
2731 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2732 						  port->pwr_role,
2733 						  port->data_role,
2734 						  port->negotiated_rev,
2735 						  port->message_id,
2736 						  port->vdo_count);
2737 		}
2738 		for (i = 0; i < port->vdo_count; i++)
2739 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2740 		res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
2741 		if (res < 0) {
2742 			port->vdm_state = VDM_STATE_ERR_SEND;
2743 		} else {
2744 			unsigned long timeout;
2745 
2746 			port->vdm_retries = 0;
2747 			port->vdo_data[0] = 0;
2748 			port->vdm_state = VDM_STATE_BUSY;
2749 			timeout = vdm_ready_timeout(vdo_hdr);
2750 			mod_vdm_delayed_work(port, timeout);
2751 		}
2752 		break;
2753 	default:
2754 		break;
2755 	}
2756 }
2757 
2758 static void vdm_state_machine_work(struct kthread_work *work)
2759 {
2760 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2761 	enum vdm_states prev_state;
2762 
2763 	mutex_lock(&port->lock);
2764 
2765 	/*
2766 	 * Continue running as long as the port is not busy and there was
2767 	 * a state change.
2768 	 */
2769 	do {
2770 		prev_state = port->vdm_state;
2771 		vdm_run_state_machine(port);
2772 	} while (port->vdm_state != prev_state &&
2773 		 port->vdm_state != VDM_STATE_BUSY &&
2774 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2775 
2776 	if (port->vdm_state < VDM_STATE_READY)
2777 		port->vdm_sm_running = false;
2778 
2779 	mutex_unlock(&port->lock);
2780 }
2781 
2782 enum pdo_err {
2783 	PDO_NO_ERR,
2784 	PDO_ERR_NO_VSAFE5V,
2785 	PDO_ERR_VSAFE5V_NOT_FIRST,
2786 	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2787 	PDO_ERR_FIXED_NOT_SORTED,
2788 	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2789 	PDO_ERR_DUPE_PDO,
2790 	PDO_ERR_PPS_APDO_NOT_SORTED,
2791 	PDO_ERR_DUPE_PPS_APDO,
2792 };
2793 
2794 static const char * const pdo_err_msg[] = {
2795 	[PDO_ERR_NO_VSAFE5V] =
2796 	" err: source/sink caps should at least have vSafe5V",
2797 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
2798 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
2799 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2800 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
2801 	[PDO_ERR_FIXED_NOT_SORTED] =
2802 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
2803 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2804 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2805 	[PDO_ERR_DUPE_PDO] =
2806 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
2807 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
2808 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2809 	[PDO_ERR_DUPE_PPS_APDO] =
2810 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
2811 };
2812 
2813 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2814 				  unsigned int nr_pdo)
2815 {
2816 	unsigned int i;
2817 
2818 	/* Should at least contain vSafe5v */
2819 	if (nr_pdo < 1)
2820 		return PDO_ERR_NO_VSAFE5V;
2821 
2822 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
2823 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2824 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2825 		return PDO_ERR_VSAFE5V_NOT_FIRST;
2826 
2827 	for (i = 1; i < nr_pdo; i++) {
2828 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2829 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2830 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2831 			enum pd_pdo_type type = pdo_type(pdo[i]);
2832 
2833 			switch (type) {
2834 			/*
2835 			 * The remaining Fixed Supply Objects, if
2836 			 * present, shall be sent in voltage order;
2837 			 * lowest to highest.
2838 			 */
2839 			case PDO_TYPE_FIXED:
2840 				if (pdo_fixed_voltage(pdo[i]) <=
2841 				    pdo_fixed_voltage(pdo[i - 1]))
2842 					return PDO_ERR_FIXED_NOT_SORTED;
2843 				break;
2844 			/*
2845 			 * The Battery Supply Objects and Variable
2846 			 * Supply Objects, if present, shall be sent in Minimum
2847 			 * Voltage order; lowest to highest.
2848 			 */
2849 			case PDO_TYPE_VAR:
2850 			case PDO_TYPE_BATT:
2851 				if (pdo_min_voltage(pdo[i]) <
2852 				    pdo_min_voltage(pdo[i - 1]))
2853 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2854 				else if ((pdo_min_voltage(pdo[i]) ==
2855 					  pdo_min_voltage(pdo[i - 1])) &&
2856 					 (pdo_max_voltage(pdo[i]) ==
2857 					  pdo_max_voltage(pdo[i - 1])))
2858 					return PDO_ERR_DUPE_PDO;
2859 				break;
2860 			/*
2861 			 * The Programmable Power Supply APDOs, if present,
2862 			 * shall be sent in Maximum Voltage order;
2863 			 * lowest to highest.
2864 			 */
2865 			case PDO_TYPE_APDO:
2866 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2867 					break;
2868 
2869 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
2870 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
2871 					return PDO_ERR_PPS_APDO_NOT_SORTED;
2872 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2873 					  pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2874 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
2875 					  pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2876 					 pdo_pps_apdo_max_current(pdo[i]) ==
2877 					  pdo_pps_apdo_max_current(pdo[i - 1]))
2878 					return PDO_ERR_DUPE_PPS_APDO;
2879 				break;
2880 			default:
2881 				tcpm_log_force(port, " Unknown pdo type");
2882 			}
2883 		}
2884 	}
2885 
2886 	return PDO_NO_ERR;
2887 }
2888 
2889 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2890 			      unsigned int nr_pdo)
2891 {
2892 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2893 
2894 	if (err_index != PDO_NO_ERR) {
2895 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2896 		return -EINVAL;
2897 	}
2898 
2899 	return 0;
2900 }
2901 
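/*
 * Editor's sketch (not part of the driver): a source capability list that
 * satisfies the ordering rules checked by tcpm_caps_err() above, built with
 * the PDO_* helpers from <linux/usb/pd.h>: the vSafe5V Fixed PDO first,
 * further Fixed PDOs by rising voltage, Variable/Battery PDOs by rising
 * minimum voltage, then PPS APDOs by rising maximum voltage.
 * tcpm_validate_caps(port, example_src_pdo, ARRAY_SIZE(example_src_pdo))
 * would return 0 for this list. The array name is hypothetical.
 */
static const u32 example_src_pdo[] __maybe_unused = {
	PDO_FIXED(5000, 3000, 0),		/* vSafe5V, 3 A */
	PDO_FIXED(9000, 3000, 0),		/* 9 V fixed supply */
	PDO_VAR(5000, 20000, 3000),		/* 5 V - 20 V variable supply */
	PDO_PPS_APDO(3300, 11000, 3000),	/* 3.3 V - 11 V programmable */
};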
2902 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2903 {
2904 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2905 	int svdm_version;
2906 	u32 header;
2907 
2908 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2909 	if (svdm_version < 0)
2910 		return svdm_version;
2911 
2912 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2913 	header |= VDO_OPOS(altmode->mode);
2914 
2915 	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
2916 }
2917 
2918 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2919 {
2920 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2921 	int svdm_version;
2922 	u32 header;
2923 
2924 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2925 	if (svdm_version < 0)
2926 		return svdm_version;
2927 
2928 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2929 	header |= VDO_OPOS(altmode->mode);
2930 
2931 	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2932 }
2933 
2934 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2935 			    u32 header, const u32 *data, int count)
2936 {
2937 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2938 
2939 	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
2940 }
2941 
2942 static const struct typec_altmode_ops tcpm_altmode_ops = {
2943 	.enter = tcpm_altmode_enter,
2944 	.exit = tcpm_altmode_exit,
2945 	.vdm = tcpm_altmode_vdm,
2946 };
2947 
2948 
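/*
 * Editor's sketch (not part of the driver): an alternate mode driver does not
 * call tcpm_altmode_ops directly; it goes through the typec bus helpers (here
 * assuming typec_altmode_enter() from <linux/usb/typec_altmode.h>), which
 * dispatch to .enter above and queue the Enter Mode VDM for the partner. The
 * helper name is hypothetical.
 */
static inline int example_enter_partner_altmode(struct typec_altmode *partner_amode)
{
	/* NULL: no extra VDO follows the Enter Mode command */
	return typec_altmode_enter(partner_amode, NULL);
}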
2949 static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
2950 				    u32 *vdo)
2951 {
2952 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2953 	int svdm_version;
2954 	u32 header;
2955 
2956 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2957 	if (svdm_version < 0)
2958 		return svdm_version;
2959 
2960 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2961 	header |= VDO_OPOS(altmode->mode);
2962 
2963 	return tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
2964 }
2965 
2966 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2967 {
2968 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2969 	int svdm_version;
2970 	u32 header;
2971 
2972 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2973 	if (svdm_version < 0)
2974 		return svdm_version;
2975 
2976 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2977 	header |= VDO_OPOS(altmode->mode);
2978 
2979 	return tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2980 }
2981 
2982 static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
2983 				  u32 header, const u32 *data, int count)
2984 {
2985 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2986 
2987 	return tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
2988 }
2989 
2990 static const struct typec_cable_ops tcpm_cable_ops = {
2991 	.enter = tcpm_cable_altmode_enter,
2992 	.exit = tcpm_cable_altmode_exit,
2993 	.vdm = tcpm_cable_altmode_vdm,
2994 };
2995 
2996 /*
2997  * PD (data, control) command handling functions
2998  */
2999 static inline enum tcpm_state ready_state(struct tcpm_port *port)
3000 {
3001 	if (port->pwr_role == TYPEC_SOURCE)
3002 		return SRC_READY;
3003 	else
3004 		return SNK_READY;
3005 }
3006 
3007 static int tcpm_pd_send_control(struct tcpm_port *port,
3008 				enum pd_ctrl_msg_type type,
3009 				enum tcpm_transmit_type tx_sop_type);
3010 
3011 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
3012 			      int cnt)
3013 {
3014 	u32 p0 = le32_to_cpu(payload[0]);
3015 	unsigned int type = usb_pd_ado_type(p0);
3016 
3017 	if (!type) {
3018 		tcpm_log(port, "Alert message received with no type");
3019 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3020 		return;
3021 	}
3022 
3023 	/* Just handling non-battery alerts for now */
3024 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
3025 		if (port->pwr_role == TYPEC_SOURCE) {
3026 			port->upcoming_state = GET_STATUS_SEND;
3027 			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
3028 		} else {
3029 			/*
3030 			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
3031 			 * SinkTxOk in time.
3032 			 */
3033 			port->ams = GETTING_SOURCE_SINK_STATUS;
3034 			tcpm_set_state(port, GET_STATUS_SEND, 0);
3035 		}
3036 	} else {
3037 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
3038 	}
3039 }
3040 
3041 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
3042 						  enum typec_pwr_opmode mode, bool pps_active,
3043 						  u32 requested_vbus_voltage)
3044 {
3045 	int ret;
3046 
3047 	if (!port->tcpc->set_auto_vbus_discharge_threshold)
3048 		return 0;
3049 
3050 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
3051 							    requested_vbus_voltage,
3052 							    port->pps_data.min_volt);
3053 	tcpm_log_force(port,
3054 		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
3055 		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
3056 		       port->pps_data.min_volt, ret);
3057 
3058 	return ret;
3059 }
3060 
3061 static void tcpm_pd_handle_state(struct tcpm_port *port,
3062 				 enum tcpm_state state,
3063 				 enum tcpm_ams ams,
3064 				 unsigned int delay_ms)
3065 {
3066 	switch (port->state) {
3067 	case SRC_READY:
3068 	case SNK_READY:
3069 		port->ams = ams;
3070 		tcpm_set_state(port, state, delay_ms);
3071 		break;
3072 	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
3073 	case SNK_TRANSITION_SINK:
3074 	case SNK_TRANSITION_SINK_VBUS:
3075 	case SRC_TRANSITION_SUPPLY:
3076 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3077 		break;
3078 	default:
3079 		if (!tcpm_ams_interruptible(port)) {
3080 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3081 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3082 				       SNK_SOFT_RESET,
3083 				       0);
3084 		} else {
3085 			/* process the Message 6.8.1 */
3086 			port->upcoming_state = state;
3087 			port->next_ams = ams;
3088 			tcpm_set_state(port, ready_state(port), delay_ms);
3089 		}
3090 		break;
3091 	}
3092 }
3093 
3094 static void tcpm_pd_handle_msg(struct tcpm_port *port,
3095 			       enum pd_msg_request message,
3096 			       enum tcpm_ams ams)
3097 {
3098 	switch (port->state) {
3099 	case SRC_READY:
3100 	case SNK_READY:
3101 		port->ams = ams;
3102 		tcpm_queue_message(port, message);
3103 		break;
3104 	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
3105 	case SNK_TRANSITION_SINK:
3106 	case SNK_TRANSITION_SINK_VBUS:
3107 	case SRC_TRANSITION_SUPPLY:
3108 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3109 		break;
3110 	default:
3111 		if (!tcpm_ams_interruptible(port)) {
3112 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3113 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3114 				       SNK_SOFT_RESET,
3115 				       0);
3116 		} else {
3117 			port->next_ams = ams;
3118 			tcpm_set_state(port, ready_state(port), 0);
3119 			/* 6.8.1 process the Message */
3120 			tcpm_queue_message(port, message);
3121 		}
3122 		break;
3123 	}
3124 }
3125 
3126 static int tcpm_register_source_caps(struct tcpm_port *port)
3127 {
3128 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3129 	struct usb_power_delivery_capabilities_desc caps = { };
3130 	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3131 
3132 	if (!port->partner_pd)
3133 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3134 	if (IS_ERR(port->partner_pd))
3135 		return PTR_ERR(port->partner_pd);
3136 
3137 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3138 	caps.role = TYPEC_SOURCE;
3139 
3140 	if (cap) {
3141 		usb_power_delivery_unregister_capabilities(cap);
3142 		port->partner_source_caps = NULL;
3143 	}
3144 
3145 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3146 	if (IS_ERR(cap))
3147 		return PTR_ERR(cap);
3148 
3149 	port->partner_source_caps = cap;
3150 
3151 	return 0;
3152 }
3153 
3154 static int tcpm_register_sink_caps(struct tcpm_port *port)
3155 {
3156 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3157 	struct usb_power_delivery_capabilities_desc caps = { };
3158 	struct usb_power_delivery_capabilities *cap;
3159 
3160 	if (!port->partner_pd)
3161 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3162 	if (IS_ERR(port->partner_pd))
3163 		return PTR_ERR(port->partner_pd);
3164 
3165 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3166 	caps.role = TYPEC_SINK;
3167 
3168 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3169 	if (IS_ERR(cap))
3170 		return PTR_ERR(cap);
3171 
3172 	port->partner_sink_caps = cap;
3173 
3174 	return 0;
3175 }
3176 
3177 static void tcpm_pd_data_request(struct tcpm_port *port,
3178 				 const struct pd_message *msg,
3179 				 enum tcpm_transmit_type rx_sop_type)
3180 {
3181 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
3182 	unsigned int cnt = pd_header_cnt_le(msg->header);
3183 	unsigned int rev = pd_header_rev_le(msg->header);
3184 	unsigned int i;
3185 	enum frs_typec_current partner_frs_current;
3186 	bool frs_enable;
3187 	int ret;
3188 
3189 	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
3190 		port->vdm_state = VDM_STATE_ERR_BUSY;
3191 		tcpm_ams_finish(port);
3192 		mod_vdm_delayed_work(port, 0);
3193 	}
3194 
3195 	switch (type) {
3196 	case PD_DATA_SOURCE_CAP:
3197 		for (i = 0; i < cnt; i++)
3198 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
3199 
3200 		port->nr_source_caps = cnt;
3201 
3202 		tcpm_log_source_caps(port);
3203 
3204 		tcpm_validate_caps(port, port->source_caps,
3205 				   port->nr_source_caps);
3206 
3207 		tcpm_register_source_caps(port);
3208 
3209 		/*
3210 		 * Adjust revision in subsequent message headers, as required,
3211 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3212 		 * support Rev 1.0 so just do nothing in that scenario.
3213 		 */
3214 		if (rev == PD_REV10) {
3215 			if (port->ams == GET_SOURCE_CAPABILITIES)
3216 				tcpm_ams_finish(port);
3217 			break;
3218 		}
3219 
3220 		if (rev < PD_MAX_REV) {
3221 			port->negotiated_rev = rev;
3222 			if (port->negotiated_rev_prime > port->negotiated_rev)
3223 				port->negotiated_rev_prime = port->negotiated_rev;
3224 		}
3225 
3226 		if (port->pwr_role == TYPEC_SOURCE) {
3227 			if (port->ams == GET_SOURCE_CAPABILITIES)
3228 				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
3229 			/* Unexpected Source Capabilities */
3230 			else
3231 				tcpm_pd_handle_msg(port,
3232 						   port->negotiated_rev < PD_REV30 ?
3233 						   PD_MSG_CTRL_REJECT :
3234 						   PD_MSG_CTRL_NOT_SUPP,
3235 						   NONE_AMS);
3236 		} else if (port->state == SNK_WAIT_CAPABILITIES ||
3237 			   port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) {
3238 		/*
3239 		 * This message may be received even if VBUS is not
3240 		 * present. This is quite unexpected; see USB PD
3241 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
3242 		 * However, at the same time, we must be ready to
3243 		 * receive this message and respond to it 15ms after
3244 		 * receiving PS_RDY during power swap operations, no matter
3245 		 * if VBUS is available or not (USB PD specification,
3246 		 * section 6.5.9.2).
3247 		 * So we need to accept the message either way,
3248 		 * but be prepared to keep waiting for VBUS after it was
3249 		 * handled.
3250 		 */
3251 			port->ams = POWER_NEGOTIATION;
3252 			port->in_ams = true;
3253 			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3254 		} else {
3255 			if (port->ams == GET_SOURCE_CAPABILITIES)
3256 				tcpm_ams_finish(port);
3257 			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
3258 					     POWER_NEGOTIATION, 0);
3259 		}
3260 		break;
3261 	case PD_DATA_REQUEST:
3262 		/*
3263 		 * Adjust revision in subsequent message headers, as required,
3264 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3265 		 * support Rev 1.0 so just reject in that scenario.
3266 		 */
3267 		if (rev == PD_REV10) {
3268 			tcpm_pd_handle_msg(port,
3269 					   port->negotiated_rev < PD_REV30 ?
3270 					   PD_MSG_CTRL_REJECT :
3271 					   PD_MSG_CTRL_NOT_SUPP,
3272 					   NONE_AMS);
3273 			break;
3274 		}
3275 
3276 		if (rev < PD_MAX_REV) {
3277 			port->negotiated_rev = rev;
3278 			if (port->negotiated_rev_prime > port->negotiated_rev)
3279 				port->negotiated_rev_prime = port->negotiated_rev;
3280 		}
3281 
3282 		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
3283 			tcpm_pd_handle_msg(port,
3284 					   port->negotiated_rev < PD_REV30 ?
3285 					   PD_MSG_CTRL_REJECT :
3286 					   PD_MSG_CTRL_NOT_SUPP,
3287 					   NONE_AMS);
3288 			break;
3289 		}
3290 
3291 		port->sink_request = le32_to_cpu(msg->payload[0]);
3292 
3293 		if (port->vdm_sm_running && port->explicit_contract) {
3294 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
3295 			break;
3296 		}
3297 
3298 		if (port->state == SRC_SEND_CAPABILITIES)
3299 			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
3300 		else
3301 			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
3302 					     POWER_NEGOTIATION, 0);
3303 		break;
3304 	case PD_DATA_SINK_CAP:
3305 		/* We don't do anything with this at the moment... */
3306 		for (i = 0; i < cnt; i++)
3307 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
3308 
3309 		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
3310 			PDO_FIXED_FRS_CURR_SHIFT;
3311 		frs_enable = partner_frs_current && (partner_frs_current <=
3312 						     port->new_source_frs_current);
3313 		tcpm_log(port,
3314 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
3315 			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
3316 		if (frs_enable) {
3317 			ret  = port->tcpc->enable_frs(port->tcpc, true);
3318 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
3319 		}
3320 
3321 		port->nr_sink_caps = cnt;
3322 		port->sink_cap_done = true;
3323 		tcpm_register_sink_caps(port);
3324 
3325 		if (port->ams == GET_SINK_CAPABILITIES)
3326 			tcpm_set_state(port, ready_state(port), 0);
3327 		/* Unexpected Sink Capabilities */
3328 		else
3329 			tcpm_pd_handle_msg(port,
3330 					   port->negotiated_rev < PD_REV30 ?
3331 					   PD_MSG_CTRL_REJECT :
3332 					   PD_MSG_CTRL_NOT_SUPP,
3333 					   NONE_AMS);
3334 		break;
3335 	case PD_DATA_VENDOR_DEF:
3336 		tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
3337 		break;
3338 	case PD_DATA_BIST:
3339 		port->bist_request = le32_to_cpu(msg->payload[0]);
3340 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
3341 		break;
3342 	case PD_DATA_ALERT:
3343 		if (port->state != SRC_READY && port->state != SNK_READY)
3344 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3345 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3346 					     NONE_AMS, 0);
3347 		else
3348 			tcpm_handle_alert(port, msg->payload, cnt);
3349 		break;
3350 	case PD_DATA_BATT_STATUS:
3351 	case PD_DATA_GET_COUNTRY_INFO:
3352 		/* Currently unsupported */
3353 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3354 				   PD_MSG_CTRL_REJECT :
3355 				   PD_MSG_CTRL_NOT_SUPP,
3356 				   NONE_AMS);
3357 		break;
3358 	default:
3359 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3360 				   PD_MSG_CTRL_REJECT :
3361 				   PD_MSG_CTRL_NOT_SUPP,
3362 				   NONE_AMS);
3363 		tcpm_log(port, "Unrecognized data message type %#x", type);
3364 		break;
3365 	}
3366 }
3367 
3368 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3369 {
3370 	if (port->pps_pending) {
3371 		port->pps_status = result;
3372 		port->pps_pending = false;
3373 		complete(&port->pps_complete);
3374 	}
3375 }
3376 
3377 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
3378 				 const struct pd_message *msg,
3379 				 enum tcpm_transmit_type rx_sop_type)
3380 {
3381 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3382 	enum tcpm_state next_state;
3383 	unsigned int rev = pd_header_rev_le(msg->header);
3384 
3385 	/*
3386 	 * Stop the VDM state machine if it is interrupted by another Message. NOT_SUPP is
3387 	 * allowed while a VDM AMS is waiting for responses and is handled later.
3388 	 */
3389 	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
3390 		port->vdm_state = VDM_STATE_ERR_BUSY;
3391 		tcpm_ams_finish(port);
3392 		mod_vdm_delayed_work(port, 0);
3393 	}
3394 
3395 	switch (type) {
3396 	case PD_CTRL_GOOD_CRC:
3397 	case PD_CTRL_PING:
3398 		break;
3399 	case PD_CTRL_GET_SOURCE_CAP:
3400 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
3401 		break;
3402 	case PD_CTRL_GET_SINK_CAP:
3403 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
3404 		break;
3405 	case PD_CTRL_GOTO_MIN:
3406 		break;
3407 	case PD_CTRL_PS_RDY:
3408 		switch (port->state) {
3409 		case SNK_TRANSITION_SINK:
3410 			if (port->vbus_present) {
3411 				tcpm_set_current_limit(port,
3412 						       port->req_current_limit,
3413 						       port->req_supply_voltage);
3414 				port->explicit_contract = true;
3415 				tcpm_set_auto_vbus_discharge_threshold(port,
3416 								       TYPEC_PWR_MODE_PD,
3417 								       port->pps_data.active,
3418 								       port->supply_voltage);
3419 				tcpm_set_state(port, SNK_READY, 0);
3420 			} else {
3421 				/*
3422 				 * Seen after power swap. Keep waiting for VBUS
3423 				 * in a transitional state.
3424 				 */
3425 				tcpm_set_state(port,
3426 					       SNK_TRANSITION_SINK_VBUS, 0);
3427 			}
3428 			break;
3429 		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3430 			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
3431 			break;
3432 		case PR_SWAP_SNK_SRC_SINK_OFF:
3433 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
3434 			break;
3435 		case VCONN_SWAP_WAIT_FOR_VCONN:
3436 			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
3437 			break;
3438 		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3439 			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
3440 			break;
3441 		default:
3442 			tcpm_pd_handle_state(port,
3443 					     port->pwr_role == TYPEC_SOURCE ?
3444 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3445 					     SNK_SOFT_RESET,
3446 					     NONE_AMS, 0);
3447 			break;
3448 		}
3449 		break;
3450 	case PD_CTRL_REJECT:
3451 	case PD_CTRL_WAIT:
3452 	case PD_CTRL_NOT_SUPP:
3453 		switch (port->state) {
3454 		case SNK_NEGOTIATE_CAPABILITIES:
3455 			/* USB PD specification, Figure 8-43 */
3456 			if (port->explicit_contract)
3457 				next_state = SNK_READY;
3458 			else
3459 				next_state = SNK_WAIT_CAPABILITIES;
3460 
3461 			/* Threshold was relaxed before sending Request. Restore it. */
3462 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3463 							       port->pps_data.active,
3464 							       port->supply_voltage);
3465 			tcpm_set_state(port, next_state, 0);
3466 			break;
3467 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3468 			/* Revert the requested PPS values back to the active contract */
3469 			port->pps_data.req_out_volt = port->supply_voltage;
3470 			port->pps_data.req_op_curr = port->current_limit;
3471 			port->pps_status = (type == PD_CTRL_WAIT ?
3472 					    -EAGAIN : -EOPNOTSUPP);
3473 
3474 			/* Threshold was relaxed before sending Request. Restore it. */
3475 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3476 							       port->pps_data.active,
3477 							       port->supply_voltage);
3478 
3479 			tcpm_set_state(port, SNK_READY, 0);
3480 			break;
3481 		case DR_SWAP_SEND:
3482 			port->swap_status = (type == PD_CTRL_WAIT ?
3483 					     -EAGAIN : -EOPNOTSUPP);
3484 			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
3485 			break;
3486 		case PR_SWAP_SEND:
3487 			port->swap_status = (type == PD_CTRL_WAIT ?
3488 					     -EAGAIN : -EOPNOTSUPP);
3489 			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
3490 			break;
3491 		case VCONN_SWAP_SEND:
3492 			port->swap_status = (type == PD_CTRL_WAIT ?
3493 					     -EAGAIN : -EOPNOTSUPP);
3494 			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
3495 			break;
3496 		case FR_SWAP_SEND:
3497 			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
3498 			break;
3499 		case GET_SINK_CAP:
3500 			port->sink_cap_done = true;
3501 			tcpm_set_state(port, ready_state(port), 0);
3502 			break;
3503 		/*
3504 		 * Some port partners do not support GET_STATUS; avoid soft resetting the link
3505 		 * to prevent redundant power re-negotiation.
3506 		 */
3507 		case GET_STATUS_SEND:
3508 			tcpm_set_state(port, ready_state(port), 0);
3509 			break;
3510 		case SRC_READY:
3511 		case SNK_READY:
3512 			if (port->vdm_state > VDM_STATE_READY) {
3513 				port->vdm_state = VDM_STATE_DONE;
3514 				if (tcpm_vdm_ams(port))
3515 					tcpm_ams_finish(port);
3516 				mod_vdm_delayed_work(port, 0);
3517 				break;
3518 			}
3519 			fallthrough;
3520 		default:
3521 			tcpm_pd_handle_state(port,
3522 					     port->pwr_role == TYPEC_SOURCE ?
3523 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3524 					     SNK_SOFT_RESET,
3525 					     NONE_AMS, 0);
3526 			break;
3527 		}
3528 		break;
3529 	case PD_CTRL_ACCEPT:
3530 		switch (port->state) {
3531 		case SNK_NEGOTIATE_CAPABILITIES:
3532 			port->pps_data.active = false;
3533 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3534 			break;
3535 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3536 			port->pps_data.active = true;
3537 			port->pps_data.min_volt = port->pps_data.req_min_volt;
3538 			port->pps_data.max_volt = port->pps_data.req_max_volt;
3539 			port->pps_data.max_curr = port->pps_data.req_max_curr;
3540 			port->req_supply_voltage = port->pps_data.req_out_volt;
3541 			port->req_current_limit = port->pps_data.req_op_curr;
3542 			power_supply_changed(port->psy);
3543 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3544 			break;
3545 		case SOFT_RESET_SEND:
3546 			if (port->ams == SOFT_RESET_AMS)
3547 				tcpm_ams_finish(port);
3548 			/*
3549 			 * A SOP' Soft Reset is sent after a Vconn Swap;
3550 			 * return to the ready state once it is accepted.
3551 			 */
3552 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
3553 				if (rev < port->negotiated_rev_prime)
3554 					port->negotiated_rev_prime = rev;
3555 				tcpm_set_state(port, ready_state(port), 0);
3556 				break;
3557 			}
3558 			if (port->pwr_role == TYPEC_SOURCE) {
3559 				port->upcoming_state = SRC_SEND_CAPABILITIES;
3560 				tcpm_ams_start(port, POWER_NEGOTIATION);
3561 			} else {
3562 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3563 			}
3564 			break;
3565 		case DR_SWAP_SEND:
3566 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
3567 			break;
3568 		case PR_SWAP_SEND:
3569 			tcpm_set_state(port, PR_SWAP_START, 0);
3570 			break;
3571 		case VCONN_SWAP_SEND:
3572 			tcpm_set_state(port, VCONN_SWAP_START, 0);
3573 			break;
3574 		case FR_SWAP_SEND:
3575 			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
3576 			break;
3577 		default:
3578 			tcpm_pd_handle_state(port,
3579 					     port->pwr_role == TYPEC_SOURCE ?
3580 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3581 					     SNK_SOFT_RESET,
3582 					     NONE_AMS, 0);
3583 			break;
3584 		}
3585 		break;
3586 	case PD_CTRL_SOFT_RESET:
3587 		port->ams = SOFT_RESET_AMS;
3588 		tcpm_set_state(port, SOFT_RESET, 0);
3589 		break;
3590 	case PD_CTRL_DR_SWAP:
3591 		/*
3592 		 * XXX
3593 		 * 6.3.9: If an alternate mode is active, a request to swap
3594 		 * alternate modes shall trigger a port reset.
3595 		 */
3596 		if (port->typec_caps.data != TYPEC_PORT_DRD) {
3597 			tcpm_pd_handle_msg(port,
3598 					   port->negotiated_rev < PD_REV30 ?
3599 					   PD_MSG_CTRL_REJECT :
3600 					   PD_MSG_CTRL_NOT_SUPP,
3601 					   NONE_AMS);
3602 		} else {
3603 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3604 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3605 				break;
3606 			}
3607 
3608 			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
3609 		}
3610 		break;
3611 	case PD_CTRL_PR_SWAP:
3612 		if (port->port_type != TYPEC_PORT_DRP) {
3613 			tcpm_pd_handle_msg(port,
3614 					   port->negotiated_rev < PD_REV30 ?
3615 					   PD_MSG_CTRL_REJECT :
3616 					   PD_MSG_CTRL_NOT_SUPP,
3617 					   NONE_AMS);
3618 		} else {
3619 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3620 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3621 				break;
3622 			}
3623 
3624 			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
3625 		}
3626 		break;
3627 	case PD_CTRL_VCONN_SWAP:
3628 		if (port->send_discover && port->negotiated_rev < PD_REV30) {
3629 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3630 			break;
3631 		}
3632 
3633 		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
3634 		break;
3635 	case PD_CTRL_GET_SOURCE_CAP_EXT:
3636 	case PD_CTRL_GET_STATUS:
3637 	case PD_CTRL_FR_SWAP:
3638 	case PD_CTRL_GET_PPS_STATUS:
3639 	case PD_CTRL_GET_COUNTRY_CODES:
3640 		/* Currently not supported */
3641 		tcpm_pd_handle_msg(port,
3642 				   port->negotiated_rev < PD_REV30 ?
3643 				   PD_MSG_CTRL_REJECT :
3644 				   PD_MSG_CTRL_NOT_SUPP,
3645 				   NONE_AMS);
3646 		break;
3647 	case PD_CTRL_GET_REVISION:
3648 		if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
3649 			tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
3650 					   REVISION_INFORMATION);
3651 		else
3652 			tcpm_pd_handle_msg(port,
3653 					   port->negotiated_rev < PD_REV30 ?
3654 					   PD_MSG_CTRL_REJECT :
3655 					   PD_MSG_CTRL_NOT_SUPP,
3656 					   NONE_AMS);
3657 		break;
3658 	default:
3659 		tcpm_pd_handle_msg(port,
3660 				   port->negotiated_rev < PD_REV30 ?
3661 				   PD_MSG_CTRL_REJECT :
3662 				   PD_MSG_CTRL_NOT_SUPP,
3663 				   NONE_AMS);
3664 		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
3665 		break;
3666 	}
3667 }
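/*
 * For example (behaviour summarized from the cases above, not new logic): if
 * this port sent a PR_Swap request and the partner answers with Wait, the
 * PR_SWAP_SEND case records -EAGAIN in swap_status and moves to
 * PR_SWAP_CANCEL, so the in-progress swap can be completed with that error
 * status rather than being left hanging.
 */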
3668 
3669 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
3670 				    const struct pd_message *msg)
3671 {
3672 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
3673 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
3674 
3675 	/* Stop the VDM state machine if it is interrupted by another Message */
3676 	if (tcpm_vdm_ams(port)) {
3677 		port->vdm_state = VDM_STATE_ERR_BUSY;
3678 		tcpm_ams_finish(port);
3679 		mod_vdm_delayed_work(port, 0);
3680 	}
3681 
3682 	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
3683 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3684 		tcpm_log(port, "Unchunked extended messages unsupported");
3685 		return;
3686 	}
3687 
3688 	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
3689 		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
3690 		tcpm_log(port, "Chunk handling not yet supported");
3691 		return;
3692 	}
3693 
3694 	switch (type) {
3695 	case PD_EXT_STATUS:
3696 	case PD_EXT_PPS_STATUS:
3697 		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
3698 			tcpm_ams_finish(port);
3699 			tcpm_set_state(port, ready_state(port), 0);
3700 		} else {
3701 			/* unexpected Status or PPS_Status Message */
3702 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3703 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3704 					     NONE_AMS, 0);
3705 		}
3706 		break;
3707 	case PD_EXT_SOURCE_CAP_EXT:
3708 	case PD_EXT_GET_BATT_CAP:
3709 	case PD_EXT_GET_BATT_STATUS:
3710 	case PD_EXT_BATT_CAP:
3711 	case PD_EXT_GET_MANUFACTURER_INFO:
3712 	case PD_EXT_MANUFACTURER_INFO:
3713 	case PD_EXT_SECURITY_REQUEST:
3714 	case PD_EXT_SECURITY_RESPONSE:
3715 	case PD_EXT_FW_UPDATE_REQUEST:
3716 	case PD_EXT_FW_UPDATE_RESPONSE:
3717 	case PD_EXT_COUNTRY_INFO:
3718 	case PD_EXT_COUNTRY_CODES:
3719 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3720 		break;
3721 	default:
3722 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3723 		tcpm_log(port, "Unrecognized extended message type %#x", type);
3724 		break;
3725 	}
3726 }
3727 
3728 static void tcpm_pd_rx_handler(struct kthread_work *work)
3729 {
3730 	struct pd_rx_event *event = container_of(work,
3731 						 struct pd_rx_event, work);
3732 	const struct pd_message *msg = &event->msg;
3733 	unsigned int cnt = pd_header_cnt_le(msg->header);
3734 	struct tcpm_port *port = event->port;
3735 	enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
3736 
3737 	mutex_lock(&port->lock);
3738 
3739 	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
3740 		 port->attached);
3741 
3742 	if (port->attached) {
3743 		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3744 		unsigned int msgid = pd_header_msgid_le(msg->header);
3745 
3746 		/*
3747 		 * Drop SOP' messages if we cannot receive them, as reported
3748 		 * by tcpm_can_communicate_sop_prime().
3749 		 */
3750 		if (rx_sop_type == TCPC_TX_SOP_PRIME &&
3751 		    !tcpm_can_communicate_sop_prime(port))
3752 			goto done;
3753 
3754 		/*
3755 		 * USB PD standard, 6.6.1.2:
3756 		 * "... if MessageID value in a received Message is the
3757 		 * same as the stored value, the receiver shall return a
3758 		 * GoodCRC Message with that MessageID value and drop
3759 		 * the Message (this is a retry of an already received
3760 		 * Message). Note: this shall not apply to the Soft_Reset
3761 		 * Message which always has a MessageID value of zero."
3762 		 */
3763 		switch (rx_sop_type) {
3764 		case TCPC_TX_SOP_PRIME:
3765 			if (msgid == port->rx_msgid_prime)
3766 				goto done;
3767 			port->rx_msgid_prime = msgid;
3768 			break;
3769 		case TCPC_TX_SOP:
3770 		default:
3771 			if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3772 				goto done;
3773 			port->rx_msgid = msgid;
3774 			break;
3775 		}
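		/*
		 * For example, if two consecutive SOP messages arrive with the
		 * same MessageID, the second one is treated as a retransmission
		 * of the first and dropped here; the GoodCRC for it is handled
		 * by the TCPC, so no further action is needed.
		 */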
3776 
3777 		/*
3778 		 * If both ends believe to be DFP/host, we have a data role
3779 		 * mismatch.
3780 		 */
3781 		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3782 		    (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
3783 			tcpm_log(port,
3784 				 "Data role mismatch, initiating error recovery");
3785 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3786 		} else {
3787 			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3788 				tcpm_pd_ext_msg_request(port, msg);
3789 			else if (cnt)
3790 				tcpm_pd_data_request(port, msg, rx_sop_type);
3791 			else
3792 				tcpm_pd_ctrl_request(port, msg, rx_sop_type);
3793 		}
3794 	}
3795 
3796 done:
3797 	mutex_unlock(&port->lock);
3798 	kfree(event);
3799 }
3800 
3801 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3802 		     enum tcpm_transmit_type rx_sop_type)
3803 {
3804 	struct pd_rx_event *event;
3805 
3806 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
3807 	if (!event)
3808 		return;
3809 
3810 	kthread_init_work(&event->work, tcpm_pd_rx_handler);
3811 	event->port = port;
3812 	event->rx_sop_type = rx_sop_type;
3813 	memcpy(&event->msg, msg, sizeof(*msg));
3814 	kthread_queue_work(port->wq, &event->work);
3815 }
3816 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
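/*
 * Usage sketch (illustrative; the driver context "chip" and the FIFO helper
 * are made-up names, not an in-tree API): a TCPC driver calls this from its
 * receive path after pulling a message out of the hardware RX FIFO, e.g.
 *
 *	struct pd_message msg;
 *
 *	my_tcpc_read_rx_fifo(chip, &msg);
 *	tcpm_pd_receive(chip->tcpm_port, &msg, TCPC_TX_SOP);
 *
 * The message is copied and handled asynchronously on the port's kthread
 * worker, and the GFP_ATOMIC allocation keeps the call usable from atomic
 * (interrupt) context.
 */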
3817 
3818 static int tcpm_pd_send_control(struct tcpm_port *port,
3819 				enum pd_ctrl_msg_type type,
3820 				enum tcpm_transmit_type tx_sop_type)
3821 {
3822 	struct pd_message msg;
3823 
3824 	memset(&msg, 0, sizeof(msg));
3825 	switch (tx_sop_type) {
3826 	case TCPC_TX_SOP_PRIME:
3827 		msg.header = PD_HEADER_LE(type,
3828 					  0,	/* Cable Plug Indicator for DFP/UFP */
3829 					  0,	/* Reserved */
3830 					  port->negotiated_rev,
3831 					  port->message_id_prime,
3832 					  0);
3833 		break;
3834 	case TCPC_TX_SOP:
3835 		msg.header = PD_HEADER_LE(type,
3836 					  port->pwr_role,
3837 					  port->data_role,
3838 					  port->negotiated_rev,
3839 					  port->message_id,
3840 					  0);
3841 		break;
3842 	default:
3843 		msg.header = PD_HEADER_LE(type,
3844 					  port->pwr_role,
3845 					  port->data_role,
3846 					  port->negotiated_rev,
3847 					  port->message_id,
3848 					  0);
3849 		break;
3850 	}
3851 
3852 	return tcpm_pd_transmit(port, tx_sop_type, &msg);
3853 }
3854 
3855 /*
3856  * Send queued message without affecting state.
3857  * Return true if state machine should go back to sleep,
3858  * false otherwise.
3859  */
3860 static bool tcpm_send_queued_message(struct tcpm_port *port)
3861 {
3862 	enum pd_msg_request queued_message;
3863 	int ret;
3864 
3865 	do {
3866 		queued_message = port->queued_message;
3867 		port->queued_message = PD_MSG_NONE;
3868 
3869 		switch (queued_message) {
3870 		case PD_MSG_CTRL_WAIT:
3871 			tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
3872 			break;
3873 		case PD_MSG_CTRL_REJECT:
3874 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
3875 			break;
3876 		case PD_MSG_CTRL_NOT_SUPP:
3877 			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
3878 			break;
3879 		case PD_MSG_DATA_SINK_CAP:
3880 			ret = tcpm_pd_send_sink_caps(port);
3881 			if (ret < 0) {
3882 				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3883 				tcpm_set_state(port, SNK_SOFT_RESET, 0);
3884 			}
3885 			tcpm_ams_finish(port);
3886 			break;
3887 		case PD_MSG_DATA_SOURCE_CAP:
3888 			ret = tcpm_pd_send_source_caps(port);
3889 			if (ret < 0) {
3890 				tcpm_log(port,
3891 					 "Unable to send src caps, ret=%d",
3892 					 ret);
3893 				tcpm_set_state(port, SOFT_RESET_SEND, 0);
3894 			} else if (port->pwr_role == TYPEC_SOURCE) {
3895 				tcpm_ams_finish(port);
3896 				tcpm_set_state(port, HARD_RESET_SEND,
3897 					       PD_T_SENDER_RESPONSE);
3898 			} else {
3899 				tcpm_ams_finish(port);
3900 			}
3901 			break;
3902 		case PD_MSG_DATA_REV:
3903 			ret = tcpm_pd_send_revision(port);
3904 			if (ret)
3905 				tcpm_log(port,
3906 					 "Unable to send revision msg, ret=%d",
3907 					 ret);
3908 			tcpm_ams_finish(port);
3909 			break;
3910 		default:
3911 			break;
3912 		}
3913 	} while (port->queued_message != PD_MSG_NONE);
3914 
3915 	if (port->delayed_state != INVALID_STATE) {
3916 		if (ktime_after(port->delayed_runtime, ktime_get())) {
3917 			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3918 									  ktime_get())));
3919 			return true;
3920 		}
3921 		port->delayed_state = INVALID_STATE;
3922 	}
3923 	return false;
3924 }
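/*
 * For example, if a PD_MSG_CTRL_WAIT was queued while a delayed transition
 * is still pending (delayed_state != INVALID_STATE with delayed_runtime in
 * the future), the Wait Message is sent immediately above and the delayed
 * work is re-armed, so the state machine goes back to sleep instead of
 * running the pending transition early.
 */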
3925 
3926 static int tcpm_pd_check_request(struct tcpm_port *port)
3927 {
3928 	u32 pdo, rdo = port->sink_request;
3929 	unsigned int max, op, pdo_max, index;
3930 	enum pd_pdo_type type;
3931 
3932 	index = rdo_index(rdo);
3933 	if (!index || index > port->nr_src_pdo)
3934 		return -EINVAL;
3935 
3936 	pdo = port->src_pdo[index - 1];
3937 	type = pdo_type(pdo);
3938 	switch (type) {
3939 	case PDO_TYPE_FIXED:
3940 	case PDO_TYPE_VAR:
3941 		max = rdo_max_current(rdo);
3942 		op = rdo_op_current(rdo);
3943 		pdo_max = pdo_max_current(pdo);
3944 
3945 		if (op > pdo_max)
3946 			return -EINVAL;
3947 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3948 			return -EINVAL;
3949 
3950 		if (type == PDO_TYPE_FIXED)
3951 			tcpm_log(port,
3952 				 "Requested %u mV, %u mA for %u / %u mA",
3953 				 pdo_fixed_voltage(pdo), pdo_max, op, max);
3954 		else
3955 			tcpm_log(port,
3956 				 "Requested %u -> %u mV, %u mA for %u / %u mA",
3957 				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3958 				 pdo_max, op, max);
3959 		break;
3960 	case PDO_TYPE_BATT:
3961 		max = rdo_max_power(rdo);
3962 		op = rdo_op_power(rdo);
3963 		pdo_max = pdo_max_power(pdo);
3964 
3965 		if (op > pdo_max)
3966 			return -EINVAL;
3967 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3968 			return -EINVAL;
3969 		tcpm_log(port,
3970 			 "Requested %u -> %u mV, %u mW for %u / %u mW",
3971 			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3972 			 pdo_max, op, max);
3973 		break;
3974 	default:
3975 		return -EINVAL;
3976 	}
3977 
3978 	port->op_vsafe5v = index == 1;
3979 
3980 	return 0;
3981 }
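/*
 * Worked example for tcpm_pd_check_request() (values assumed for
 * illustration): if src_pdo[1] is a Fixed 9 V / 3 A PDO and the sink's RDO
 * selects object position 2 with op = 2000 mA and max = 3000 mA, both are
 * within pdo_max_current() (3000 mA), so the request is accepted.  An RDO
 * with op = 3500 mA, or with max = 3500 mA and RDO_CAP_MISMATCH clear,
 * would be rejected with -EINVAL.
 */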
3982 
3983 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3984 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3985 
3986 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3987 			      int *src_pdo)
3988 {
3989 	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3990 		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3991 		     min_snk_mv = 0;
3992 	int ret = -EINVAL;
3993 
3994 	port->pps_data.supported = false;
3995 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3996 	power_supply_changed(port->psy);
3997 
3998 	/*
3999 	 * Select the source PDO providing the most power which has a
4000 	 * matching sink cap.
4001 	 */
4002 	for (i = 0; i < port->nr_source_caps; i++) {
4003 		u32 pdo = port->source_caps[i];
4004 		enum pd_pdo_type type = pdo_type(pdo);
4005 
4006 		switch (type) {
4007 		case PDO_TYPE_FIXED:
4008 			max_src_mv = pdo_fixed_voltage(pdo);
4009 			min_src_mv = max_src_mv;
4010 			break;
4011 		case PDO_TYPE_BATT:
4012 		case PDO_TYPE_VAR:
4013 			max_src_mv = pdo_max_voltage(pdo);
4014 			min_src_mv = pdo_min_voltage(pdo);
4015 			break;
4016 		case PDO_TYPE_APDO:
4017 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
4018 				port->pps_data.supported = true;
4019 				port->usb_type =
4020 					POWER_SUPPLY_USB_TYPE_PD_PPS;
4021 				power_supply_changed(port->psy);
4022 			}
4023 			continue;
4024 		default:
4025 			tcpm_log(port, "Invalid source PDO type, ignoring");
4026 			continue;
4027 		}
4028 
4029 		switch (type) {
4030 		case PDO_TYPE_FIXED:
4031 		case PDO_TYPE_VAR:
4032 			src_ma = pdo_max_current(pdo);
4033 			src_mw = src_ma * min_src_mv / 1000;
4034 			break;
4035 		case PDO_TYPE_BATT:
4036 			src_mw = pdo_max_power(pdo);
4037 			break;
4038 		case PDO_TYPE_APDO:
4039 			continue;
4040 		default:
4041 			tcpm_log(port, "Invalid source PDO type, ignoring");
4042 			continue;
4043 		}
4044 
4045 		for (j = 0; j < port->nr_snk_pdo; j++) {
4046 			pdo = port->snk_pdo[j];
4047 
4048 			switch (pdo_type(pdo)) {
4049 			case PDO_TYPE_FIXED:
4050 				max_snk_mv = pdo_fixed_voltage(pdo);
4051 				min_snk_mv = max_snk_mv;
4052 				break;
4053 			case PDO_TYPE_BATT:
4054 			case PDO_TYPE_VAR:
4055 				max_snk_mv = pdo_max_voltage(pdo);
4056 				min_snk_mv = pdo_min_voltage(pdo);
4057 				break;
4058 			case PDO_TYPE_APDO:
4059 				continue;
4060 			default:
4061 				tcpm_log(port, "Invalid sink PDO type, ignoring");
4062 				continue;
4063 			}
4064 
4065 			if (max_src_mv <= max_snk_mv &&
4066 				min_src_mv >= min_snk_mv) {
4067 				/* Prefer higher voltages if available */
4068 				if ((src_mw == max_mw && min_src_mv > max_mv) ||
4069 							src_mw > max_mw) {
4070 					*src_pdo = i;
4071 					*sink_pdo = j;
4072 					max_mw = src_mw;
4073 					max_mv = min_src_mv;
4074 					ret = 0;
4075 				}
4076 			}
4077 		}
4078 	}
4079 
4080 	return ret;
4081 }
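/*
 * Worked example (source/sink capabilities assumed for illustration): if the
 * partner offers Fixed 5 V / 3 A (15 W) and Fixed 9 V / 2 A (18 W), and the
 * local sink PDOs include Fixed 5 V and Fixed 9 V entries, both offers match
 * a sink cap and the 9 V PDO wins because 18 W > 15 W.  If two matching
 * offers provide equal power (e.g. 5 V / 3 A and 15 V / 1 A, both 15 W), the
 * higher-voltage one is preferred.
 */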
4082 
4083 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
4084 {
4085 	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
4086 	unsigned int src_pdo = 0;
4087 	u32 pdo, src;
4088 
4089 	for (i = 1; i < port->nr_source_caps; ++i) {
4090 		pdo = port->source_caps[i];
4091 
4092 		switch (pdo_type(pdo)) {
4093 		case PDO_TYPE_APDO:
4094 			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
4095 				tcpm_log(port, "Not PPS APDO (source), ignoring");
4096 				continue;
4097 			}
4098 
4099 			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
4100 			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
4101 				continue;
4102 
4103 			src_ma = pdo_pps_apdo_max_current(pdo);
4104 			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
4105 			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
4106 			if (op_mw > max_temp_mw) {
4107 				src_pdo = i;
4108 				max_temp_mw = op_mw;
4109 			}
4110 			break;
4111 		default:
4112 			tcpm_log(port, "Not APDO type (source), ignoring");
4113 			continue;
4114 		}
4115 	}
4116 
4117 	if (src_pdo) {
4118 		src = port->source_caps[src_pdo];
4119 
4120 		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
4121 		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
4122 		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
4123 		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
4124 						 port->pps_data.req_op_curr);
4125 	}
4126 
4127 	return src_pdo;
4128 }
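/*
 * Worked example (APDO values assumed for illustration): with
 * pps_data.req_out_volt = 9000 mV and req_op_curr = 3000 mA, a 3.3-5.9 V /
 * 3 A APDO is skipped (9 V is outside its range) while a 3.3-11 V / 3 A APDO
 * qualifies with op_mw = 27000; that APDO's index is returned and the request
 * limits (req_min_volt/req_max_volt/req_max_curr) are taken from it.
 */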
4129 
4130 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
4131 {
4132 	unsigned int mv, ma, mw, flags;
4133 	unsigned int max_ma, max_mw;
4134 	enum pd_pdo_type type;
4135 	u32 pdo, matching_snk_pdo;
4136 	int src_pdo_index = 0;
4137 	int snk_pdo_index = 0;
4138 	int ret;
4139 
4140 	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
4141 	if (ret < 0)
4142 		return ret;
4143 
4144 	pdo = port->source_caps[src_pdo_index];
4145 	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
4146 	type = pdo_type(pdo);
4147 
4148 	switch (type) {
4149 	case PDO_TYPE_FIXED:
4150 		mv = pdo_fixed_voltage(pdo);
4151 		break;
4152 	case PDO_TYPE_BATT:
4153 	case PDO_TYPE_VAR:
4154 		mv = pdo_min_voltage(pdo);
4155 		break;
4156 	default:
4157 		tcpm_log(port, "Invalid PDO selected!");
4158 		return -EINVAL;
4159 	}
4160 
4161 	/* Select maximum available current within the sink pdo's limit */
4162 	if (type == PDO_TYPE_BATT) {
4163 		mw = min_power(pdo, matching_snk_pdo);
4164 		ma = 1000 * mw / mv;
4165 	} else {
4166 		ma = min_current(pdo, matching_snk_pdo);
4167 		mw = ma * mv / 1000;
4168 	}
4169 
4170 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4171 
4172 	/* Set mismatch bit if offered power is less than operating power */
4173 	max_ma = ma;
4174 	max_mw = mw;
4175 	if (mw < port->operating_snk_mw) {
4176 		flags |= RDO_CAP_MISMATCH;
4177 		if (type == PDO_TYPE_BATT &&
4178 		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
4179 			max_mw = pdo_max_power(matching_snk_pdo);
4180 		else if (pdo_max_current(matching_snk_pdo) >
4181 			 pdo_max_current(pdo))
4182 			max_ma = pdo_max_current(matching_snk_pdo);
4183 	}
4184 
4185 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4186 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4187 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4188 		 port->polarity);
4189 
4190 	if (type == PDO_TYPE_BATT) {
4191 		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
4192 
4193 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
4194 			 src_pdo_index, mv, mw,
4195 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4196 	} else {
4197 		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
4198 
4199 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
4200 			 src_pdo_index, mv, ma,
4201 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4202 	}
4203 
4204 	port->req_current_limit = ma;
4205 	port->req_supply_voltage = mv;
4206 
4207 	return 0;
4208 }
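/*
 * Worked example (values assumed for illustration): if the selected source
 * PDO is Fixed 9 V / 2 A and the matching sink PDO is Fixed 9 V / 3 A, then
 * ma = min(2000, 3000) = 2000 and mw = 18000.  With operating_snk_mw = 27000
 * the offer is insufficient, so RDO_CAP_MISMATCH is set and max_ma is raised
 * to the sink PDO's 3000 mA, yielding RDO_FIXED(index + 1, 2000, 3000, flags).
 */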
4209 
4210 static int tcpm_pd_send_request(struct tcpm_port *port)
4211 {
4212 	struct pd_message msg;
4213 	int ret;
4214 	u32 rdo;
4215 
4216 	ret = tcpm_pd_build_request(port, &rdo);
4217 	if (ret < 0)
4218 		return ret;
4219 
4220 	/*
4221 	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
4222 	 * It is safer to modify the threshold here.
4223 	 */
4224 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4225 
4226 	memset(&msg, 0, sizeof(msg));
4227 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4228 				  port->pwr_role,
4229 				  port->data_role,
4230 				  port->negotiated_rev,
4231 				  port->message_id, 1);
4232 	msg.payload[0] = cpu_to_le32(rdo);
4233 
4234 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4235 }
4236 
4237 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
4238 {
4239 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
4240 	unsigned int src_pdo_index;
4241 
4242 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
4243 	if (!src_pdo_index)
4244 		return -EOPNOTSUPP;
4245 
4246 	max_mv = port->pps_data.req_max_volt;
4247 	max_ma = port->pps_data.req_max_curr;
4248 	out_mv = port->pps_data.req_out_volt;
4249 	op_ma = port->pps_data.req_op_curr;
4250 
4251 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4252 
4253 	op_mw = (op_ma * out_mv) / 1000;
4254 	if (op_mw < port->operating_snk_mw) {
4255 		/*
4256 		 * Try raising current to meet power needs. If that's not enough
4257 		 * then try upping the voltage. If that's still not enough
4258 		 * then we've obviously chosen a PPS APDO which really isn't
4259 		 * suitable so abandon ship.
4260 		 */
4261 		op_ma = (port->operating_snk_mw * 1000) / out_mv;
4262 		if ((port->operating_snk_mw * 1000) % out_mv)
4263 			++op_ma;
4264 		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
4265 
4266 		if (op_ma > max_ma) {
4267 			op_ma = max_ma;
4268 			out_mv = (port->operating_snk_mw * 1000) / op_ma;
4269 			if ((port->operating_snk_mw * 1000) % op_ma)
4270 				++out_mv;
4271 			out_mv += RDO_PROG_VOLT_MV_STEP -
4272 				  (out_mv % RDO_PROG_VOLT_MV_STEP);
4273 
4274 			if (out_mv > max_mv) {
4275 				tcpm_log(port, "Invalid PPS APDO selected!");
4276 				return -EINVAL;
4277 			}
4278 		}
4279 	}
4280 
4281 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4282 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4283 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4284 		 port->polarity);
4285 
4286 	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
4287 
4288 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
4289 		 src_pdo_index, out_mv, op_ma);
4290 
4291 	port->pps_data.req_op_curr = op_ma;
4292 	port->pps_data.req_out_volt = out_mv;
4293 
4294 	return 0;
4295 }
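/*
 * Worked example (values assumed for illustration, and assuming
 * RDO_PROG_CURR_MA_STEP is the 50 mA PPS programming granularity): with
 * out_mv = 5000, op_ma = 1000 and operating_snk_mw = 10000, the initial 5 W
 * request is too low, so the current is raised to 10000 * 1000 / 5000 =
 * 2000 mA and then rounded up to the next step (2050 mA).  Only if the
 * required current would exceed the APDO's maximum is the voltage raised
 * instead.
 */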
4296 
4297 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
4298 {
4299 	struct pd_message msg;
4300 	int ret;
4301 	u32 rdo;
4302 
4303 	ret = tcpm_pd_build_pps_request(port, &rdo);
4304 	if (ret < 0)
4305 		return ret;
4306 
4307 	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
4308 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4309 
4310 	memset(&msg, 0, sizeof(msg));
4311 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4312 				  port->pwr_role,
4313 				  port->data_role,
4314 				  port->negotiated_rev,
4315 				  port->message_id, 1);
4316 	msg.payload[0] = cpu_to_le32(rdo);
4317 
4318 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4319 }
4320 
4321 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
4322 {
4323 	int ret;
4324 
4325 	if (enable && port->vbus_charge)
4326 		return -EINVAL;
4327 
4328 	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
4329 
4330 	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
4331 	if (ret < 0)
4332 		return ret;
4333 
4334 	port->vbus_source = enable;
4335 	return 0;
4336 }
4337 
4338 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
4339 {
4340 	int ret;
4341 
4342 	if (charge && port->vbus_source)
4343 		return -EINVAL;
4344 
4345 	if (charge != port->vbus_charge) {
4346 		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
4347 		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
4348 					   charge);
4349 		if (ret < 0)
4350 			return ret;
4351 	}
4352 	port->vbus_charge = charge;
4353 	power_supply_changed(port->psy);
4354 	return 0;
4355 }
4356 
4357 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4358 {
4359 	int ret;
4360 
4361 	if (!port->tcpc->start_toggling)
4362 		return false;
4363 
4364 	tcpm_log_force(port, "Start toggling");
4365 	ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4366 	return ret == 0;
4367 }
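/*
 * If the TCPC does not implement start_toggling(), this returns false and the
 * callers in SRC_UNATTACHED/SNK_UNATTACHED fall back to presenting Rp or Rd
 * directly and, for DRP ports, alternating between the two unattached states
 * on the PD_T_DRP_SRC/PD_T_DRP_SNK timers.
 */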
4368 
4369 static int tcpm_init_vbus(struct tcpm_port *port)
4370 {
4371 	int ret;
4372 
4373 	ret = port->tcpc->set_vbus(port->tcpc, false, false);
4374 	port->vbus_source = false;
4375 	port->vbus_charge = false;
4376 	return ret;
4377 }
4378 
4379 static int tcpm_init_vconn(struct tcpm_port *port)
4380 {
4381 	int ret;
4382 
4383 	ret = port->tcpc->set_vconn(port->tcpc, false);
4384 	port->vconn_role = TYPEC_SINK;
4385 	return ret;
4386 }
4387 
4388 static void tcpm_typec_connect(struct tcpm_port *port)
4389 {
4390 	struct typec_partner *partner;
4391 
4392 	if (!port->connected) {
4393 		port->connected = true;
4394 		/* Make sure we don't report stale identity information */
4395 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4396 		port->partner_desc.usb_pd = port->pd_capable;
4397 		if (tcpm_port_is_debug(port))
4398 			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4399 		else if (tcpm_port_is_audio(port))
4400 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4401 		else
4402 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4403 		partner = typec_register_partner(port->typec_port, &port->partner_desc);
4404 		if (IS_ERR(partner)) {
4405 			dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4406 			return;
4407 		}
4408 
4409 		port->partner = partner;
4410 		typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4411 	}
4412 }
4413 
4414 static int tcpm_src_attach(struct tcpm_port *port)
4415 {
4416 	enum typec_cc_polarity polarity =
4417 				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
4418 							 : TYPEC_POLARITY_CC1;
4419 	int ret;
4420 
4421 	if (port->attached)
4422 		return 0;
4423 
4424 	ret = tcpm_set_polarity(port, polarity);
4425 	if (ret < 0)
4426 		return ret;
4427 
4428 	tcpm_enable_auto_vbus_discharge(port, true);
4429 
4430 	/*
4431 	 * USB Type-C specification, version 1.2,
4432 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
4433 	 * Enable VCONN only if the non-RD port is set to RA.
4434 	 */
4435 	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
4436 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
4437 		ret = tcpm_set_vconn(port, true);
4438 		if (ret < 0)
4439 			return ret;
4440 	}
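	/*
	 * For example, with the partner's Rd detected on CC1 (polarity CC1)
	 * and Ra detected on CC2, VCONN is sourced on the unused CC pin so
	 * that an electronically marked or active cable can power its plug.
	 */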
4441 
4442 	ret = tcpm_set_vbus(port, true);
4443 	if (ret < 0)
4444 		goto out_disable_vconn;
4445 
4446 	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, TYPEC_SOURCE,
4447 			     tcpm_data_role_for_source(port));
4448 	if (ret < 0)
4449 		goto out_disable_vbus;
4450 
4451 	if (port->pd_supported) {
4452 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
4453 		if (ret < 0)
4454 			goto out_disable_mux;
4455 	}
4456 
4457 	port->pd_capable = false;
4458 
4459 	port->partner = NULL;
4460 
4461 	port->attached = true;
4462 	port->send_discover = true;
4463 	port->send_discover_prime = false;
4464 
4465 	return 0;
4466 
4467 out_disable_mux:
4468 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4469 		     TYPEC_ORIENTATION_NONE);
4470 out_disable_vbus:
4471 	tcpm_set_vbus(port, false);
4472 out_disable_vconn:
4473 	tcpm_set_vconn(port, false);
4474 
4475 	return ret;
4476 }
4477 
4478 static void tcpm_typec_disconnect(struct tcpm_port *port)
4479 {
4480 	/*
4481 	 * Unregister the plug/cable outside of the port->connected check
4482 	 * because the cable can be discovered before the SRC_READY/SNK_READY
4483 	 * states, where port->connected is set.
4484 	 */
4485 	typec_unregister_plug(port->plug_prime);
4486 	typec_unregister_cable(port->cable);
4487 	port->plug_prime = NULL;
4488 	port->cable = NULL;
4489 	if (port->connected) {
4490 		if (port->partner) {
4491 			typec_partner_set_usb_power_delivery(port->partner, NULL);
4492 			typec_unregister_partner(port->partner);
4493 			port->partner = NULL;
4494 		}
4495 		port->connected = false;
4496 	}
4497 }
4498 
4499 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4500 {
4501 	struct pd_mode_data *modep = &port->mode_data;
4502 	struct pd_mode_data *modep_prime = &port->mode_data_prime;
4503 	int i;
4504 
4505 	for (i = 0; i < modep->altmodes; i++) {
4506 		typec_unregister_altmode(port->partner_altmode[i]);
4507 		port->partner_altmode[i] = NULL;
4508 	}
4509 	for (i = 0; i < modep_prime->altmodes; i++) {
4510 		typec_unregister_altmode(port->plug_prime_altmode[i]);
4511 		port->plug_prime_altmode[i] = NULL;
4512 	}
4513 
4514 	memset(modep, 0, sizeof(*modep));
4515 	memset(modep_prime, 0, sizeof(*modep_prime));
4516 }
4517 
4518 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
4519 {
4520 	tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
4521 
4522 	if (port->tcpc->set_partner_usb_comm_capable)
4523 		port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
4524 }
4525 
4526 static void tcpm_reset_port(struct tcpm_port *port)
4527 {
4528 	tcpm_enable_auto_vbus_discharge(port, false);
4529 	port->in_ams = false;
4530 	port->ams = NONE_AMS;
4531 	port->vdm_sm_running = false;
4532 	tcpm_unregister_altmodes(port);
4533 	tcpm_typec_disconnect(port);
4534 	port->attached = false;
4535 	port->pd_capable = false;
4536 	port->pps_data.supported = false;
4537 	tcpm_set_partner_usb_comm_capable(port, false);
4538 
4539 	/*
4540 	 * First Rx ID should be 0; set this to a sentinel of -1 so that
4541 	 * tcpm_pd_rx_handler() can check whether it has seen a message before.
4542 	 */
4543 	port->rx_msgid = -1;
4544 	port->rx_msgid_prime = -1;
4545 
4546 	port->tcpc->set_pd_rx(port->tcpc, false);
4547 	tcpm_init_vbus(port);	/* also disables charging */
4548 	tcpm_init_vconn(port);
4549 	tcpm_set_current_limit(port, 0, 0);
4550 	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
4551 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4552 		     TYPEC_ORIENTATION_NONE);
4553 	tcpm_set_attached_state(port, false);
4554 	port->try_src_count = 0;
4555 	port->try_snk_count = 0;
4556 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
4557 	power_supply_changed(port->psy);
4558 	port->nr_sink_caps = 0;
4559 	port->sink_cap_done = false;
4560 	if (port->tcpc->enable_frs)
4561 		port->tcpc->enable_frs(port->tcpc, false);
4562 
4563 	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
4564 	port->partner_sink_caps = NULL;
4565 	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4566 	port->partner_source_caps = NULL;
4567 	usb_power_delivery_unregister(port->partner_pd);
4568 	port->partner_pd = NULL;
4569 }
4570 
4571 static void tcpm_detach(struct tcpm_port *port)
4572 {
4573 	if (tcpm_port_is_disconnected(port))
4574 		port->hard_reset_count = 0;
4575 
4576 	if (!port->attached)
4577 		return;
4578 
4579 	if (port->tcpc->set_bist_data) {
4580 		tcpm_log(port, "disable BIST MODE TESTDATA");
4581 		port->tcpc->set_bist_data(port->tcpc, false);
4582 	}
4583 
4584 	tcpm_reset_port(port);
4585 }
4586 
4587 static void tcpm_src_detach(struct tcpm_port *port)
4588 {
4589 	tcpm_detach(port);
4590 }
4591 
4592 static int tcpm_snk_attach(struct tcpm_port *port)
4593 {
4594 	int ret;
4595 
4596 	if (port->attached)
4597 		return 0;
4598 
4599 	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
4600 				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
4601 	if (ret < 0)
4602 		return ret;
4603 
4604 	tcpm_enable_auto_vbus_discharge(port, true);
4605 
4606 	ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
4607 			     TYPEC_SINK, tcpm_data_role_for_sink(port));
4608 	if (ret < 0)
4609 		return ret;
4610 
4611 	port->pd_capable = false;
4612 
4613 	port->partner = NULL;
4614 
4615 	port->attached = true;
4616 	port->send_discover = true;
4617 	port->send_discover_prime = false;
4618 
4619 	return 0;
4620 }
4621 
4622 static void tcpm_snk_detach(struct tcpm_port *port)
4623 {
4624 	tcpm_detach(port);
4625 }
4626 
4627 static int tcpm_acc_attach(struct tcpm_port *port)
4628 {
4629 	int ret;
4630 	enum typec_role role;
4631 	enum typec_data_role data;
4632 	int state = TYPEC_STATE_USB;
4633 
4634 	if (port->attached)
4635 		return 0;
4636 
4637 	role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
4638 	data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
4639 				       : tcpm_data_role_for_source(port);
4640 
4641 	if (tcpm_port_is_audio(port))
4642 		state = TYPEC_MODE_AUDIO;
4643 
4644 	if (tcpm_port_is_debug(port))
4645 		state = TYPEC_MODE_DEBUG;
4646 
4647 	ret = tcpm_set_roles(port, true, state, role, data);
4648 	if (ret < 0)
4649 		return ret;
4650 
4651 	port->partner = NULL;
4652 
4653 	tcpm_typec_connect(port);
4654 
4655 	port->attached = true;
4656 
4657 	return 0;
4658 }
4659 
4660 static void tcpm_acc_detach(struct tcpm_port *port)
4661 {
4662 	tcpm_detach(port);
4663 }
4664 
4665 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
4666 {
4667 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
4668 		return HARD_RESET_SEND;
4669 	if (port->pd_capable)
4670 		return ERROR_RECOVERY;
4671 	if (port->pwr_role == TYPEC_SOURCE)
4672 		return SRC_UNATTACHED;
4673 	if (port->state == SNK_WAIT_CAPABILITIES ||
4674 	    port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
4675 		return SNK_READY;
4676 	return SNK_UNATTACHED;
4677 }
4678 
4679 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4680 {
4681 	if (port->port_type == TYPEC_PORT_DRP) {
4682 		if (port->pwr_role == TYPEC_SOURCE)
4683 			return SRC_UNATTACHED;
4684 		else
4685 			return SNK_UNATTACHED;
4686 	} else if (port->port_type == TYPEC_PORT_SRC) {
4687 		return SRC_UNATTACHED;
4688 	}
4689 
4690 	return SNK_UNATTACHED;
4691 }
4692 
4693 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4694 {
4695 	if (port->swap_pending) {
4696 		port->swap_status = result;
4697 		port->swap_pending = false;
4698 		port->non_pd_role_swap = false;
4699 		complete(&port->swap_complete);
4700 	}
4701 }
4702 
4703 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4704 {
4705 	switch (cc) {
4706 	case TYPEC_CC_RP_1_5:
4707 		return TYPEC_PWR_MODE_1_5A;
4708 	case TYPEC_CC_RP_3_0:
4709 		return TYPEC_PWR_MODE_3_0A;
4710 	case TYPEC_CC_RP_DEF:
4711 	default:
4712 		return TYPEC_PWR_MODE_USB;
4713 	}
4714 }
4715 
4716 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4717 {
4718 	switch (opmode) {
4719 	case TYPEC_PWR_MODE_USB:
4720 		return TYPEC_CC_RP_DEF;
4721 	case TYPEC_PWR_MODE_1_5A:
4722 		return TYPEC_CC_RP_1_5;
4723 	case TYPEC_PWR_MODE_3_0A:
4724 	case TYPEC_PWR_MODE_PD:
4725 	default:
4726 		return TYPEC_CC_RP_3_0;
4727 	}
4728 }
4729 
4730 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4731 {
4732 	if (!port->partner)
4733 		return;
4734 
4735 	switch (port->negotiated_rev) {
4736 	case PD_REV30:
4737 		break;
4738 	/*
4739 	 * 6.4.4.2.3 Structured VDM Version
4740 	 * 2.0 states "At this time, there is only one version (1.0) defined.
4741 	 * This field Shall be set to zero to indicate Version 1.0."
4742 	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4743 	 * To ensure that we follow the Power Delivery revision we are currently
4744 	 * operating on, downgrade the SVDM version to the highest one supported
4745 	 * by the Power Delivery revision.
4746 	 */
4747 	case PD_REV20:
4748 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4749 		break;
4750 	default:
4751 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4752 		break;
4753 	}
4754 }
4755 
4756 static void tcpm_set_initial_negotiated_rev(struct tcpm_port *port)
4757 {
4758 	switch (port->pd_rev.rev_major) {
4759 	case PD_CAP_REV10:
4760 		port->negotiated_rev = PD_REV10;
4761 		break;
4762 	case PD_CAP_REV20:
4763 		port->negotiated_rev = PD_REV20;
4764 		break;
4765 	case PD_CAP_REV30:
4766 		port->negotiated_rev = PD_REV30;
4767 		break;
4768 	default:
4769 		port->negotiated_rev = PD_MAX_REV;
4770 		break;
4771 	}
4772 	port->negotiated_rev_prime = port->negotiated_rev;
4773 }
4774 
4775 static void run_state_machine(struct tcpm_port *port)
4776 {
4777 	int ret;
4778 	enum typec_pwr_opmode opmode;
4779 	unsigned int msecs;
4780 	enum tcpm_state upcoming_state;
4781 
4782 	if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4783 		port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4784 						port->state == SRC_UNATTACHED) ||
4785 					       (port->enter_state == SNK_ATTACH_WAIT &&
4786 						port->state == SNK_UNATTACHED) ||
4787 					       (port->enter_state == SNK_DEBOUNCED &&
4788 						port->state == SNK_UNATTACHED));
4789 
4790 	port->enter_state = port->state;
4791 	switch (port->state) {
4792 	case TOGGLING:
4793 		break;
4794 	case CHECK_CONTAMINANT:
4795 		port->tcpc->check_contaminant(port->tcpc);
4796 		break;
4797 	/* SRC states */
4798 	case SRC_UNATTACHED:
4799 		if (!port->non_pd_role_swap)
4800 			tcpm_swap_complete(port, -ENOTCONN);
4801 		tcpm_src_detach(port);
4802 		if (port->potential_contaminant) {
4803 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4804 			break;
4805 		}
4806 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4807 			tcpm_set_state(port, TOGGLING, 0);
4808 			break;
4809 		}
4810 		tcpm_set_cc(port, tcpm_rp_cc(port));
4811 		if (port->port_type == TYPEC_PORT_DRP)
4812 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4813 		break;
4814 	case SRC_ATTACH_WAIT:
4815 		if (tcpm_port_is_debug(port))
4816 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4817 				       port->timings.cc_debounce_time);
4818 		else if (tcpm_port_is_audio(port))
4819 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4820 				       port->timings.cc_debounce_time);
4821 		else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4822 			tcpm_set_state(port,
4823 				       tcpm_try_snk(port) ? SNK_TRY
4824 							  : SRC_ATTACHED,
4825 				       port->timings.cc_debounce_time);
4826 		break;
4827 
4828 	case SNK_TRY:
4829 		port->try_snk_count++;
4830 		/*
4831 		 * Requirements:
4832 		 * - Do not drive vconn or vbus
4833 		 * - Terminate CC pins (both) to Rd
4834 		 * Action:
4835 		 * - Wait for tDRPTry (PD_T_DRP_TRY).
4836 		 *   Until then, ignore any state changes.
4837 		 */
4838 		tcpm_set_cc(port, TYPEC_CC_RD);
4839 		tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4840 		break;
4841 	case SNK_TRY_WAIT:
4842 		if (tcpm_port_is_sink(port)) {
4843 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4844 		} else {
4845 			tcpm_set_state(port, SRC_TRYWAIT, 0);
4846 			port->max_wait = 0;
4847 		}
4848 		break;
4849 	case SNK_TRY_WAIT_DEBOUNCE:
4850 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4851 			       PD_T_TRY_CC_DEBOUNCE);
4852 		break;
4853 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4854 		if (port->vbus_present && tcpm_port_is_sink(port))
4855 			tcpm_set_state(port, SNK_ATTACHED, 0);
4856 		else
4857 			port->max_wait = 0;
4858 		break;
4859 	case SRC_TRYWAIT:
4860 		tcpm_set_cc(port, tcpm_rp_cc(port));
4861 		if (port->max_wait == 0) {
4862 			port->max_wait = jiffies +
4863 					 msecs_to_jiffies(PD_T_DRP_TRY);
4864 			tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4865 				       PD_T_DRP_TRY);
4866 		} else {
4867 			if (time_is_after_jiffies(port->max_wait))
4868 				tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4869 					       jiffies_to_msecs(port->max_wait -
4870 								jiffies));
4871 			else
4872 				tcpm_set_state(port, SNK_UNATTACHED, 0);
4873 		}
4874 		break;
4875 	case SRC_TRYWAIT_DEBOUNCE:
4876 		tcpm_set_state(port, SRC_ATTACHED, port->timings.cc_debounce_time);
4877 		break;
4878 	case SRC_TRYWAIT_UNATTACHED:
4879 		tcpm_set_state(port, SNK_UNATTACHED, 0);
4880 		break;
4881 
4882 	case SRC_ATTACHED:
4883 		ret = tcpm_src_attach(port);
4884 		tcpm_set_state(port, SRC_UNATTACHED,
4885 			       ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4886 		break;
4887 	case SRC_STARTUP:
4888 		opmode =  tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4889 		typec_set_pwr_opmode(port->typec_port, opmode);
4890 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
4891 		port->caps_count = 0;
4892 		tcpm_set_initial_negotiated_rev(port);
4893 		port->message_id = 0;
4894 		port->message_id_prime = 0;
4895 		port->rx_msgid = -1;
4896 		port->rx_msgid_prime = -1;
4897 		port->explicit_contract = false;
4898 		/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4899 		if (port->ams == POWER_ROLE_SWAP ||
4900 		    port->ams == FAST_ROLE_SWAP)
4901 			tcpm_ams_finish(port);
4902 		if (!port->pd_supported) {
4903 			tcpm_set_state(port, SRC_READY, 0);
4904 			break;
4905 		}
4906 		port->upcoming_state = SRC_SEND_CAPABILITIES;
4907 		tcpm_ams_start(port, POWER_NEGOTIATION);
4908 		break;
4909 	case SRC_SEND_CAPABILITIES:
4910 		port->caps_count++;
4911 		if (port->caps_count > PD_N_CAPS_COUNT) {
4912 			tcpm_set_state(port, SRC_READY, 0);
4913 			break;
4914 		}
4915 		ret = tcpm_pd_send_source_caps(port);
4916 		if (ret < 0) {
4917 			if (tcpm_can_communicate_sop_prime(port) &&
4918 			    IS_ERR_OR_NULL(port->cable))
4919 				tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4920 			else
4921 				tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4922 					       PD_T_SEND_SOURCE_CAP);
4923 		} else {
4924 			/*
4925 			 * Per standard, we should clear the reset counter here.
4926 			 * However, that can result in state machine hang-ups.
4927 			 * Reset it only in READY state to improve stability.
4928 			 */
4929 			/* port->hard_reset_count = 0; */
4930 			port->caps_count = 0;
4931 			port->pd_capable = true;
4932 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4933 					    PD_T_SENDER_RESPONSE);
4934 		}
4935 		break;
4936 	case SRC_SEND_CAPABILITIES_TIMEOUT:
4937 		/*
4938 		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4939 		 *
4940 		 * PD 2.0 sinks are supposed to accept src-capabilities with a
4941 		 * 3.0 header and simply ignore any src PDOs which the sink does
4942 		 * not understand such as PPS but some 2.0 sinks instead ignore
4943 		 * the entire PD_DATA_SOURCE_CAP message, causing contract
4944 		 * negotiation to fail.
4945 		 *
4946 		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4947 		 * sending src-capabilities with a lower PD revision to
4948 		 * make these broken sinks work.
4949 		 */
4950 		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4951 			tcpm_set_state(port, HARD_RESET_SEND, 0);
4952 		} else if (port->negotiated_rev > PD_REV20) {
4953 			port->negotiated_rev--;
4954 			port->hard_reset_count = 0;
4955 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4956 		} else {
4957 			tcpm_set_state(port, hard_reset_state(port), 0);
4958 		}
4959 		break;
4960 	case SRC_NEGOTIATE_CAPABILITIES:
4961 		ret = tcpm_pd_check_request(port);
4962 		if (ret < 0) {
4963 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4964 			if (!port->explicit_contract) {
4965 				tcpm_set_state(port,
4966 					       SRC_WAIT_NEW_CAPABILITIES, 0);
4967 			} else {
4968 				tcpm_set_state(port, SRC_READY, 0);
4969 			}
4970 		} else {
4971 			tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4972 			tcpm_set_partner_usb_comm_capable(port,
4973 							  !!(port->sink_request & RDO_USB_COMM));
4974 			tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4975 				       PD_T_SRC_TRANSITION);
4976 		}
4977 		break;
4978 	case SRC_TRANSITION_SUPPLY:
4979 		/* XXX: regulator_set_voltage(vbus, ...) */
4980 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4981 		port->explicit_contract = true;
4982 		typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4983 		port->pwr_opmode = TYPEC_PWR_MODE_PD;
4984 		tcpm_set_state_cond(port, SRC_READY, 0);
4985 		break;
4986 	case SRC_READY:
4987 #if 1
4988 		port->hard_reset_count = 0;
4989 #endif
4990 		port->try_src_count = 0;
4991 
4992 		tcpm_swap_complete(port, 0);
4993 		tcpm_typec_connect(port);
4994 
4995 		if (port->ams != NONE_AMS)
4996 			tcpm_ams_finish(port);
4997 		if (port->next_ams != NONE_AMS) {
4998 			port->ams = port->next_ams;
4999 			port->next_ams = NONE_AMS;
5000 		}
5001 
5002 		/*
5003 		 * If previous AMS is interrupted, switch to the upcoming
5004 		 * state.
5005 		 */
5006 		if (port->upcoming_state != INVALID_STATE) {
5007 			upcoming_state = port->upcoming_state;
5008 			port->upcoming_state = INVALID_STATE;
5009 			tcpm_set_state(port, upcoming_state, 0);
5010 			break;
5011 		}
5012 
5013 		/*
5014 		 * 6.4.4.3.1 Discover Identity
5015 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
5016 		 * Explicit Contract."
5017 		 *
5018 		 * Discover Identity on SOP' should normally complete before the
5019 		 * ready state, but if it is deferred until after a Vconn Swap
5020 		 * that follows Discover Identity on SOP, the discovery process
5021 		 * can be run here as well.
5022 		 */
5023 		if (port->explicit_contract) {
5024 			if (port->send_discover_prime) {
5025 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
5026 			} else {
5027 				port->tx_sop_type = TCPC_TX_SOP;
5028 				tcpm_set_initial_svdm_version(port);
5029 			}
5030 			mod_send_discover_delayed_work(port, 0);
5031 		} else {
5032 			port->send_discover = false;
5033 			port->send_discover_prime = false;
5034 		}
5035 
5036 		/*
5037 		 * 6.3.5
5038 		 * Sending ping messages is not necessary if
5039 		 * - the source operates at vSafe5V
5040 		 * or
5041 		 * - The system is not operating in PD mode
5042 		 * or
5043 		 * - Both partners are connected using a Type-C connector
5044 		 *
5045 		 * There is no actual need to send Ping messages since the local
5046 		 * port is Type-C, and the spec does not clearly say whether PD
5047 		 * is possible when Type-C is connected to Type-A/B.
5048 		 */
5049 		break;
5050 	case SRC_WAIT_NEW_CAPABILITIES:
5051 		/* Nothing to do... */
5052 		break;
5053 
5054 	/* SNK states */
5055 	case SNK_UNATTACHED:
5056 		if (!port->non_pd_role_swap)
5057 			tcpm_swap_complete(port, -ENOTCONN);
5058 		tcpm_pps_complete(port, -ENOTCONN);
5059 		tcpm_snk_detach(port);
5060 		if (port->potential_contaminant) {
5061 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
5062 			break;
5063 		}
5064 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
5065 			tcpm_set_state(port, TOGGLING, 0);
5066 			break;
5067 		}
5068 		tcpm_set_cc(port, TYPEC_CC_RD);
5069 		if (port->port_type == TYPEC_PORT_DRP)
5070 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
5071 		break;
5072 	case SNK_ATTACH_WAIT:
5073 		if (tcpm_port_is_debug(port))
5074 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
5075 				       PD_T_CC_DEBOUNCE);
5076 		else if (tcpm_port_is_audio(port))
5077 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
5078 				       PD_T_CC_DEBOUNCE);
5079 		else if ((port->cc1 == TYPEC_CC_OPEN &&
5080 		     port->cc2 != TYPEC_CC_OPEN) ||
5081 		    (port->cc1 != TYPEC_CC_OPEN &&
5082 		     port->cc2 == TYPEC_CC_OPEN))
5083 			tcpm_set_state(port, SNK_DEBOUNCED,
5084 				       port->timings.cc_debounce_time);
5085 		else if (tcpm_port_is_disconnected(port))
5086 			tcpm_set_state(port, SNK_UNATTACHED,
5087 				       PD_T_PD_DEBOUNCE);
5088 		break;
5089 	case SNK_DEBOUNCED:
5090 		if (tcpm_port_is_disconnected(port))
5091 			tcpm_set_state(port, SNK_UNATTACHED,
5092 				       PD_T_PD_DEBOUNCE);
5093 		else if (tcpm_port_is_debug(port))
5094 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
5095 				       PD_T_CC_DEBOUNCE);
5096 		else if (tcpm_port_is_audio(port))
5097 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
5098 				       PD_T_CC_DEBOUNCE);
5099 		else if (port->vbus_present)
5100 			tcpm_set_state(port,
5101 				       tcpm_try_src(port) ? SRC_TRY
5102 							  : SNK_ATTACHED,
5103 				       0);
5104 		break;
5105 	case SRC_TRY:
5106 		port->try_src_count++;
5107 		tcpm_set_cc(port, tcpm_rp_cc(port));
5108 		port->max_wait = 0;
5109 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
5110 		break;
5111 	case SRC_TRY_WAIT:
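		/*
		 * max_wait tracks the end of the tDRPTry window in jiffies: it is
		 * started on first entry, and on re-entry only the remaining part
		 * of the window is waited for.
		 */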
5112 		if (port->max_wait == 0) {
5113 			port->max_wait = jiffies +
5114 					 msecs_to_jiffies(PD_T_DRP_TRY);
5115 			msecs = PD_T_DRP_TRY;
5116 		} else {
5117 			if (time_is_after_jiffies(port->max_wait))
5118 				msecs = jiffies_to_msecs(port->max_wait -
5119 							 jiffies);
5120 			else
5121 				msecs = 0;
5122 		}
5123 		tcpm_set_state(port, SNK_TRYWAIT, msecs);
5124 		break;
5125 	case SRC_TRY_DEBOUNCE:
5126 		tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
5127 		break;
5128 	case SNK_TRYWAIT:
5129 		tcpm_set_cc(port, TYPEC_CC_RD);
5130 		tcpm_set_state(port, SNK_TRYWAIT_VBUS, port->timings.cc_debounce_time);
5131 		break;
5132 	case SNK_TRYWAIT_VBUS:
5133 		/*
5134 		 * TCPM stays in this state indefinitely waiting for VBUS,
5135 		 * as long as Rp does not disappear for longer than a period
5136 		 * of tPDDebounce.
5137 		 */
5138 		if (port->vbus_present && tcpm_port_is_sink(port)) {
5139 			tcpm_set_state(port, SNK_ATTACHED, 0);
5140 			break;
5141 		}
5142 		if (!tcpm_port_is_sink(port))
5143 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5144 		break;
5145 	case SNK_TRYWAIT_DEBOUNCE:
5146 		tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
5147 		break;
5148 	case SNK_ATTACHED:
5149 		ret = tcpm_snk_attach(port);
5150 		if (ret < 0)
5151 			tcpm_set_state(port, SNK_UNATTACHED, 0);
5152 		else
5153 			/*
5154 			 * For Type C port controllers that use Battery Charging
5155 			 * Detection (based on BCv1.2 spec) to detect USB
5156 			 * charger type, add a delay of "snk_bc12_cmpletion_time"
5157 			 * before transitioning to SNK_STARTUP to allow BC1.2
5158 			 * detection to complete before PD is eventually enabled
5159 			 * in later states.
5160 			 */
5161 			tcpm_set_state(port, SNK_STARTUP,
5162 				       port->timings.snk_bc12_cmpletion_time);
5163 		break;
5164 	case SNK_STARTUP:
5165 		opmode =  tcpm_get_pwr_opmode(port->polarity ?
5166 					      port->cc2 : port->cc1);
5167 		typec_set_pwr_opmode(port->typec_port, opmode);
5168 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
5169 		tcpm_set_initial_negotiated_rev(port);
5170 		port->message_id = 0;
5171 		port->message_id_prime = 0;
5172 		port->rx_msgid = -1;
5173 		port->rx_msgid_prime = -1;
5174 		port->explicit_contract = false;
5175 
5176 		if (port->ams == POWER_ROLE_SWAP ||
5177 		    port->ams == FAST_ROLE_SWAP)
5178 			/* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
5179 			tcpm_ams_finish(port);
5180 
5181 		tcpm_set_state(port, SNK_DISCOVERY, 0);
5182 		break;
5183 	case SNK_DISCOVERY:
5184 		if (port->vbus_present) {
5185 			u32 current_lim = tcpm_get_current_limit(port);
5186 
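			/*
			 * With a slow charger loop, cap the implicit-contract current
			 * at pSnkStdby (PD_P_SNK_STDBY_MW, 2.5 W) at 5 V, i.e. 500 mA,
			 * while capabilities are still being negotiated.
			 */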
5187 			if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
5188 				current_lim = PD_P_SNK_STDBY_MW / 5;
5189 			tcpm_set_current_limit(port, current_lim, 5000);
5190 			/* Do not sink vbus if the operational current is 0 mA */
5191 			tcpm_set_charge(port, !port->pd_supported ||
5192 					pdo_max_current(port->snk_pdo[0]));
5193 
5194 			if (!port->pd_supported)
5195 				tcpm_set_state(port, SNK_READY, 0);
5196 			else
5197 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5198 			break;
5199 		}
5200 		/*
5201 		 * For DRP, timeouts differ. Also, handling is supposed to be
5202 		 * different and much more complex (dead battery detection;
5203 		 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5204 		 */
5205 		tcpm_set_state(port, hard_reset_state(port),
5206 			       port->port_type == TYPEC_PORT_DRP ?
5207 					PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5208 		break;
5209 	case SNK_DISCOVERY_DEBOUNCE:
5210 		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5211 			       port->timings.cc_debounce_time);
5212 		break;
5213 	case SNK_DISCOVERY_DEBOUNCE_DONE:
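		/*
		 * If still attached as a sink and the deferred discovery deadline
		 * (delayed_runtime) has not passed yet, go back to SNK_DISCOVERY for
		 * the remaining time; otherwise treat the port as detached.
		 */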
5214 		if (!tcpm_port_is_disconnected(port) &&
5215 		    tcpm_port_is_sink(port) &&
5216 		    ktime_after(port->delayed_runtime, ktime_get())) {
5217 			tcpm_set_state(port, SNK_DISCOVERY,
5218 				       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5219 			break;
5220 		}
5221 		tcpm_set_state(port, unattached_state(port), 0);
5222 		break;
5223 	case SNK_WAIT_CAPABILITIES:
5224 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
5225 		if (ret < 0) {
5226 			tcpm_set_state(port, SNK_READY, 0);
5227 			break;
5228 		}
5229 		/*
5230 		 * If VBUS has never been low, and we time out waiting
5231 		 * for source cap, try a soft reset first, in case we
5232 		 * were already in a stable contract before this boot.
5233 		 * Do this only once.
5234 		 */
5235 		if (port->vbus_never_low) {
5236 			port->vbus_never_low = false;
5237 			upcoming_state = SNK_SOFT_RESET;
5238 		} else {
5239 			if (!port->self_powered)
5240 				upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
5241 			else
5242 				upcoming_state = hard_reset_state(port);
5243 		}
5244 
5245 		tcpm_set_state(port, upcoming_state,
5246 			       port->timings.sink_wait_cap_time);
5247 		break;
5248 	case SNK_WAIT_CAPABILITIES_TIMEOUT:
5249 		/*
5250 		 * There are some USB PD sources in the field, which do not
5251 		 * properly implement the specification and fail to start
5252 		 * sending Source Capability messages after a soft reset. The
5253 		 * specification suggests to do a hard reset when no Source
5254 		 * capability message is received within PD_T_SINK_WAIT_CAP,
5255 		 * but that might effectively kill the machine's power source.
5256 		 *
5257 		 * This slightly diverges from the specification and tries to
5258 		 * recover from this by explicitly asking for the capabilities
5259 		 * using the Get_Source_Cap control message before falling back
5260 		 * to a hard reset. The control message should also be supported
5261 		 * and handled by all USB PD source and dual role devices
5262 		 * according to the specification.
5263 		 */
5264 		if (tcpm_pd_send_control(port, PD_CTRL_GET_SOURCE_CAP, TCPC_TX_SOP))
5265 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
5266 		else
5267 			tcpm_set_state(port, hard_reset_state(port),
5268 				       port->timings.sink_wait_cap_time);
5269 		break;
5270 	case SNK_NEGOTIATE_CAPABILITIES:
5271 		port->pd_capable = true;
5272 		tcpm_set_partner_usb_comm_capable(port,
5273 						  !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5274 		port->hard_reset_count = 0;
5275 		ret = tcpm_pd_send_request(port);
5276 		if (ret < 0) {
5277 			/* Restore back to the original state */
5278 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5279 							       port->pps_data.active,
5280 							       port->supply_voltage);
5281 			/* Let the Source send capabilities again. */
5282 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5283 		} else {
5284 			tcpm_set_state_cond(port, hard_reset_state(port),
5285 					    PD_T_SENDER_RESPONSE);
5286 		}
5287 		break;
5288 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
5289 		ret = tcpm_pd_send_pps_request(port);
5290 		if (ret < 0) {
5291 			/* Restore back to the original state */
5292 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5293 							       port->pps_data.active,
5294 							       port->supply_voltage);
5295 			port->pps_status = ret;
5296 			/*
5297 			 * If this was called due to updates to sink
5298 			 * capabilities, and pps is no longer valid, we should
5299 			 * safely fall back to a standard PDO.
5300 			 */
5301 			if (port->update_sink_caps)
5302 				tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5303 			else
5304 				tcpm_set_state(port, SNK_READY, 0);
5305 		} else {
5306 			tcpm_set_state_cond(port, hard_reset_state(port),
5307 					    PD_T_SENDER_RESPONSE);
5308 		}
5309 		break;
5310 	case SNK_TRANSITION_SINK:
5311 		/* From the USB PD spec:
5312 		 * "The Sink Shall transition to Sink Standby before a positive or
5313 		 * negative voltage transition of VBUS. During Sink Standby
5314 		 * the Sink Shall reduce its power draw to pSnkStdby."
5315 		 *
5316 		 * This is not applicable to PPS though as the port can continue
5317 		 * to draw negotiated power without switching to standby.
5318 		 */
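		/*
		 * Enter Sink Standby only when the requested voltage differs from
		 * the present one, PPS is not active, and the currently negotiated
		 * power (current_limit * supply_voltage) exceeds pSnkStdby. The
		 * standby current is pSnkStdby (2.5 W) divided by the present
		 * supply voltage.
		 */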
5319 		if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5320 		    port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5321 			u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5322 
5323 			tcpm_log(port, "Setting standby current %u mV @ %u mA",
5324 				 port->supply_voltage, stdby_ma);
5325 			tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
5326 		}
5327 		fallthrough;
5328 	case SNK_TRANSITION_SINK_VBUS:
5329 		tcpm_set_state(port, hard_reset_state(port),
5330 			       PD_T_PS_TRANSITION);
5331 		break;
5332 	case SNK_READY:
5333 		port->try_snk_count = 0;
5334 		port->update_sink_caps = false;
5335 		if (port->explicit_contract) {
5336 			typec_set_pwr_opmode(port->typec_port,
5337 					     TYPEC_PWR_MODE_PD);
5338 			port->pwr_opmode = TYPEC_PWR_MODE_PD;
5339 		}
5340 
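		/*
		 * If the partner turned out not to be PD capable, lift the pSnkStdby
		 * clamp applied in SNK_DISCOVERY for slow charger loops and go back
		 * to the current limit reported for the connection (typically
		 * derived from Rp).
		 */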
5341 		if (!port->pd_capable && port->slow_charger_loop)
5342 			tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5343 		tcpm_swap_complete(port, 0);
5344 		tcpm_typec_connect(port);
5345 		if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5346 			mod_enable_frs_delayed_work(port, 0);
5347 		tcpm_pps_complete(port, port->pps_status);
5348 
5349 		if (port->ams != NONE_AMS)
5350 			tcpm_ams_finish(port);
5351 		if (port->next_ams != NONE_AMS) {
5352 			port->ams = port->next_ams;
5353 			port->next_ams = NONE_AMS;
5354 		}
5355 
5356 		/*
5357 		 * If previous AMS is interrupted, switch to the upcoming
5358 		 * state.
5359 		 */
5360 		if (port->upcoming_state != INVALID_STATE) {
5361 			upcoming_state = port->upcoming_state;
5362 			port->upcoming_state = INVALID_STATE;
5363 			tcpm_set_state(port, upcoming_state, 0);
5364 			break;
5365 		}
5366 
5367 		/*
5368 		 * 6.4.4.3.1 Discover Identity
5369 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
5370 		 * Explicit Contract."
5371 		 *
5372 		 * Discover Identity on SOP' should happen prior to the
5373 		 * ready state, but if it is done after a Vconn Swap following Discover
5374 		 * Identity on SOP, then the discovery process can be run here
5375 		 * as well.
5376 		 */
5377 		if (port->explicit_contract) {
5378 			if (port->send_discover_prime) {
5379 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
5380 			} else {
5381 				port->tx_sop_type = TCPC_TX_SOP;
5382 				tcpm_set_initial_svdm_version(port);
5383 			}
5384 			mod_send_discover_delayed_work(port, 0);
5385 		} else {
5386 			port->send_discover = false;
5387 			port->send_discover_prime = false;
5388 		}
5389 
5390 		power_supply_changed(port->psy);
5391 		break;
5392 
5393 	/* Accessory states */
5394 	case ACC_UNATTACHED:
5395 		tcpm_acc_detach(port);
5396 		if (port->port_type == TYPEC_PORT_SRC)
5397 			tcpm_set_state(port, SRC_UNATTACHED, 0);
5398 		else
5399 			tcpm_set_state(port, SNK_UNATTACHED, 0);
5400 		break;
5401 	case DEBUG_ACC_ATTACHED:
5402 	case AUDIO_ACC_ATTACHED:
5403 		ret = tcpm_acc_attach(port);
5404 		if (ret < 0)
5405 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5406 		break;
5407 	case DEBUG_ACC_DEBOUNCE:
5408 	case AUDIO_ACC_DEBOUNCE:
5409 		tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
5410 		break;
5411 
5412 	/* Hard_Reset states */
5413 	case HARD_RESET_SEND:
5414 		if (port->ams != NONE_AMS)
5415 			tcpm_ams_finish(port);
5416 		if (!port->self_powered && port->port_type == TYPEC_PORT_SNK)
5417 			dev_err(port->dev, "Initiating hard-reset, which might result in machine power-loss.\n");
5418 		/*
5419 		 * State machine will be directed to HARD_RESET_START,
5420 		 * thus set upcoming_state to INVALID_STATE.
5421 		 */
5422 		port->upcoming_state = INVALID_STATE;
5423 		tcpm_ams_start(port, HARD_RESET);
5424 		break;
5425 	case HARD_RESET_START:
5426 		port->sink_cap_done = false;
5427 		if (port->tcpc->enable_frs)
5428 			port->tcpc->enable_frs(port->tcpc, false);
5429 		port->hard_reset_count++;
5430 		port->tcpc->set_pd_rx(port->tcpc, false);
5431 		tcpm_unregister_altmodes(port);
5432 		port->nr_sink_caps = 0;
5433 		port->send_discover = true;
5434 		port->send_discover_prime = false;
5435 		if (port->pwr_role == TYPEC_SOURCE)
5436 			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5437 				       PD_T_PS_HARD_RESET);
5438 		else
5439 			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5440 		break;
5441 	case SRC_HARD_RESET_VBUS_OFF:
5442 		/*
5443 		 * 7.1.5 Response to Hard Resets
5444 		 * Hard Reset Signaling indicates a communication failure has occurred and the
5445 		 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5446 		 * drive VBUS to vSafe0V as shown in Figure 7-9.
5447 		 */
5448 		tcpm_set_vconn(port, false);
5449 		tcpm_set_vbus(port, false);
5450 		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
5451 			       tcpm_data_role_for_source(port));
5452 		/*
5453 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5454 		 * PD_T_SRC_RECOVER before turning vbus back on.
5455 		 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5456 		 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5457 		 * tells the Device Policy Manager to instruct the power supply to perform a
5458 		 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5459 		 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5460 		 * re-establish communication with the Sink and resume USB Default Operation.
5461 		 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5462 		 */
5463 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
5464 		break;
5465 	case SRC_HARD_RESET_VBUS_ON:
5466 		tcpm_set_vconn(port, true);
5467 		tcpm_set_vbus(port, true);
5468 		if (port->ams == HARD_RESET)
5469 			tcpm_ams_finish(port);
5470 		if (port->pd_supported)
5471 			port->tcpc->set_pd_rx(port->tcpc, true);
5472 		tcpm_set_attached_state(port, true);
5473 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5474 		break;
5475 	case SNK_HARD_RESET_SINK_OFF:
5476 		/* Do not discharge/disconnect during hard reset */
5477 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5478 		memset(&port->pps_data, 0, sizeof(port->pps_data));
5479 		tcpm_set_vconn(port, false);
5480 		if (port->pd_capable)
5481 			tcpm_set_charge(port, false);
5482 		tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
5483 			       tcpm_data_role_for_sink(port));
5484 		/*
5485 		 * VBUS may or may not toggle, depending on the adapter.
5486 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5487 		 * directly after timeout.
5488 		 */
5489 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5490 		break;
5491 	case SNK_HARD_RESET_WAIT_VBUS:
5492 		if (port->ams == HARD_RESET)
5493 			tcpm_ams_finish(port);
5494 		/* Assume we're disconnected if VBUS doesn't come back. */
5495 		tcpm_set_state(port, SNK_UNATTACHED,
5496 			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5497 		break;
5498 	case SNK_HARD_RESET_SINK_ON:
5499 		/* Note: There is no guarantee that VBUS is on in this state */
5500 		/*
5501 		 * XXX:
5502 		 * The specification suggests that dual mode ports in sink
5503 		 * mode should transition to state PE_SRC_Transition_to_default.
5504 		 * See USB power delivery specification chapter 8.3.3.6.1.3.
5505 		 * This would mean to
5506 		 * - turn off VCONN, reset power supply
5507 		 * - request hardware reset
5508 		 * - turn on VCONN
5509 		 * - Transition to state PE_Src_Startup
5510 		 * SNK only ports shall transition to state Snk_Startup
5511 		 * (see chapter 8.3.3.3.8).
5512 		 * Similar, dual-mode ports in source mode should transition
5513 		 * to PE_SNK_Transition_to_default.
5514 		 */
5515 		if (port->pd_capable) {
5516 			tcpm_set_current_limit(port,
5517 					       tcpm_get_current_limit(port),
5518 					       5000);
5519 			/* Do not sink vbus if the operational current is 0 mA */
5520 			tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5521 		}
5522 		if (port->ams == HARD_RESET)
5523 			tcpm_ams_finish(port);
5524 		tcpm_set_attached_state(port, true);
5525 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5526 		tcpm_set_state(port, SNK_STARTUP, 0);
5527 		break;
5528 
5529 	/* Soft_Reset states */
5530 	case SOFT_RESET:
5531 		port->message_id = 0;
5532 		port->rx_msgid = -1;
5533 		/* remove existing capabilities */
5534 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5535 		port->partner_source_caps = NULL;
5536 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5537 		tcpm_ams_finish(port);
5538 		if (port->pwr_role == TYPEC_SOURCE) {
5539 			port->upcoming_state = SRC_SEND_CAPABILITIES;
5540 			tcpm_ams_start(port, POWER_NEGOTIATION);
5541 		} else {
5542 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5543 		}
5544 		break;
5545 	case SRC_SOFT_RESET_WAIT_SNK_TX:
5546 	case SNK_SOFT_RESET:
5547 		if (port->ams != NONE_AMS)
5548 			tcpm_ams_finish(port);
5549 		port->upcoming_state = SOFT_RESET_SEND;
5550 		tcpm_ams_start(port, SOFT_RESET_AMS);
5551 		break;
5552 	case SOFT_RESET_SEND:
5553 		/*
5554 		 * Power Delivery 3.0 Section 6.3.13
5555 		 *
5556 		 * A Soft_Reset Message Shall be targeted at a specific entity
5557 		 * depending on the type of SOP* packet used.
5558 		 */
5559 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5560 			port->message_id_prime = 0;
5561 			port->rx_msgid_prime = -1;
5562 			tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5563 			tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5564 		} else {
5565 			port->message_id = 0;
5566 			port->rx_msgid = -1;
5567 			/* remove existing capabilities */
5568 			usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5569 			port->partner_source_caps = NULL;
5570 			if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5571 				tcpm_set_state_cond(port, hard_reset_state(port), 0);
5572 			else
5573 				tcpm_set_state_cond(port, hard_reset_state(port),
5574 						    PD_T_SENDER_RESPONSE);
5575 		}
5576 		break;
5577 
5578 	/* DR_Swap states */
5579 	case DR_SWAP_SEND:
5580 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
5581 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5582 			port->send_discover = true;
5583 			port->send_discover_prime = false;
5584 		}
5585 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5586 				    PD_T_SENDER_RESPONSE);
5587 		break;
5588 	case DR_SWAP_ACCEPT:
5589 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5590 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5591 			port->send_discover = true;
5592 			port->send_discover_prime = false;
5593 		}
5594 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5595 		break;
5596 	case DR_SWAP_SEND_TIMEOUT:
5597 		tcpm_swap_complete(port, -ETIMEDOUT);
5598 		port->send_discover = false;
5599 		port->send_discover_prime = false;
5600 		tcpm_ams_finish(port);
5601 		tcpm_set_state(port, ready_state(port), 0);
5602 		break;
5603 	case DR_SWAP_CHANGE_DR:
5604 		tcpm_unregister_altmodes(port);
5605 		if (port->data_role == TYPEC_HOST)
5606 			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5607 				       TYPEC_DEVICE);
5608 		else
5609 			tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
5610 				       TYPEC_HOST);
5611 		tcpm_ams_finish(port);
5612 		tcpm_set_state(port, ready_state(port), 0);
5613 		break;
5614 
5615 	case FR_SWAP_SEND:
5616 		if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5617 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5618 			break;
5619 		}
5620 		tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5621 		break;
5622 	case FR_SWAP_SEND_TIMEOUT:
5623 		tcpm_set_state(port, ERROR_RECOVERY, 0);
5624 		break;
5625 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5626 		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5627 		break;
5628 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5629 		if (port->vbus_source)
5630 			tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5631 		else
5632 			tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5633 		break;
5634 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5635 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5636 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5637 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5638 			break;
5639 		}
5640 		tcpm_set_cc(port, tcpm_rp_cc(port));
5641 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5642 		break;
5643 
5644 	/* PR_Swap states */
5645 	case PR_SWAP_ACCEPT:
5646 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5647 		tcpm_set_state(port, PR_SWAP_START, 0);
5648 		break;
5649 	case PR_SWAP_SEND:
5650 		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5651 		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5652 				    PD_T_SENDER_RESPONSE);
5653 		break;
5654 	case PR_SWAP_SEND_TIMEOUT:
5655 		tcpm_swap_complete(port, -ETIMEDOUT);
5656 		tcpm_set_state(port, ready_state(port), 0);
5657 		break;
5658 	case PR_SWAP_START:
5659 		tcpm_apply_rc(port);
5660 		if (port->pwr_role == TYPEC_SOURCE)
5661 			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5662 				       PD_T_SRC_TRANSITION);
5663 		else
5664 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5665 		break;
5666 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5667 		/*
5668 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5669 		 * as this is not a disconnect.
5670 		 */
5671 		tcpm_set_vbus(port, false);
5672 		port->explicit_contract = false;
5673 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
5674 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5675 			       PD_T_SRCSWAPSTDBY);
5676 		break;
5677 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5678 		tcpm_set_cc(port, TYPEC_CC_RD);
5679 		/* allow CC debounce */
5680 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5681 			       port->timings.cc_debounce_time);
5682 		break;
5683 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5684 		/*
5685 		 * USB-PD standard, 6.2.1.4, Port Power Role:
5686 		 * "During the Power Role Swap Sequence, for the initial Source
5687 		 * Port, the Port Power Role field shall be set to Sink in the
5688 		 * PS_RDY Message indicating that the initial Source’s power
5689 		 * supply is turned off"
5690 		 */
5691 		tcpm_set_pwr_role(port, TYPEC_SINK);
5692 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5693 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5694 			break;
5695 		}
5696 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5697 		break;
5698 	case PR_SWAP_SRC_SNK_SINK_ON:
5699 		tcpm_enable_auto_vbus_discharge(port, true);
5700 		/* Set the vbus disconnect threshold for implicit contract */
5701 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5702 		tcpm_set_state(port, SNK_STARTUP, 0);
5703 		break;
5704 	case PR_SWAP_SNK_SRC_SINK_OFF:
5705 		/* will be source, remove existing capabilities */
5706 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5707 		port->partner_source_caps = NULL;
5708 		/*
5709 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5710 		 * as this is not a disconnect.
5711 		 */
5712 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5713 						       port->pps_data.active, 0);
5714 		tcpm_set_charge(port, false);
5715 		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5716 		break;
5717 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5718 		tcpm_enable_auto_vbus_discharge(port, true);
5719 		tcpm_set_cc(port, tcpm_rp_cc(port));
5720 		tcpm_set_vbus(port, true);
5721 		/*
5722 		 * Allow time for VBUS ramp-up; must be < tNewSrc.
5723 		 * Also, this window overlaps with CC debounce.
5724 		 * So wait for the max of the two, which is PD_T_NEWSRC.
5725 		 */
5726 		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5727 			       PD_T_NEWSRC);
5728 		break;
5729 	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5730 		/*
5731 		 * USB PD standard, 6.2.1.4:
5732 		 * "Subsequent Messages initiated by the Policy Engine,
5733 		 * such as the PS_RDY Message sent to indicate that Vbus
5734 		 * is ready, will have the Port Power Role field set to
5735 		 * Source."
5736 		 */
5737 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5738 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5739 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5740 		break;
5741 
5742 	case VCONN_SWAP_ACCEPT:
5743 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5744 		tcpm_ams_finish(port);
5745 		tcpm_set_state(port, VCONN_SWAP_START, 0);
5746 		break;
5747 	case VCONN_SWAP_SEND:
5748 		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5749 		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5750 			       PD_T_SENDER_RESPONSE);
5751 		break;
5752 	case VCONN_SWAP_SEND_TIMEOUT:
5753 		tcpm_swap_complete(port, -ETIMEDOUT);
5754 		tcpm_set_state(port, ready_state(port), 0);
5755 		break;
5756 	case VCONN_SWAP_START:
5757 		if (port->vconn_role == TYPEC_SOURCE)
5758 			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5759 		else
5760 			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5761 		break;
5762 	case VCONN_SWAP_WAIT_FOR_VCONN:
5763 		tcpm_set_state(port, hard_reset_state(port),
5764 			       PD_T_VCONN_SOURCE_ON);
5765 		break;
5766 	case VCONN_SWAP_TURN_ON_VCONN:
5767 		ret = tcpm_set_vconn(port, true);
5768 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5769 		/*
5770 		 * USB PD 3.0 Section 6.4.4.3.1
5771 		 *
5772 		 * Note that a Cable Plug or VPD will not be ready for PD
5773 		 * Communication until tVCONNStable after VCONN has been applied
5774 		 */
5775 		if (!ret)
5776 			tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5777 				       PD_T_VCONN_STABLE);
5778 		else
5779 			tcpm_set_state(port, ready_state(port), 0);
5780 		break;
5781 	case VCONN_SWAP_TURN_OFF_VCONN:
5782 		tcpm_set_vconn(port, false);
5783 		tcpm_set_state(port, ready_state(port), 0);
5784 		break;
5785 	case VCONN_SWAP_SEND_SOFT_RESET:
5786 		tcpm_swap_complete(port, port->swap_status);
5787 		if (tcpm_can_communicate_sop_prime(port)) {
5788 			port->tx_sop_type = TCPC_TX_SOP_PRIME;
5789 			port->upcoming_state = SOFT_RESET_SEND;
5790 			tcpm_ams_start(port, SOFT_RESET_AMS);
5791 		} else {
5792 			tcpm_set_state(port, ready_state(port), 0);
5793 		}
5794 		break;
5795 
5796 	case DR_SWAP_CANCEL:
5797 	case PR_SWAP_CANCEL:
5798 	case VCONN_SWAP_CANCEL:
5799 		tcpm_swap_complete(port, port->swap_status);
5800 		if (port->pwr_role == TYPEC_SOURCE)
5801 			tcpm_set_state(port, SRC_READY, 0);
5802 		else
5803 			tcpm_set_state(port, SNK_READY, 0);
5804 		break;
5805 	case FR_SWAP_CANCEL:
5806 		if (port->pwr_role == TYPEC_SOURCE)
5807 			tcpm_set_state(port, SRC_READY, 0);
5808 		else
5809 			tcpm_set_state(port, SNK_READY, 0);
5810 		break;
5811 
5812 	case BIST_RX:
5813 		switch (BDO_MODE_MASK(port->bist_request)) {
5814 		case BDO_MODE_CARRIER2:
5815 			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5816 			tcpm_set_state(port, unattached_state(port),
5817 				       PD_T_BIST_CONT_MODE);
5818 			break;
5819 		case BDO_MODE_TESTDATA:
5820 			if (port->tcpc->set_bist_data) {
5821 				tcpm_log(port, "Enable BIST MODE TESTDATA");
5822 				port->tcpc->set_bist_data(port->tcpc, true);
5823 			}
5824 			break;
5825 		default:
5826 			break;
5827 		}
5828 		break;
5829 	case GET_STATUS_SEND:
5830 		tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5831 		tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5832 			       PD_T_SENDER_RESPONSE);
5833 		break;
5834 	case GET_STATUS_SEND_TIMEOUT:
5835 		tcpm_set_state(port, ready_state(port), 0);
5836 		break;
5837 	case GET_PPS_STATUS_SEND:
5838 		tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5839 		tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5840 			       PD_T_SENDER_RESPONSE);
5841 		break;
5842 	case GET_PPS_STATUS_SEND_TIMEOUT:
5843 		tcpm_set_state(port, ready_state(port), 0);
5844 		break;
5845 	case GET_SINK_CAP:
5846 		tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5847 		tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5848 		break;
5849 	case GET_SINK_CAP_TIMEOUT:
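		/*
		 * The partner did not answer; mark the sink-capability query as
		 * done so that the FRS enable work does not keep retrying it.
		 */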
5850 		port->sink_cap_done = true;
5851 		tcpm_set_state(port, ready_state(port), 0);
5852 		break;
5853 	case ERROR_RECOVERY:
5854 		tcpm_swap_complete(port, -EPROTO);
5855 		tcpm_pps_complete(port, -EPROTO);
5856 		tcpm_set_state(port, PORT_RESET, 0);
5857 		break;
5858 	case PORT_RESET:
5859 		tcpm_reset_port(port);
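		/*
		 * A self-powered port presents open CC lines for the duration of
		 * the reset; otherwise advertise Rd or Rp depending on whether the
		 * default state is sink or source.
		 */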
5860 		if (port->self_powered)
5861 			tcpm_set_cc(port, TYPEC_CC_OPEN);
5862 		else
5863 			tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5864 				    TYPEC_CC_RD : tcpm_rp_cc(port));
5865 		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5866 			       PD_T_ERROR_RECOVERY);
5867 		break;
5868 	case PORT_RESET_WAIT_OFF:
5869 		tcpm_set_state(port,
5870 			       tcpm_default_state(port),
5871 			       port->vbus_present ? port->timings.ps_src_off_time : 0);
5872 		break;
5873 
5874 	/* AMS intermediate state */
5875 	case AMS_START:
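		/*
		 * If no state was queued for this AMS, simply return to the ready
		 * state of the current power role; otherwise hand over to the
		 * queued (upcoming) state.
		 */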
5876 		if (port->upcoming_state == INVALID_STATE) {
5877 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5878 				       SRC_READY : SNK_READY, 0);
5879 			break;
5880 		}
5881 
5882 		upcoming_state = port->upcoming_state;
5883 		port->upcoming_state = INVALID_STATE;
5884 		tcpm_set_state(port, upcoming_state, 0);
5885 		break;
5886 
5887 	/* Chunk state */
5888 	case CHUNK_NOT_SUPP:
5889 		tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5890 		tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5891 		break;
5892 
5893 	/* Cable states */
5894 	case SRC_VDM_IDENTITY_REQUEST:
5895 		port->send_discover_prime = true;
5896 		port->tx_sop_type = TCPC_TX_SOP_PRIME;
5897 		mod_send_discover_delayed_work(port, 0);
5898 		port->upcoming_state = SRC_SEND_CAPABILITIES;
5899 		break;
5900 
5901 	default:
5902 		WARN(1, "Unexpected port state %d\n", port->state);
5903 		break;
5904 	}
5905 }
5906 
5907 static void tcpm_state_machine_work(struct kthread_work *work)
5908 {
5909 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5910 	enum tcpm_state prev_state;
5911 
5912 	mutex_lock(&port->lock);
5913 	port->state_machine_running = true;
5914 
5915 	if (port->queued_message && tcpm_send_queued_message(port))
5916 		goto done;
5917 
5918 	/* If we were queued due to a delayed state change, update it now */
5919 	if (port->delayed_state) {
5920 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5921 			 tcpm_states[port->state],
5922 			 tcpm_states[port->delayed_state], port->delay_ms);
5923 		port->prev_state = port->state;
5924 		port->state = port->delayed_state;
5925 		port->delayed_state = INVALID_STATE;
5926 	}
5927 
5928 	/*
5929 	 * Continue running as long as we have (non-delayed) state changes
5930 	 * to make.
5931 	 */
5932 	do {
5933 		prev_state = port->state;
5934 		run_state_machine(port);
5935 		if (port->queued_message)
5936 			tcpm_send_queued_message(port);
5937 	} while (port->state != prev_state && !port->delayed_state);
5938 
5939 done:
5940 	port->state_machine_running = false;
5941 	mutex_unlock(&port->lock);
5942 }
5943 
5944 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
5945 			    enum typec_cc_status cc2)
5946 {
5947 	enum typec_cc_status old_cc1, old_cc2;
5948 	enum tcpm_state new_state;
5949 
5950 	old_cc1 = port->cc1;
5951 	old_cc2 = port->cc2;
5952 	port->cc1 = cc1;
5953 	port->cc2 = cc2;
5954 
5955 	tcpm_log_force(port,
5956 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5957 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5958 		       port->polarity,
5959 		       tcpm_port_is_disconnected(port) ? "disconnected"
5960 						       : "connected");
5961 
5962 	switch (port->state) {
5963 	case TOGGLING:
5964 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5965 		    tcpm_port_is_source(port))
5966 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5967 		else if (tcpm_port_is_sink(port))
5968 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5969 		break;
5970 	case CHECK_CONTAMINANT:
5971 		/* Wait for Toggling to be resumed */
5972 		break;
5973 	case SRC_UNATTACHED:
5974 	case ACC_UNATTACHED:
5975 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5976 		    tcpm_port_is_source(port))
5977 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5978 		break;
5979 	case SRC_ATTACH_WAIT:
5980 		if (tcpm_port_is_disconnected(port) ||
5981 		    tcpm_port_is_audio_detached(port))
5982 			tcpm_set_state(port, SRC_UNATTACHED, 0);
5983 		else if (cc1 != old_cc1 || cc2 != old_cc2)
5984 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5985 		break;
5986 	case SRC_ATTACHED:
5987 	case SRC_STARTUP:
5988 	case SRC_SEND_CAPABILITIES:
5989 	case SRC_READY:
5990 		if (tcpm_port_is_disconnected(port) ||
5991 		    !tcpm_port_is_source(port)) {
5992 			if (port->port_type == TYPEC_PORT_SRC)
5993 				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5994 			else
5995 				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5996 		}
5997 		break;
5998 	case SNK_UNATTACHED:
5999 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
6000 		    tcpm_port_is_sink(port))
6001 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
6002 		break;
6003 	case SNK_ATTACH_WAIT:
6004 		if ((port->cc1 == TYPEC_CC_OPEN &&
6005 		     port->cc2 != TYPEC_CC_OPEN) ||
6006 		    (port->cc1 != TYPEC_CC_OPEN &&
6007 		     port->cc2 == TYPEC_CC_OPEN))
6008 			new_state = SNK_DEBOUNCED;
6009 		else if (tcpm_port_is_disconnected(port))
6010 			new_state = SNK_UNATTACHED;
6011 		else
6012 			break;
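		/*
		 * Re-enter SNK_ATTACH_WAIT only if the CC change points at a
		 * different target than the pending delayed transition, so that
		 * debouncing is restarted toward the new target.
		 */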
6013 		if (new_state != port->delayed_state)
6014 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
6015 		break;
6016 	case SNK_DEBOUNCED:
6017 		if (tcpm_port_is_disconnected(port))
6018 			new_state = SNK_UNATTACHED;
6019 		else if (port->vbus_present)
6020 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
6021 		else
6022 			new_state = SNK_UNATTACHED;
6023 		if (new_state != port->delayed_state)
6024 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
6025 		break;
6026 	case SNK_READY:
6027 		/*
6028 		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
6029 		 * "A port that has entered into USB PD communications with the Source and
6030 		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
6031 		 * cable disconnect in addition to monitoring VBUS.
6032 		 *
6033 		 * A port that is monitoring the CC voltage for disconnect (but is not in
6034 		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
6035 		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
6036 		 * vRd-USB for tPDDebounce."
6037 		 *
6038 		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
6039 		 * away before vbus decays to disconnect threshold. Allow
6040 		 * disconnect to be driven by vbus disconnect when auto vbus
6041 		 * discharge is enabled.
6042 		 */
6043 		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
6044 			tcpm_set_state(port, unattached_state(port), 0);
6045 		else if (!port->pd_capable &&
6046 			 (cc1 != old_cc1 || cc2 != old_cc2))
6047 			tcpm_set_current_limit(port,
6048 					       tcpm_get_current_limit(port),
6049 					       5000);
6050 		break;
6051 
6052 	case AUDIO_ACC_ATTACHED:
6053 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6054 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
6055 		break;
6056 	case AUDIO_ACC_DEBOUNCE:
6057 		if (tcpm_port_is_audio(port))
6058 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
6059 		break;
6060 
6061 	case DEBUG_ACC_ATTACHED:
6062 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
6063 			tcpm_set_state(port, DEBUG_ACC_DEBOUNCE, 0);
6064 		break;
6065 
6066 	case DEBUG_ACC_DEBOUNCE:
6067 		if (tcpm_port_is_debug(port))
6068 			tcpm_set_state(port, DEBUG_ACC_ATTACHED, 0);
6069 		break;
6070 
6071 	case SNK_TRY:
6072 		/* Do nothing, waiting for timeout */
6073 		break;
6074 
6075 	case SNK_DISCOVERY:
6076 		/* CC line is unstable, wait for debounce */
6077 		if (tcpm_port_is_disconnected(port))
6078 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
6079 		break;
6080 	case SNK_DISCOVERY_DEBOUNCE:
6081 		break;
6082 
6083 	case SRC_TRYWAIT:
6084 		/* Hand over to state machine if needed */
6085 		if (!port->vbus_present && tcpm_port_is_source(port))
6086 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6087 		break;
6088 	case SRC_TRYWAIT_DEBOUNCE:
6089 		if (port->vbus_present || !tcpm_port_is_source(port))
6090 			tcpm_set_state(port, SRC_TRYWAIT, 0);
6091 		break;
6092 	case SNK_TRY_WAIT_DEBOUNCE:
6093 		if (!tcpm_port_is_sink(port)) {
6094 			port->max_wait = 0;
6095 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
6096 		}
6097 		break;
6098 	case SRC_TRY_WAIT:
6099 		if (tcpm_port_is_source(port))
6100 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
6101 		break;
6102 	case SRC_TRY_DEBOUNCE:
6103 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
6104 		break;
6105 	case SNK_TRYWAIT_DEBOUNCE:
6106 		if (tcpm_port_is_sink(port))
6107 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
6108 		break;
6109 	case SNK_TRYWAIT_VBUS:
6110 		if (!tcpm_port_is_sink(port))
6111 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
6112 		break;
6113 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6114 		if (!tcpm_port_is_sink(port))
6115 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
6116 		else
6117 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
6118 		break;
6119 	case SNK_TRYWAIT:
6120 		/* Do nothing, waiting for tCCDebounce */
6121 		break;
6122 	case PR_SWAP_SNK_SRC_SINK_OFF:
6123 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6124 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
6125 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
6126 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6127 		/*
6128 		 * CC state change is expected in PR_SWAP
6129 		 * Ignore it.
6130 		 */
6131 		break;
6132 	case FR_SWAP_SEND:
6133 	case FR_SWAP_SEND_TIMEOUT:
6134 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6135 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6136 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6137 		/* Do nothing, CC change expected */
6138 		break;
6139 
6140 	case PORT_RESET:
6141 	case PORT_RESET_WAIT_OFF:
6142 		/*
6143 		 * State set back to default mode once the timer completes.
6144 		 * Ignore CC changes here.
6145 		 */
6146 		break;
6147 	default:
6148 		/*
6149 		 * While acting as sink and auto vbus discharge is enabled, allow disconnect
6150 		 * to be driven by vbus disconnect.
6151 		 */
6152 		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
6153 							 port->auto_vbus_discharge_enabled))
6154 			tcpm_set_state(port, unattached_state(port), 0);
6155 		break;
6156 	}
6157 }
6158 
6159 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
6160 {
6161 	tcpm_log_force(port, "VBUS on");
6162 	port->vbus_present = true;
6163 	/*
6164 	 * vbus_present being true, i.e. the voltage at VBUS being greater than VSAFE5V,
6165 	 * implies that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
6166 	 */
6167 	port->vbus_vsafe0v = false;
6168 
6169 	switch (port->state) {
6170 	case SNK_TRANSITION_SINK_VBUS:
6171 		port->explicit_contract = true;
6172 		tcpm_set_state(port, SNK_READY, 0);
6173 		break;
6174 	case SNK_DISCOVERY:
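		/* Re-run SNK_DISCOVERY now that VBUS is present */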
6175 		tcpm_set_state(port, SNK_DISCOVERY, 0);
6176 		break;
6177 
6178 	case SNK_DEBOUNCED:
6179 		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
6180 							: SNK_ATTACHED,
6181 				       0);
6182 		break;
6183 	case SNK_HARD_RESET_WAIT_VBUS:
6184 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
6185 		break;
6186 	case SRC_ATTACHED:
6187 		tcpm_set_state(port, SRC_STARTUP, 0);
6188 		break;
6189 	case SRC_HARD_RESET_VBUS_ON:
6190 		tcpm_set_state(port, SRC_STARTUP, 0);
6191 		break;
6192 
6193 	case SNK_TRY:
6194 		/* Do nothing, waiting for timeout */
6195 		break;
6196 	case SRC_TRYWAIT:
6197 		/* Do nothing, waiting for Rd to be detected */
6198 		break;
6199 	case SRC_TRYWAIT_DEBOUNCE:
6200 		tcpm_set_state(port, SRC_TRYWAIT, 0);
6201 		break;
6202 	case SNK_TRY_WAIT_DEBOUNCE:
6203 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6204 		break;
6205 	case SNK_TRYWAIT:
6206 		/* Do nothing, waiting for tCCDebounce */
6207 		break;
6208 	case SNK_TRYWAIT_VBUS:
6209 		if (tcpm_port_is_sink(port))
6210 			tcpm_set_state(port, SNK_ATTACHED, 0);
6211 		break;
6212 	case SNK_TRYWAIT_DEBOUNCE:
6213 		/* Do nothing, waiting for Rp */
6214 		break;
6215 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6216 		if (port->vbus_present && tcpm_port_is_sink(port))
6217 			tcpm_set_state(port, SNK_ATTACHED, 0);
6218 		break;
6219 	case SRC_TRY_WAIT:
6220 	case SRC_TRY_DEBOUNCE:
6221 		/* Do nothing, waiting for sink detection */
6222 		break;
6223 	case FR_SWAP_SEND:
6224 	case FR_SWAP_SEND_TIMEOUT:
6225 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6226 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6227 		if (port->tcpc->frs_sourcing_vbus)
6228 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6229 		break;
6230 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6231 		if (port->tcpc->frs_sourcing_vbus)
6232 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6233 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
6234 		break;
6235 
6236 	case PORT_RESET:
6237 	case PORT_RESET_WAIT_OFF:
6238 		/*
6239 		 * State set back to default mode once the timer completes.
6240 		 * Ignore vbus changes here.
6241 		 */
6242 		break;
6243 
6244 	default:
6245 		break;
6246 	}
6247 }
6248 
6249 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
6250 {
6251 	tcpm_log_force(port, "VBUS off");
6252 	port->vbus_present = false;
6253 	port->vbus_never_low = false;
6254 	switch (port->state) {
6255 	case SNK_HARD_RESET_SINK_OFF:
6256 		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
6257 		break;
6258 	case HARD_RESET_SEND:
6259 		break;
6260 	case SNK_TRY:
6261 		/* Do nothing, waiting for timeout */
6262 		break;
6263 	case SRC_TRYWAIT:
6264 		/* Hand over to state machine if needed */
6265 		if (tcpm_port_is_source(port))
6266 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6267 		break;
6268 	case SNK_TRY_WAIT_DEBOUNCE:
6269 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6270 		break;
6271 	case SNK_TRYWAIT:
6272 	case SNK_TRYWAIT_VBUS:
6273 	case SNK_TRYWAIT_DEBOUNCE:
6274 		break;
6275 	case SNK_ATTACH_WAIT:
6276 	case SNK_DEBOUNCED:
6277 		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
6278 		break;
6279 
6280 	case SNK_NEGOTIATE_CAPABILITIES:
6281 		break;
6282 
6283 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6284 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
6285 		break;
6286 
6287 	case PR_SWAP_SNK_SRC_SINK_OFF:
6288 		/* Do nothing, expected */
6289 		break;
6290 
6291 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6292 		/*
6293 		 * Do nothing when vbus off notification is received.
6294 		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
6295 		 * for the vbus source to ramp up.
6296 		 */
6297 		break;
6298 
6299 	case PORT_RESET_WAIT_OFF:
6300 		tcpm_set_state(port, tcpm_default_state(port), 0);
6301 		break;
6302 
6303 	case SRC_TRY_WAIT:
6304 	case SRC_TRY_DEBOUNCE:
6305 		/* Do nothing, waiting for sink detection */
6306 		break;
6307 
6308 	case SRC_STARTUP:
6309 	case SRC_SEND_CAPABILITIES:
6310 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6311 	case SRC_NEGOTIATE_CAPABILITIES:
6312 	case SRC_TRANSITION_SUPPLY:
6313 	case SRC_READY:
6314 	case SRC_WAIT_NEW_CAPABILITIES:
6315 		/*
6316 		 * Force to unattached state to re-initiate connection.
6317 		 * A DRP port should move to Unattached.SNK instead of Unattached.SRC
6318 		 * if the sink is removed. Although sink removal here is due to the
6319 		 * source's vbus collapse, treat it the same way for consistency.
6320 		 */
6321 		if (port->port_type == TYPEC_PORT_SRC)
6322 			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
6323 		else
6324 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6325 		break;
6326 
6327 	case PORT_RESET:
6328 		/*
6329 		 * State set back to default mode once the timer completes.
6330 		 * Ignore vbus changes here.
6331 		 */
6332 		break;
6333 
6334 	case FR_SWAP_SEND:
6335 	case FR_SWAP_SEND_TIMEOUT:
6336 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6337 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6338 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6339 		/* Do nothing, vbus drop expected */
6340 		break;
6341 
6342 	case SNK_HARD_RESET_WAIT_VBUS:
6343 		/* Do nothing, it's OK to receive vbus off events */
6344 		break;
6345 
6346 	default:
6347 		if (port->pwr_role == TYPEC_SINK && port->attached)
6348 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6349 		break;
6350 	}
6351 }
6352 
6353 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
6354 {
6355 	tcpm_log_force(port, "VBUS VSAFE0V");
6356 	port->vbus_vsafe0v = true;
6357 	switch (port->state) {
6358 	case SRC_HARD_RESET_VBUS_OFF:
6359 		/*
6360 		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
6361 		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
6362 		 */
6363 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
6364 		break;
6365 	case SRC_ATTACH_WAIT:
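		/*
		 * VBUS has reached vSafe0V; if the port still looks like a source,
		 * run Try.SNK or attach as a source after tCCDebounce.
		 */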
6366 		if (tcpm_port_is_source(port))
6367 			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
6368 				       port->timings.cc_debounce_time);
6369 		break;
6370 	case SRC_STARTUP:
6371 	case SRC_SEND_CAPABILITIES:
6372 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6373 	case SRC_NEGOTIATE_CAPABILITIES:
6374 	case SRC_TRANSITION_SUPPLY:
6375 	case SRC_READY:
6376 	case SRC_WAIT_NEW_CAPABILITIES:
6377 		if (port->auto_vbus_discharge_enabled) {
6378 			if (port->port_type == TYPEC_PORT_SRC)
6379 				tcpm_set_state(port, SRC_UNATTACHED, 0);
6380 			else
6381 				tcpm_set_state(port, SNK_UNATTACHED, 0);
6382 		}
6383 		break;
6384 	case PR_SWAP_SNK_SRC_SINK_OFF:
6385 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6386 		/* Do nothing, vsafe0v is expected during transition */
6387 		break;
6388 	case SNK_ATTACH_WAIT:
6389 	case SNK_DEBOUNCED:
6390 		/* Do nothing, still waiting for VSAFE5V to connect */
6391 		break;
6392 	case SNK_HARD_RESET_WAIT_VBUS:
6393 		/* Do nothing, it's OK to receive vbus off events */
6394 		break;
6395 	default:
6396 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
6397 			tcpm_set_state(port, SNK_UNATTACHED, 0);
6398 		break;
6399 	}
6400 }
6401 
6402 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6403 {
6404 	tcpm_log_force(port, "Received hard reset");
6405 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6406 		port->tcpc->set_bist_data(port->tcpc, false);
6407 
6408 	switch (port->state) {
6409 	case TOGGLING:
6410 	case ERROR_RECOVERY:
6411 	case PORT_RESET:
6412 	case PORT_RESET_WAIT_OFF:
6413 		return;
6414 	default:
6415 		break;
6416 	}
6417 
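	/*
	 * Abort any AMS that was in progress; if the hard reset retry budget is
	 * not yet exhausted, account this as a HARD_RESET AMS instead.
	 */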
6418 	if (port->ams != NONE_AMS)
6419 		port->ams = NONE_AMS;
6420 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6421 		port->ams = HARD_RESET;
6422 	/*
6423 	 * If we keep receiving hard reset requests, executing the hard reset
6424 	 * must have failed. Revert to error recovery if that happens.
6425 	 */
6426 	tcpm_set_state(port,
6427 		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6428 				HARD_RESET_START : ERROR_RECOVERY,
6429 		       0);
6430 }
6431 
6432 static void tcpm_pd_event_handler(struct kthread_work *work)
6433 {
6434 	struct tcpm_port *port = container_of(work, struct tcpm_port,
6435 					      event_work);
6436 	u32 events;
6437 
6438 	mutex_lock(&port->lock);
6439 
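	/*
	 * Drain pd_events: snapshot and clear the pending bits under
	 * pd_event_lock, release the spinlock while handling them, and loop
	 * until no further events have been posted.
	 */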
6440 	spin_lock(&port->pd_event_lock);
6441 	while (port->pd_events) {
6442 		events = port->pd_events;
6443 		port->pd_events = 0;
6444 		spin_unlock(&port->pd_event_lock);
6445 		if (events & TCPM_RESET_EVENT)
6446 			_tcpm_pd_hard_reset(port);
6447 		if (events & TCPM_VBUS_EVENT) {
6448 			bool vbus;
6449 
6450 			vbus = port->tcpc->get_vbus(port->tcpc);
6451 			if (vbus) {
6452 				_tcpm_pd_vbus_on(port);
6453 			} else {
6454 				_tcpm_pd_vbus_off(port);
6455 				/*
6456 				 * When TCPC does not support detecting vsafe0v voltage level,
6457 				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
6458 				 * to see if vbus has discharged to VSAFE0V.
6459 				 */
6460 				if (!port->tcpc->is_vbus_vsafe0v ||
6461 				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
6462 					_tcpm_pd_vbus_vsafe0v(port);
6463 			}
6464 		}
6465 		if (events & TCPM_CC_EVENT) {
6466 			enum typec_cc_status cc1, cc2;
6467 
6468 			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6469 				_tcpm_cc_change(port, cc1, cc2);
6470 		}
6471 		if (events & TCPM_FRS_EVENT) {
6472 			if (port->state == SNK_READY) {
6473 				int ret;
6474 
6475 				port->upcoming_state = FR_SWAP_SEND;
6476 				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
6477 				if (ret == -EAGAIN)
6478 					port->upcoming_state = INVALID_STATE;
6479 			} else {
6480 				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
6481 			}
6482 		}
6483 		if (events & TCPM_SOURCING_VBUS) {
6484 			tcpm_log(port, "sourcing vbus");
6485 			/*
6486 			 * In the fast role swap case the TCPC autonomously sources vbus. Set
6487 			 * vbus_source true as TCPM wouldn't have called tcpm_set_vbus.
6488 			 *
6489 			 * When vbus is sourced at the command of TCPM, i.e. TCPM called
6490 			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
6491 			 */
6492 			port->vbus_source = true;
6493 			_tcpm_pd_vbus_on(port);
6494 		}
6495 		if (events & TCPM_PORT_CLEAN) {
6496 			tcpm_log(port, "port clean");
6497 			if (port->state == CHECK_CONTAMINANT) {
6498 				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
6499 					tcpm_set_state(port, TOGGLING, 0);
6500 				else
6501 					tcpm_set_state(port, tcpm_default_state(port), 0);
6502 			}
6503 		}
6504 		if (events & TCPM_PORT_ERROR) {
6505 			tcpm_log(port, "port triggering error recovery");
6506 			tcpm_set_state(port, ERROR_RECOVERY, 0);
6507 		}
6508 
6509 		spin_lock(&port->pd_event_lock);
6510 	}
6511 	spin_unlock(&port->pd_event_lock);
6512 	mutex_unlock(&port->lock);
6513 }
6514 
6515 void tcpm_cc_change(struct tcpm_port *port)
6516 {
6517 	spin_lock(&port->pd_event_lock);
6518 	port->pd_events |= TCPM_CC_EVENT;
6519 	spin_unlock(&port->pd_event_lock);
6520 	kthread_queue_work(port->wq, &port->event_work);
6521 }
6522 EXPORT_SYMBOL_GPL(tcpm_cc_change);
6523 
6524 void tcpm_vbus_change(struct tcpm_port *port)
6525 {
6526 	spin_lock(&port->pd_event_lock);
6527 	port->pd_events |= TCPM_VBUS_EVENT;
6528 	spin_unlock(&port->pd_event_lock);
6529 	kthread_queue_work(port->wq, &port->event_work);
6530 }
6531 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6532 
6533 void tcpm_pd_hard_reset(struct tcpm_port *port)
6534 {
6535 	spin_lock(&port->pd_event_lock);
6536 	port->pd_events = TCPM_RESET_EVENT;
6537 	spin_unlock(&port->pd_event_lock);
6538 	kthread_queue_work(port->wq, &port->event_work);
6539 }
6540 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6541 
6542 void tcpm_sink_frs(struct tcpm_port *port)
6543 {
6544 	spin_lock(&port->pd_event_lock);
6545 	port->pd_events |= TCPM_FRS_EVENT;
6546 	spin_unlock(&port->pd_event_lock);
6547 	kthread_queue_work(port->wq, &port->event_work);
6548 }
6549 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6550 
6551 void tcpm_sourcing_vbus(struct tcpm_port *port)
6552 {
6553 	spin_lock(&port->pd_event_lock);
6554 	port->pd_events |= TCPM_SOURCING_VBUS;
6555 	spin_unlock(&port->pd_event_lock);
6556 	kthread_queue_work(port->wq, &port->event_work);
6557 }
6558 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6559 
6560 void tcpm_port_clean(struct tcpm_port *port)
6561 {
6562 	spin_lock(&port->pd_event_lock);
6563 	port->pd_events |= TCPM_PORT_CLEAN;
6564 	spin_unlock(&port->pd_event_lock);
6565 	kthread_queue_work(port->wq, &port->event_work);
6566 }
6567 EXPORT_SYMBOL_GPL(tcpm_port_clean);
6568 
6569 bool tcpm_port_is_toggling(struct tcpm_port *port)
6570 {
6571 	return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6572 }
6573 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6574 
6575 void tcpm_port_error_recovery(struct tcpm_port *port)
6576 {
6577 	spin_lock(&port->pd_event_lock);
6578 	port->pd_events |= TCPM_PORT_ERROR;
6579 	spin_unlock(&port->pd_event_lock);
6580 	kthread_queue_work(port->wq, &port->event_work);
6581 }
6582 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
6583 
6584 static void tcpm_enable_frs_work(struct kthread_work *work)
6585 {
6586 	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
6587 	int ret;
6588 
6589 	mutex_lock(&port->lock);
6590 	/* Not FRS capable */
6591 	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
6592 	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
6593 	    !port->tcpc->enable_frs ||
6594 	    /* Sink caps queried */
6595 	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
6596 		goto unlock;
6597 
6598 	/* Send when the state machine is idle */
6599 	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
6600 	    port->send_discover_prime)
6601 		goto resched;
6602 
6603 	port->upcoming_state = GET_SINK_CAP;
6604 	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
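	/*
	 * -EAGAIN means the AMS could not be started right now, so retry later;
	 * any other outcome marks the query as done and stops further retries.
	 */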
6605 	if (ret == -EAGAIN) {
6606 		port->upcoming_state = INVALID_STATE;
6607 	} else {
6608 		port->sink_cap_done = true;
6609 		goto unlock;
6610 	}
6611 resched:
6612 	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
6613 unlock:
6614 	mutex_unlock(&port->lock);
6615 }
6616 
6617 static void tcpm_send_discover_work(struct kthread_work *work)
6618 {
6619 	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
6620 
6621 	mutex_lock(&port->lock);
6622 	/* No need to send DISCOVER_IDENTITY anymore */
6623 	if (!port->send_discover && !port->send_discover_prime)
6624 		goto unlock;
6625 
6626 	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
6627 		port->send_discover = false;
6628 		port->send_discover_prime = false;
6629 		goto unlock;
6630 	}
6631 
6632 	/* Retry if the port is not idle */
6633 	if ((port->state != SRC_READY && port->state != SNK_READY &&
6634 	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
6635 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
6636 		goto unlock;
6637 	}
6638 
6639 	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
6640 
6641 unlock:
6642 	mutex_unlock(&port->lock);
6643 }
6644 
6645 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
6646 {
6647 	struct tcpm_port *port = typec_get_drvdata(p);
6648 	int ret;
6649 
6650 	mutex_lock(&port->swap_lock);
6651 	mutex_lock(&port->lock);
6652 
6653 	if (port->typec_caps.data != TYPEC_PORT_DRD) {
6654 		ret = -EINVAL;
6655 		goto port_unlock;
6656 	}
6657 	if (port->state != SRC_READY && port->state != SNK_READY) {
6658 		ret = -EAGAIN;
6659 		goto port_unlock;
6660 	}
6661 
6662 	if (port->data_role == data) {
6663 		ret = 0;
6664 		goto port_unlock;
6665 	}
6666 
6667 	/*
6668 	 * XXX
6669 	 * 6.3.9: If an alternate mode is active, a request to swap
6670 	 * alternate modes shall trigger a port reset.
6671 	 * Reject data role swap request in this case.
6672 	 */
6673 
6674 	if (!port->pd_capable) {
6675 		/*
6676 		 * If the partner is not PD capable, reset the port to
6677 		 * trigger a role change. This can only work if a preferred
6678 		 * role is configured, and if it matches the requested role.
6679 		 */
6680 		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
6681 		    port->try_role == port->pwr_role) {
6682 			ret = -EINVAL;
6683 			goto port_unlock;
6684 		}
6685 		port->non_pd_role_swap = true;
6686 		tcpm_set_state(port, PORT_RESET, 0);
6687 	} else {
6688 		port->upcoming_state = DR_SWAP_SEND;
6689 		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
6690 		if (ret == -EAGAIN) {
6691 			port->upcoming_state = INVALID_STATE;
6692 			goto port_unlock;
6693 		}
6694 	}
6695 
6696 	port->swap_status = 0;
6697 	port->swap_pending = true;
6698 	reinit_completion(&port->swap_complete);
6699 	mutex_unlock(&port->lock);
6700 
6701 	if (!wait_for_completion_timeout(&port->swap_complete,
6702 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6703 		ret = -ETIMEDOUT;
6704 	else
6705 		ret = port->swap_status;
6706 
6707 	port->non_pd_role_swap = false;
6708 	goto swap_unlock;
6709 
6710 port_unlock:
6711 	mutex_unlock(&port->lock);
6712 swap_unlock:
6713 	mutex_unlock(&port->swap_lock);
6714 	return ret;
6715 }
6716 
6717 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
6718 {
6719 	struct tcpm_port *port = typec_get_drvdata(p);
6720 	int ret;
6721 
6722 	mutex_lock(&port->swap_lock);
6723 	mutex_lock(&port->lock);
6724 
6725 	if (port->port_type != TYPEC_PORT_DRP) {
6726 		ret = -EINVAL;
6727 		goto port_unlock;
6728 	}
6729 	if (port->state != SRC_READY && port->state != SNK_READY) {
6730 		ret = -EAGAIN;
6731 		goto port_unlock;
6732 	}
6733 
6734 	if (role == port->pwr_role) {
6735 		ret = 0;
6736 		goto port_unlock;
6737 	}
6738 
6739 	port->upcoming_state = PR_SWAP_SEND;
6740 	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
6741 	if (ret == -EAGAIN) {
6742 		port->upcoming_state = INVALID_STATE;
6743 		goto port_unlock;
6744 	}
6745 
6746 	port->swap_status = 0;
6747 	port->swap_pending = true;
6748 	reinit_completion(&port->swap_complete);
6749 	mutex_unlock(&port->lock);
6750 
6751 	if (!wait_for_completion_timeout(&port->swap_complete,
6752 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6753 		ret = -ETIMEDOUT;
6754 	else
6755 		ret = port->swap_status;
6756 
6757 	goto swap_unlock;
6758 
6759 port_unlock:
6760 	mutex_unlock(&port->lock);
6761 swap_unlock:
6762 	mutex_unlock(&port->swap_lock);
6763 	return ret;
6764 }
6765 
6766 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
6767 {
6768 	struct tcpm_port *port = typec_get_drvdata(p);
6769 	int ret;
6770 
6771 	mutex_lock(&port->swap_lock);
6772 	mutex_lock(&port->lock);
6773 
6774 	if (port->state != SRC_READY && port->state != SNK_READY) {
6775 		ret = -EAGAIN;
6776 		goto port_unlock;
6777 	}
6778 
6779 	if (role == port->vconn_role) {
6780 		ret = 0;
6781 		goto port_unlock;
6782 	}
6783 
6784 	port->upcoming_state = VCONN_SWAP_SEND;
6785 	ret = tcpm_ams_start(port, VCONN_SWAP);
6786 	if (ret == -EAGAIN) {
6787 		port->upcoming_state = INVALID_STATE;
6788 		goto port_unlock;
6789 	}
6790 
6791 	port->swap_status = 0;
6792 	port->swap_pending = true;
6793 	reinit_completion(&port->swap_complete);
6794 	mutex_unlock(&port->lock);
6795 
6796 	if (!wait_for_completion_timeout(&port->swap_complete,
6797 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6798 		ret = -ETIMEDOUT;
6799 	else
6800 		ret = port->swap_status;
6801 
6802 	goto swap_unlock;
6803 
6804 port_unlock:
6805 	mutex_unlock(&port->lock);
6806 swap_unlock:
6807 	mutex_unlock(&port->swap_lock);
6808 	return ret;
6809 }
6810 
6811 static int tcpm_try_role(struct typec_port *p, int role)
6812 {
6813 	struct tcpm_port *port = typec_get_drvdata(p);
6814 	struct tcpc_dev	*tcpc = port->tcpc;
6815 	int ret = 0;
6816 
6817 	mutex_lock(&port->lock);
6818 	if (tcpc->try_role)
6819 		ret = tcpc->try_role(tcpc, role);
6820 	if (!ret)
6821 		port->try_role = role;
6822 	port->try_src_count = 0;
6823 	port->try_snk_count = 0;
6824 	mutex_unlock(&port->lock);
6825 
6826 	return ret;
6827 }
6828 
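/*
 * PPS operating-current adjustment. req_op_curr is in mA and supply_voltage
 * in mV, so (req_op_curr * supply_voltage) / 1000 yields the requested power
 * in mW; e.g. 3000 mA at 5000 mV gives 15000 mW. The request is rejected if
 * it would drop below the configured operating_snk_mw, and the current is
 * rounded down to RDO_PROG_CURR_MA_STEP granularity before the new contract
 * is negotiated.
 */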
6829 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
6830 {
6831 	unsigned int target_mw;
6832 	int ret;
6833 
6834 	mutex_lock(&port->swap_lock);
6835 	mutex_lock(&port->lock);
6836 
6837 	if (!port->pps_data.active) {
6838 		ret = -EOPNOTSUPP;
6839 		goto port_unlock;
6840 	}
6841 
6842 	if (port->state != SNK_READY) {
6843 		ret = -EAGAIN;
6844 		goto port_unlock;
6845 	}
6846 
6847 	if (req_op_curr > port->pps_data.max_curr) {
6848 		ret = -EINVAL;
6849 		goto port_unlock;
6850 	}
6851 
6852 	target_mw = (req_op_curr * port->supply_voltage) / 1000;
6853 	if (target_mw < port->operating_snk_mw) {
6854 		ret = -EINVAL;
6855 		goto port_unlock;
6856 	}
6857 
6858 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6859 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6860 	if (ret == -EAGAIN) {
6861 		port->upcoming_state = INVALID_STATE;
6862 		goto port_unlock;
6863 	}
6864 
6865 	/* Round down operating current to align with PPS valid steps */
6866 	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
6867 
6868 	reinit_completion(&port->pps_complete);
6869 	port->pps_data.req_op_curr = req_op_curr;
6870 	port->pps_status = 0;
6871 	port->pps_pending = true;
6872 	mutex_unlock(&port->lock);
6873 
6874 	if (!wait_for_completion_timeout(&port->pps_complete,
6875 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6876 		ret = -ETIMEDOUT;
6877 	else
6878 		ret = port->pps_status;
6879 
6880 	goto swap_unlock;
6881 
6882 port_unlock:
6883 	mutex_unlock(&port->lock);
6884 swap_unlock:
6885 	mutex_unlock(&port->swap_lock);
6886 
6887 	return ret;
6888 }
6889 
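/*
 * PPS output-voltage adjustment: same structure as tcpm_pps_set_op_curr(),
 * but the power check uses the present current_limit and the requested
 * voltage is rounded down to RDO_PROG_VOLT_MV_STEP.
 */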
6890 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
6891 {
6892 	unsigned int target_mw;
6893 	int ret;
6894 
6895 	mutex_lock(&port->swap_lock);
6896 	mutex_lock(&port->lock);
6897 
6898 	if (!port->pps_data.active) {
6899 		ret = -EOPNOTSUPP;
6900 		goto port_unlock;
6901 	}
6902 
6903 	if (port->state != SNK_READY) {
6904 		ret = -EAGAIN;
6905 		goto port_unlock;
6906 	}
6907 
6908 	target_mw = (port->current_limit * req_out_volt) / 1000;
6909 	if (target_mw < port->operating_snk_mw) {
6910 		ret = -EINVAL;
6911 		goto port_unlock;
6912 	}
6913 
6914 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6915 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6916 	if (ret == -EAGAIN) {
6917 		port->upcoming_state = INVALID_STATE;
6918 		goto port_unlock;
6919 	}
6920 
6921 	/* Round down output voltage to align with PPS valid steps */
6922 	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
6923 
6924 	reinit_completion(&port->pps_complete);
6925 	port->pps_data.req_out_volt = req_out_volt;
6926 	port->pps_status = 0;
6927 	port->pps_pending = true;
6928 	mutex_unlock(&port->lock);
6929 
6930 	if (!wait_for_completion_timeout(&port->pps_complete,
6931 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6932 		ret = -ETIMEDOUT;
6933 	else
6934 		ret = port->pps_status;
6935 
6936 	goto swap_unlock;
6937 
6938 port_unlock:
6939 	mutex_unlock(&port->lock);
6940 swap_unlock:
6941 	mutex_unlock(&port->swap_lock);
6942 
6943 	return ret;
6944 }
6945 
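/*
 * Switch between a PPS (programmable) contract and a standard fixed-PDO
 * contract. Activation seeds the requested voltage/current from the values
 * of the current contract; deactivation falls back to
 * SNK_NEGOTIATE_CAPABILITIES to restore a fixed PDO contract.
 */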
6946 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
6947 {
6948 	int ret = 0;
6949 
6950 	mutex_lock(&port->swap_lock);
6951 	mutex_lock(&port->lock);
6952 
6953 	if (!port->pps_data.supported) {
6954 		ret = -EOPNOTSUPP;
6955 		goto port_unlock;
6956 	}
6957 
6958 	/* Trying to deactivate PPS when already deactivated so just bail */
6959 	if (!port->pps_data.active && !activate)
6960 		goto port_unlock;
6961 
6962 	if (port->state != SNK_READY) {
6963 		ret = -EAGAIN;
6964 		goto port_unlock;
6965 	}
6966 
6967 	if (activate)
6968 		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6969 	else
6970 		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6971 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6972 	if (ret == -EAGAIN) {
6973 		port->upcoming_state = INVALID_STATE;
6974 		goto port_unlock;
6975 	}
6976 
6977 	reinit_completion(&port->pps_complete);
6978 	port->pps_status = 0;
6979 	port->pps_pending = true;
6980 
6981 	/* Trigger PPS request or move back to standard PDO contract */
6982 	if (activate) {
6983 		port->pps_data.req_out_volt = port->supply_voltage;
6984 		port->pps_data.req_op_curr = port->current_limit;
6985 	}
6986 	mutex_unlock(&port->lock);
6987 
6988 	if (!wait_for_completion_timeout(&port->pps_complete,
6989 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6990 		ret = -ETIMEDOUT;
6991 	else
6992 		ret = port->pps_status;
6993 
6994 	goto swap_unlock;
6995 
6996 port_unlock:
6997 	mutex_unlock(&port->lock);
6998 swap_unlock:
6999 	mutex_unlock(&port->swap_lock);
7000 
7001 	return ret;
7002 }
7003 
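/*
 * (Re)initialize the port: reset the TCPC and the protocol state, sample
 * VBUS and (if the TCPC can report it) vSafe0V, enter the default state for
 * the configured role, resynchronize the CC status, and finally force a
 * PORT_RESET so that an already attached partner sees a clean disconnect.
 */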
7004 static void tcpm_init(struct tcpm_port *port)
7005 {
7006 	enum typec_cc_status cc1, cc2;
7007 
7008 	port->tcpc->init(port->tcpc);
7009 
7010 	tcpm_reset_port(port);
7011 
7012 	/*
7013 	 * XXX
7014 	 * Should possibly wait for VBUS to settle if it was enabled locally
7015 	 * since tcpm_reset_port() will disable VBUS.
7016 	 */
7017 	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
7018 	if (port->vbus_present)
7019 		port->vbus_never_low = true;
7020 
7021 	/*
7022 	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
7023 	 * So implicitly vbus_vsafe0v = false.
7024 	 *
7025 	 * 2. When vbus_present is false and TCPC does NOT support querying
7026 	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
7027 	 * vbus_vsafe0v is true.
7028 	 *
7029 	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
7030 	 * then, query tcpc for vsafe0v status.
7031 	 */
7032 	if (port->vbus_present)
7033 		port->vbus_vsafe0v = false;
7034 	else if (!port->tcpc->is_vbus_vsafe0v)
7035 		port->vbus_vsafe0v = true;
7036 	else
7037 		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
7038 
7039 	tcpm_set_state(port, tcpm_default_state(port), 0);
7040 
7041 	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
7042 		_tcpm_cc_change(port, cc1, cc2);
7043 
7044 	/*
7045 	 * Some adapters need a clean slate at startup, and won't recover
7046 	 * otherwise. So do not try to be fancy and force a clean disconnect.
7047 	 */
7048 	tcpm_set_state(port, PORT_RESET, 0);
7049 }
7050 
7051 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
7052 {
7053 	struct tcpm_port *port = typec_get_drvdata(p);
7054 
7055 	mutex_lock(&port->lock);
7056 	if (type == port->port_type)
7057 		goto port_unlock;
7058 
7059 	port->port_type = type;
7060 
7061 	if (!port->connected) {
7062 		tcpm_set_state(port, PORT_RESET, 0);
7063 	} else if (type == TYPEC_PORT_SNK) {
7064 		if (!(port->pwr_role == TYPEC_SINK &&
7065 		      port->data_role == TYPEC_DEVICE))
7066 			tcpm_set_state(port, PORT_RESET, 0);
7067 	} else if (type == TYPEC_PORT_SRC) {
7068 		if (!(port->pwr_role == TYPEC_SOURCE &&
7069 		      port->data_role == TYPEC_HOST))
7070 			tcpm_set_state(port, PORT_RESET, 0);
7071 	}
7072 
7073 port_unlock:
7074 	mutex_unlock(&port->lock);
7075 	return 0;
7076 }
7077 
7078 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
7079 {
7080 	int i;
7081 
7082 	for (i = 0; port->pd_list[i]; i++) {
7083 		if (port->pd_list[i]->pd == pd)
7084 			return port->pd_list[i];
7085 	}
7086 
7087 	return ERR_PTR(-ENODATA);
7088 }
7089 
7090 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
7091 {
7092 	struct tcpm_port *port = typec_get_drvdata(p);
7093 
7094 	return port->pds;
7095 }
7096 
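/*
 * Select one of the registered usb_power_delivery capability sets. The new
 * PDOs are copied into the live src/snk tables and, depending on the current
 * state, either the Rp advertisement is updated or a power negotiation AMS
 * is started so the change takes effect on the wire.
 */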
7097 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
7098 {
7099 	struct tcpm_port *port = typec_get_drvdata(p);
7100 	struct pd_data *data;
7101 	int i, ret = 0;
7102 
7103 	mutex_lock(&port->lock);
7104 
7105 	if (port->selected_pd == pd)
7106 		goto unlock;
7107 
7108 	data = tcpm_find_pd_data(port, pd);
7109 	if (IS_ERR(data)) {
7110 		ret = PTR_ERR(data);
7111 		goto unlock;
7112 	}
7113 
7114 	if (data->sink_desc.pdo[0]) {
7115 		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
7116 			port->snk_pdo[i] = data->sink_desc.pdo[i];
7117 		port->nr_snk_pdo = i;
7118 		port->operating_snk_mw = data->operating_snk_mw;
7119 	}
7120 
7121 	if (data->source_desc.pdo[0]) {
7122 		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
7123 			port->src_pdo[i] = data->source_desc.pdo[i];
7124 		port->nr_src_pdo = i;
7125 	}
7126 
7127 	switch (port->state) {
7128 	case SRC_UNATTACHED:
7129 	case SRC_ATTACH_WAIT:
7130 	case SRC_TRYWAIT:
7131 		tcpm_set_cc(port, tcpm_rp_cc(port));
7132 		break;
7133 	case SRC_SEND_CAPABILITIES:
7134 	case SRC_SEND_CAPABILITIES_TIMEOUT:
7135 	case SRC_NEGOTIATE_CAPABILITIES:
7136 	case SRC_READY:
7137 	case SRC_WAIT_NEW_CAPABILITIES:
7138 		port->caps_count = 0;
7139 		port->upcoming_state = SRC_SEND_CAPABILITIES;
7140 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
7141 		if (ret == -EAGAIN) {
7142 			port->upcoming_state = INVALID_STATE;
7143 			goto unlock;
7144 		}
7145 		break;
7146 	case SNK_NEGOTIATE_CAPABILITIES:
7147 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
7148 	case SNK_READY:
7149 	case SNK_TRANSITION_SINK:
7150 	case SNK_TRANSITION_SINK_VBUS:
7151 		if (port->pps_data.active)
7152 			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
7153 		else if (port->pd_capable)
7154 			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
7155 		else
7156 			break;
7157 
7158 		port->update_sink_caps = true;
7159 
7160 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
7161 		if (ret == -EAGAIN) {
7162 			port->upcoming_state = INVALID_STATE;
7163 			goto unlock;
7164 		}
7165 		break;
7166 	default:
7167 		break;
7168 	}
7169 
7170 	port->port_source_caps = data->source_cap;
7171 	port->port_sink_caps = data->sink_cap;
7172 	typec_port_set_usb_power_delivery(p, NULL);
7173 	port->selected_pd = pd;
7174 	typec_port_set_usb_power_delivery(p, port->selected_pd);
7175 unlock:
7176 	mutex_unlock(&port->lock);
7177 	return ret;
7178 }
7179 
7180 static const struct typec_operations tcpm_ops = {
7181 	.try_role = tcpm_try_role,
7182 	.dr_set = tcpm_dr_set,
7183 	.pr_set = tcpm_pr_set,
7184 	.vconn_set = tcpm_vconn_set,
7185 	.port_type_set = tcpm_port_type_set,
7186 	.pd_get = tcpm_pd_get,
7187 	.pd_set = tcpm_pd_set
7188 };
7189 
7190 void tcpm_tcpc_reset(struct tcpm_port *port)
7191 {
7192 	mutex_lock(&port->lock);
7193 	/* XXX: Maintain PD connection if possible? */
7194 	tcpm_init(port);
7195 	mutex_unlock(&port->lock);
7196 }
7197 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
7198 
7199 static void tcpm_port_unregister_pd(struct tcpm_port *port)
7200 {
7201 	int i;
7202 
7203 	port->port_sink_caps = NULL;
7204 	port->port_source_caps = NULL;
7205 	for (i = 0; i < port->pd_count; i++) {
7206 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
7207 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
7208 		devm_kfree(port->dev, port->pd_list[i]);
7209 		port->pd_list[i] = NULL;
7210 		usb_power_delivery_unregister(port->pds[i]);
7211 		port->pds[i] = NULL;
7212 	}
7213 }
7214 
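/*
 * Register one usb_power_delivery object per entry parsed from the
 * "capabilities" node (or a single one for legacy firmware descriptions),
 * along with its source/sink capability sets. The first entry becomes the
 * initially selected one.
 */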
7215 static int tcpm_port_register_pd(struct tcpm_port *port)
7216 {
7217 	u16 pd_revision = port->typec_caps.pd_revision;
7218 	u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
7219 	struct usb_power_delivery_desc desc = { pd_revision, pd_version };
7220 	struct usb_power_delivery_capabilities *cap;
7221 	int ret, i;
7222 
7223 	if (!port->nr_src_pdo && !port->nr_snk_pdo)
7224 		return 0;
7225 
7226 	for (i = 0; i < port->pd_count; i++) {
7227 		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
7228 		if (IS_ERR(port->pds[i])) {
7229 			ret = PTR_ERR(port->pds[i]);
7230 			goto err_unregister;
7231 		}
7232 		port->pd_list[i]->pd = port->pds[i];
7233 
7234 		if (port->pd_list[i]->source_desc.pdo[0]) {
7235 			cap = usb_power_delivery_register_capabilities(port->pds[i],
7236 								&port->pd_list[i]->source_desc);
7237 			if (IS_ERR(cap)) {
7238 				ret = PTR_ERR(cap);
7239 				goto err_unregister;
7240 			}
7241 			port->pd_list[i]->source_cap = cap;
7242 		}
7243 
7244 		if (port->pd_list[i]->sink_desc.pdo[0]) {
7245 			cap = usb_power_delivery_register_capabilities(port->pds[i],
7246 								&port->pd_list[i]->sink_desc);
7247 			if (IS_ERR(cap)) {
7248 				ret = PTR_ERR(cap);
7249 				goto err_unregister;
7250 			}
7251 			port->pd_list[i]->sink_cap = cap;
7252 		}
7253 	}
7254 
7255 	port->port_source_caps = port->pd_list[0]->source_cap;
7256 	port->port_sink_caps = port->pd_list[0]->sink_cap;
7257 	port->selected_pd = port->pds[0];
7258 	return 0;
7259 
7260 err_unregister:
7261 	tcpm_port_unregister_pd(port);
7262 
7263 	return ret;
7264 }
7265 
7266 static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fwnode)
7267 {
7268 	int ret;
7269 	u32 val;
7270 
7271 	ret = fwnode_property_read_u32(fwnode, "sink-wait-cap-time-ms", &val);
7272 	if (!ret)
7273 		port->timings.sink_wait_cap_time = val;
7274 	else
7275 		port->timings.sink_wait_cap_time = PD_T_SINK_WAIT_CAP;
7276 
7277 	ret = fwnode_property_read_u32(fwnode, "ps-source-off-time-ms", &val);
7278 	if (!ret)
7279 		port->timings.ps_src_off_time = val;
7280 	else
7281 		port->timings.ps_src_off_time = PD_T_PS_SOURCE_OFF;
7282 
7283 	ret = fwnode_property_read_u32(fwnode, "cc-debounce-time-ms", &val);
7284 	if (!ret)
7285 		port->timings.cc_debounce_time = val;
7286 	else
7287 		port->timings.cc_debounce_time = PD_T_CC_DEBOUNCE;
7288 
7289 	ret = fwnode_property_read_u32(fwnode, "sink-bc12-completion-time-ms", &val);
7290 	if (!ret)
7291 		port->timings.snk_bc12_cmpletion_time = val;
7292 }
7293 
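/*
 * Parse the connector description from firmware. For devicetree this is the
 * usb-c-connector node; an illustrative (not exhaustive) fragment using the
 * PDO_* helper macros from dt-bindings/usb/pd.h might look like:
 *
 *	connector {
 *		compatible = "usb-c-connector";
 *		power-role = "dual";
 *		data-role = "dual";
 *		source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
 *		sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
 *		op-sink-microwatt = <10000000>;
 *	};
 *
 * Multiple capability sets can alternatively be described in a child
 * "capabilities" node, one child node per set.
 */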
7294 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
7295 {
7296 	struct fwnode_handle *capabilities, *caps = NULL;
7297 	unsigned int nr_src_pdo, nr_snk_pdo;
7298 	const char *opmode_str;
7299 	u32 *src_pdo, *snk_pdo;
7300 	u32 uw, frs_current;
7301 	int ret = 0, i;
7302 	int mode;
7303 
7304 	if (!fwnode)
7305 		return -EINVAL;
7306 
7307 	/*
7308 	 * This fwnode has a "compatible" property, but is never populated as a
7309 	 * struct device. Instead we simply parse it to read the properties.
7310 	 * This breaks fw_devlink=on. To maintain backward compatibility
7311 	 * with existing DT files, we work around this by deleting any
7312 	 * fwnode_links to/from this fwnode.
7313 	 */
7314 	fw_devlink_purge_absent_suppliers(fwnode);
7315 
7316 	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
7317 	if (ret < 0)
7318 		return ret;
7319 
7320 	mode = 0;
7321 
7322 	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
7323 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
7324 
7325 	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
7326 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
7327 
7328 	port->port_type = port->typec_caps.type;
7329 	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
7330 	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
7331 	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
7332 
7333 	if (!port->pd_supported) {
7334 		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
7335 		if (ret)
7336 			return ret;
7337 		ret = typec_find_pwr_opmode(opmode_str);
7338 		if (ret < 0)
7339 			return ret;
7340 		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
7341 		return 0;
7342 	}
7343 
7344 	/* The following code is applicable to pd-capable ports, i.e. pd_supported is true. */
7345 
7346 	/* FRS can only be supported by DRP ports */
7347 	if (port->port_type == TYPEC_PORT_DRP) {
7348 		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
7349 					       &frs_current);
7350 		if (!ret && frs_current <= FRS_5V_3A)
7351 			port->new_source_frs_current = frs_current;
7352 
7353 		if (ret)
7354 			ret = 0;
7355 	}
7356 
7357 	/* For backward compatibility, the "capabilities" node is optional. */
7358 	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
7359 	if (!capabilities) {
7360 		port->pd_count = 1;
7361 	} else {
7362 		port->pd_count = fwnode_get_child_node_count(capabilities);
7363 		if (!port->pd_count) {
7364 			ret = -ENODATA;
7365 			goto put_capabilities;
7366 		}
7367 	}
7368 
7369 	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
7370 				 GFP_KERNEL);
7371 	if (!port->pds) {
7372 		ret = -ENOMEM;
7373 		goto put_capabilities;
7374 	}
7375 
7376 	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
7377 				     GFP_KERNEL);
7378 	if (!port->pd_list) {
7379 		ret = -ENOMEM;
7380 		goto put_capabilities;
7381 	}
7382 
7383 	for (i = 0; i < port->pd_count; i++) {
7384 		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
7385 		if (!port->pd_list[i]) {
7386 			ret = -ENOMEM;
7387 			goto put_capabilities;
7388 		}
7389 
7390 		src_pdo = port->pd_list[i]->source_desc.pdo;
7391 		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
7392 		snk_pdo = port->pd_list[i]->sink_desc.pdo;
7393 		port->pd_list[i]->sink_desc.role = TYPEC_SINK;
7394 
7395 		/* If "capabilities" is NULL, fall back to single pd cap population. */
7396 		if (!capabilities)
7397 			caps = fwnode;
7398 		else
7399 			caps = fwnode_get_next_child_node(capabilities, caps);
7400 
7401 		if (port->port_type != TYPEC_PORT_SNK) {
7402 			ret = fwnode_property_count_u32(caps, "source-pdos");
7403 			if (ret == 0) {
7404 				ret = -EINVAL;
7405 				goto put_caps;
7406 			}
7407 			if (ret < 0)
7408 				goto put_caps;
7409 
7410 			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
7411 			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
7412 							     nr_src_pdo);
7413 			if (ret)
7414 				goto put_caps;
7415 
7416 			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
7417 			if (ret)
7418 				goto put_caps;
7419 
7420 			if (i == 0) {
7421 				port->nr_src_pdo = nr_src_pdo;
7422 				memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7423 					       port->pd_list[0]->source_desc.pdo,
7424 					       sizeof(u32) * nr_src_pdo,
7425 					       0);
7426 			}
7427 		}
7428 
7429 		if (port->port_type != TYPEC_PORT_SRC) {
7430 			ret = fwnode_property_count_u32(caps, "sink-pdos");
7431 			if (ret == 0) {
7432 				ret = -EINVAL;
7433 				goto put_caps;
7434 			}
7435 
7436 			if (ret < 0)
7437 				goto put_caps;
7438 
7439 			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
7440 			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
7441 							     nr_snk_pdo);
7442 			if (ret)
7443 				goto put_caps;
7444 
7445 			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
7446 			if (ret)
7447 				goto put_caps;
7448 
7449 			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
7450 				ret = -EINVAL;
7451 				goto put_caps;
7452 			}
7453 
7454 			port->pd_list[i]->operating_snk_mw = uw / 1000;
7455 
7456 			if (i == 0) {
7457 				port->nr_snk_pdo = nr_snk_pdo;
7458 				memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7459 					       port->pd_list[0]->sink_desc.pdo,
7460 					       sizeof(u32) * nr_snk_pdo,
7461 					       0);
7462 				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
7463 			}
7464 		}
7465 	}
7466 
7467 put_caps:
7468 	if (caps != fwnode)
7469 		fwnode_handle_put(caps);
7470 put_capabilities:
7471 	fwnode_handle_put(capabilities);
7472 	return ret;
7473 }
7474 
7475 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
7476 {
7477 	int ret;
7478 
7479 	/* sink-vdos is optional */
7480 	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
7481 	if (ret < 0)
7482 		return 0;
7483 
7484 	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
7485 	if (port->nr_snk_vdo) {
7486 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
7487 						     port->snk_vdo,
7488 						     port->nr_snk_vdo);
7489 		if (ret < 0)
7490 			return ret;
7491 	}
7492 
7493 	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
7494 	if (port->nr_snk_vdo) {
7495 		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
7496 		if (ret < 0)
7497 			return ret;
7498 		else if (ret == 0)
7499 			return -ENODATA;
7500 
7501 		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
7502 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
7503 						     port->snk_vdo_v1,
7504 						     port->nr_snk_vdo_v1);
7505 		if (ret < 0)
7506 			return ret;
7507 	}
7508 
7509 	return 0;
7510 }
7511 
7512 static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
7513 {
7514 	int ret;
7515 	u8 val[4];
7516 
7517 	ret = fwnode_property_count_u8(fwnode, "pd-revision");
7518 	if (!ret || ret != 4) {
7519 		tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
7520 		return;
7521 	}
7522 
7523 	ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
7524 	if (ret) {
7525 		tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
7526 		return;
7527 	}
7528 
7529 	port->pd_rev.rev_major = val[0];
7530 	port->pd_rev.rev_minor = val[1];
7531 	port->pd_rev.ver_major = val[2];
7532 	port->pd_rev.ver_minor = val[3];
7533 }
7534 
7535 /* Power Supply access to expose source power information */
7536 enum tcpm_psy_online_states {
7537 	TCPM_PSY_OFFLINE = 0,
7538 	TCPM_PSY_FIXED_ONLINE,
7539 	TCPM_PSY_PROG_ONLINE,
7540 };
7541 
7542 static enum power_supply_property tcpm_psy_props[] = {
7543 	POWER_SUPPLY_PROP_USB_TYPE,
7544 	POWER_SUPPLY_PROP_ONLINE,
7545 	POWER_SUPPLY_PROP_VOLTAGE_MIN,
7546 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
7547 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
7548 	POWER_SUPPLY_PROP_CURRENT_MAX,
7549 	POWER_SUPPLY_PROP_CURRENT_NOW,
7550 };
7551 
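/*
 * Note on units: tcpm stores voltages in mV and currents in mA, while the
 * power-supply core expects uV and uA, hence the "* 1000" scaling in the
 * getters below and the "/ 1000" on the set side.
 */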
7552 static int tcpm_psy_get_online(struct tcpm_port *port,
7553 			       union power_supply_propval *val)
7554 {
7555 	if (port->vbus_charge) {
7556 		if (port->pps_data.active)
7557 			val->intval = TCPM_PSY_PROG_ONLINE;
7558 		else
7559 			val->intval = TCPM_PSY_FIXED_ONLINE;
7560 	} else {
7561 		val->intval = TCPM_PSY_OFFLINE;
7562 	}
7563 
7564 	return 0;
7565 }
7566 
7567 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
7568 				    union power_supply_propval *val)
7569 {
7570 	if (port->pps_data.active)
7571 		val->intval = port->pps_data.min_volt * 1000;
7572 	else
7573 		val->intval = port->supply_voltage * 1000;
7574 
7575 	return 0;
7576 }
7577 
7578 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
7579 				    union power_supply_propval *val)
7580 {
7581 	if (port->pps_data.active)
7582 		val->intval = port->pps_data.max_volt * 1000;
7583 	else
7584 		val->intval = port->supply_voltage * 1000;
7585 
7586 	return 0;
7587 }
7588 
7589 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
7590 				    union power_supply_propval *val)
7591 {
7592 	val->intval = port->supply_voltage * 1000;
7593 
7594 	return 0;
7595 }
7596 
7597 static int tcpm_psy_get_current_max(struct tcpm_port *port,
7598 				    union power_supply_propval *val)
7599 {
7600 	if (port->pps_data.active)
7601 		val->intval = port->pps_data.max_curr * 1000;
7602 	else
7603 		val->intval = port->current_limit * 1000;
7604 
7605 	return 0;
7606 }
7607 
7608 static int tcpm_psy_get_current_now(struct tcpm_port *port,
7609 				    union power_supply_propval *val)
7610 {
7611 	val->intval = port->current_limit * 1000;
7612 
7613 	return 0;
7614 }
7615 
7616 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
7617 					  union power_supply_propval *val)
7618 {
7619 	unsigned int src_mv, src_ma, max_src_uw = 0;
7620 	unsigned int i, tmp;
7621 
7622 	for (i = 0; i < port->nr_source_caps; i++) {
7623 		u32 pdo = port->source_caps[i];
7624 
7625 		if (pdo_type(pdo) == PDO_TYPE_FIXED) {
7626 			src_mv = pdo_fixed_voltage(pdo);
7627 			src_ma = pdo_max_current(pdo);
7628 			tmp = src_mv * src_ma;
7629 			max_src_uw = max(tmp, max_src_uw);
7630 		}
7631 	}
7632 
7633 	val->intval = max_src_uw;
7634 	return 0;
7635 }
7636 
7637 static int tcpm_psy_get_prop(struct power_supply *psy,
7638 			     enum power_supply_property psp,
7639 			     union power_supply_propval *val)
7640 {
7641 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7642 	int ret = 0;
7643 
7644 	switch (psp) {
7645 	case POWER_SUPPLY_PROP_USB_TYPE:
7646 		val->intval = port->usb_type;
7647 		break;
7648 	case POWER_SUPPLY_PROP_ONLINE:
7649 		ret = tcpm_psy_get_online(port, val);
7650 		break;
7651 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
7652 		ret = tcpm_psy_get_voltage_min(port, val);
7653 		break;
7654 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
7655 		ret = tcpm_psy_get_voltage_max(port, val);
7656 		break;
7657 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7658 		ret = tcpm_psy_get_voltage_now(port, val);
7659 		break;
7660 	case POWER_SUPPLY_PROP_CURRENT_MAX:
7661 		ret = tcpm_psy_get_current_max(port, val);
7662 		break;
7663 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7664 		ret = tcpm_psy_get_current_now(port, val);
7665 		break;
7666 	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
7667 		tcpm_psy_get_input_power_limit(port, val);
7668 		break;
7669 	default:
7670 		ret = -EINVAL;
7671 		break;
7672 	}
7673 
7674 	return ret;
7675 }
7676 
7677 static int tcpm_psy_set_online(struct tcpm_port *port,
7678 			       const union power_supply_propval *val)
7679 {
7680 	int ret;
7681 
7682 	switch (val->intval) {
7683 	case TCPM_PSY_FIXED_ONLINE:
7684 		ret = tcpm_pps_activate(port, false);
7685 		break;
7686 	case TCPM_PSY_PROG_ONLINE:
7687 		ret = tcpm_pps_activate(port, true);
7688 		break;
7689 	default:
7690 		ret = -EINVAL;
7691 		break;
7692 	}
7693 
7694 	return ret;
7695 }
7696 
7697 static int tcpm_psy_set_prop(struct power_supply *psy,
7698 			     enum power_supply_property psp,
7699 			     const union power_supply_propval *val)
7700 {
7701 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7702 	int ret;
7703 
7704 	/*
7705 	 * All the properties below are related to USB PD. The check needs to be
7706 	 * property specific when a non-pd related property is added.
7707 	 */
7708 	if (!port->pd_supported)
7709 		return -EOPNOTSUPP;
7710 
7711 	switch (psp) {
7712 	case POWER_SUPPLY_PROP_ONLINE:
7713 		ret = tcpm_psy_set_online(port, val);
7714 		break;
7715 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7716 		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
7717 		break;
7718 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7719 		if (val->intval > port->pps_data.max_curr * 1000)
7720 			ret = -EINVAL;
7721 		else
7722 			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
7723 		break;
7724 	default:
7725 		ret = -EINVAL;
7726 		break;
7727 	}
7728 	power_supply_changed(port->psy);
7729 	return ret;
7730 }
7731 
7732 static int tcpm_psy_prop_writeable(struct power_supply *psy,
7733 				   enum power_supply_property psp)
7734 {
7735 	switch (psp) {
7736 	case POWER_SUPPLY_PROP_ONLINE:
7737 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7738 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7739 		return 1;
7740 	default:
7741 		return 0;
7742 	}
7743 }
7744 
7745 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
7746 
7747 static int devm_tcpm_psy_register(struct tcpm_port *port)
7748 {
7749 	struct power_supply_config psy_cfg = {};
7750 	const char *port_dev_name = dev_name(port->dev);
7751 	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
7752 				     strlen(port_dev_name) + 1;
7753 	char *psy_name;
7754 
7755 	psy_cfg.drv_data = port;
7756 	psy_cfg.fwnode = dev_fwnode(port->dev);
7757 	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
7758 	if (!psy_name)
7759 		return -ENOMEM;
7760 
7761 	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
7762 		 port_dev_name);
7763 	port->psy_desc.name = psy_name;
7764 	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
7765 	port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C)  |
7766 				   BIT(POWER_SUPPLY_USB_TYPE_PD) |
7767 				   BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
7768 	port->psy_desc.properties = tcpm_psy_props;
7769 	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
7770 	port->psy_desc.get_property = tcpm_psy_get_prop;
7771 	port->psy_desc.set_property = tcpm_psy_set_prop;
7772 	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
7773 
7774 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
7775 
7776 	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
7777 					       &psy_cfg);
7778 
7779 	return PTR_ERR_OR_ZERO(port->psy);
7780 }
7781 
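/*
 * hrtimer expiry handlers. They only queue the corresponding kthread work
 * while port->registered is set, so a timer that fires while the port is
 * being torn down does not queue new work.
 */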
7782 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
7783 {
7784 	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
7785 
7786 	if (port->registered)
7787 		kthread_queue_work(port->wq, &port->state_machine);
7788 	return HRTIMER_NORESTART;
7789 }
7790 
7791 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
7792 {
7793 	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
7794 
7795 	if (port->registered)
7796 		kthread_queue_work(port->wq, &port->vdm_state_machine);
7797 	return HRTIMER_NORESTART;
7798 }
7799 
7800 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
7801 {
7802 	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
7803 
7804 	if (port->registered)
7805 		kthread_queue_work(port->wq, &port->enable_frs);
7806 	return HRTIMER_NORESTART;
7807 }
7808 
7809 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
7810 {
7811 	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
7812 
7813 	if (port->registered)
7814 		kthread_queue_work(port->wq, &port->send_discover_work);
7815 	return HRTIMER_NORESTART;
7816 }
7817 
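/*
 * tcpm_register_port - register a Type-C port with the TCPM state machine.
 * @dev is the parent device of the low-level TCPC driver and @tcpc supplies
 * its callbacks; get_vbus, set_cc, get_cc, set_polarity, set_vconn, set_vbus,
 * set_pd_rx, set_roles and pd_transmit are mandatory. Returns the new port
 * or an ERR_PTR() on failure.
 *
 * A minimal caller sketch (names hypothetical, error handling trimmed):
 *
 *	chip->tcpc.fwnode = device_get_named_child_node(dev, "connector");
 *	chip->tcpc.get_vbus = my_tcpc_get_vbus;
 *	...
 *	chip->port = tcpm_register_port(dev, &chip->tcpc);
 *	if (IS_ERR(chip->port))
 *		return PTR_ERR(chip->port);
 */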
7818 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
7819 {
7820 	struct tcpm_port *port;
7821 	int err;
7822 
7823 	if (!dev || !tcpc ||
7824 	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
7825 	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
7826 	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
7827 		return ERR_PTR(-EINVAL);
7828 
7829 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
7830 	if (!port)
7831 		return ERR_PTR(-ENOMEM);
7832 
7833 	port->dev = dev;
7834 	port->tcpc = tcpc;
7835 
7836 	mutex_init(&port->lock);
7837 	mutex_init(&port->swap_lock);
7838 
7839 	port->wq = kthread_run_worker(0, dev_name(dev));
7840 	if (IS_ERR(port->wq))
7841 		return ERR_CAST(port->wq);
7842 	sched_set_fifo(port->wq->task);
7843 
7844 	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
7845 	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
7846 	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
7847 	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
7848 	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
7849 	hrtimer_setup(&port->state_machine_timer, state_machine_timer_handler, CLOCK_MONOTONIC,
7850 		      HRTIMER_MODE_REL);
7851 	hrtimer_setup(&port->vdm_state_machine_timer, vdm_state_machine_timer_handler,
7852 		      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7853 	hrtimer_setup(&port->enable_frs_timer, enable_frs_timer_handler, CLOCK_MONOTONIC,
7854 		      HRTIMER_MODE_REL);
7855 	hrtimer_setup(&port->send_discover_timer, send_discover_timer_handler, CLOCK_MONOTONIC,
7856 		      HRTIMER_MODE_REL);
7857 
7858 	spin_lock_init(&port->pd_event_lock);
7859 
7860 	init_completion(&port->tx_complete);
7861 	init_completion(&port->swap_complete);
7862 	init_completion(&port->pps_complete);
7863 	tcpm_debugfs_init(port);
7864 
7865 	err = tcpm_fw_get_caps(port, tcpc->fwnode);
7866 	if (err < 0)
7867 		goto out_destroy_wq;
7868 	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
7869 	if (err < 0)
7870 		goto out_destroy_wq;
7871 
7872 	tcpm_fw_get_timings(port, tcpc->fwnode);
7873 	tcpm_fw_get_pd_revision(port, tcpc->fwnode);
7874 
7875 	port->try_role = port->typec_caps.prefer_role;
7876 
7877 	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
7878 
7879 	if (port->pd_rev.rev_major)
7880 		port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
7881 					       port->pd_rev.rev_minor;
7882 	else
7883 		port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
7884 
7885 	port->typec_caps.svdm_version = SVDM_VER_2_0;
7886 	port->typec_caps.driver_data = port;
7887 	port->typec_caps.ops = &tcpm_ops;
7888 	port->typec_caps.orientation_aware = 1;
7889 
7890 	port->partner_desc.identity = &port->partner_ident;
7891 
7892 	port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
7893 	if (!port->role_sw)
7894 		port->role_sw = usb_role_switch_get(port->dev);
7895 	if (IS_ERR(port->role_sw)) {
7896 		err = PTR_ERR(port->role_sw);
7897 		goto out_destroy_wq;
7898 	}
7899 
7900 	err = devm_tcpm_psy_register(port);
7901 	if (err)
7902 		goto out_role_sw_put;
7903 	power_supply_changed(port->psy);
7904 
7905 	err = tcpm_port_register_pd(port);
7906 	if (err)
7907 		goto out_role_sw_put;
7908 
7909 	if (port->pds)
7910 		port->typec_caps.pd = port->pds[0];
7911 
7912 	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
7913 	if (IS_ERR(port->typec_port)) {
7914 		err = PTR_ERR(port->typec_port);
7915 		goto out_unregister_pd;
7916 	}
7917 
7918 	typec_port_register_altmodes(port->typec_port,
7919 				     &tcpm_altmode_ops, port,
7920 				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
7921 	typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
7922 				      &tcpm_cable_ops);
7923 	port->registered = true;
7924 
7925 	mutex_lock(&port->lock);
7926 	tcpm_init(port);
7927 	mutex_unlock(&port->lock);
7928 
7929 	tcpm_log(port, "%s: registered", dev_name(dev));
7930 	return port;
7931 
7932 out_unregister_pd:
7933 	tcpm_port_unregister_pd(port);
7934 out_role_sw_put:
7935 	usb_role_switch_put(port->role_sw);
7936 out_destroy_wq:
7937 	tcpm_debugfs_exit(port);
7938 	kthread_destroy_worker(port->wq);
7939 	return ERR_PTR(err);
7940 }
7941 EXPORT_SYMBOL_GPL(tcpm_register_port);
7942 
7943 void tcpm_unregister_port(struct tcpm_port *port)
7944 {
7945 	int i;
7946 
7947 	port->registered = false;
7948 	kthread_destroy_worker(port->wq);
7949 
7950 	hrtimer_cancel(&port->send_discover_timer);
7951 	hrtimer_cancel(&port->enable_frs_timer);
7952 	hrtimer_cancel(&port->vdm_state_machine_timer);
7953 	hrtimer_cancel(&port->state_machine_timer);
7954 
7955 	tcpm_reset_port(port);
7956 
7957 	tcpm_port_unregister_pd(port);
7958 
7959 	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
7960 		typec_unregister_altmode(port->port_altmode[i]);
7961 	typec_unregister_port(port->typec_port);
7962 	usb_role_switch_put(port->role_sw);
7963 	tcpm_debugfs_exit(port);
7964 }
7965 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7966 
7967 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
7968 MODULE_DESCRIPTION("USB Type-C Port Manager");
7969 MODULE_LICENSE("GPL");
7970