xref: /linux/drivers/usb/typec/tcpm/tcpm.c (revision cb82ca153949c6204af793de24b18a04236e79fd)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2015-2017 Google, Inc
4  *
5  * USB Power Delivery protocol stack.
6  */
7 
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/string_choices.h>
25 #include <linux/usb.h>
26 #include <linux/usb/pd.h>
27 #include <linux/usb/pd_ado.h>
28 #include <linux/usb/pd_bdo.h>
29 #include <linux/usb/pd_ext_sdb.h>
30 #include <linux/usb/pd_vdo.h>
31 #include <linux/usb/role.h>
32 #include <linux/usb/tcpm.h>
33 #include <linux/usb/typec_altmode.h>
34 
35 #include <uapi/linux/sched/types.h>
36 
37 #define FOREACH_STATE(S)			\
38 	S(INVALID_STATE),			\
39 	S(TOGGLING),			\
40 	S(CHECK_CONTAMINANT),			\
41 	S(SRC_UNATTACHED),			\
42 	S(SRC_ATTACH_WAIT),			\
43 	S(SRC_ATTACHED),			\
44 	S(SRC_STARTUP),				\
45 	S(SRC_SEND_CAPABILITIES),		\
46 	S(SRC_SEND_CAPABILITIES_TIMEOUT),	\
47 	S(SRC_NEGOTIATE_CAPABILITIES),		\
48 	S(SRC_TRANSITION_SUPPLY),		\
49 	S(SRC_READY),				\
50 	S(SRC_WAIT_NEW_CAPABILITIES),		\
51 						\
52 	S(SNK_UNATTACHED),			\
53 	S(SNK_ATTACH_WAIT),			\
54 	S(SNK_DEBOUNCED),			\
55 	S(SNK_ATTACHED),			\
56 	S(SNK_STARTUP),				\
57 	S(SNK_DISCOVERY),			\
58 	S(SNK_DISCOVERY_DEBOUNCE),		\
59 	S(SNK_DISCOVERY_DEBOUNCE_DONE),		\
60 	S(SNK_WAIT_CAPABILITIES),		\
61 	S(SNK_WAIT_CAPABILITIES_TIMEOUT),	\
62 	S(SNK_NEGOTIATE_CAPABILITIES),		\
63 	S(SNK_NEGOTIATE_PPS_CAPABILITIES),	\
64 	S(SNK_TRANSITION_SINK),			\
65 	S(SNK_TRANSITION_SINK_VBUS),		\
66 	S(SNK_READY),				\
67 						\
68 	S(ACC_UNATTACHED),			\
69 	S(DEBUG_ACC_ATTACHED),			\
70 	S(AUDIO_ACC_ATTACHED),			\
71 	S(AUDIO_ACC_DEBOUNCE),			\
72 						\
73 	S(HARD_RESET_SEND),			\
74 	S(HARD_RESET_START),			\
75 	S(SRC_HARD_RESET_VBUS_OFF),		\
76 	S(SRC_HARD_RESET_VBUS_ON),		\
77 	S(SNK_HARD_RESET_SINK_OFF),		\
78 	S(SNK_HARD_RESET_WAIT_VBUS),		\
79 	S(SNK_HARD_RESET_SINK_ON),		\
80 						\
81 	S(SOFT_RESET),				\
82 	S(SRC_SOFT_RESET_WAIT_SNK_TX),		\
83 	S(SNK_SOFT_RESET),			\
84 	S(SOFT_RESET_SEND),			\
85 						\
86 	S(DR_SWAP_ACCEPT),			\
87 	S(DR_SWAP_SEND),			\
88 	S(DR_SWAP_SEND_TIMEOUT),		\
89 	S(DR_SWAP_CANCEL),			\
90 	S(DR_SWAP_CHANGE_DR),			\
91 						\
92 	S(PR_SWAP_ACCEPT),			\
93 	S(PR_SWAP_SEND),			\
94 	S(PR_SWAP_SEND_TIMEOUT),		\
95 	S(PR_SWAP_CANCEL),			\
96 	S(PR_SWAP_START),			\
97 	S(PR_SWAP_SRC_SNK_TRANSITION_OFF),	\
98 	S(PR_SWAP_SRC_SNK_SOURCE_OFF),		\
99 	S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
100 	S(PR_SWAP_SRC_SNK_SINK_ON),		\
101 	S(PR_SWAP_SNK_SRC_SINK_OFF),		\
102 	S(PR_SWAP_SNK_SRC_SOURCE_ON),		\
103 	S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP),    \
104 						\
105 	S(VCONN_SWAP_ACCEPT),			\
106 	S(VCONN_SWAP_SEND),			\
107 	S(VCONN_SWAP_SEND_TIMEOUT),		\
108 	S(VCONN_SWAP_CANCEL),			\
109 	S(VCONN_SWAP_START),			\
110 	S(VCONN_SWAP_WAIT_FOR_VCONN),		\
111 	S(VCONN_SWAP_TURN_ON_VCONN),		\
112 	S(VCONN_SWAP_TURN_OFF_VCONN),		\
113 	S(VCONN_SWAP_SEND_SOFT_RESET),		\
114 						\
115 	S(FR_SWAP_SEND),			\
116 	S(FR_SWAP_SEND_TIMEOUT),		\
117 	S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF),			\
118 	S(FR_SWAP_SNK_SRC_NEW_SINK_READY),		\
119 	S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED),	\
120 	S(FR_SWAP_CANCEL),			\
121 						\
122 	S(SNK_TRY),				\
123 	S(SNK_TRY_WAIT),			\
124 	S(SNK_TRY_WAIT_DEBOUNCE),               \
125 	S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS),    \
126 	S(SRC_TRYWAIT),				\
127 	S(SRC_TRYWAIT_DEBOUNCE),		\
128 	S(SRC_TRYWAIT_UNATTACHED),		\
129 						\
130 	S(SRC_TRY),				\
131 	S(SRC_TRY_WAIT),                        \
132 	S(SRC_TRY_DEBOUNCE),			\
133 	S(SNK_TRYWAIT),				\
134 	S(SNK_TRYWAIT_DEBOUNCE),		\
135 	S(SNK_TRYWAIT_VBUS),			\
136 	S(BIST_RX),				\
137 						\
138 	S(GET_STATUS_SEND),			\
139 	S(GET_STATUS_SEND_TIMEOUT),		\
140 	S(GET_PPS_STATUS_SEND),			\
141 	S(GET_PPS_STATUS_SEND_TIMEOUT),		\
142 						\
143 	S(GET_SINK_CAP),			\
144 	S(GET_SINK_CAP_TIMEOUT),		\
145 						\
146 	S(ERROR_RECOVERY),			\
147 	S(PORT_RESET),				\
148 	S(PORT_RESET_WAIT_OFF),			\
149 						\
150 	S(AMS_START),				\
151 	S(CHUNK_NOT_SUPP),			\
152 						\
153 	S(SRC_VDM_IDENTITY_REQUEST)
154 
155 #define FOREACH_AMS(S)				\
156 	S(NONE_AMS),				\
157 	S(POWER_NEGOTIATION),			\
158 	S(GOTOMIN),				\
159 	S(SOFT_RESET_AMS),			\
160 	S(HARD_RESET),				\
161 	S(CABLE_RESET),				\
162 	S(GET_SOURCE_CAPABILITIES),		\
163 	S(GET_SINK_CAPABILITIES),		\
164 	S(POWER_ROLE_SWAP),			\
165 	S(FAST_ROLE_SWAP),			\
166 	S(DATA_ROLE_SWAP),			\
167 	S(VCONN_SWAP),				\
168 	S(SOURCE_ALERT),			\
169 	S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
170 	S(GETTING_SOURCE_SINK_STATUS),		\
171 	S(GETTING_BATTERY_CAPABILITIES),	\
172 	S(GETTING_BATTERY_STATUS),		\
173 	S(GETTING_MANUFACTURER_INFORMATION),	\
174 	S(SECURITY),				\
175 	S(FIRMWARE_UPDATE),			\
176 	S(DISCOVER_IDENTITY),			\
177 	S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY),	\
178 	S(DISCOVER_SVIDS),			\
179 	S(DISCOVER_MODES),			\
180 	S(DFP_TO_UFP_ENTER_MODE),		\
181 	S(DFP_TO_UFP_EXIT_MODE),		\
182 	S(DFP_TO_CABLE_PLUG_ENTER_MODE),	\
183 	S(DFP_TO_CABLE_PLUG_EXIT_MODE),		\
184 	S(ATTENTION),				\
185 	S(BIST),				\
186 	S(UNSTRUCTURED_VDMS),			\
187 	S(STRUCTURED_VDMS),			\
188 	S(COUNTRY_INFO),			\
189 	S(COUNTRY_CODES),			\
190 	S(REVISION_INFORMATION)
191 
192 #define GENERATE_ENUM(e)	e
193 #define GENERATE_STRING(s)	#s
194 
195 enum tcpm_state {
196 	FOREACH_STATE(GENERATE_ENUM)
197 };
198 
199 static const char * const tcpm_states[] = {
200 	FOREACH_STATE(GENERATE_STRING)
201 };
202 
203 enum tcpm_ams {
204 	FOREACH_AMS(GENERATE_ENUM)
205 };
206 
207 static const char * const tcpm_ams_str[] = {
208 	FOREACH_AMS(GENERATE_STRING)
209 };
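
/*
 * For illustration, the X-macros above expand roughly to
 *
 *	enum tcpm_state { INVALID_STATE, TOGGLING, ... };
 *	static const char * const tcpm_states[] = { "INVALID_STATE", "TOGGLING", ... };
 *
 * so each enum and its string table stay in sync by construction, and
 * tcpm_states[port->state] / tcpm_ams_str[port->ams] can be used directly
 * when logging state transitions.
 */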
210 
211 enum vdm_states {
212 	VDM_STATE_ERR_BUSY = -3,
213 	VDM_STATE_ERR_SEND = -2,
214 	VDM_STATE_ERR_TMOUT = -1,
215 	VDM_STATE_DONE = 0,
216 	/* Anything >0 represents an active state */
217 	VDM_STATE_READY = 1,
218 	VDM_STATE_BUSY = 2,
219 	VDM_STATE_WAIT_RSP_BUSY = 3,
220 	VDM_STATE_SEND_MESSAGE = 4,
221 };
222 
223 enum pd_msg_request {
224 	PD_MSG_NONE = 0,
225 	PD_MSG_CTRL_REJECT,
226 	PD_MSG_CTRL_WAIT,
227 	PD_MSG_CTRL_NOT_SUPP,
228 	PD_MSG_DATA_SINK_CAP,
229 	PD_MSG_DATA_SOURCE_CAP,
230 	PD_MSG_DATA_REV,
231 };
232 
233 enum adev_actions {
234 	ADEV_NONE = 0,
235 	ADEV_NOTIFY_USB_AND_QUEUE_VDM,
236 	ADEV_QUEUE_VDM,
237 	ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
238 	ADEV_ATTENTION,
239 };
240 
241 /*
242  * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
243  * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
244  * Version 1.2"
245  */
246 enum frs_typec_current {
247 	FRS_NOT_SUPPORTED,
248 	FRS_DEFAULT_POWER,
249 	FRS_5V_1P5A,
250 	FRS_5V_3A,
251 };
252 
253 /* Events from low level driver */
254 
255 #define TCPM_CC_EVENT		BIT(0)
256 #define TCPM_VBUS_EVENT		BIT(1)
257 #define TCPM_RESET_EVENT	BIT(2)
258 #define TCPM_FRS_EVENT		BIT(3)
259 #define TCPM_SOURCING_VBUS	BIT(4)
260 #define TCPM_PORT_CLEAN		BIT(5)
261 #define TCPM_PORT_ERROR		BIT(6)
262 
263 #define LOG_BUFFER_ENTRIES	1024
264 #define LOG_BUFFER_ENTRY_SIZE	128
265 
266 /* Alternate mode support */
267 
268 #define SVID_DISCOVERY_MAX	16
269 #define ALTMODE_DISCOVERY_MAX	(SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
270 
271 #define GET_SINK_CAP_RETRY_MS	100
272 #define SEND_DISCOVER_RETRY_MS	100
273 
274 struct pd_mode_data {
275 	int svid_index;		/* current SVID index		*/
276 	int nsvids;
277 	u16 svids[SVID_DISCOVERY_MAX];
278 	int altmodes;		/* number of alternate modes	*/
279 	struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
280 };
281 
282 /*
283  * @min_volt: Actual min voltage at the local port
284  * @req_min_volt: Requested min voltage to the port partner
285  * @max_volt: Actual max voltage at the local port
286  * @req_max_volt: Requested max voltage to the port partner
287  * @max_curr: Actual max current at the local port
288  * @req_max_curr: Requested max current of the port partner
289  * @req_out_volt: Requested output voltage to the port partner
290  * @req_op_curr: Requested operating current to the port partner
291  * @supported: Partner has at least one APDO hence supports PPS
292  * @active: PPS mode is active
293  */
294 struct pd_pps_data {
295 	u32 min_volt;
296 	u32 req_min_volt;
297 	u32 max_volt;
298 	u32 req_max_volt;
299 	u32 max_curr;
300 	u32 req_max_curr;
301 	u32 req_out_volt;
302 	u32 req_op_curr;
303 	bool supported;
304 	bool active;
305 };
306 
307 struct pd_data {
308 	struct usb_power_delivery *pd;
309 	struct usb_power_delivery_capabilities *source_cap;
310 	struct usb_power_delivery_capabilities_desc source_desc;
311 	struct usb_power_delivery_capabilities *sink_cap;
312 	struct usb_power_delivery_capabilities_desc sink_desc;
313 	unsigned int operating_snk_mw;
314 };
315 
316 struct pd_revision_info {
317 	u8 rev_major;
318 	u8 rev_minor;
319 	u8 ver_major;
320 	u8 ver_minor;
321 };
322 
323 /*
324  * @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
325  * @ps_src_off_time: Deadline (in ms) for tPSSourceOff timer
326  * @cc_debounce_time: Deadline (in ms) for tCCDebounce timer
327  */
328 struct pd_timings {
329 	u32 sink_wait_cap_time;
330 	u32 ps_src_off_time;
331 	u32 cc_debounce_time;
332 	u32 snk_bc12_cmpletion_time;
333 };
334 
335 struct tcpm_port {
336 	struct device *dev;
337 
338 	struct mutex lock;		/* tcpm state machine lock */
339 	struct kthread_worker *wq;
340 
341 	struct typec_capability typec_caps;
342 	struct typec_port *typec_port;
343 
344 	struct tcpc_dev	*tcpc;
345 	struct usb_role_switch *role_sw;
346 
347 	enum typec_role vconn_role;
348 	enum typec_role pwr_role;
349 	enum typec_data_role data_role;
350 	enum typec_pwr_opmode pwr_opmode;
351 
352 	struct usb_pd_identity partner_ident;
353 	struct typec_partner_desc partner_desc;
354 	struct typec_partner *partner;
355 
356 	struct usb_pd_identity cable_ident;
357 	struct typec_cable_desc cable_desc;
358 	struct typec_cable *cable;
359 	struct typec_plug_desc plug_prime_desc;
360 	struct typec_plug *plug_prime;
361 
362 	enum typec_cc_status cc_req;
363 	enum typec_cc_status src_rp;	/* used only if pd_supported == false */
364 
365 	enum typec_cc_status cc1;
366 	enum typec_cc_status cc2;
367 	enum typec_cc_polarity polarity;
368 
369 	bool attached;
370 	bool connected;
371 	bool registered;
372 	bool pd_supported;
373 	enum typec_port_type port_type;
374 
375 	/*
376 	 * Set to true when vbus is greater than VSAFE5V min.
377 	 * Set to false when vbus falls below vSinkDisconnect max threshold.
378 	 */
379 	bool vbus_present;
380 
381 	/*
382 	 * Set to true when vbus is less than VSAFE0V max.
383 	 * Set to false when vbus is greater than VSAFE0V max.
384 	 */
385 	bool vbus_vsafe0v;
386 
387 	bool vbus_never_low;
388 	bool vbus_source;
389 	bool vbus_charge;
390 
391 	/* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
392 	bool send_discover;
393 	bool op_vsafe5v;
394 
395 	int try_role;
396 	int try_snk_count;
397 	int try_src_count;
398 
399 	enum pd_msg_request queued_message;
400 
401 	enum tcpm_state enter_state;
402 	enum tcpm_state prev_state;
403 	enum tcpm_state state;
404 	enum tcpm_state delayed_state;
405 	ktime_t delayed_runtime;
406 	unsigned long delay_ms;
407 
408 	spinlock_t pd_event_lock;
409 	u32 pd_events;
410 
411 	struct kthread_work event_work;
412 	struct hrtimer state_machine_timer;
413 	struct kthread_work state_machine;
414 	struct hrtimer vdm_state_machine_timer;
415 	struct kthread_work vdm_state_machine;
416 	struct hrtimer enable_frs_timer;
417 	struct kthread_work enable_frs;
418 	struct hrtimer send_discover_timer;
419 	struct kthread_work send_discover_work;
420 	bool state_machine_running;
421 	/* Set to true while the VDM state machine still has actions to run. */
422 	bool vdm_sm_running;
423 
424 	struct completion tx_complete;
425 	enum tcpm_transmit_status tx_status;
426 
427 	struct mutex swap_lock;		/* swap command lock */
428 	bool swap_pending;
429 	bool non_pd_role_swap;
430 	struct completion swap_complete;
431 	int swap_status;
432 
433 	unsigned int negotiated_rev;
434 	unsigned int message_id;
435 	unsigned int caps_count;
436 	unsigned int hard_reset_count;
437 	bool pd_capable;
438 	bool explicit_contract;
439 	unsigned int rx_msgid;
440 
441 	/* USB PD objects */
442 	struct usb_power_delivery **pds;
443 	struct pd_data **pd_list;
444 	struct usb_power_delivery_capabilities *port_source_caps;
445 	struct usb_power_delivery_capabilities *port_sink_caps;
446 	struct usb_power_delivery *partner_pd;
447 	struct usb_power_delivery_capabilities *partner_source_caps;
448 	struct usb_power_delivery_capabilities *partner_sink_caps;
449 	struct usb_power_delivery *selected_pd;
450 
451 	/* Partner capabilities/requests */
452 	u32 sink_request;
453 	u32 source_caps[PDO_MAX_OBJECTS];
454 	unsigned int nr_source_caps;
455 	u32 sink_caps[PDO_MAX_OBJECTS];
456 	unsigned int nr_sink_caps;
457 
458 	/* Local capabilities */
459 	unsigned int pd_count;
460 	u32 src_pdo[PDO_MAX_OBJECTS];
461 	unsigned int nr_src_pdo;
462 	u32 snk_pdo[PDO_MAX_OBJECTS];
463 	unsigned int nr_snk_pdo;
464 	u32 snk_vdo_v1[VDO_MAX_OBJECTS];
465 	unsigned int nr_snk_vdo_v1;
466 	u32 snk_vdo[VDO_MAX_OBJECTS];
467 	unsigned int nr_snk_vdo;
468 
469 	unsigned int operating_snk_mw;
470 	bool update_sink_caps;
471 
472 	/* Requested current / voltage to the port partner */
473 	u32 req_current_limit;
474 	u32 req_supply_voltage;
475 	/* Actual current / voltage limit of the local port */
476 	u32 current_limit;
477 	u32 supply_voltage;
478 
479 	/* Used to export TA voltage and current */
480 	struct power_supply *psy;
481 	struct power_supply_desc psy_desc;
482 	enum power_supply_usb_type usb_type;
483 
484 	u32 bist_request;
485 
486 	/* PD state for Vendor Defined Messages */
487 	enum vdm_states vdm_state;
488 	u32 vdm_retries;
489 	/* next Vendor Defined Message to send */
490 	u32 vdo_data[VDO_MAX_SIZE];
491 	u8 vdo_count;
492 	/* VDO to retry if UFP responder replied busy */
493 	u32 vdo_retry;
494 
495 	/* PPS */
496 	struct pd_pps_data pps_data;
497 	struct completion pps_complete;
498 	bool pps_pending;
499 	int pps_status;
500 
501 	/* Alternate mode data */
502 	struct pd_mode_data mode_data;
503 	struct pd_mode_data mode_data_prime;
504 	struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
505 	struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
506 	struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
507 
508 	/* Deadline in jiffies to exit src_try_wait state */
509 	unsigned long max_wait;
510 
511 	/* port belongs to a self powered device */
512 	bool self_powered;
513 
514 	/* Sink FRS */
515 	enum frs_typec_current new_source_frs_current;
516 
517 	/* Sink caps have been queried */
518 	bool sink_cap_done;
519 
520 	/* Collision Avoidance and Atomic Message Sequence */
521 	enum tcpm_state upcoming_state;
522 	enum tcpm_ams ams;
523 	enum tcpm_ams next_ams;
524 	bool in_ams;
525 
526 	/* Auto vbus discharge status */
527 	bool auto_vbus_discharge_enabled;
528 
529 	/*
530 	 * When set, the port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
531 	 * switches to the actual current limit after receiving PD_CTRL_PSRDY on a PD link,
532 	 * or upon entering SNK_READY for a non-PD link.
533 	 */
534 	bool slow_charger_loop;
535 
536 	/*
537 	 * When true, the lower level driver has indicated potential presence of
538 	 * contaminant in the connector pins, based on the tcpm state machine
539 	 * transitions.
540 	 */
541 	bool potential_contaminant;
542 
543 	/* SOP* Related Fields */
544 	/*
545 	 * Flag to determine if SOP' Discover Identity is available. The flag
546 	 * is set if Discover Identity on SOP' does not immediately follow
547 	 * Discover Identity on SOP.
548 	 */
549 	bool send_discover_prime;
550 	/*
551 	 * tx_sop_type determines which SOP* a message is being sent on.
552 	 * For messages that are queued and not sent immediately such as in
553 	 * tcpm_queue_message or messages that send after state changes,
554 	 * the tx_sop_type is set accordingly.
555 	 */
556 	enum tcpm_transmit_type tx_sop_type;
557 	/*
558 	 * Prior to discovering the port partner's Specification Revision, the
559 	 * Vconn source and cable plug will use the lower of their two revisions.
560 	 *
561 	 * When the port partner's Specification Revision is discovered, the following
562 	 * rules are put in place.
563 	 *	1. If the cable revision (1) is lower than the revision negotiated
564 	 * between the port and partner (2), the port and partner will communicate
565 	 * on revision (2), but the port and cable will communicate on revision (1).
566 	 *	2. If the cable revision (1) is higher than the revision negotiated
567 	 * between the port and partner (2), the port and partner will communicate
568 	 * on revision (2), and the port and cable will communicate on revision (2)
569 	 * as well.
570 	 */
571 	unsigned int negotiated_rev_prime;
572 	/*
573 	 * Each SOP* type must maintain its own tx and rx message IDs
574 	 */
575 	unsigned int message_id_prime;
576 	unsigned int rx_msgid_prime;
577 
578 	/* Timer deadline values configured at runtime */
579 	struct pd_timings timings;
580 
581 	/* Indicates maximum (revision, version) supported */
582 	struct pd_revision_info pd_rev;
583 #ifdef CONFIG_DEBUG_FS
584 	struct dentry *dentry;
585 	struct mutex logbuffer_lock;	/* log buffer access lock */
586 	int logbuffer_head;
587 	int logbuffer_tail;
588 	u8 *logbuffer[LOG_BUFFER_ENTRIES];
589 #endif
590 };
591 
592 struct pd_rx_event {
593 	struct kthread_work work;
594 	struct tcpm_port *port;
595 	struct pd_message msg;
596 	enum tcpm_transmit_type rx_sop_type;
597 };
598 
599 static const char * const pd_rev[] = {
600 	[PD_REV10]		= "rev1",
601 	[PD_REV20]		= "rev2",
602 	[PD_REV30]		= "rev3",
603 };
604 
605 #define tcpm_cc_is_sink(cc) \
606 	((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
607 	 (cc) == TYPEC_CC_RP_3_0)
608 
609 /* As long as cc is pulled up, we can consider the port as a sink. */
610 #define tcpm_port_is_sink(port) \
611 	(tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
612 
613 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
614 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
615 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
616 
617 #define tcpm_port_is_source(port) \
618 	((tcpm_cc_is_source((port)->cc1) && \
619 	 !tcpm_cc_is_source((port)->cc2)) || \
620 	 (tcpm_cc_is_source((port)->cc2) && \
621 	  !tcpm_cc_is_source((port)->cc1)))
622 
623 #define tcpm_port_is_debug(port) \
624 	(tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
625 
626 #define tcpm_port_is_audio(port) \
627 	(tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
628 
629 #define tcpm_port_is_audio_detached(port) \
630 	((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
631 	 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
632 
633 #define tcpm_try_snk(port) \
634 	((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
635 	(port)->port_type == TYPEC_PORT_DRP)
636 
637 #define tcpm_try_src(port) \
638 	((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
639 	(port)->port_type == TYPEC_PORT_DRP)
640 
641 #define tcpm_data_role_for_source(port) \
642 	((port)->typec_caps.data == TYPEC_PORT_UFP ? \
643 	TYPEC_DEVICE : TYPEC_HOST)
644 
645 #define tcpm_data_role_for_sink(port) \
646 	((port)->typec_caps.data == TYPEC_PORT_DFP ? \
647 	TYPEC_HOST : TYPEC_DEVICE)
648 
649 #define tcpm_sink_tx_ok(port) \
650 	(tcpm_port_is_sink(port) && \
651 	((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
652 
653 #define tcpm_wait_for_discharge(port) \
654 	(((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
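
/*
 * A rough truth table for the CC helpers above (Rd = partner pull-down,
 * Rp_* = partner pull-up, Ra = audio accessory termination):
 *
 *	exactly one of cc1/cc2 = Rd	-> tcpm_port_is_source()
 *	cc1 = Rd and cc2 = Rd		-> tcpm_port_is_debug()
 *	cc1 = Ra and cc2 = Ra		-> tcpm_port_is_audio()
 *	cc1 or cc2 = Rp_*		-> tcpm_port_is_sink()
 */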
655 
656 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
657 {
658 	if (port->port_type == TYPEC_PORT_DRP) {
659 		if (port->try_role == TYPEC_SINK)
660 			return SNK_UNATTACHED;
661 		else if (port->try_role == TYPEC_SOURCE)
662 			return SRC_UNATTACHED;
663 		/* Fall through to return SRC_UNATTACHED */
664 	} else if (port->port_type == TYPEC_PORT_SNK) {
665 		return SNK_UNATTACHED;
666 	}
667 	return SRC_UNATTACHED;
668 }
669 
670 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
671 {
672 	return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
673 		port->cc2 == TYPEC_CC_OPEN) ||
674 	       (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
675 				    port->cc1 == TYPEC_CC_OPEN) ||
676 				   (port->polarity == TYPEC_POLARITY_CC2 &&
677 				    port->cc2 == TYPEC_CC_OPEN)));
678 }
679 
680 /*
681  * Logging
682  */
683 
684 #ifdef CONFIG_DEBUG_FS
685 
686 static bool tcpm_log_full(struct tcpm_port *port)
687 {
688 	return port->logbuffer_tail ==
689 		(port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
690 }
691 
692 __printf(2, 0)
693 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
694 {
695 	char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
696 	u64 ts_nsec = local_clock();
697 	unsigned long rem_nsec;
698 
699 	mutex_lock(&port->logbuffer_lock);
700 	if (!port->logbuffer[port->logbuffer_head]) {
701 		port->logbuffer[port->logbuffer_head] =
702 				kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
703 		if (!port->logbuffer[port->logbuffer_head]) {
704 			mutex_unlock(&port->logbuffer_lock);
705 			return;
706 		}
707 	}
708 
709 	vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
710 
711 	if (tcpm_log_full(port)) {
712 		port->logbuffer_head = max(port->logbuffer_head - 1, 0);
713 		strcpy(tmpbuffer, "overflow");
714 	}
715 
716 	if (port->logbuffer_head < 0 ||
717 	    port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
718 		dev_warn(port->dev,
719 			 "Bad log buffer index %d\n", port->logbuffer_head);
720 		goto abort;
721 	}
722 
723 	if (!port->logbuffer[port->logbuffer_head]) {
724 		dev_warn(port->dev,
725 			 "Log buffer index %d is NULL\n", port->logbuffer_head);
726 		goto abort;
727 	}
728 
729 	rem_nsec = do_div(ts_nsec, 1000000000);
730 	scnprintf(port->logbuffer[port->logbuffer_head],
731 		  LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
732 		  (unsigned long)ts_nsec, rem_nsec / 1000,
733 		  tmpbuffer);
734 	port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
735 
736 abort:
737 	mutex_unlock(&port->logbuffer_lock);
738 }
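
/*
 * The debug log is a fixed ring of LOG_BUFFER_ENTRIES lines, allocated lazily
 * on first use.  When the ring is full, the head is stepped back one slot and
 * that slot is rewritten with an "overflow" marker, so readers of the debugfs
 * "log" file can see that messages were dropped.
 */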
739 
740 __printf(2, 3)
741 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
742 {
743 	va_list args;
744 
745 	/* Do not log while disconnected and unattached */
746 	if (tcpm_port_is_disconnected(port) &&
747 	    (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
748 	     port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
749 		return;
750 
751 	va_start(args, fmt);
752 	_tcpm_log(port, fmt, args);
753 	va_end(args);
754 }
755 
756 __printf(2, 3)
757 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
758 {
759 	va_list args;
760 
761 	va_start(args, fmt);
762 	_tcpm_log(port, fmt, args);
763 	va_end(args);
764 }
765 
766 static void tcpm_log_source_caps(struct tcpm_port *port)
767 {
768 	int i;
769 
770 	for (i = 0; i < port->nr_source_caps; i++) {
771 		u32 pdo = port->source_caps[i];
772 		enum pd_pdo_type type = pdo_type(pdo);
773 		char msg[64];
774 
775 		switch (type) {
776 		case PDO_TYPE_FIXED:
777 			scnprintf(msg, sizeof(msg),
778 				  "%u mV, %u mA [%s%s%s%s%s%s]",
779 				  pdo_fixed_voltage(pdo),
780 				  pdo_max_current(pdo),
781 				  (pdo & PDO_FIXED_DUAL_ROLE) ?
782 							"R" : "",
783 				  (pdo & PDO_FIXED_SUSPEND) ?
784 							"S" : "",
785 				  (pdo & PDO_FIXED_HIGHER_CAP) ?
786 							"H" : "",
787 				  (pdo & PDO_FIXED_USB_COMM) ?
788 							"U" : "",
789 				  (pdo & PDO_FIXED_DATA_SWAP) ?
790 							"D" : "",
791 				  (pdo & PDO_FIXED_EXTPOWER) ?
792 							"E" : "");
793 			break;
794 		case PDO_TYPE_VAR:
795 			scnprintf(msg, sizeof(msg),
796 				  "%u-%u mV, %u mA",
797 				  pdo_min_voltage(pdo),
798 				  pdo_max_voltage(pdo),
799 				  pdo_max_current(pdo));
800 			break;
801 		case PDO_TYPE_BATT:
802 			scnprintf(msg, sizeof(msg),
803 				  "%u-%u mV, %u mW",
804 				  pdo_min_voltage(pdo),
805 				  pdo_max_voltage(pdo),
806 				  pdo_max_power(pdo));
807 			break;
808 		case PDO_TYPE_APDO:
809 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
810 				scnprintf(msg, sizeof(msg),
811 					  "%u-%u mV, %u mA",
812 					  pdo_pps_apdo_min_voltage(pdo),
813 					  pdo_pps_apdo_max_voltage(pdo),
814 					  pdo_pps_apdo_max_current(pdo));
815 			else
816 				strcpy(msg, "undefined APDO");
817 			break;
818 		default:
819 			strcpy(msg, "undefined");
820 			break;
821 		}
822 		tcpm_log(port, " PDO %d: type %d, %s",
823 			 i, type, msg);
824 	}
825 }
826 
827 static int tcpm_debug_show(struct seq_file *s, void *v)
828 {
829 	struct tcpm_port *port = s->private;
830 	int tail;
831 
832 	mutex_lock(&port->logbuffer_lock);
833 	tail = port->logbuffer_tail;
834 	while (tail != port->logbuffer_head) {
835 		seq_printf(s, "%s\n", port->logbuffer[tail]);
836 		tail = (tail + 1) % LOG_BUFFER_ENTRIES;
837 	}
838 	if (!seq_has_overflowed(s))
839 		port->logbuffer_tail = tail;
840 	mutex_unlock(&port->logbuffer_lock);
841 
842 	return 0;
843 }
844 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
845 
846 static void tcpm_debugfs_init(struct tcpm_port *port)
847 {
848 	char name[NAME_MAX];
849 
850 	mutex_init(&port->logbuffer_lock);
851 	snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
852 	port->dentry = debugfs_create_dir(name, usb_debug_root);
853 	debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
854 			    &tcpm_debug_fops);
855 }
856 
857 static void tcpm_debugfs_exit(struct tcpm_port *port)
858 {
859 	int i;
860 
861 	mutex_lock(&port->logbuffer_lock);
862 	for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
863 		kfree(port->logbuffer[i]);
864 		port->logbuffer[i] = NULL;
865 	}
866 	mutex_unlock(&port->logbuffer_lock);
867 
868 	debugfs_remove(port->dentry);
869 }
870 
871 #else
872 
873 __printf(2, 3)
874 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
875 __printf(2, 3)
876 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
877 static void tcpm_log_source_caps(struct tcpm_port *port) { }
878 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
879 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
880 
881 #endif
882 
883 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
884 {
885 	tcpm_log(port, "cc:=%d", cc);
886 	port->cc_req = cc;
887 	port->tcpc->set_cc(port->tcpc, cc);
888 }
889 
890 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
891 {
892 	int ret = 0;
893 
894 	if (port->tcpc->enable_auto_vbus_discharge) {
895 		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
896 		tcpm_log_force(port, "%s vbus discharge ret:%d",
897 			       str_enable_disable(enable), ret);
898 		if (!ret)
899 			port->auto_vbus_discharge_enabled = enable;
900 	}
901 
902 	return ret;
903 }
904 
905 static void tcpm_apply_rc(struct tcpm_port *port)
906 {
907 	/*
908 	 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
909 	 * when Vbus auto discharge on disconnect is enabled.
910 	 */
911 	if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
912 		tcpm_log(port, "Apply_RC");
913 		port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
914 		tcpm_enable_auto_vbus_discharge(port, false);
915 	}
916 }
917 
918 /*
919  * Determine RP value to set based on maximum current supported
920  * by a port if configured as source.
921  * Returns CC value to report to link partner.
922  */
923 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
924 {
925 	const u32 *src_pdo = port->src_pdo;
926 	int nr_pdo = port->nr_src_pdo;
927 	int i;
928 
929 	if (!port->pd_supported)
930 		return port->src_rp;
931 
932 	/*
933 	 * Search for first entry with matching voltage.
934 	 * It should report the maximum supported current.
935 	 */
936 	for (i = 0; i < nr_pdo; i++) {
937 		const u32 pdo = src_pdo[i];
938 
939 		if (pdo_type(pdo) == PDO_TYPE_FIXED &&
940 		    pdo_fixed_voltage(pdo) == 5000) {
941 			unsigned int curr = pdo_max_current(pdo);
942 
943 			if (curr >= 3000)
944 				return TYPEC_CC_RP_3_0;
945 			else if (curr >= 1500)
946 				return TYPEC_CC_RP_1_5;
947 			return TYPEC_CC_RP_DEF;
948 		}
949 	}
950 
951 	return TYPEC_CC_RP_DEF;
952 }
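
/*
 * Example: a 5 V / 3 A fixed PDO (PDO_FIXED(5000, 3000, ...)) makes the port
 * advertise Rp 3.0 A, while a 5 V / 900 mA fixed PDO falls back to the
 * default Rp.  Without PD support the statically configured src_rp is used
 * instead.
 */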
953 
954 static void tcpm_ams_finish(struct tcpm_port *port)
955 {
956 	tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
957 
958 	if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
959 		if (port->negotiated_rev >= PD_REV30)
960 			tcpm_set_cc(port, SINK_TX_OK);
961 		else
962 			tcpm_set_cc(port, SINK_TX_NG);
963 	} else if (port->pwr_role == TYPEC_SOURCE) {
964 		tcpm_set_cc(port, tcpm_rp_cc(port));
965 	}
966 
967 	port->in_ams = false;
968 	port->ams = NONE_AMS;
969 }
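
/*
 * PD 3.0 collision avoidance in short: a rev 3.0 source advertises SinkTxOk
 * (Rp 3.0 A) while the sink is free to initiate an AMS, and SinkTxNG
 * (Rp 1.5 A) while the source itself is about to initiate one.  On the sink
 * side, tcpm_sink_tx_ok() checks for the 3.0 A pull-up before starting an AMS.
 */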
970 
971 static int tcpm_pd_transmit(struct tcpm_port *port,
972 			    enum tcpm_transmit_type tx_sop_type,
973 			    const struct pd_message *msg)
974 {
975 	unsigned long time_left;
976 	int ret;
977 	unsigned int negotiated_rev;
978 
979 	switch (tx_sop_type) {
980 	case TCPC_TX_SOP_PRIME:
981 		negotiated_rev = port->negotiated_rev_prime;
982 		break;
983 	case TCPC_TX_SOP:
984 	default:
985 		negotiated_rev = port->negotiated_rev;
986 		break;
987 	}
988 
989 	if (msg)
990 		tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
991 	else
992 		tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
993 
994 	reinit_completion(&port->tx_complete);
995 	ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
996 	if (ret < 0)
997 		return ret;
998 
999 	mutex_unlock(&port->lock);
1000 	time_left = wait_for_completion_timeout(&port->tx_complete,
1001 						msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
1002 	mutex_lock(&port->lock);
1003 	if (!time_left)
1004 		return -ETIMEDOUT;
1005 
1006 	switch (port->tx_status) {
1007 	case TCPC_TX_SUCCESS:
1008 		switch (tx_sop_type) {
1009 		case TCPC_TX_SOP_PRIME:
1010 			port->message_id_prime = (port->message_id_prime + 1) &
1011 						 PD_HEADER_ID_MASK;
1012 			break;
1013 		case TCPC_TX_SOP:
1014 		default:
1015 			port->message_id = (port->message_id + 1) &
1016 					   PD_HEADER_ID_MASK;
1017 			break;
1018 		}
1019 		/*
1020 		 * USB PD rev 2.0, 8.3.2.2.1:
1021 		 * USB PD rev 3.0, 8.3.2.1.3:
1022 		 * "... Note that every AMS is Interruptible until the first
1023 		 * Message in the sequence has been successfully sent (GoodCRC
1024 		 * Message received)."
1025 		 */
1026 		if (port->ams != NONE_AMS)
1027 			port->in_ams = true;
1028 		break;
1029 	case TCPC_TX_DISCARDED:
1030 		ret = -EAGAIN;
1031 		break;
1032 	case TCPC_TX_FAILED:
1033 	default:
1034 		ret = -EIO;
1035 		break;
1036 	}
1037 
1038 	/* Some AMS don't expect responses. Finish them here. */
1039 	if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
1040 		tcpm_ams_finish(port);
1041 
1042 	return ret;
1043 }
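
/*
 * Note that MessageID is a 3-bit rolling counter (PD_HEADER_ID_MASK), kept
 * separately per SOP* type, and is only advanced after the TCPC reports
 * TCPC_TX_SUCCESS, i.e. after a GoodCRC has been received for the message.
 */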
1044 
1045 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1046 			       enum tcpm_transmit_status status)
1047 {
1048 	tcpm_log(port, "PD TX complete, status: %u", status);
1049 	port->tx_status = status;
1050 	complete(&port->tx_complete);
1051 }
1052 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1053 
1054 static int tcpm_mux_set(struct tcpm_port *port, int state,
1055 			enum usb_role usb_role,
1056 			enum typec_orientation orientation)
1057 {
1058 	int ret;
1059 
1060 	tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1061 		 state, usb_role, orientation);
1062 
1063 	ret = typec_set_orientation(port->typec_port, orientation);
1064 	if (ret)
1065 		return ret;
1066 
1067 	if (port->role_sw) {
1068 		ret = usb_role_switch_set_role(port->role_sw, usb_role);
1069 		if (ret)
1070 			return ret;
1071 	}
1072 
1073 	return typec_set_mode(port->typec_port, state);
1074 }
1075 
1076 static int tcpm_set_polarity(struct tcpm_port *port,
1077 			     enum typec_cc_polarity polarity)
1078 {
1079 	int ret;
1080 
1081 	tcpm_log(port, "polarity %d", polarity);
1082 
1083 	ret = port->tcpc->set_polarity(port->tcpc, polarity);
1084 	if (ret < 0)
1085 		return ret;
1086 
1087 	port->polarity = polarity;
1088 
1089 	return 0;
1090 }
1091 
1092 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
1093 {
1094 	int ret;
1095 
1096 	tcpm_log(port, "vconn:=%d", enable);
1097 
1098 	ret = port->tcpc->set_vconn(port->tcpc, enable);
1099 	if (!ret) {
1100 		port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1101 		typec_set_vconn_role(port->typec_port, port->vconn_role);
1102 	}
1103 
1104 	return ret;
1105 }
1106 
1107 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1108 {
1109 	enum typec_cc_status cc;
1110 	u32 limit;
1111 
1112 	cc = port->polarity ? port->cc2 : port->cc1;
1113 	switch (cc) {
1114 	case TYPEC_CC_RP_1_5:
1115 		limit = 1500;
1116 		break;
1117 	case TYPEC_CC_RP_3_0:
1118 		limit = 3000;
1119 		break;
1120 	case TYPEC_CC_RP_DEF:
1121 	default:
1122 		if (port->tcpc->get_current_limit)
1123 			limit = port->tcpc->get_current_limit(port->tcpc);
1124 		else
1125 			limit = 0;
1126 		break;
1127 	}
1128 
1129 	return limit;
1130 }
1131 
1132 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1133 {
1134 	int ret = -EOPNOTSUPP;
1135 
1136 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1137 
1138 	port->supply_voltage = mv;
1139 	port->current_limit = max_ma;
1140 	power_supply_changed(port->psy);
1141 
1142 	if (port->tcpc->set_current_limit)
1143 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1144 
1145 	return ret;
1146 }
1147 
1148 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1149 {
1150 	return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1151 				     port->data_role);
1152 }
1153 
1154 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1155 			  enum typec_role role, enum typec_data_role data)
1156 {
1157 	enum typec_orientation orientation;
1158 	enum usb_role usb_role;
1159 	int ret;
1160 
1161 	if (port->polarity == TYPEC_POLARITY_CC1)
1162 		orientation = TYPEC_ORIENTATION_NORMAL;
1163 	else
1164 		orientation = TYPEC_ORIENTATION_REVERSE;
1165 
1166 	if (port->typec_caps.data == TYPEC_PORT_DRD) {
1167 		if (data == TYPEC_HOST)
1168 			usb_role = USB_ROLE_HOST;
1169 		else
1170 			usb_role = USB_ROLE_DEVICE;
1171 	} else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1172 		if (data == TYPEC_HOST) {
1173 			if (role == TYPEC_SOURCE)
1174 				usb_role = USB_ROLE_HOST;
1175 			else
1176 				usb_role = USB_ROLE_NONE;
1177 		} else {
1178 			return -ENOTSUPP;
1179 		}
1180 	} else {
1181 		if (data == TYPEC_DEVICE) {
1182 			if (role == TYPEC_SINK)
1183 				usb_role = USB_ROLE_DEVICE;
1184 			else
1185 				usb_role = USB_ROLE_NONE;
1186 		} else {
1187 			return -ENOTSUPP;
1188 		}
1189 	}
1190 
1191 	ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1192 	if (ret < 0)
1193 		return ret;
1194 
1195 	ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1196 	if (ret < 0)
1197 		return ret;
1198 
1199 	if (port->tcpc->set_orientation) {
1200 		ret = port->tcpc->set_orientation(port->tcpc, orientation);
1201 		if (ret < 0)
1202 			return ret;
1203 	}
1204 
1205 	port->pwr_role = role;
1206 	port->data_role = data;
1207 	typec_set_data_role(port->typec_port, data);
1208 	typec_set_pwr_role(port->typec_port, role);
1209 
1210 	return 0;
1211 }
1212 
1213 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1214 {
1215 	int ret;
1216 
1217 	ret = port->tcpc->set_roles(port->tcpc, true, role,
1218 				    port->data_role);
1219 	if (ret < 0)
1220 		return ret;
1221 
1222 	port->pwr_role = role;
1223 	typec_set_pwr_role(port->typec_port, role);
1224 
1225 	return 0;
1226 }
1227 
1228 /*
1229  * Transform the PDO to be compliant to PD rev2.0.
1230  * Return 0 if the PDO type is not defined in PD rev2.0.
1231  * Otherwise, return the converted PDO.
1232  */
1233 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1234 {
1235 	switch (pdo_type(pdo)) {
1236 	case PDO_TYPE_FIXED:
1237 		if (role == TYPEC_SINK)
1238 			return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1239 		else
1240 			return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1241 	case PDO_TYPE_VAR:
1242 	case PDO_TYPE_BATT:
1243 		return pdo;
1244 	case PDO_TYPE_APDO:
1245 	default:
1246 		return 0;
1247 	}
1248 }
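
/*
 * Example: for a PD rev 2.0 partner, a fixed sink PDO has its rev 3.0-only
 * FRS required-current bits cleared, a fixed source PDO has the
 * unchunked-extended-messages bit cleared, and an APDO (PPS) is dropped
 * entirely since it does not exist in rev 2.0.
 */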
1249 
1250 static int tcpm_pd_send_revision(struct tcpm_port *port)
1251 {
1252 	struct pd_message msg;
1253 	u32 rmdo;
1254 
1255 	memset(&msg, 0, sizeof(msg));
1256 	rmdo = RMDO(port->pd_rev.rev_major, port->pd_rev.rev_minor,
1257 		    port->pd_rev.ver_major, port->pd_rev.ver_minor);
1258 	msg.payload[0] = cpu_to_le32(rmdo);
1259 	msg.header = PD_HEADER_LE(PD_DATA_REVISION,
1260 				  port->pwr_role,
1261 				  port->data_role,
1262 				  port->negotiated_rev,
1263 				  port->message_id,
1264 				  1);
1265 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1266 }
1267 
1268 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1269 {
1270 	struct pd_message msg;
1271 	u32 pdo;
1272 	unsigned int i, nr_pdo = 0;
1273 
1274 	memset(&msg, 0, sizeof(msg));
1275 
1276 	for (i = 0; i < port->nr_src_pdo; i++) {
1277 		if (port->negotiated_rev >= PD_REV30) {
1278 			msg.payload[nr_pdo++] =	cpu_to_le32(port->src_pdo[i]);
1279 		} else {
1280 			pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1281 			if (pdo)
1282 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1283 		}
1284 	}
1285 
1286 	if (!nr_pdo) {
1287 		/* No source capabilities defined, sink only */
1288 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1289 					  port->pwr_role,
1290 					  port->data_role,
1291 					  port->negotiated_rev,
1292 					  port->message_id, 0);
1293 	} else {
1294 		msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1295 					  port->pwr_role,
1296 					  port->data_role,
1297 					  port->negotiated_rev,
1298 					  port->message_id,
1299 					  nr_pdo);
1300 	}
1301 
1302 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1303 }
1304 
1305 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1306 {
1307 	struct pd_message msg;
1308 	u32 pdo;
1309 	unsigned int i, nr_pdo = 0;
1310 
1311 	memset(&msg, 0, sizeof(msg));
1312 
1313 	for (i = 0; i < port->nr_snk_pdo; i++) {
1314 		if (port->negotiated_rev >= PD_REV30) {
1315 			msg.payload[nr_pdo++] =	cpu_to_le32(port->snk_pdo[i]);
1316 		} else {
1317 			pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1318 			if (pdo)
1319 				msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1320 		}
1321 	}
1322 
1323 	if (!nr_pdo) {
1324 		/* No sink capabilities defined, source only */
1325 		msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1326 					  port->pwr_role,
1327 					  port->data_role,
1328 					  port->negotiated_rev,
1329 					  port->message_id, 0);
1330 	} else {
1331 		msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1332 					  port->pwr_role,
1333 					  port->data_role,
1334 					  port->negotiated_rev,
1335 					  port->message_id,
1336 					  nr_pdo);
1337 	}
1338 
1339 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1340 }
1341 
1342 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1343 {
1344 	if (delay_ms) {
1345 		hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1346 	} else {
1347 		hrtimer_cancel(&port->state_machine_timer);
1348 		kthread_queue_work(port->wq, &port->state_machine);
1349 	}
1350 }
1351 
1352 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1353 {
1354 	if (delay_ms) {
1355 		hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1356 			      HRTIMER_MODE_REL);
1357 	} else {
1358 		hrtimer_cancel(&port->vdm_state_machine_timer);
1359 		kthread_queue_work(port->wq, &port->vdm_state_machine);
1360 	}
1361 }
1362 
1363 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1364 {
1365 	if (delay_ms) {
1366 		hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1367 	} else {
1368 		hrtimer_cancel(&port->enable_frs_timer);
1369 		kthread_queue_work(port->wq, &port->enable_frs);
1370 	}
1371 }
1372 
1373 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1374 {
1375 	if (delay_ms) {
1376 		hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1377 	} else {
1378 		hrtimer_cancel(&port->send_discover_timer);
1379 		kthread_queue_work(port->wq, &port->send_discover_work);
1380 	}
1381 }
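
/*
 * All four helpers above follow the same pattern: a non-zero delay arms an
 * hrtimer whose expiry handler (defined later in this file) queues the
 * corresponding kthread work item, while a zero delay cancels any pending
 * timer and queues the work immediately.
 */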
1382 
1383 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1384 			   unsigned int delay_ms)
1385 {
1386 	if (delay_ms) {
1387 		tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1388 			 tcpm_states[port->state], tcpm_states[state], delay_ms,
1389 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1390 		port->delayed_state = state;
1391 		mod_tcpm_delayed_work(port, delay_ms);
1392 		port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1393 		port->delay_ms = delay_ms;
1394 	} else {
1395 		tcpm_log(port, "state change %s -> %s [%s %s]",
1396 			 tcpm_states[port->state], tcpm_states[state],
1397 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1398 		port->delayed_state = INVALID_STATE;
1399 		port->prev_state = port->state;
1400 		port->state = state;
1401 		/*
1402 		 * Don't re-queue the state machine work item if we're currently
1403 		 * in the state machine and we're immediately changing states.
1404 		 * tcpm_state_machine_work() will continue running the state
1405 		 * machine.
1406 		 */
1407 		if (!port->state_machine_running)
1408 			mod_tcpm_delayed_work(port, 0);
1409 	}
1410 }
1411 
1412 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1413 				unsigned int delay_ms)
1414 {
1415 	if (port->enter_state == port->state)
1416 		tcpm_set_state(port, state, delay_ms);
1417 	else
1418 		tcpm_log(port,
1419 			 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1420 			 delay_ms ? "delayed " : "",
1421 			 tcpm_states[port->state], tcpm_states[state],
1422 			 delay_ms, tcpm_states[port->enter_state],
1423 			 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1424 }
1425 
1426 static void tcpm_queue_message(struct tcpm_port *port,
1427 			       enum pd_msg_request message)
1428 {
1429 	port->queued_message = message;
1430 	mod_tcpm_delayed_work(port, 0);
1431 }
1432 
1433 static bool tcpm_vdm_ams(struct tcpm_port *port)
1434 {
1435 	switch (port->ams) {
1436 	case DISCOVER_IDENTITY:
1437 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1438 	case DISCOVER_SVIDS:
1439 	case DISCOVER_MODES:
1440 	case DFP_TO_UFP_ENTER_MODE:
1441 	case DFP_TO_UFP_EXIT_MODE:
1442 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1443 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1444 	case ATTENTION:
1445 	case UNSTRUCTURED_VDMS:
1446 	case STRUCTURED_VDMS:
1447 		break;
1448 	default:
1449 		return false;
1450 	}
1451 
1452 	return true;
1453 }
1454 
1455 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1456 {
1457 	switch (port->ams) {
1458 	/* Interruptible AMS */
1459 	case NONE_AMS:
1460 	case SECURITY:
1461 	case FIRMWARE_UPDATE:
1462 	case DISCOVER_IDENTITY:
1463 	case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1464 	case DISCOVER_SVIDS:
1465 	case DISCOVER_MODES:
1466 	case DFP_TO_UFP_ENTER_MODE:
1467 	case DFP_TO_UFP_EXIT_MODE:
1468 	case DFP_TO_CABLE_PLUG_ENTER_MODE:
1469 	case DFP_TO_CABLE_PLUG_EXIT_MODE:
1470 	case UNSTRUCTURED_VDMS:
1471 	case STRUCTURED_VDMS:
1472 	case COUNTRY_INFO:
1473 	case COUNTRY_CODES:
1474 		break;
1475 	/* Non-Interruptible AMS */
1476 	default:
1477 		if (port->in_ams)
1478 			return false;
1479 		break;
1480 	}
1481 
1482 	return true;
1483 }
1484 
1485 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1486 {
1487 	int ret = 0;
1488 
1489 	tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1490 
1491 	if (!tcpm_ams_interruptible(port) &&
1492 	    !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1493 		port->upcoming_state = INVALID_STATE;
1494 		tcpm_log(port, "AMS %s not interruptible, aborting",
1495 			 tcpm_ams_str[port->ams]);
1496 		return -EAGAIN;
1497 	}
1498 
1499 	if (port->pwr_role == TYPEC_SOURCE) {
1500 		enum typec_cc_status cc_req = port->cc_req;
1501 
1502 		port->ams = ams;
1503 
1504 		if (ams == HARD_RESET) {
1505 			tcpm_set_cc(port, tcpm_rp_cc(port));
1506 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1507 			tcpm_set_state(port, HARD_RESET_START, 0);
1508 			return ret;
1509 		} else if (ams == SOFT_RESET_AMS) {
1510 			if (!port->explicit_contract)
1511 				tcpm_set_cc(port, tcpm_rp_cc(port));
1512 			tcpm_set_state(port, SOFT_RESET_SEND, 0);
1513 			return ret;
1514 		} else if (tcpm_vdm_ams(port)) {
1515 			/* tSinkTx is enforced in vdm_run_state_machine */
1516 			if (port->negotiated_rev >= PD_REV30)
1517 				tcpm_set_cc(port, SINK_TX_NG);
1518 			return ret;
1519 		}
1520 
1521 		if (port->negotiated_rev >= PD_REV30)
1522 			tcpm_set_cc(port, SINK_TX_NG);
1523 
1524 		switch (port->state) {
1525 		case SRC_READY:
1526 		case SRC_STARTUP:
1527 		case SRC_SOFT_RESET_WAIT_SNK_TX:
1528 		case SOFT_RESET:
1529 		case SOFT_RESET_SEND:
1530 			if (port->negotiated_rev >= PD_REV30)
1531 				tcpm_set_state(port, AMS_START,
1532 					       cc_req == SINK_TX_OK ?
1533 					       PD_T_SINK_TX : 0);
1534 			else
1535 				tcpm_set_state(port, AMS_START, 0);
1536 			break;
1537 		default:
1538 			if (port->negotiated_rev >= PD_REV30)
1539 				tcpm_set_state(port, SRC_READY,
1540 					       cc_req == SINK_TX_OK ?
1541 					       PD_T_SINK_TX : 0);
1542 			else
1543 				tcpm_set_state(port, SRC_READY, 0);
1544 			break;
1545 		}
1546 	} else {
1547 		if (port->negotiated_rev >= PD_REV30 &&
1548 		    !tcpm_sink_tx_ok(port) &&
1549 		    ams != SOFT_RESET_AMS &&
1550 		    ams != HARD_RESET) {
1551 			port->upcoming_state = INVALID_STATE;
1552 			tcpm_log(port, "Sink TX No Go");
1553 			return -EAGAIN;
1554 		}
1555 
1556 		port->ams = ams;
1557 
1558 		if (ams == HARD_RESET) {
1559 			tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1560 			tcpm_set_state(port, HARD_RESET_START, 0);
1561 			return ret;
1562 		} else if (tcpm_vdm_ams(port)) {
1563 			return ret;
1564 		}
1565 
1566 		if (port->state == SNK_READY ||
1567 		    port->state == SNK_SOFT_RESET)
1568 			tcpm_set_state(port, AMS_START, 0);
1569 		else
1570 			tcpm_set_state(port, SNK_READY, 0);
1571 	}
1572 
1573 	return ret;
1574 }
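
/*
 * Rough usage sketch: callers typically set port->upcoming_state to the state
 * that implements the AMS and then call tcpm_ams_start(); the AMS_START state
 * (handled later in this file) transitions to that state once collision
 * avoidance allows it.  On -EAGAIN the attempt is retried or abandoned by the
 * caller.
 */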
1575 
1576 /*
1577  * VDM/VDO handling functions
1578  */
1579 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1580 			   const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1581 {
1582 	u32 vdo_hdr = port->vdo_data[0];
1583 
1584 	WARN_ON(!mutex_is_locked(&port->lock));
1585 
1586 	/* If we are sending discover_identity, handle the received message first */
1587 	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1588 		if (tx_sop_type == TCPC_TX_SOP_PRIME)
1589 			port->send_discover_prime = true;
1590 		else
1591 			port->send_discover = true;
1592 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1593 	} else {
1594 		/* Make sure we are not still processing a previous VDM packet */
1595 		WARN_ON(port->vdm_state > VDM_STATE_DONE);
1596 	}
1597 
1598 	port->vdo_count = cnt + 1;
1599 	port->vdo_data[0] = header;
1600 	memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1601 	/* Set ready, vdm state machine will actually send */
1602 	port->vdm_retries = 0;
1603 	port->vdm_state = VDM_STATE_READY;
1604 	port->vdm_sm_running = true;
1605 
1606 	port->tx_sop_type = tx_sop_type;
1607 
1608 	mod_vdm_delayed_work(port, 0);
1609 }
1610 
1611 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1612 				    const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1613 {
1614 	if (port->state != SRC_READY && port->state != SNK_READY &&
1615 	    port->state != SRC_VDM_IDENTITY_REQUEST)
1616 		return;
1617 
1618 	mutex_lock(&port->lock);
1619 	tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
1620 	mutex_unlock(&port->lock);
1621 }
1622 
1623 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1624 {
1625 	u32 vdo = p[VDO_INDEX_IDH];
1626 	u32 product = p[VDO_INDEX_PRODUCT];
1627 
1628 	memset(&port->mode_data, 0, sizeof(port->mode_data));
1629 
1630 	port->partner_ident.id_header = vdo;
1631 	port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1632 	port->partner_ident.product = product;
1633 
1634 	if (port->partner)
1635 		typec_partner_set_identity(port->partner);
1636 
1637 	tcpm_log(port, "Identity: %04x:%04x.%04x",
1638 		 PD_IDH_VID(vdo),
1639 		 PD_PRODUCT_PID(product), product & 0xffff);
1640 }
1641 
1642 static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
1643 {
1644 	u32 idh = p[VDO_INDEX_IDH];
1645 	u32 product = p[VDO_INDEX_PRODUCT];
1646 	int svdm_version;
1647 
1648 	/*
1649 	 * Attempt to consume the identity only if the cable is not currently registered
1650 	 */
1651 	if (!IS_ERR_OR_NULL(port->cable))
1652 		goto register_plug;
1653 
1654 	/* Reset cable identity */
1655 	memset(&port->cable_ident, 0, sizeof(port->cable_ident));
1656 
1657 	/* Fill out id header, cert, product, cable VDO 1 */
1658 	port->cable_ident.id_header = idh;
1659 	port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
1660 	port->cable_ident.product = product;
1661 	port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
1662 
1663 	/* Fill out cable desc, infer svdm_version from pd revision */
1664 	port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
1665 							USB_PLUG_TYPE_A);
1666 	port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
1667 	/* Log PD Revision and additional cable VDO from negotiated revision */
1668 	switch (port->negotiated_rev_prime) {
1669 	case PD_REV30:
1670 		port->cable_desc.pd_revision = 0x0300;
1671 		if (port->cable_desc.active)
1672 			port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
1673 		break;
1674 	case PD_REV20:
1675 		port->cable_desc.pd_revision = 0x0200;
1676 		break;
1677 	default:
1678 		port->cable_desc.pd_revision = 0x0200;
1679 		break;
1680 	}
1681 	port->cable_desc.identity = &port->cable_ident;
1682 	/* Register Cable, set identity and svdm_version */
1683 	port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
1684 	if (IS_ERR_OR_NULL(port->cable))
1685 		return;
1686 	typec_cable_set_identity(port->cable);
1687 	/* Get SVDM version */
1688 	svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
1689 	typec_cable_set_svdm_version(port->cable, svdm_version);
1690 
1691 register_plug:
1692 	if (IS_ERR_OR_NULL(port->plug_prime)) {
1693 		port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
1694 		port->plug_prime = typec_register_plug(port->cable,
1695 						       &port->plug_prime_desc);
1696 	}
1697 }
1698 
1699 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1700 			       enum tcpm_transmit_type rx_sop_type)
1701 {
1702 	struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1703 				      &port->mode_data_prime : &port->mode_data;
1704 	int i;
1705 
1706 	for (i = 1; i < cnt; i++) {
1707 		u16 svid;
1708 
1709 		svid = (p[i] >> 16) & 0xffff;
1710 		if (!svid)
1711 			return false;
1712 
1713 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1714 			goto abort;
1715 
1716 		pmdata->svids[pmdata->nsvids++] = svid;
1717 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1718 
1719 		svid = p[i] & 0xffff;
1720 		if (!svid)
1721 			return false;
1722 
1723 		if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1724 			goto abort;
1725 
1726 		pmdata->svids[pmdata->nsvids++] = svid;
1727 		tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1728 	}
1729 
1730 	/*
1731 	 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1732 	 * 6-43), and a maximum of 6 VDOs can be returned per response (see Figure
1733 	 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1734 	 * SVIDs Command Shall be executed multiple times until a Discover
1735 	 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1736 	 * the last part of the last VDO or with a VDO containing two SVIDs
1737 	 * with values of 0x0000.
1738 	 *
1739 	 * However, some odd docks report fewer than 12 SVIDs but without
1740 	 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1741 	 * request and return false here.
1742 	 */
1743 	return cnt == 7;
1744 abort:
1745 	tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1746 	return false;
1747 }
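
/*
 * Note: cnt counts all VDOs including the VDM header, so cnt == 7 means the
 * response carried a full 6 SVID VDOs (12 SVIDs) and the caller should issue
 * another Discover SVIDs request to fetch the remainder.
 */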
1748 
1749 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1750 			       enum tcpm_transmit_type rx_sop_type)
1751 {
1752 	struct pd_mode_data *pmdata = &port->mode_data;
1753 	struct typec_altmode_desc *paltmode;
1754 	int i;
1755 
1756 	switch (rx_sop_type) {
1757 	case TCPC_TX_SOP_PRIME:
1758 		pmdata = &port->mode_data_prime;
1759 		if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1760 			/* Already logged in svdm_consume_svids() */
1761 			return;
1762 		}
1763 		break;
1764 	case TCPC_TX_SOP:
1765 		pmdata = &port->mode_data;
1766 		if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1767 			/* Already logged in svdm_consume_svids() */
1768 			return;
1769 		}
1770 		break;
1771 	default:
1772 		return;
1773 	}
1774 
1775 	for (i = 1; i < cnt; i++) {
1776 		paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1777 		memset(paltmode, 0, sizeof(*paltmode));
1778 
1779 		paltmode->svid = pmdata->svids[pmdata->svid_index];
1780 		paltmode->mode = i;
1781 		paltmode->vdo = p[i];
1782 
1783 		tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1784 			 pmdata->altmodes, paltmode->svid,
1785 			 paltmode->mode, paltmode->vdo);
1786 
1787 		pmdata->altmodes++;
1788 	}
1789 }
1790 
1791 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1792 {
1793 	struct pd_mode_data *modep = &port->mode_data;
1794 	struct typec_altmode *altmode;
1795 	int i;
1796 
1797 	if (!port->partner)
1798 		return;
1799 
1800 	for (i = 0; i < modep->altmodes; i++) {
1801 		altmode = typec_partner_register_altmode(port->partner,
1802 						&modep->altmode_desc[i]);
1803 		if (IS_ERR(altmode)) {
1804 			tcpm_log(port, "Failed to register partner SVID 0x%04x",
1805 				 modep->altmode_desc[i].svid);
1806 			altmode = NULL;
1807 		}
1808 		port->partner_altmode[i] = altmode;
1809 	}
1810 }
1811 
1812 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1813 {
1814 	struct pd_mode_data *modep = &port->mode_data_prime;
1815 	struct typec_altmode *altmode;
1816 	int i;
1817 
1818 	typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1819 
1820 	for (i = 0; i < modep->altmodes; i++) {
1821 		altmode = typec_plug_register_altmode(port->plug_prime,
1822 						&modep->altmode_desc[i]);
1823 		if (IS_ERR(altmode)) {
1824 			tcpm_log(port, "Failed to register plug SVID 0x%04x",
1825 				 modep->altmode_desc[i].svid);
1826 			altmode = NULL;
1827 		}
1828 		port->plug_prime_altmode[i] = altmode;
1829 	}
1830 }
1831 
1832 #define supports_modal(port)	PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1833 #define supports_modal_cable(port)     PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
1834 #define supports_host(port)	PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
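
/*
 * Note (informative): these helpers decode bits of the ID Header VDO captured
 * during Discover Identity; e.g. supports_modal(port) is true when the
 * partner's ID Header advertises Modal Operation support, and
 * supports_host(port) when it advertises USB Host capability.
 */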
1835 
1836 /*
1837  * Helper to determine whether the port is capable of SOP' communication at the
1838  * current point in time.
1839  */
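/*
 * Summary of the checks below (informative): the TCPC must be able to
 * transmit on SOP', the port must currently be the VCONN source, and either
 * no explicit contract exists yet (source role only), PD 3.0 has been
 * negotiated, or PD 2.0 has been negotiated and the port is the DFP (host).
 */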
1840 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1841 {
1842 	/* Check to see if tcpc supports SOP' communication */
1843 	if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1844 		return false;
1845 	/*
1846 	 * Power Delivery 2.0 Section 6.3.11
1847 	 * Before communicating with a Cable Plug a Port Should ensure that it
1848 	 * is the Vconn Source and that the Cable Plugs are powered by
1849 	 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1850 	 * that the present Vconn Source is supplying Vconn, the only means to
1851 	 * ensure that the Cable Plugs are powered is for a Port wishing to
1852 	 * communicate with a Cable Plug to become the Vconn Source.
1853 	 *
1854 	 * Power Delivery 3.0 Section 6.3.11
1855 	 * Before communicating with a Cable Plug a Port Shall ensure that it
1856 	 * is the Vconn source.
1857 	 */
1858 	if (port->vconn_role != TYPEC_SOURCE)
1859 		return false;
1860 	/*
1861 	 * Power Delivery 2.0 Section 2.4.4
1862 	 * When no Contract or an Implicit Contract is in place the Source can
1863 	 * communicate with a Cable Plug using SOP' packets in order to discover
1864 	 * its characteristics.
1865 	 *
1866 	 * Power Delivery 3.0 Section 2.4.4
1867 	 * When no Contract or an Implicit Contract is in place only the Source
1868 	 * port that is supplying Vconn is allowed to send packets to a Cable
1869 	 * Plug and is allowed to respond to packets from the Cable Plug.
1870 	 */
1871 	if (!port->explicit_contract)
1872 		return port->pwr_role == TYPEC_SOURCE;
1873 	if (port->negotiated_rev == PD_REV30)
1874 		return true;
1875 	/*
1876 	 * Power Delivery 2.0 Section 2.4.4
1877 	 *
1878 	 * When an Explicit Contract is in place the DFP (either the Source or
1879 	 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1880 	 * Packets (see Figure 2-3).
1881 	 */
1882 	if (port->negotiated_rev == PD_REV20)
1883 		return port->data_role == TYPEC_HOST;
1884 	return false;
1885 }
1886 
1887 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1888 {
1889 	if (!port->tcpc->attempt_vconn_swap_discovery)
1890 		return false;
1891 
1892 	/* Port is already source, no need to perform swap */
1893 	if (port->vconn_role == TYPEC_SOURCE)
1894 		return false;
1895 
1896 	/*
1897 	 * Partner needs to support Alternate Modes with modal support. If
1898 	 * partner is also capable of being a USB Host, it could be a device
1899 	 * that supports Alternate Modes as the DFP.
1900 	 */
1901 	if (!supports_modal(port) || supports_host(port))
1902 		return false;
1903 
1904 	if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1905 	    port->negotiated_rev == PD_REV30)
1906 		return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1907 
1908 	return false;
1909 }
1910 
1911 
1912 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1913 {
1914 	return !IS_ERR_OR_NULL(port->cable) &&
1915 	       typec_cable_is_active(port->cable) &&
1916 	       supports_modal_cable(port) &&
1917 	       tcpm_can_communicate_sop_prime(port);
1918 }
1919 
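/*
 * Informative: the return value is the number of 32-bit words placed in
 * response[] (VDM header included); 0 means no response is queued. The
 * caller, tcpm_handle_vdm_request(), hands a non-zero result to
 * tcpm_queue_vdm() as a header plus (rlen - 1) data objects.
 */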
1920 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1921 			const u32 *p, int cnt, u32 *response,
1922 			enum adev_actions *adev_action,
1923 			enum tcpm_transmit_type rx_sop_type,
1924 			enum tcpm_transmit_type *response_tx_sop_type)
1925 {
1926 	struct typec_port *typec = port->typec_port;
1927 	struct typec_altmode *pdev, *pdev_prime;
1928 	struct pd_mode_data *modep, *modep_prime;
1929 	int svdm_version;
1930 	int rlen = 0;
1931 	int cmd_type;
1932 	int cmd;
1933 	int i;
1934 	int ret;
1935 
1936 	cmd_type = PD_VDO_CMDT(p[0]);
1937 	cmd = PD_VDO_CMD(p[0]);
1938 
1939 	tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1940 		 p[0], cmd_type, cmd, cnt);
1941 
1942 	switch (rx_sop_type) {
1943 	case TCPC_TX_SOP_PRIME:
1944 		modep_prime = &port->mode_data_prime;
1945 		pdev_prime = typec_match_altmode(port->plug_prime_altmode,
1946 						 ALTMODE_DISCOVERY_MAX,
1947 						 PD_VDO_VID(p[0]),
1948 						 PD_VDO_OPOS(p[0]));
1949 		svdm_version = typec_get_cable_svdm_version(typec);
1950 		/*
1951 		 * Update SVDM version if cable was discovered before port partner.
1952 		 */
1953 		if (!IS_ERR_OR_NULL(port->cable) &&
1954 		    PD_VDO_SVDM_VER(p[0]) < svdm_version)
1955 			typec_cable_set_svdm_version(port->cable, svdm_version);
1956 		break;
1957 	case TCPC_TX_SOP:
1958 		modep = &port->mode_data;
1959 		pdev = typec_match_altmode(port->partner_altmode,
1960 					   ALTMODE_DISCOVERY_MAX,
1961 					   PD_VDO_VID(p[0]),
1962 					   PD_VDO_OPOS(p[0]));
1963 		svdm_version = typec_get_negotiated_svdm_version(typec);
1964 		if (svdm_version < 0)
1965 			return 0;
1966 		break;
1967 	default:
1968 		modep = &port->mode_data;
1969 		pdev = typec_match_altmode(port->partner_altmode,
1970 					   ALTMODE_DISCOVERY_MAX,
1971 					   PD_VDO_VID(p[0]),
1972 					   PD_VDO_OPOS(p[0]));
1973 		svdm_version = typec_get_negotiated_svdm_version(typec);
1974 		if (svdm_version < 0)
1975 			return 0;
1976 		break;
1977 	}
1978 
1979 	switch (cmd_type) {
1980 	case CMDT_INIT:
1981 		/*
1982 		 * Only the port or port partner is allowed to initiate SVDM
1983 		 * commands over SOP'. In case the port partner initiates a
1984 		 * sequence when it is not allowed to send SOP' messages, drop
1985 		 * the message rather than have the TCPM port try to process it.
1986 		 */
1987 		if (rx_sop_type == TCPC_TX_SOP_PRIME)
1988 			return 0;
1989 
1990 		switch (cmd) {
1991 		case CMD_DISCOVER_IDENT:
1992 			if (PD_VDO_VID(p[0]) != USB_SID_PD)
1993 				break;
1994 
1995 			if (IS_ERR_OR_NULL(port->partner))
1996 				break;
1997 
1998 			if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1999 				typec_partner_set_svdm_version(port->partner,
2000 							       PD_VDO_SVDM_VER(p[0]));
2001 				svdm_version = PD_VDO_SVDM_VER(p[0]);
2002 			}
2003 
2004 			port->ams = DISCOVER_IDENTITY;
2005 			/*
2006 			 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
2007 			 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
2008 			 * "wrong configuation" or "Unrecognized"
2009 			 */
2010 			if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
2011 			    port->nr_snk_vdo) {
2012 				if (svdm_version < SVDM_VER_2_0) {
2013 					for (i = 0; i < port->nr_snk_vdo_v1; i++)
2014 						response[i + 1] = port->snk_vdo_v1[i];
2015 					rlen = port->nr_snk_vdo_v1 + 1;
2016 
2017 				} else {
2018 					for (i = 0; i < port->nr_snk_vdo; i++)
2019 						response[i + 1] = port->snk_vdo[i];
2020 					rlen = port->nr_snk_vdo + 1;
2021 				}
2022 			}
2023 			break;
2024 		case CMD_DISCOVER_SVID:
2025 			port->ams = DISCOVER_SVIDS;
2026 			break;
2027 		case CMD_DISCOVER_MODES:
2028 			port->ams = DISCOVER_MODES;
2029 			break;
2030 		case CMD_ENTER_MODE:
2031 			port->ams = DFP_TO_UFP_ENTER_MODE;
2032 			break;
2033 		case CMD_EXIT_MODE:
2034 			port->ams = DFP_TO_UFP_EXIT_MODE;
2035 			break;
2036 		case CMD_ATTENTION:
2037 			/* Attention command does not have a response */
2038 			*adev_action = ADEV_ATTENTION;
2039 			return 0;
2040 		default:
2041 			break;
2042 		}
2043 		if (rlen >= 1) {
2044 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
2045 		} else if (rlen == 0) {
2046 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2047 			rlen = 1;
2048 		} else {
2049 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2050 			rlen = 1;
2051 		}
2052 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2053 			      (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2054 		break;
2055 	case CMDT_RSP_ACK:
2056 		/*
2057 		 * Silently drop the message if we are not connected, but still
2058 		 * process an SOP' Discover Identity received prior to an
2059 		 * explicit contract.
2059 		 */
2060 		if (IS_ERR_OR_NULL(port->partner) &&
2061 		    !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2062 			break;
2063 
2064 		tcpm_ams_finish(port);
2065 
2066 		switch (cmd) {
2067 		/*
2068 		 * SVDM Command Flow for SOP and SOP':
2069 		 * SOP		Discover Identity
2070 		 * SOP'		Discover Identity
2071 		 * SOP		Discover SVIDs
2072 		 *		Discover Modes
2073 		 * (Active Cables)
2074 		 * SOP'		Discover SVIDs
2075 		 *		Discover Modes
2076 		 *
2077 		 * Perform Discover SOP' if the port can communicate with cable
2078 		 * plug.
2079 		 */
2080 		case CMD_DISCOVER_IDENT:
2081 			switch (rx_sop_type) {
2082 			case TCPC_TX_SOP:
2083 				if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2084 					typec_partner_set_svdm_version(port->partner,
2085 								       PD_VDO_SVDM_VER(p[0]));
2086 					/* If cable is discovered before partner, downgrade svdm */
2087 					if (!IS_ERR_OR_NULL(port->cable) &&
2088 					    (typec_get_cable_svdm_version(port->typec_port) >
2089 					    svdm_version))
2090 						typec_cable_set_svdm_version(port->cable,
2091 									     svdm_version);
2092 				}
2093 				/* 6.4.4.3.1 */
2094 				svdm_consume_identity(port, p, cnt);
2095 				/* Attempt Vconn swap, delay SOP' discovery if necessary */
2096 				if (tcpm_attempt_vconn_swap_discovery(port)) {
2097 					port->send_discover_prime = true;
2098 					port->upcoming_state = VCONN_SWAP_SEND;
2099 					ret = tcpm_ams_start(port, VCONN_SWAP);
2100 					if (!ret)
2101 						return 0;
2102 					/* Cannot perform Vconn swap */
2103 					port->upcoming_state = INVALID_STATE;
2104 					port->send_discover_prime = false;
2105 				}
2106 
2107 				/*
2108 				 * Attempt Discover Identity on SOP' if the
2109 				 * cable was not discovered previously, and use
2110 				 * the SVDM version of the partner to probe.
2111 				 */
2112 				if (IS_ERR_OR_NULL(port->cable) &&
2113 				    tcpm_can_communicate_sop_prime(port)) {
2114 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2115 					port->send_discover_prime = true;
2116 					response[0] = VDO(USB_SID_PD, 1,
2117 							  typec_get_negotiated_svdm_version(typec),
2118 							  CMD_DISCOVER_IDENT);
2119 					rlen = 1;
2120 				} else {
2121 					*response_tx_sop_type = TCPC_TX_SOP;
2122 					response[0] = VDO(USB_SID_PD, 1,
2123 							  typec_get_negotiated_svdm_version(typec),
2124 							  CMD_DISCOVER_SVID);
2125 					rlen = 1;
2126 				}
2127 				break;
2128 			case TCPC_TX_SOP_PRIME:
2129 				/*
2130 				 * svdm_consume_identity_sop_prime will determine
2131 				 * the svdm_version for the cable moving forward.
2132 				 */
2133 				svdm_consume_identity_sop_prime(port, p, cnt);
2134 
2135 				/*
2136 				 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2137 				 * to SRC_SEND_CAPABILITIES
2138 				 */
2139 				if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2140 					tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2141 					return 0;
2142 				}
2143 
2144 				*response_tx_sop_type = TCPC_TX_SOP;
2145 				response[0] = VDO(USB_SID_PD, 1,
2146 						  typec_get_negotiated_svdm_version(typec),
2147 						  CMD_DISCOVER_SVID);
2148 				rlen = 1;
2149 				break;
2150 			default:
2151 				return 0;
2152 			}
2153 			break;
2154 		case CMD_DISCOVER_SVID:
2155 			*response_tx_sop_type = rx_sop_type;
2156 			/* 6.4.4.3.2 */
2157 			if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2158 				response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2159 				rlen = 1;
2160 			} else {
2161 				if (rx_sop_type == TCPC_TX_SOP) {
2162 					if (modep->nsvids && supports_modal(port)) {
2163 						response[0] = VDO(modep->svids[0], 1, svdm_version,
2164 								CMD_DISCOVER_MODES);
2165 						rlen = 1;
2166 					}
2167 				} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2168 					if (modep_prime->nsvids) {
2169 						response[0] = VDO(modep_prime->svids[0], 1,
2170 								  svdm_version, CMD_DISCOVER_MODES);
2171 						rlen = 1;
2172 					}
2173 				}
2174 			}
2175 			break;
2176 		case CMD_DISCOVER_MODES:
2177 			if (rx_sop_type == TCPC_TX_SOP) {
2178 				/* 6.4.4.3.3 */
2179 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2180 				modep->svid_index++;
2181 				if (modep->svid_index < modep->nsvids) {
2182 					u16 svid = modep->svids[modep->svid_index];
2183 					*response_tx_sop_type = TCPC_TX_SOP;
2184 					response[0] = VDO(svid, 1, svdm_version,
2185 							  CMD_DISCOVER_MODES);
2186 					rlen = 1;
2187 				} else if (tcpm_cable_vdm_supported(port)) {
2188 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2189 					response[0] = VDO(USB_SID_PD, 1,
2190 							  typec_get_cable_svdm_version(typec),
2191 							  CMD_DISCOVER_SVID);
2192 					rlen = 1;
2193 				} else {
2194 					tcpm_register_partner_altmodes(port);
2195 				}
2196 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2197 				/* 6.4.4.3.3 */
2198 				svdm_consume_modes(port, p, cnt, rx_sop_type);
2199 				modep_prime->svid_index++;
2200 				if (modep_prime->svid_index < modep_prime->nsvids) {
2201 					u16 svid = modep_prime->svids[modep_prime->svid_index];
2202 					*response_tx_sop_type = TCPC_TX_SOP_PRIME;
2203 					response[0] = VDO(svid, 1,
2204 							  typec_get_cable_svdm_version(typec),
2205 							  CMD_DISCOVER_MODES);
2206 					rlen = 1;
2207 				} else {
2208 					tcpm_register_plug_altmodes(port);
2209 					tcpm_register_partner_altmodes(port);
2210 				}
2211 			}
2212 			break;
2213 		case CMD_ENTER_MODE:
2214 			*response_tx_sop_type = rx_sop_type;
2215 			if (rx_sop_type == TCPC_TX_SOP) {
2216 				if (adev && pdev) {
2217 					typec_altmode_update_active(pdev, true);
2218 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2219 				}
2220 			} else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2221 				if (adev && pdev_prime) {
2222 					typec_altmode_update_active(pdev_prime, true);
2223 					*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2224 				}
2225 			}
2226 			return 0;
2227 		case CMD_EXIT_MODE:
2228 			*response_tx_sop_type = rx_sop_type;
2229 			if (rx_sop_type == TCPC_TX_SOP) {
2230 				if (adev && pdev) {
2231 					typec_altmode_update_active(pdev, false);
2232 					/* Back to USB Operation */
2233 					*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2234 					return 0;
2235 				}
2236 			}
2237 			break;
2238 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2239 			break;
2240 		default:
2241 			/* Unrecognized SVDM */
2242 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2243 			rlen = 1;
2244 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2245 				      (VDO_SVDM_VERS(svdm_version));
2246 			break;
2247 		}
2248 		break;
2249 	case CMDT_RSP_NAK:
2250 		tcpm_ams_finish(port);
2251 		switch (cmd) {
2252 		case CMD_DISCOVER_IDENT:
2253 		case CMD_DISCOVER_SVID:
2254 		case CMD_DISCOVER_MODES:
2255 		case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2256 			break;
2257 		case CMD_ENTER_MODE:
2258 			/* Back to USB Operation */
2259 			*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2260 			return 0;
2261 		default:
2262 			/* Unrecognized SVDM */
2263 			response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2264 			rlen = 1;
2265 			response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2266 				      (VDO_SVDM_VERS(svdm_version));
2267 			break;
2268 		}
2269 		break;
2270 	default:
2271 		response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2272 		rlen = 1;
2273 		response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2274 			      (VDO_SVDM_VERS(svdm_version));
2275 		break;
2276 	}
2277 
2278 	/* Inform the alternate mode drivers about everything */
2279 	*adev_action = ADEV_QUEUE_VDM;
2280 	return rlen;
2281 }
2282 
2283 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2284 			       enum pd_msg_request message,
2285 			       enum tcpm_ams ams);
2286 
2287 static void tcpm_handle_vdm_request(struct tcpm_port *port,
2288 				    const __le32 *payload, int cnt,
2289 				    enum tcpm_transmit_type rx_sop_type)
2290 {
2291 	enum adev_actions adev_action = ADEV_NONE;
2292 	struct typec_altmode *adev;
2293 	u32 p[PD_MAX_PAYLOAD];
2294 	u32 response[8] = { };
2295 	int i, rlen = 0;
2296 	enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
2297 
2298 	for (i = 0; i < cnt; i++)
2299 		p[i] = le32_to_cpu(payload[i]);
2300 
2301 	adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
2302 				   PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
2303 
2304 	if (port->vdm_state == VDM_STATE_BUSY) {
2305 		/* If UFP responded busy retry after timeout */
2306 		if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
2307 			port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
2308 			port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
2309 				CMDT_INIT;
2310 			mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
2311 			return;
2312 		}
2313 		port->vdm_state = VDM_STATE_DONE;
2314 	}
2315 
2316 	if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
2317 		/*
2318 		 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
2319 		 * advance because we are dropping the lock but may send VDMs soon.
2320 		 * For the cases of INIT received:
2321 		 *  - If no response to send, it will be cleared later in this function.
2322 		 *  - If there are responses to send, it will be cleared in the state machine.
2323 		 * For the cases of RSP received:
2324 		 *  - If no further INIT to send, it will be cleared later in this function.
2325 		 *  - Otherwise, it will be cleared in the state machine if timeout or it will go
2326 		 *    back here until no further INIT to send.
2327 		 * For the cases of unknown type received:
2328 		 *  - We will send NAK and the flag will be cleared in the state machine.
2329 		 */
2330 		port->vdm_sm_running = true;
2331 		rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
2332 				    rx_sop_type, &response_tx_sop_type);
2333 	} else {
2334 		if (port->negotiated_rev >= PD_REV30)
2335 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2336 	}
2337 
2338 	/*
2339 	 * We are done with any state stored in the port struct now, except
2340 	 * for any port struct changes done by the tcpm_queue_vdm() call
2341 	 * below, which is a separate operation.
2342 	 *
2343 	 * So we can safely release the lock here; and we MUST release the
2344 	 * lock here to avoid an AB BA lock inversion:
2345 	 *
2346 	 * If we keep the lock here then the lock ordering in this path is:
2347 	 * 1. tcpm_pd_rx_handler takes the tcpm port lock
2348 	 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
2349 	 *
2350 	 * And we also have this ordering:
2351 	 * 1. alt-mode driver takes the alt-mode's lock
2352 	 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
2353 	 *    tcpm port lock
2354 	 *
2355 	 * Dropping our lock here avoids this.
2356 	 */
2357 	mutex_unlock(&port->lock);
2358 
2359 	if (adev) {
2360 		switch (adev_action) {
2361 		case ADEV_NONE:
2362 			break;
2363 		case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
2364 			WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
2365 			typec_altmode_vdm(adev, p[0], &p[1], cnt);
2366 			break;
2367 		case ADEV_QUEUE_VDM:
2368 			if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
2369 				typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2370 			else
2371 				typec_altmode_vdm(adev, p[0], &p[1], cnt);
2372 			break;
2373 		case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
2374 			if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
2375 				if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
2376 							    p[0], &p[1], cnt)) {
2377 					int svdm_version = typec_get_cable_svdm_version(
2378 										port->typec_port);
2379 					if (svdm_version < 0)
2380 						break;
2381 
2382 					response[0] = VDO(adev->svid, 1, svdm_version,
2383 							CMD_EXIT_MODE);
2384 					response[0] |= VDO_OPOS(adev->mode);
2385 					rlen = 1;
2386 				}
2387 			} else {
2388 				if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
2389 					int svdm_version = typec_get_negotiated_svdm_version(
2390 										port->typec_port);
2391 					if (svdm_version < 0)
2392 						break;
2393 
2394 					response[0] = VDO(adev->svid, 1, svdm_version,
2395 							CMD_EXIT_MODE);
2396 					response[0] |= VDO_OPOS(adev->mode);
2397 					rlen = 1;
2398 				}
2399 			}
2400 			break;
2401 		case ADEV_ATTENTION:
2402 			if (typec_altmode_attention(adev, p[1]))
2403 				tcpm_log(port, "typec_altmode_attention no port partner altmode");
2404 			break;
2405 		}
2406 	}
2407 
2408 	/*
2409 	 * We must re-take the lock here to balance the unlock in
2410 	 * tcpm_pd_rx_handler, note that no changes, other than the
2411 	 * tcpm_queue_vdm call, are made while the lock is held again.
2412 	 * All that is done after the call is unwinding the call stack until
2413 	 * we return to tcpm_pd_rx_handler and do the unlock there.
2414 	 */
2415 	mutex_lock(&port->lock);
2416 
2417 	if (rlen > 0)
2418 		tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
2419 	else
2420 		port->vdm_sm_running = false;
2421 }
2422 
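/*
 * Helper to queue an SVDM with a freshly built header. Informative example of
 * a typical invocation: tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT,
 * NULL, 0, TCPC_TX_SOP) would initiate Discover Identity with the partner.
 */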
2423 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
2424 			  const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
2425 {
2426 	int svdm_version;
2427 	u32 header;
2428 
2429 	switch (tx_sop_type) {
2430 	case TCPC_TX_SOP_PRIME:
2431 		/*
2432 		 * If the port partner is discovered, then the port partner's
2433 		 * SVDM Version will be returned
2434 		 */
2435 		svdm_version = typec_get_cable_svdm_version(port->typec_port);
2436 		if (svdm_version < 0)
2437 			svdm_version = SVDM_VER_MAX;
2438 		break;
2439 	case TCPC_TX_SOP:
2440 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2441 		if (svdm_version < 0)
2442 			return;
2443 		break;
2444 	default:
2445 		svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2446 		if (svdm_version < 0)
2447 			return;
2448 		break;
2449 	}
2450 
2451 	if (WARN_ON(count > VDO_MAX_SIZE - 1))
2452 		count = VDO_MAX_SIZE - 1;
2453 
2454 	/* set VDM header with VID & CMD */
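	/*
	 * Informative: the second VDO() argument is the structured-VDM bit.
	 * SVIDs in the standard 0xFFxx range (matching USB_SID_PD) always use
	 * structured VDMs; for other SVIDs the bit is only set for the
	 * standard commands up to CMD_ATTENTION.
	 */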
2455 	header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
2456 			1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
2457 			svdm_version, cmd);
2458 	tcpm_queue_vdm(port, header, data, count, tx_sop_type);
2459 }
2460 
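/*
 * Informative summary of the timeouts chosen below: for commands we initiate
 * (CMDT_INIT), Enter/Exit Mode waits PD_T_VDM_WAIT_MODE_E and other commands
 * PD_T_VDM_SNDR_RSP; otherwise PD_T_VDM_E_MODE and PD_T_VDM_RCVR_RSP apply.
 * Unstructured VDMs always use PD_T_VDM_UNSTRUCTURED.
 */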
2461 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
2462 {
2463 	unsigned int timeout;
2464 	int cmd = PD_VDO_CMD(vdm_hdr);
2465 
2466 	/* it's not a structured VDM command */
2467 	if (!PD_VDO_SVDM(vdm_hdr))
2468 		return PD_T_VDM_UNSTRUCTURED;
2469 
2470 	switch (PD_VDO_CMDT(vdm_hdr)) {
2471 	case CMDT_INIT:
2472 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2473 			timeout = PD_T_VDM_WAIT_MODE_E;
2474 		else
2475 			timeout = PD_T_VDM_SNDR_RSP;
2476 		break;
2477 	default:
2478 		if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2479 			timeout = PD_T_VDM_E_MODE;
2480 		else
2481 			timeout = PD_T_VDM_RCVR_RSP;
2482 		break;
2483 	}
2484 	return timeout;
2485 }
2486 
2487 static void vdm_run_state_machine(struct tcpm_port *port)
2488 {
2489 	struct pd_message msg;
2490 	int i, res = 0;
2491 	u32 vdo_hdr = port->vdo_data[0];
2492 	u32 response[8] = { };
2493 
2494 	switch (port->vdm_state) {
2495 	case VDM_STATE_READY:
2496 		/* Only transmit VDM if attached */
2497 		if (!port->attached) {
2498 			port->vdm_state = VDM_STATE_ERR_BUSY;
2499 			break;
2500 		}
2501 
2502 		/*
2503 		 * If there's traffic or we're not in a ready state, don't send
2504 		 * a VDM.
2505 		 */
2506 		if (port->state != SRC_READY && port->state != SNK_READY &&
2507 		    port->state != SRC_VDM_IDENTITY_REQUEST) {
2508 			port->vdm_sm_running = false;
2509 			break;
2510 		}
2511 
2512 		/* TODO: AMS operation for Unstructured VDM */
2513 		if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
2514 			switch (PD_VDO_CMD(vdo_hdr)) {
2515 			case CMD_DISCOVER_IDENT:
2516 				res = tcpm_ams_start(port, DISCOVER_IDENTITY);
2517 				if (res == 0) {
2518 					switch (port->tx_sop_type) {
2519 					case TCPC_TX_SOP_PRIME:
2520 						port->send_discover_prime = false;
2521 						break;
2522 					case TCPC_TX_SOP:
2523 						port->send_discover = false;
2524 						break;
2525 					default:
2526 						port->send_discover = false;
2527 						break;
2528 					}
2529 				} else if (res == -EAGAIN) {
2530 					port->vdo_data[0] = 0;
2531 					mod_send_discover_delayed_work(port,
2532 								       SEND_DISCOVER_RETRY_MS);
2533 				}
2534 				break;
2535 			case CMD_DISCOVER_SVID:
2536 				res = tcpm_ams_start(port, DISCOVER_SVIDS);
2537 				break;
2538 			case CMD_DISCOVER_MODES:
2539 				res = tcpm_ams_start(port, DISCOVER_MODES);
2540 				break;
2541 			case CMD_ENTER_MODE:
2542 				res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2543 				break;
2544 			case CMD_EXIT_MODE:
2545 				res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2546 				break;
2547 			case CMD_ATTENTION:
2548 				res = tcpm_ams_start(port, ATTENTION);
2549 				break;
2550 			case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2551 				res = tcpm_ams_start(port, STRUCTURED_VDMS);
2552 				break;
2553 			default:
2554 				res = -EOPNOTSUPP;
2555 				break;
2556 			}
2557 
2558 			if (res < 0) {
2559 				port->vdm_state = VDM_STATE_ERR_BUSY;
2560 				return;
2561 			}
2562 		}
2563 
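		/*
		 * Informative: for a PD 3.0 Source initiating an SVDM, the
		 * transmit is delayed by PD_T_SINK_TX as part of collision
		 * avoidance, giving the Sink a window to start its own AMS.
		 */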
2564 		port->vdm_state = VDM_STATE_SEND_MESSAGE;
2565 		mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2566 					    port->pwr_role == TYPEC_SOURCE &&
2567 					    PD_VDO_SVDM(vdo_hdr) &&
2568 					    PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2569 					   PD_T_SINK_TX : 0);
2570 		break;
2571 	case VDM_STATE_WAIT_RSP_BUSY:
2572 		port->vdo_data[0] = port->vdo_retry;
2573 		port->vdo_count = 1;
2574 		port->vdm_state = VDM_STATE_READY;
2575 		tcpm_ams_finish(port);
2576 		break;
2577 	case VDM_STATE_BUSY:
2578 		port->vdm_state = VDM_STATE_ERR_TMOUT;
2579 		if (port->ams != NONE_AMS)
2580 			tcpm_ams_finish(port);
2581 		break;
2582 	case VDM_STATE_ERR_SEND:
2583 		/*
2584 		 * When sending Discover Identity to SOP' before establishing an
2585 		 * explicit contract, do not retry. Instead, interleave sending
2586 		 * Source_Capabilities over SOP and Discover Identity over SOP'.
2587 		 */
2588 		if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2589 			tcpm_ams_finish(port);
2590 			port->vdm_state = VDM_STATE_DONE;
2591 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2592 		/*
2593 		 * A partner which does not support USB PD will not reply,
2594 		 * so this is not a fatal error. At the same time, some
2595 		 * devices may not return GoodCRC under some circumstances,
2596 		 * so we need to retry.
2597 		 */
2598 		} else if (port->vdm_retries < 3) {
2599 			tcpm_log(port, "VDM Tx error, retry");
2600 			port->vdm_retries++;
2601 			port->vdm_state = VDM_STATE_READY;
2602 			if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2603 				tcpm_ams_finish(port);
2604 		} else {
2605 			tcpm_ams_finish(port);
2606 			if (port->tx_sop_type == TCPC_TX_SOP)
2607 				break;
2608 			/* Handle SOP' Transmission Errors */
2609 			switch (PD_VDO_CMD(vdo_hdr)) {
2610 			/*
2611 			 * If Discover Identity fails on SOP', then resume
2612 			 * discovery process on SOP only.
2613 			 */
2614 			case CMD_DISCOVER_IDENT:
2615 				port->vdo_data[0] = 0;
2616 				response[0] = VDO(USB_SID_PD, 1,
2617 						  typec_get_negotiated_svdm_version(
2618 									port->typec_port),
2619 						  CMD_DISCOVER_SVID);
2620 				tcpm_queue_vdm(port, response[0], &response[1],
2621 					       0, TCPC_TX_SOP);
2622 				break;
2623 			/*
2624 			 * If Discover SVIDs or Discover Modes fail, then
2625 			 * proceed with Alt Mode discovery process on SOP.
2626 			 */
2627 			case CMD_DISCOVER_SVID:
2628 				tcpm_register_partner_altmodes(port);
2629 				break;
2630 			case CMD_DISCOVER_MODES:
2631 				tcpm_register_partner_altmodes(port);
2632 				break;
2633 			default:
2634 				break;
2635 			}
2636 		}
2637 		break;
2638 	case VDM_STATE_SEND_MESSAGE:
2639 		/* Prepare and send VDM */
2640 		memset(&msg, 0, sizeof(msg));
2641 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
2642 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2643 						  0,	/* Cable Plug Indicator for DFP/UFP */
2644 						  0,	/* Reserved */
2645 						  port->negotiated_rev_prime,
2646 						  port->message_id_prime,
2647 						  port->vdo_count);
2648 		} else {
2649 			msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2650 						  port->pwr_role,
2651 						  port->data_role,
2652 						  port->negotiated_rev,
2653 						  port->message_id,
2654 						  port->vdo_count);
2655 		}
2656 		for (i = 0; i < port->vdo_count; i++)
2657 			msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2658 		res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
2659 		if (res < 0) {
2660 			port->vdm_state = VDM_STATE_ERR_SEND;
2661 		} else {
2662 			unsigned long timeout;
2663 
2664 			port->vdm_retries = 0;
2665 			port->vdo_data[0] = 0;
2666 			port->vdm_state = VDM_STATE_BUSY;
2667 			timeout = vdm_ready_timeout(vdo_hdr);
2668 			mod_vdm_delayed_work(port, timeout);
2669 		}
2670 		break;
2671 	default:
2672 		break;
2673 	}
2674 }
2675 
2676 static void vdm_state_machine_work(struct kthread_work *work)
2677 {
2678 	struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2679 	enum vdm_states prev_state;
2680 
2681 	mutex_lock(&port->lock);
2682 
2683 	/*
2684 	 * Continue running as long as the port is not busy and there was
2685 	 * a state change.
2686 	 */
2687 	do {
2688 		prev_state = port->vdm_state;
2689 		vdm_run_state_machine(port);
2690 	} while (port->vdm_state != prev_state &&
2691 		 port->vdm_state != VDM_STATE_BUSY &&
2692 		 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2693 
2694 	if (port->vdm_state < VDM_STATE_READY)
2695 		port->vdm_sm_running = false;
2696 
2697 	mutex_unlock(&port->lock);
2698 }
2699 
2700 enum pdo_err {
2701 	PDO_NO_ERR,
2702 	PDO_ERR_NO_VSAFE5V,
2703 	PDO_ERR_VSAFE5V_NOT_FIRST,
2704 	PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2705 	PDO_ERR_FIXED_NOT_SORTED,
2706 	PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2707 	PDO_ERR_DUPE_PDO,
2708 	PDO_ERR_PPS_APDO_NOT_SORTED,
2709 	PDO_ERR_DUPE_PPS_APDO,
2710 };
2711 
2712 static const char * const pdo_err_msg[] = {
2713 	[PDO_ERR_NO_VSAFE5V] =
2714 	" err: source/sink caps should at least have vSafe5V",
2715 	[PDO_ERR_VSAFE5V_NOT_FIRST] =
2716 	" err: vSafe5V Fixed Supply Object Shall always be the first object",
2717 	[PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2718 	" err: PDOs should be in the following order: Fixed; Battery; Variable",
2719 	[PDO_ERR_FIXED_NOT_SORTED] =
2720 	" err: Fixed supply pdos should be in increasing order of their fixed voltage",
2721 	[PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2722 	" err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2723 	[PDO_ERR_DUPE_PDO] =
2724 	" err: Variable/Batt supply pdos cannot have same min/max voltage",
2725 	[PDO_ERR_PPS_APDO_NOT_SORTED] =
2726 	" err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2727 	[PDO_ERR_DUPE_PPS_APDO] =
2728 	" err: Programmable power supply apdos cannot have same min/max voltage and max current",
2729 };
2730 
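/*
 * Informative example of a well-ordered capability list (macros from
 * <linux/usb/pd.h>, values illustrative only):
 *
 *	static const u32 caps_example[] = {
 *		PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE), // vSafe5V first
 *		PDO_FIXED(9000, 3000, 0),                   // Fixed, rising voltage
 *		PDO_VAR(5000, 12000, 3000),                 // Variable/Battery by min voltage
 *		PDO_PPS_APDO(3300, 11000, 3000),            // PPS APDOs by max voltage
 *	};
 */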
2731 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2732 				  unsigned int nr_pdo)
2733 {
2734 	unsigned int i;
2735 
2736 	/* Should at least contain vSafe5v */
2737 	if (nr_pdo < 1)
2738 		return PDO_ERR_NO_VSAFE5V;
2739 
2740 	/* The vSafe5V Fixed Supply Object Shall always be the first object */
2741 	if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2742 	    pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2743 		return PDO_ERR_VSAFE5V_NOT_FIRST;
2744 
2745 	for (i = 1; i < nr_pdo; i++) {
2746 		if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2747 			return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2748 		} else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2749 			enum pd_pdo_type type = pdo_type(pdo[i]);
2750 
2751 			switch (type) {
2752 			/*
2753 			 * The remaining Fixed Supply Objects, if
2754 			 * present, shall be sent in voltage order;
2755 			 * lowest to highest.
2756 			 */
2757 			case PDO_TYPE_FIXED:
2758 				if (pdo_fixed_voltage(pdo[i]) <=
2759 				    pdo_fixed_voltage(pdo[i - 1]))
2760 					return PDO_ERR_FIXED_NOT_SORTED;
2761 				break;
2762 			/*
2763 			 * The Battery Supply Objects and Variable
2764 			 * supply, if present, shall be sent in Minimum
2765 			 * Voltage order; lowest to highest.
2766 			 */
2767 			case PDO_TYPE_VAR:
2768 			case PDO_TYPE_BATT:
2769 				if (pdo_min_voltage(pdo[i]) <
2770 				    pdo_min_voltage(pdo[i - 1]))
2771 					return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2772 				else if ((pdo_min_voltage(pdo[i]) ==
2773 					  pdo_min_voltage(pdo[i - 1])) &&
2774 					 (pdo_max_voltage(pdo[i]) ==
2775 					  pdo_max_voltage(pdo[i - 1])))
2776 					return PDO_ERR_DUPE_PDO;
2777 				break;
2778 			/*
2779 			 * The Programmable Power Supply APDOs, if present,
2780 			 * shall be sent in Maximum Voltage order;
2781 			 * lowest to highest.
2782 			 */
2783 			case PDO_TYPE_APDO:
2784 				if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2785 					break;
2786 
2787 				if (pdo_pps_apdo_max_voltage(pdo[i]) <
2788 				    pdo_pps_apdo_max_voltage(pdo[i - 1]))
2789 					return PDO_ERR_PPS_APDO_NOT_SORTED;
2790 				else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2791 					  pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2792 					 pdo_pps_apdo_max_voltage(pdo[i]) ==
2793 					  pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2794 					 pdo_pps_apdo_max_current(pdo[i]) ==
2795 					  pdo_pps_apdo_max_current(pdo[i - 1]))
2796 					return PDO_ERR_DUPE_PPS_APDO;
2797 				break;
2798 			default:
2799 				tcpm_log_force(port, " Unknown pdo type");
2800 			}
2801 		}
2802 	}
2803 
2804 	return PDO_NO_ERR;
2805 }
2806 
2807 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2808 			      unsigned int nr_pdo)
2809 {
2810 	enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2811 
2812 	if (err_index != PDO_NO_ERR) {
2813 		tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2814 		return -EINVAL;
2815 	}
2816 
2817 	return 0;
2818 }
2819 
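/*
 * Informative: the typec_altmode_ops below let alt mode drivers drive SVDM
 * Enter/Exit Mode and raw VDMs through TCPM. enter/exit build a header for
 * the mode's SVID and object position and queue it on SOP; .vdm forwards an
 * already-built VDM from the driver.
 */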
2820 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2821 {
2822 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2823 	int svdm_version;
2824 	u32 header;
2825 
2826 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2827 	if (svdm_version < 0)
2828 		return svdm_version;
2829 
2830 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2831 	header |= VDO_OPOS(altmode->mode);
2832 
2833 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
2834 	return 0;
2835 }
2836 
2837 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2838 {
2839 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2840 	int svdm_version;
2841 	u32 header;
2842 
2843 	svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2844 	if (svdm_version < 0)
2845 		return svdm_version;
2846 
2847 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2848 	header |= VDO_OPOS(altmode->mode);
2849 
2850 	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2851 	return 0;
2852 }
2853 
2854 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2855 			    u32 header, const u32 *data, int count)
2856 {
2857 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2858 
2859 	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
2860 
2861 	return 0;
2862 }
2863 
2864 static const struct typec_altmode_ops tcpm_altmode_ops = {
2865 	.enter = tcpm_altmode_enter,
2866 	.exit = tcpm_altmode_exit,
2867 	.vdm = tcpm_altmode_vdm,
2868 };
2869 
2870 
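/*
 * Informative: cable (SOP') counterparts of the ops above. The flow is
 * identical, but the SVDM version comes from the cable and the messages are
 * queued on SOP' instead of SOP.
 */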
2871 static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
2872 				    u32 *vdo)
2873 {
2874 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2875 	int svdm_version;
2876 	u32 header;
2877 
2878 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2879 	if (svdm_version < 0)
2880 		return svdm_version;
2881 
2882 	header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2883 	header |= VDO_OPOS(altmode->mode);
2884 
2885 	tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
2886 	return 0;
2887 }
2888 
2889 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2890 {
2891 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2892 	int svdm_version;
2893 	u32 header;
2894 
2895 	svdm_version = typec_get_cable_svdm_version(port->typec_port);
2896 	if (svdm_version < 0)
2897 		return svdm_version;
2898 
2899 	header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2900 	header |= VDO_OPOS(altmode->mode);
2901 
2902 	tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2903 	return 0;
2904 }
2905 
2906 static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
2907 				  u32 header, const u32 *data, int count)
2908 {
2909 	struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2910 
2911 	tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
2912 
2913 	return 0;
2914 }
2915 
2916 static const struct typec_cable_ops tcpm_cable_ops = {
2917 	.enter = tcpm_cable_altmode_enter,
2918 	.exit = tcpm_cable_altmode_exit,
2919 	.vdm = tcpm_cable_altmode_vdm,
2920 };
2921 
2922 /*
2923  * PD (data, control) command handling functions
2924  */
2925 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2926 {
2927 	if (port->pwr_role == TYPEC_SOURCE)
2928 		return SRC_READY;
2929 	else
2930 		return SNK_READY;
2931 }
2932 
2933 static int tcpm_pd_send_control(struct tcpm_port *port,
2934 				enum pd_ctrl_msg_type type,
2935 				enum tcpm_transmit_type tx_sop_type);
2936 
2937 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2938 			      int cnt)
2939 {
2940 	u32 p0 = le32_to_cpu(payload[0]);
2941 	unsigned int type = usb_pd_ado_type(p0);
2942 
2943 	if (!type) {
2944 		tcpm_log(port, "Alert message received with no type");
2945 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2946 		return;
2947 	}
2948 
2949 	/* Just handling non-battery alerts for now */
2950 	if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2951 		if (port->pwr_role == TYPEC_SOURCE) {
2952 			port->upcoming_state = GET_STATUS_SEND;
2953 			tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2954 		} else {
2955 			/*
2956 			 * Do not check SinkTxOk here in case the Source doesn't set its Rp to
2957 			 * SinkTxOk in time.
2958 			 */
2959 			port->ams = GETTING_SOURCE_SINK_STATUS;
2960 			tcpm_set_state(port, GET_STATUS_SEND, 0);
2961 		}
2962 	} else {
2963 		tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2964 	}
2965 }
2966 
2967 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2968 						  enum typec_pwr_opmode mode, bool pps_active,
2969 						  u32 requested_vbus_voltage)
2970 {
2971 	int ret;
2972 
2973 	if (!port->tcpc->set_auto_vbus_discharge_threshold)
2974 		return 0;
2975 
2976 	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2977 							    requested_vbus_voltage,
2978 							    port->pps_data.min_volt);
2979 	tcpm_log_force(port,
2980 		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d",
2981 		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage,
2982 		       port->pps_data.min_volt, ret);
2983 
2984 	return ret;
2985 }
2986 
2987 static void tcpm_pd_handle_state(struct tcpm_port *port,
2988 				 enum tcpm_state state,
2989 				 enum tcpm_ams ams,
2990 				 unsigned int delay_ms)
2991 {
2992 	switch (port->state) {
2993 	case SRC_READY:
2994 	case SNK_READY:
2995 		port->ams = ams;
2996 		tcpm_set_state(port, state, delay_ms);
2997 		break;
2998 	/* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2999 	case SNK_TRANSITION_SINK:
3000 	case SNK_TRANSITION_SINK_VBUS:
3001 	case SRC_TRANSITION_SUPPLY:
3002 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3003 		break;
3004 	default:
3005 		if (!tcpm_ams_interruptible(port)) {
3006 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3007 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3008 				       SNK_SOFT_RESET,
3009 				       0);
3010 		} else {
3011 			/* process the Message 6.8.1 */
3012 			port->upcoming_state = state;
3013 			port->next_ams = ams;
3014 			tcpm_set_state(port, ready_state(port), delay_ms);
3015 		}
3016 		break;
3017 	}
3018 }
3019 
3020 static void tcpm_pd_handle_msg(struct tcpm_port *port,
3021 			       enum pd_msg_request message,
3022 			       enum tcpm_ams ams)
3023 {
3024 	switch (port->state) {
3025 	case SRC_READY:
3026 	case SNK_READY:
3027 		port->ams = ams;
3028 		tcpm_queue_message(port, message);
3029 		break;
3030 	/* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
3031 	case SNK_TRANSITION_SINK:
3032 	case SNK_TRANSITION_SINK_VBUS:
3033 	case SRC_TRANSITION_SUPPLY:
3034 		tcpm_set_state(port, HARD_RESET_SEND, 0);
3035 		break;
3036 	default:
3037 		if (!tcpm_ams_interruptible(port)) {
3038 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3039 				       SRC_SOFT_RESET_WAIT_SNK_TX :
3040 				       SNK_SOFT_RESET,
3041 				       0);
3042 		} else {
3043 			port->next_ams = ams;
3044 			tcpm_set_state(port, ready_state(port), 0);
3045 			/* 6.8.1 process the Message */
3046 			tcpm_queue_message(port, message);
3047 		}
3048 		break;
3049 	}
3050 }
3051 
3052 static int tcpm_register_source_caps(struct tcpm_port *port)
3053 {
3054 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3055 	struct usb_power_delivery_capabilities_desc caps = { };
3056 	struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3057 
3058 	if (!port->partner_pd)
3059 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3060 	if (IS_ERR(port->partner_pd))
3061 		return PTR_ERR(port->partner_pd);
3062 
3063 	memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3064 	caps.role = TYPEC_SOURCE;
3065 
3066 	if (cap) {
3067 		usb_power_delivery_unregister_capabilities(cap);
3068 		port->partner_source_caps = NULL;
3069 	}
3070 
3071 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3072 	if (IS_ERR(cap))
3073 		return PTR_ERR(cap);
3074 
3075 	port->partner_source_caps = cap;
3076 
3077 	return 0;
3078 }
3079 
3080 static int tcpm_register_sink_caps(struct tcpm_port *port)
3081 {
3082 	struct usb_power_delivery_desc desc = { port->negotiated_rev };
3083 	struct usb_power_delivery_capabilities_desc caps = { };
3084 	struct usb_power_delivery_capabilities *cap;
3085 
3086 	if (!port->partner_pd)
3087 		port->partner_pd = usb_power_delivery_register(NULL, &desc);
3088 	if (IS_ERR(port->partner_pd))
3089 		return PTR_ERR(port->partner_pd);
3090 
3091 	memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3092 	caps.role = TYPEC_SINK;
3093 
3094 	cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3095 	if (IS_ERR(cap))
3096 		return PTR_ERR(cap);
3097 
3098 	port->partner_sink_caps = cap;
3099 
3100 	return 0;
3101 }
3102 
3103 static void tcpm_pd_data_request(struct tcpm_port *port,
3104 				 const struct pd_message *msg,
3105 				 enum tcpm_transmit_type rx_sop_type)
3106 {
3107 	enum pd_data_msg_type type = pd_header_type_le(msg->header);
3108 	unsigned int cnt = pd_header_cnt_le(msg->header);
3109 	unsigned int rev = pd_header_rev_le(msg->header);
3110 	unsigned int i;
3111 	enum frs_typec_current partner_frs_current;
3112 	bool frs_enable;
3113 	int ret;
3114 
3115 	if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
3116 		port->vdm_state = VDM_STATE_ERR_BUSY;
3117 		tcpm_ams_finish(port);
3118 		mod_vdm_delayed_work(port, 0);
3119 	}
3120 
3121 	switch (type) {
3122 	case PD_DATA_SOURCE_CAP:
3123 		for (i = 0; i < cnt; i++)
3124 			port->source_caps[i] = le32_to_cpu(msg->payload[i]);
3125 
3126 		port->nr_source_caps = cnt;
3127 
3128 		tcpm_log_source_caps(port);
3129 
3130 		tcpm_validate_caps(port, port->source_caps,
3131 				   port->nr_source_caps);
3132 
3133 		tcpm_register_source_caps(port);
3134 
3135 		/*
3136 		 * Adjust revision in subsequent message headers, as required,
3137 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3138 		 * support Rev 1.0 so just do nothing in that scenario.
3139 		 */
3140 		if (rev == PD_REV10) {
3141 			if (port->ams == GET_SOURCE_CAPABILITIES)
3142 				tcpm_ams_finish(port);
3143 			break;
3144 		}
3145 
3146 		if (rev < PD_MAX_REV) {
3147 			port->negotiated_rev = rev;
3148 			if (port->negotiated_rev_prime > port->negotiated_rev)
3149 				port->negotiated_rev_prime = port->negotiated_rev;
3150 		}
3151 
3152 		if (port->pwr_role == TYPEC_SOURCE) {
3153 			if (port->ams == GET_SOURCE_CAPABILITIES)
3154 				tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
3155 			/* Unexpected Source Capabilities */
3156 			else
3157 				tcpm_pd_handle_msg(port,
3158 						   port->negotiated_rev < PD_REV30 ?
3159 						   PD_MSG_CTRL_REJECT :
3160 						   PD_MSG_CTRL_NOT_SUPP,
3161 						   NONE_AMS);
3162 		} else if (port->state == SNK_WAIT_CAPABILITIES ||
3163 			   port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) {
3164 		/*
3165 		 * This message may be received even if VBUS is not
3166 		 * present. This is quite unexpected; see USB PD
3167 		 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
3168 		 * However, at the same time, we must be ready to
3169 		 * receive this message and respond to it 15ms after
3170 		 * receiving PS_RDY during power swap operations, no matter
3171 		 * if VBUS is available or not (USB PD specification,
3172 		 * section 6.5.9.2).
3173 		 * So we need to accept the message either way,
3174 		 * but be prepared to keep waiting for VBUS after it was
3175 		 * handled.
3176 		 */
3177 			port->ams = POWER_NEGOTIATION;
3178 			port->in_ams = true;
3179 			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3180 		} else {
3181 			if (port->ams == GET_SOURCE_CAPABILITIES)
3182 				tcpm_ams_finish(port);
3183 			tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
3184 					     POWER_NEGOTIATION, 0);
3185 		}
3186 		break;
3187 	case PD_DATA_REQUEST:
3188 		/*
3189 		 * Adjust revision in subsequent message headers, as required,
3190 		 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3191 		 * support Rev 1.0 so just reject in that scenario.
3192 		 */
3193 		if (rev == PD_REV10) {
3194 			tcpm_pd_handle_msg(port,
3195 					   port->negotiated_rev < PD_REV30 ?
3196 					   PD_MSG_CTRL_REJECT :
3197 					   PD_MSG_CTRL_NOT_SUPP,
3198 					   NONE_AMS);
3199 			break;
3200 		}
3201 
3202 		if (rev < PD_MAX_REV) {
3203 			port->negotiated_rev = rev;
3204 			if (port->negotiated_rev_prime > port->negotiated_rev)
3205 				port->negotiated_rev_prime = port->negotiated_rev;
3206 		}
3207 
3208 		if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
3209 			tcpm_pd_handle_msg(port,
3210 					   port->negotiated_rev < PD_REV30 ?
3211 					   PD_MSG_CTRL_REJECT :
3212 					   PD_MSG_CTRL_NOT_SUPP,
3213 					   NONE_AMS);
3214 			break;
3215 		}
3216 
3217 		port->sink_request = le32_to_cpu(msg->payload[0]);
3218 
3219 		if (port->vdm_sm_running && port->explicit_contract) {
3220 			tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
3221 			break;
3222 		}
3223 
3224 		if (port->state == SRC_SEND_CAPABILITIES)
3225 			tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
3226 		else
3227 			tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
3228 					     POWER_NEGOTIATION, 0);
3229 		break;
3230 	case PD_DATA_SINK_CAP:
3231 		/* We don't do anything with this at the moment... */
3232 		for (i = 0; i < cnt; i++)
3233 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
3234 
3235 		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
3236 			PDO_FIXED_FRS_CURR_SHIFT;
3237 		frs_enable = partner_frs_current && (partner_frs_current <=
3238 						     port->new_source_frs_current);
3239 		tcpm_log(port,
3240 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
3241 			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
3242 		if (frs_enable) {
3243 			ret  = port->tcpc->enable_frs(port->tcpc, true);
3244 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
3245 		}
3246 
3247 		port->nr_sink_caps = cnt;
3248 		port->sink_cap_done = true;
3249 		tcpm_register_sink_caps(port);
3250 
3251 		if (port->ams == GET_SINK_CAPABILITIES)
3252 			tcpm_set_state(port, ready_state(port), 0);
3253 		/* Unexpected Sink Capabilities */
3254 		else
3255 			tcpm_pd_handle_msg(port,
3256 					   port->negotiated_rev < PD_REV30 ?
3257 					   PD_MSG_CTRL_REJECT :
3258 					   PD_MSG_CTRL_NOT_SUPP,
3259 					   NONE_AMS);
3260 		break;
3261 	case PD_DATA_VENDOR_DEF:
3262 		tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
3263 		break;
3264 	case PD_DATA_BIST:
3265 		port->bist_request = le32_to_cpu(msg->payload[0]);
3266 		tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
3267 		break;
3268 	case PD_DATA_ALERT:
3269 		if (port->state != SRC_READY && port->state != SNK_READY)
3270 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3271 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3272 					     NONE_AMS, 0);
3273 		else
3274 			tcpm_handle_alert(port, msg->payload, cnt);
3275 		break;
3276 	case PD_DATA_BATT_STATUS:
3277 	case PD_DATA_GET_COUNTRY_INFO:
3278 		/* Currently unsupported */
3279 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3280 				   PD_MSG_CTRL_REJECT :
3281 				   PD_MSG_CTRL_NOT_SUPP,
3282 				   NONE_AMS);
3283 		break;
3284 	default:
3285 		tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3286 				   PD_MSG_CTRL_REJECT :
3287 				   PD_MSG_CTRL_NOT_SUPP,
3288 				   NONE_AMS);
3289 		tcpm_log(port, "Unrecognized data message type %#x", type);
3290 		break;
3291 	}
3292 }
3293 
3294 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3295 {
3296 	if (port->pps_pending) {
3297 		port->pps_status = result;
3298 		port->pps_pending = false;
3299 		complete(&port->pps_complete);
3300 	}
3301 }
3302 
3303 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
3304 				 const struct pd_message *msg,
3305 				 enum tcpm_transmit_type rx_sop_type)
3306 {
3307 	enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3308 	enum tcpm_state next_state;
3309 	unsigned int rev = pd_header_rev_le(msg->header);
3310 
3311 	/*
3312 	 * Stop the VDM state machine if it is interrupted by other Messages.
3313 	 * NOT_SUPP is allowed within a VDM AMS while waiting for VDM responses
3314 	 * and will be handled later.
3314 	 */
3315 	if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
3316 		port->vdm_state = VDM_STATE_ERR_BUSY;
3317 		tcpm_ams_finish(port);
3318 		mod_vdm_delayed_work(port, 0);
3319 	}
3320 
3321 	switch (type) {
3322 	case PD_CTRL_GOOD_CRC:
3323 	case PD_CTRL_PING:
3324 		break;
3325 	case PD_CTRL_GET_SOURCE_CAP:
3326 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
3327 		break;
3328 	case PD_CTRL_GET_SINK_CAP:
3329 		tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
3330 		break;
3331 	case PD_CTRL_GOTO_MIN:
3332 		break;
3333 	case PD_CTRL_PS_RDY:
3334 		switch (port->state) {
3335 		case SNK_TRANSITION_SINK:
3336 			if (port->vbus_present) {
3337 				tcpm_set_current_limit(port,
3338 						       port->req_current_limit,
3339 						       port->req_supply_voltage);
3340 				port->explicit_contract = true;
3341 				tcpm_set_auto_vbus_discharge_threshold(port,
3342 								       TYPEC_PWR_MODE_PD,
3343 								       port->pps_data.active,
3344 								       port->supply_voltage);
3345 				tcpm_set_state(port, SNK_READY, 0);
3346 			} else {
3347 				/*
3348 				 * Seen after power swap. Keep waiting for VBUS
3349 				 * in a transitional state.
3350 				 */
3351 				tcpm_set_state(port,
3352 					       SNK_TRANSITION_SINK_VBUS, 0);
3353 			}
3354 			break;
3355 		case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3356 			tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
3357 			break;
3358 		case PR_SWAP_SNK_SRC_SINK_OFF:
3359 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
3360 			break;
3361 		case VCONN_SWAP_WAIT_FOR_VCONN:
3362 			tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
3363 			break;
3364 		case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3365 			tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
3366 			break;
3367 		default:
3368 			tcpm_pd_handle_state(port,
3369 					     port->pwr_role == TYPEC_SOURCE ?
3370 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3371 					     SNK_SOFT_RESET,
3372 					     NONE_AMS, 0);
3373 			break;
3374 		}
3375 		break;
3376 	case PD_CTRL_REJECT:
3377 	case PD_CTRL_WAIT:
3378 	case PD_CTRL_NOT_SUPP:
3379 		switch (port->state) {
3380 		case SNK_NEGOTIATE_CAPABILITIES:
3381 			/* USB PD specification, Figure 8-43 */
3382 			if (port->explicit_contract)
3383 				next_state = SNK_READY;
3384 			else
3385 				next_state = SNK_WAIT_CAPABILITIES;
3386 
3387 			/* Threshold was relaxed before sending Request. Restore it back. */
3388 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3389 							       port->pps_data.active,
3390 							       port->supply_voltage);
3391 			tcpm_set_state(port, next_state, 0);
3392 			break;
3393 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3394 			/* Revert data back from any requested PPS updates */
3395 			port->pps_data.req_out_volt = port->supply_voltage;
3396 			port->pps_data.req_op_curr = port->current_limit;
3397 			port->pps_status = (type == PD_CTRL_WAIT ?
3398 					    -EAGAIN : -EOPNOTSUPP);
3399 
3400 			/* Threshold was relaxed before sending Request. Restore it back. */
3401 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3402 							       port->pps_data.active,
3403 							       port->supply_voltage);
3404 
3405 			tcpm_set_state(port, SNK_READY, 0);
3406 			break;
3407 		case DR_SWAP_SEND:
3408 			port->swap_status = (type == PD_CTRL_WAIT ?
3409 					     -EAGAIN : -EOPNOTSUPP);
3410 			tcpm_set_state(port, DR_SWAP_CANCEL, 0);
3411 			break;
3412 		case PR_SWAP_SEND:
3413 			port->swap_status = (type == PD_CTRL_WAIT ?
3414 					     -EAGAIN : -EOPNOTSUPP);
3415 			tcpm_set_state(port, PR_SWAP_CANCEL, 0);
3416 			break;
3417 		case VCONN_SWAP_SEND:
3418 			port->swap_status = (type == PD_CTRL_WAIT ?
3419 					     -EAGAIN : -EOPNOTSUPP);
3420 			tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
3421 			break;
3422 		case FR_SWAP_SEND:
3423 			tcpm_set_state(port, FR_SWAP_CANCEL, 0);
3424 			break;
3425 		case GET_SINK_CAP:
3426 			port->sink_cap_done = true;
3427 			tcpm_set_state(port, ready_state(port), 0);
3428 			break;
3429 		/*
3430 		 * Some port partners do not support GET_STATUS; avoid soft resetting the link to
3431 		 * prevent redundant power re-negotiation
3432 		 */
3433 		case GET_STATUS_SEND:
3434 			tcpm_set_state(port, ready_state(port), 0);
3435 			break;
3436 		case SRC_READY:
3437 		case SNK_READY:
3438 			if (port->vdm_state > VDM_STATE_READY) {
3439 				port->vdm_state = VDM_STATE_DONE;
3440 				if (tcpm_vdm_ams(port))
3441 					tcpm_ams_finish(port);
3442 				mod_vdm_delayed_work(port, 0);
3443 				break;
3444 			}
3445 			fallthrough;
3446 		default:
3447 			tcpm_pd_handle_state(port,
3448 					     port->pwr_role == TYPEC_SOURCE ?
3449 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3450 					     SNK_SOFT_RESET,
3451 					     NONE_AMS, 0);
3452 			break;
3453 		}
3454 		break;
3455 	case PD_CTRL_ACCEPT:
3456 		switch (port->state) {
3457 		case SNK_NEGOTIATE_CAPABILITIES:
3458 			port->pps_data.active = false;
3459 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3460 			break;
3461 		case SNK_NEGOTIATE_PPS_CAPABILITIES:
3462 			port->pps_data.active = true;
3463 			port->pps_data.min_volt = port->pps_data.req_min_volt;
3464 			port->pps_data.max_volt = port->pps_data.req_max_volt;
3465 			port->pps_data.max_curr = port->pps_data.req_max_curr;
3466 			port->req_supply_voltage = port->pps_data.req_out_volt;
3467 			port->req_current_limit = port->pps_data.req_op_curr;
3468 			power_supply_changed(port->psy);
3469 			tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3470 			break;
3471 		case SOFT_RESET_SEND:
3472 			if (port->ams == SOFT_RESET_AMS)
3473 				tcpm_ams_finish(port);
3474 			/*
3475 			 * SOP' Soft Reset is done after Vconn Swap,
3476 			 * which returns to ready state
3477 			 */
3478 			if (rx_sop_type == TCPC_TX_SOP_PRIME) {
3479 				if (rev < port->negotiated_rev_prime)
3480 					port->negotiated_rev_prime = rev;
3481 				tcpm_set_state(port, ready_state(port), 0);
3482 				break;
3483 			}
3484 			if (port->pwr_role == TYPEC_SOURCE) {
3485 				port->upcoming_state = SRC_SEND_CAPABILITIES;
3486 				tcpm_ams_start(port, POWER_NEGOTIATION);
3487 			} else {
3488 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3489 			}
3490 			break;
3491 		case DR_SWAP_SEND:
3492 			tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
3493 			break;
3494 		case PR_SWAP_SEND:
3495 			tcpm_set_state(port, PR_SWAP_START, 0);
3496 			break;
3497 		case VCONN_SWAP_SEND:
3498 			tcpm_set_state(port, VCONN_SWAP_START, 0);
3499 			break;
3500 		case FR_SWAP_SEND:
3501 			tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
3502 			break;
3503 		default:
3504 			tcpm_pd_handle_state(port,
3505 					     port->pwr_role == TYPEC_SOURCE ?
3506 					     SRC_SOFT_RESET_WAIT_SNK_TX :
3507 					     SNK_SOFT_RESET,
3508 					     NONE_AMS, 0);
3509 			break;
3510 		}
3511 		break;
3512 	case PD_CTRL_SOFT_RESET:
3513 		port->ams = SOFT_RESET_AMS;
3514 		tcpm_set_state(port, SOFT_RESET, 0);
3515 		break;
3516 	case PD_CTRL_DR_SWAP:
3517 		/*
3518 		 * XXX
3519 		 * 6.3.9: If an alternate mode is active, a request to swap
3520 		 * alternate modes shall trigger a port reset.
3521 		 */
3522 		if (port->typec_caps.data != TYPEC_PORT_DRD) {
3523 			tcpm_pd_handle_msg(port,
3524 					   port->negotiated_rev < PD_REV30 ?
3525 					   PD_MSG_CTRL_REJECT :
3526 					   PD_MSG_CTRL_NOT_SUPP,
3527 					   NONE_AMS);
3528 		} else {
3529 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3530 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3531 				break;
3532 			}
3533 
3534 			tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
3535 		}
3536 		break;
3537 	case PD_CTRL_PR_SWAP:
3538 		if (port->port_type != TYPEC_PORT_DRP) {
3539 			tcpm_pd_handle_msg(port,
3540 					   port->negotiated_rev < PD_REV30 ?
3541 					   PD_MSG_CTRL_REJECT :
3542 					   PD_MSG_CTRL_NOT_SUPP,
3543 					   NONE_AMS);
3544 		} else {
3545 			if (port->send_discover && port->negotiated_rev < PD_REV30) {
3546 				tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3547 				break;
3548 			}
3549 
3550 			tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
3551 		}
3552 		break;
3553 	case PD_CTRL_VCONN_SWAP:
3554 		if (port->send_discover && port->negotiated_rev < PD_REV30) {
3555 			tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3556 			break;
3557 		}
3558 
3559 		tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
3560 		break;
3561 	case PD_CTRL_GET_SOURCE_CAP_EXT:
3562 	case PD_CTRL_GET_STATUS:
3563 	case PD_CTRL_FR_SWAP:
3564 	case PD_CTRL_GET_PPS_STATUS:
3565 	case PD_CTRL_GET_COUNTRY_CODES:
3566 		/* Currently not supported */
3567 		tcpm_pd_handle_msg(port,
3568 				   port->negotiated_rev < PD_REV30 ?
3569 				   PD_MSG_CTRL_REJECT :
3570 				   PD_MSG_CTRL_NOT_SUPP,
3571 				   NONE_AMS);
3572 		break;
3573 	case PD_CTRL_GET_REVISION:
3574 		if (port->negotiated_rev >= PD_REV30 && port->pd_rev.rev_major)
3575 			tcpm_pd_handle_msg(port, PD_MSG_DATA_REV,
3576 					   REVISION_INFORMATION);
3577 		else
3578 			tcpm_pd_handle_msg(port,
3579 					   port->negotiated_rev < PD_REV30 ?
3580 					   PD_MSG_CTRL_REJECT :
3581 					   PD_MSG_CTRL_NOT_SUPP,
3582 					   NONE_AMS);
3583 		break;
3584 	default:
3585 		tcpm_pd_handle_msg(port,
3586 				   port->negotiated_rev < PD_REV30 ?
3587 				   PD_MSG_CTRL_REJECT :
3588 				   PD_MSG_CTRL_NOT_SUPP,
3589 				   NONE_AMS);
3590 		tcpm_log(port, "Unrecognized ctrl message type %#x", type);
3591 		break;
3592 	}
3593 }
3594 
3595 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
3596 				    const struct pd_message *msg)
3597 {
3598 	enum pd_ext_msg_type type = pd_header_type_le(msg->header);
3599 	unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
3600 
3601 	/* Stop the VDM state machine if it is interrupted by other Messages */
3602 	if (tcpm_vdm_ams(port)) {
3603 		port->vdm_state = VDM_STATE_ERR_BUSY;
3604 		tcpm_ams_finish(port);
3605 		mod_vdm_delayed_work(port, 0);
3606 	}
3607 
3608 	if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
3609 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3610 		tcpm_log(port, "Unchunked extended messages unsupported");
3611 		return;
3612 	}
3613 
3614 	if (data_size > PD_EXT_MAX_CHUNK_DATA) {
3615 		tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
3616 		tcpm_log(port, "Chunk handling not yet supported");
3617 		return;
3618 	}
3619 
3620 	switch (type) {
3621 	case PD_EXT_STATUS:
3622 	case PD_EXT_PPS_STATUS:
3623 		if (port->ams == GETTING_SOURCE_SINK_STATUS) {
3624 			tcpm_ams_finish(port);
3625 			tcpm_set_state(port, ready_state(port), 0);
3626 		} else {
3627 			/* unexpected Status or PPS_Status Message */
3628 			tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3629 					     SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3630 					     NONE_AMS, 0);
3631 		}
3632 		break;
3633 	case PD_EXT_SOURCE_CAP_EXT:
3634 	case PD_EXT_GET_BATT_CAP:
3635 	case PD_EXT_GET_BATT_STATUS:
3636 	case PD_EXT_BATT_CAP:
3637 	case PD_EXT_GET_MANUFACTURER_INFO:
3638 	case PD_EXT_MANUFACTURER_INFO:
3639 	case PD_EXT_SECURITY_REQUEST:
3640 	case PD_EXT_SECURITY_RESPONSE:
3641 	case PD_EXT_FW_UPDATE_REQUEST:
3642 	case PD_EXT_FW_UPDATE_RESPONSE:
3643 	case PD_EXT_COUNTRY_INFO:
3644 	case PD_EXT_COUNTRY_CODES:
3645 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3646 		break;
3647 	default:
3648 		tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3649 		tcpm_log(port, "Unrecognized extended message type %#x", type);
3650 		break;
3651 	}
3652 }
3653 
3654 static void tcpm_pd_rx_handler(struct kthread_work *work)
3655 {
3656 	struct pd_rx_event *event = container_of(work,
3657 						 struct pd_rx_event, work);
3658 	const struct pd_message *msg = &event->msg;
3659 	unsigned int cnt = pd_header_cnt_le(msg->header);
3660 	struct tcpm_port *port = event->port;
3661 	enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
3662 
3663 	mutex_lock(&port->lock);
3664 
3665 	tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
3666 		 port->attached);
3667 
3668 	if (port->attached) {
3669 		enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3670 		unsigned int msgid = pd_header_msgid_le(msg->header);
3671 
3672 		/*
3673 		 * Drop SOP' messages if they cannot be received, as reported by
3674 		 * tcpm_can_communicate_sop_prime()
3675 		 */
3676 		if (rx_sop_type == TCPC_TX_SOP_PRIME &&
3677 		    !tcpm_can_communicate_sop_prime(port))
3678 			goto done;
3679 
3680 		/*
3681 		 * USB PD standard, 6.6.1.2:
3682 		 * "... if MessageID value in a received Message is the
3683 		 * same as the stored value, the receiver shall return a
3684 		 * GoodCRC Message with that MessageID value and drop
3685 		 * the Message (this is a retry of an already received
3686 		 * Message). Note: this shall not apply to the Soft_Reset
3687 		 * Message which always has a MessageID value of zero."
3688 		 */
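		/*
		 * MessageIDs are tracked per SOP* type, so SOP and SOP' retries are
		 * de-duplicated independently below. Illustrative example: a
		 * retransmitted SOP Source_Capabilities carrying the MessageID
		 * already stored in rx_msgid is dropped here, while an SOP' message
		 * that happens to carry the same numeric MessageID is still
		 * delivered, because it is checked against rx_msgid_prime instead.
		 */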
3689 		switch (rx_sop_type) {
3690 		case TCPC_TX_SOP_PRIME:
3691 			if (msgid == port->rx_msgid_prime)
3692 				goto done;
3693 			port->rx_msgid_prime = msgid;
3694 			break;
3695 		case TCPC_TX_SOP:
3696 		default:
3697 			if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3698 				goto done;
3699 			port->rx_msgid = msgid;
3700 			break;
3701 		}
3702 
3703 		/*
3704 		 * If both ends believe they are the DFP/host, we have a data role
3705 		 * mismatch.
3706 		 */
3707 		if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3708 		    (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
3709 			tcpm_log(port,
3710 				 "Data role mismatch, initiating error recovery");
3711 			tcpm_set_state(port, ERROR_RECOVERY, 0);
3712 		} else {
3713 			if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3714 				tcpm_pd_ext_msg_request(port, msg);
3715 			else if (cnt)
3716 				tcpm_pd_data_request(port, msg, rx_sop_type);
3717 			else
3718 				tcpm_pd_ctrl_request(port, msg, rx_sop_type);
3719 		}
3720 	}
3721 
3722 done:
3723 	mutex_unlock(&port->lock);
3724 	kfree(event);
3725 }
3726 
3727 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3728 		     enum tcpm_transmit_type rx_sop_type)
3729 {
3730 	struct pd_rx_event *event;
3731 
3732 	event = kzalloc(sizeof(*event), GFP_ATOMIC);
3733 	if (!event)
3734 		return;
3735 
3736 	kthread_init_work(&event->work, tcpm_pd_rx_handler);
3737 	event->port = port;
3738 	event->rx_sop_type = rx_sop_type;
3739 	memcpy(&event->msg, msg, sizeof(*msg));
3740 	kthread_queue_work(port->wq, &event->work);
3741 }
3742 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
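/*
 * Low-level TCPC drivers call tcpm_pd_receive() from their receive path,
 * possibly in atomic context, so the event is allocated with GFP_ATOMIC and
 * the protocol work is deferred to tcpm_pd_rx_handler() on the port's
 * kthread worker. A minimal, hypothetical driver sketch (read_rx_fifo() and
 * chip->tcpm_port are illustrative names only):
 *
 *	struct pd_message msg;
 *
 *	read_rx_fifo(chip, &msg);
 *	tcpm_pd_receive(chip->tcpm_port, &msg, TCPC_TX_SOP);
 */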
3743 
3744 static int tcpm_pd_send_control(struct tcpm_port *port,
3745 				enum pd_ctrl_msg_type type,
3746 				enum tcpm_transmit_type tx_sop_type)
3747 {
3748 	struct pd_message msg;
3749 
3750 	memset(&msg, 0, sizeof(msg));
3751 	switch (tx_sop_type) {
3752 	case TCPC_TX_SOP_PRIME:
3753 		msg.header = PD_HEADER_LE(type,
3754 					  0,	/* Cable Plug Indicator for DFP/UFP */
3755 					  0,	/* Reserved */
3756 					  port->negotiated_rev,
3757 					  port->message_id_prime,
3758 					  0);
3759 		break;
3760 	case TCPC_TX_SOP:
3761 		msg.header = PD_HEADER_LE(type,
3762 					  port->pwr_role,
3763 					  port->data_role,
3764 					  port->negotiated_rev,
3765 					  port->message_id,
3766 					  0);
3767 		break;
3768 	default:
3769 		msg.header = PD_HEADER_LE(type,
3770 					  port->pwr_role,
3771 					  port->data_role,
3772 					  port->negotiated_rev,
3773 					  port->message_id,
3774 					  0);
3775 		break;
3776 	}
3777 
3778 	return tcpm_pd_transmit(port, tx_sop_type, &msg);
3779 }
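/*
 * Typical usage elsewhere in this file, e.g. accepting a request on SOP:
 *
 *	tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
 *
 * For TCPC_TX_SOP_PRIME the header carries the Cable Plug/reserved fields and
 * the SOP' MessageID counter instead of the port's power and data roles.
 */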
3780 
3781 /*
3782  * Send queued message without affecting state.
3783  * Return true if state machine should go back to sleep,
3784  * false otherwise.
3785  */
3786 static bool tcpm_send_queued_message(struct tcpm_port *port)
3787 {
3788 	enum pd_msg_request queued_message;
3789 	int ret;
3790 
3791 	do {
3792 		queued_message = port->queued_message;
3793 		port->queued_message = PD_MSG_NONE;
3794 
3795 		switch (queued_message) {
3796 		case PD_MSG_CTRL_WAIT:
3797 			tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
3798 			break;
3799 		case PD_MSG_CTRL_REJECT:
3800 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
3801 			break;
3802 		case PD_MSG_CTRL_NOT_SUPP:
3803 			tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
3804 			break;
3805 		case PD_MSG_DATA_SINK_CAP:
3806 			ret = tcpm_pd_send_sink_caps(port);
3807 			if (ret < 0) {
3808 				tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3809 				tcpm_set_state(port, SNK_SOFT_RESET, 0);
3810 			}
3811 			tcpm_ams_finish(port);
3812 			break;
3813 		case PD_MSG_DATA_SOURCE_CAP:
3814 			ret = tcpm_pd_send_source_caps(port);
3815 			if (ret < 0) {
3816 				tcpm_log(port,
3817 					 "Unable to send src caps, ret=%d",
3818 					 ret);
3819 				tcpm_set_state(port, SOFT_RESET_SEND, 0);
3820 			} else if (port->pwr_role == TYPEC_SOURCE) {
3821 				tcpm_ams_finish(port);
3822 				tcpm_set_state(port, HARD_RESET_SEND,
3823 					       PD_T_SENDER_RESPONSE);
3824 			} else {
3825 				tcpm_ams_finish(port);
3826 			}
3827 			break;
3828 		case PD_MSG_DATA_REV:
3829 			ret = tcpm_pd_send_revision(port);
3830 			if (ret)
3831 				tcpm_log(port,
3832 					 "Unable to send revision msg, ret=%d",
3833 					 ret);
3834 			tcpm_ams_finish(port);
3835 			break;
3836 		default:
3837 			break;
3838 		}
3839 	} while (port->queued_message != PD_MSG_NONE);
3840 
3841 	if (port->delayed_state != INVALID_STATE) {
3842 		if (ktime_after(port->delayed_runtime, ktime_get())) {
3843 			mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3844 									  ktime_get())));
3845 			return true;
3846 		}
3847 		port->delayed_state = INVALID_STATE;
3848 	}
3849 	return false;
3850 }
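/*
 * Messages queued with tcpm_queue_message() (for example PD_MSG_CTRL_WAIT
 * while a Discover Identity is still pending) are drained here without
 * touching the current state; the loop re-checks queued_message in case
 * another message was queued while the first one was being sent.
 */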
3851 
3852 static int tcpm_pd_check_request(struct tcpm_port *port)
3853 {
3854 	u32 pdo, rdo = port->sink_request;
3855 	unsigned int max, op, pdo_max, index;
3856 	enum pd_pdo_type type;
3857 
3858 	index = rdo_index(rdo);
3859 	if (!index || index > port->nr_src_pdo)
3860 		return -EINVAL;
3861 
3862 	pdo = port->src_pdo[index - 1];
3863 	type = pdo_type(pdo);
3864 	switch (type) {
3865 	case PDO_TYPE_FIXED:
3866 	case PDO_TYPE_VAR:
3867 		max = rdo_max_current(rdo);
3868 		op = rdo_op_current(rdo);
3869 		pdo_max = pdo_max_current(pdo);
3870 
3871 		if (op > pdo_max)
3872 			return -EINVAL;
3873 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3874 			return -EINVAL;
3875 
3876 		if (type == PDO_TYPE_FIXED)
3877 			tcpm_log(port,
3878 				 "Requested %u mV, %u mA for %u / %u mA",
3879 				 pdo_fixed_voltage(pdo), pdo_max, op, max);
3880 		else
3881 			tcpm_log(port,
3882 				 "Requested %u -> %u mV, %u mA for %u / %u mA",
3883 				 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3884 				 pdo_max, op, max);
3885 		break;
3886 	case PDO_TYPE_BATT:
3887 		max = rdo_max_power(rdo);
3888 		op = rdo_op_power(rdo);
3889 		pdo_max = pdo_max_power(pdo);
3890 
3891 		if (op > pdo_max)
3892 			return -EINVAL;
3893 		if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3894 			return -EINVAL;
3895 		tcpm_log(port,
3896 			 "Requested %u -> %u mV, %u mW for %u / %u mW",
3897 			 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3898 			 pdo_max, op, max);
3899 		break;
3900 	default:
3901 		return -EINVAL;
3902 	}
3903 
3904 	port->op_vsafe5v = index == 1;
3905 
3906 	return 0;
3907 }
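/*
 * Illustrative check against hypothetical capabilities: a Request selecting
 * object position 1 with 1500 mA operating and 3000 mA maximum current is
 * accepted against a 5 V / 3 A fixed source PDO (1500 <= 3000, 3000 <= 3000),
 * and op_vsafe5v is set because the first PDO was chosen.
 */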
3908 
3909 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3910 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3911 
3912 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3913 			      int *src_pdo)
3914 {
3915 	unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3916 		     max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3917 		     min_snk_mv = 0;
3918 	int ret = -EINVAL;
3919 
3920 	port->pps_data.supported = false;
3921 	port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3922 	power_supply_changed(port->psy);
3923 
3924 	/*
3925 	 * Select the source PDO providing the most power which has a
3926 	 * matching sink cap.
3927 	 */
3928 	for (i = 0; i < port->nr_source_caps; i++) {
3929 		u32 pdo = port->source_caps[i];
3930 		enum pd_pdo_type type = pdo_type(pdo);
3931 
3932 		switch (type) {
3933 		case PDO_TYPE_FIXED:
3934 			max_src_mv = pdo_fixed_voltage(pdo);
3935 			min_src_mv = max_src_mv;
3936 			break;
3937 		case PDO_TYPE_BATT:
3938 		case PDO_TYPE_VAR:
3939 			max_src_mv = pdo_max_voltage(pdo);
3940 			min_src_mv = pdo_min_voltage(pdo);
3941 			break;
3942 		case PDO_TYPE_APDO:
3943 			if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3944 				port->pps_data.supported = true;
3945 				port->usb_type =
3946 					POWER_SUPPLY_USB_TYPE_PD_PPS;
3947 				power_supply_changed(port->psy);
3948 			}
3949 			continue;
3950 		default:
3951 			tcpm_log(port, "Invalid source PDO type, ignoring");
3952 			continue;
3953 		}
3954 
3955 		switch (type) {
3956 		case PDO_TYPE_FIXED:
3957 		case PDO_TYPE_VAR:
3958 			src_ma = pdo_max_current(pdo);
3959 			src_mw = src_ma * min_src_mv / 1000;
3960 			break;
3961 		case PDO_TYPE_BATT:
3962 			src_mw = pdo_max_power(pdo);
3963 			break;
3964 		case PDO_TYPE_APDO:
3965 			continue;
3966 		default:
3967 			tcpm_log(port, "Invalid source PDO type, ignoring");
3968 			continue;
3969 		}
3970 
3971 		for (j = 0; j < port->nr_snk_pdo; j++) {
3972 			pdo = port->snk_pdo[j];
3973 
3974 			switch (pdo_type(pdo)) {
3975 			case PDO_TYPE_FIXED:
3976 				max_snk_mv = pdo_fixed_voltage(pdo);
3977 				min_snk_mv = max_snk_mv;
3978 				break;
3979 			case PDO_TYPE_BATT:
3980 			case PDO_TYPE_VAR:
3981 				max_snk_mv = pdo_max_voltage(pdo);
3982 				min_snk_mv = pdo_min_voltage(pdo);
3983 				break;
3984 			case PDO_TYPE_APDO:
3985 				continue;
3986 			default:
3987 				tcpm_log(port, "Invalid sink PDO type, ignoring");
3988 				continue;
3989 			}
3990 
3991 			if (max_src_mv <= max_snk_mv &&
3992 				min_src_mv >= min_snk_mv) {
3993 				/* Prefer higher voltages if available */
3994 				if ((src_mw == max_mw && min_src_mv > max_mv) ||
3995 							src_mw > max_mw) {
3996 					*src_pdo = i;
3997 					*sink_pdo = j;
3998 					max_mw = src_mw;
3999 					max_mv = min_src_mv;
4000 					ret = 0;
4001 				}
4002 			}
4003 		}
4004 	}
4005 
4006 	return ret;
4007 }
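/*
 * Illustrative selection with hypothetical capabilities: source caps of
 * { 5 V / 3 A, 9 V / 3 A } fixed PDOs matched against sink caps of
 * { 5 V / 3 A, 9 V / 2 A } pick the 9 V source PDO, since 27 W beats 15 W
 * and the 9 V sink PDO satisfies the voltage-range check above.
 */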
4008 
4009 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
4010 {
4011 	unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
4012 	unsigned int src_pdo = 0;
4013 	u32 pdo, src;
4014 
4015 	for (i = 1; i < port->nr_source_caps; ++i) {
4016 		pdo = port->source_caps[i];
4017 
4018 		switch (pdo_type(pdo)) {
4019 		case PDO_TYPE_APDO:
4020 			if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
4021 				tcpm_log(port, "Not PPS APDO (source), ignoring");
4022 				continue;
4023 			}
4024 
4025 			if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
4026 			    port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
4027 				continue;
4028 
4029 			src_ma = pdo_pps_apdo_max_current(pdo);
4030 			max_op_ma = min(src_ma, port->pps_data.req_op_curr);
4031 			op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
4032 			if (op_mw > max_temp_mw) {
4033 				src_pdo = i;
4034 				max_temp_mw = op_mw;
4035 			}
4036 			break;
4037 		default:
4038 			tcpm_log(port, "Not APDO type (source), ignoring");
4039 			continue;
4040 		}
4041 	}
4042 
4043 	if (src_pdo) {
4044 		src = port->source_caps[src_pdo];
4045 
4046 		port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
4047 		port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
4048 		port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
4049 		port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
4050 						 port->pps_data.req_op_curr);
4051 	}
4052 
4053 	return src_pdo;
4054 }
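/*
 * Illustrative APDO selection with hypothetical capabilities: a 3.3-11 V /
 * 3 A PPS APDO evaluated with req_out_volt = 9000 mV and req_op_curr =
 * 2000 mA yields op_mw = 2000 * 9000 / 1000 = 18000, so it is preferred over
 * any APDO whose range excludes 9 V or whose computed operating power is
 * lower.
 */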
4055 
4056 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
4057 {
4058 	unsigned int mv, ma, mw, flags;
4059 	unsigned int max_ma, max_mw;
4060 	enum pd_pdo_type type;
4061 	u32 pdo, matching_snk_pdo;
4062 	int src_pdo_index = 0;
4063 	int snk_pdo_index = 0;
4064 	int ret;
4065 
4066 	ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
4067 	if (ret < 0)
4068 		return ret;
4069 
4070 	pdo = port->source_caps[src_pdo_index];
4071 	matching_snk_pdo = port->snk_pdo[snk_pdo_index];
4072 	type = pdo_type(pdo);
4073 
4074 	switch (type) {
4075 	case PDO_TYPE_FIXED:
4076 		mv = pdo_fixed_voltage(pdo);
4077 		break;
4078 	case PDO_TYPE_BATT:
4079 	case PDO_TYPE_VAR:
4080 		mv = pdo_min_voltage(pdo);
4081 		break;
4082 	default:
4083 		tcpm_log(port, "Invalid PDO selected!");
4084 		return -EINVAL;
4085 	}
4086 
4087 	/* Select maximum available current within the sink pdo's limit */
4088 	if (type == PDO_TYPE_BATT) {
4089 		mw = min_power(pdo, matching_snk_pdo);
4090 		ma = 1000 * mw / mv;
4091 	} else {
4092 		ma = min_current(pdo, matching_snk_pdo);
4093 		mw = ma * mv / 1000;
4094 	}
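	/*
	 * Worked example with hypothetical capabilities: a 9 V / 3 A fixed
	 * source PDO matched against a 9 V / 2 A fixed sink PDO gives
	 * ma = min(3000, 2000) = 2000 and mw = 2000 * 9000 / 1000 = 18000.
	 */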
4095 
4096 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4097 
4098 	/* Set mismatch bit if offered power is less than operating power */
4099 	max_ma = ma;
4100 	max_mw = mw;
4101 	if (mw < port->operating_snk_mw) {
4102 		flags |= RDO_CAP_MISMATCH;
4103 		if (type == PDO_TYPE_BATT &&
4104 		    (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
4105 			max_mw = pdo_max_power(matching_snk_pdo);
4106 		else if (pdo_max_current(matching_snk_pdo) >
4107 			 pdo_max_current(pdo))
4108 			max_ma = pdo_max_current(matching_snk_pdo);
4109 	}
4110 
4111 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4112 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4113 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4114 		 port->polarity);
4115 
4116 	if (type == PDO_TYPE_BATT) {
4117 		*rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
4118 
4119 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
4120 			 src_pdo_index, mv, mw,
4121 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4122 	} else {
4123 		*rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
4124 
4125 		tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
4126 			 src_pdo_index, mv, ma,
4127 			 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4128 	}
4129 
4130 	port->req_current_limit = ma;
4131 	port->req_supply_voltage = mv;
4132 
4133 	return 0;
4134 }
4135 
4136 static int tcpm_pd_send_request(struct tcpm_port *port)
4137 {
4138 	struct pd_message msg;
4139 	int ret;
4140 	u32 rdo;
4141 
4142 	ret = tcpm_pd_build_request(port, &rdo);
4143 	if (ret < 0)
4144 		return ret;
4145 
4146 	/*
4147 	 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
4148 	 * It is safer to modify the threshold here.
4149 	 */
4150 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4151 
4152 	memset(&msg, 0, sizeof(msg));
4153 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4154 				  port->pwr_role,
4155 				  port->data_role,
4156 				  port->negotiated_rev,
4157 				  port->message_id, 1);
4158 	msg.payload[0] = cpu_to_le32(rdo);
4159 
4160 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4161 }
4162 
4163 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
4164 {
4165 	unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
4166 	unsigned int src_pdo_index;
4167 
4168 	src_pdo_index = tcpm_pd_select_pps_apdo(port);
4169 	if (!src_pdo_index)
4170 		return -EOPNOTSUPP;
4171 
4172 	max_mv = port->pps_data.req_max_volt;
4173 	max_ma = port->pps_data.req_max_curr;
4174 	out_mv = port->pps_data.req_out_volt;
4175 	op_ma = port->pps_data.req_op_curr;
4176 
4177 	flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4178 
4179 	op_mw = (op_ma * out_mv) / 1000;
4180 	if (op_mw < port->operating_snk_mw) {
4181 		/*
4182 		 * Try raising current to meet power needs. If that's not enough
4183 		 * then try upping the voltage. If that's still not enough
4184 		 * then we've obviously chosen a PPS APDO which really isn't
4185 		 * suitable so abandon ship.
4186 		 */
4187 		op_ma = (port->operating_snk_mw * 1000) / out_mv;
4188 		if ((port->operating_snk_mw * 1000) % out_mv)
4189 			++op_ma;
4190 		op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
4191 
4192 		if (op_ma > max_ma) {
4193 			op_ma = max_ma;
4194 			out_mv = (port->operating_snk_mw * 1000) / op_ma;
4195 			if ((port->operating_snk_mw * 1000) % op_ma)
4196 				++out_mv;
4197 			out_mv += RDO_PROG_VOLT_MV_STEP -
4198 				  (out_mv % RDO_PROG_VOLT_MV_STEP);
4199 
4200 			if (out_mv > max_mv) {
4201 				tcpm_log(port, "Invalid PPS APDO selected!");
4202 				return -EINVAL;
4203 			}
4204 		}
4205 	}
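	/*
	 * Worked example with hypothetical numbers, assuming the usual 50 mA
	 * RDO_PROG_CURR_MA_STEP: operating_snk_mw = 10000 and out_mv = 5700
	 * give op_ma = 10000000 / 5700 = 1754, bumped to 1755 for the non-zero
	 * remainder and then rounded up to the next step, i.e. 1800 mA.
	 */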
4206 
4207 	tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4208 		 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4209 		 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4210 		 port->polarity);
4211 
4212 	*rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
4213 
4214 	tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
4215 		 src_pdo_index, out_mv, op_ma);
4216 
4217 	port->pps_data.req_op_curr = op_ma;
4218 	port->pps_data.req_out_volt = out_mv;
4219 
4220 	return 0;
4221 }
4222 
4223 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
4224 {
4225 	struct pd_message msg;
4226 	int ret;
4227 	u32 rdo;
4228 
4229 	ret = tcpm_pd_build_pps_request(port, &rdo);
4230 	if (ret < 0)
4231 		return ret;
4232 
4233 	/* Relax the threshold as voltage will be adjusted right after Accept Message. */
4234 	tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4235 
4236 	memset(&msg, 0, sizeof(msg));
4237 	msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4238 				  port->pwr_role,
4239 				  port->data_role,
4240 				  port->negotiated_rev,
4241 				  port->message_id, 1);
4242 	msg.payload[0] = cpu_to_le32(rdo);
4243 
4244 	return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4245 }
4246 
4247 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
4248 {
4249 	int ret;
4250 
4251 	if (enable && port->vbus_charge)
4252 		return -EINVAL;
4253 
4254 	tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
4255 
4256 	ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
4257 	if (ret < 0)
4258 		return ret;
4259 
4260 	port->vbus_source = enable;
4261 	return 0;
4262 }
4263 
4264 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
4265 {
4266 	int ret;
4267 
4268 	if (charge && port->vbus_source)
4269 		return -EINVAL;
4270 
4271 	if (charge != port->vbus_charge) {
4272 		tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
4273 		ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
4274 					   charge);
4275 		if (ret < 0)
4276 			return ret;
4277 	}
4278 	port->vbus_charge = charge;
4279 	power_supply_changed(port->psy);
4280 	return 0;
4281 }
4282 
4283 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4284 {
4285 	int ret;
4286 
4287 	if (!port->tcpc->start_toggling)
4288 		return false;
4289 
4290 	tcpm_log_force(port, "Start toggling");
4291 	ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4292 	return ret == 0;
4293 }
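/*
 * When the TCPC does not implement start_toggling(), this returns false and
 * the SRC_UNATTACHED/SNK_UNATTACHED handlers fall back to driving CC directly
 * and alternating between the two unattached states with PD_T_DRP_SRC /
 * PD_T_DRP_SNK timeouts.
 */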
4294 
4295 static int tcpm_init_vbus(struct tcpm_port *port)
4296 {
4297 	int ret;
4298 
4299 	ret = port->tcpc->set_vbus(port->tcpc, false, false);
4300 	port->vbus_source = false;
4301 	port->vbus_charge = false;
4302 	return ret;
4303 }
4304 
4305 static int tcpm_init_vconn(struct tcpm_port *port)
4306 {
4307 	int ret;
4308 
4309 	ret = port->tcpc->set_vconn(port->tcpc, false);
4310 	port->vconn_role = TYPEC_SINK;
4311 	return ret;
4312 }
4313 
4314 static void tcpm_typec_connect(struct tcpm_port *port)
4315 {
4316 	struct typec_partner *partner;
4317 
4318 	if (!port->connected) {
4319 		port->connected = true;
4320 		/* Make sure we don't report stale identity information */
4321 		memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4322 		port->partner_desc.usb_pd = port->pd_capable;
4323 		if (tcpm_port_is_debug(port))
4324 			port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4325 		else if (tcpm_port_is_audio(port))
4326 			port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4327 		else
4328 			port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4329 		partner = typec_register_partner(port->typec_port, &port->partner_desc);
4330 		if (IS_ERR(partner)) {
4331 			dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4332 			return;
4333 		}
4334 
4335 		port->partner = partner;
4336 		typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4337 	}
4338 }
4339 
4340 static int tcpm_src_attach(struct tcpm_port *port)
4341 {
4342 	enum typec_cc_polarity polarity =
4343 				port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
4344 							 : TYPEC_POLARITY_CC1;
4345 	int ret;
4346 
4347 	if (port->attached)
4348 		return 0;
4349 
4350 	ret = tcpm_set_polarity(port, polarity);
4351 	if (ret < 0)
4352 		return ret;
4353 
4354 	tcpm_enable_auto_vbus_discharge(port, true);
4355 
4356 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
4357 	if (ret < 0)
4358 		return ret;
4359 
4360 	if (port->pd_supported) {
4361 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
4362 		if (ret < 0)
4363 			goto out_disable_mux;
4364 	}
4365 
4366 	/*
4367 	 * USB Type-C specification, version 1.2,
4368 	 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
4369 	 * Enable VCONN only if the non-RD port is set to RA.
4370 	 */
4371 	if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
4372 	    (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
4373 		ret = tcpm_set_vconn(port, true);
4374 		if (ret < 0)
4375 			goto out_disable_pd;
4376 	}
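	/*
	 * Illustrative case: with the orientation resolved to CC1 and Ra seen
	 * on CC2 (e.g. a cable that requires VCONN), VCONN is sourced on the
	 * unused CC pin before VBUS is enabled below.
	 */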
4377 
4378 	ret = tcpm_set_vbus(port, true);
4379 	if (ret < 0)
4380 		goto out_disable_vconn;
4381 
4382 	port->pd_capable = false;
4383 
4384 	port->partner = NULL;
4385 
4386 	port->attached = true;
4387 	port->send_discover = true;
4388 	port->send_discover_prime = false;
4389 
4390 	return 0;
4391 
4392 out_disable_vconn:
4393 	tcpm_set_vconn(port, false);
4394 out_disable_pd:
4395 	if (port->pd_supported)
4396 		port->tcpc->set_pd_rx(port->tcpc, false);
4397 out_disable_mux:
4398 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4399 		     TYPEC_ORIENTATION_NONE);
4400 	return ret;
4401 }
4402 
4403 static void tcpm_typec_disconnect(struct tcpm_port *port)
4404 {
4405 	/*
4406 	 * Unregister the plug/cable outside of the port->connected check because
4407 	 * the cable can be discovered before the SRC_READY/SNK_READY states, where
4408 	 * port->connected is set.
4409 	 */
4410 	typec_unregister_plug(port->plug_prime);
4411 	typec_unregister_cable(port->cable);
4412 	port->plug_prime = NULL;
4413 	port->cable = NULL;
4414 	if (port->connected) {
4415 		if (port->partner) {
4416 			typec_partner_set_usb_power_delivery(port->partner, NULL);
4417 			typec_unregister_partner(port->partner);
4418 			port->partner = NULL;
4419 		}
4420 		port->connected = false;
4421 	}
4422 }
4423 
4424 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4425 {
4426 	struct pd_mode_data *modep = &port->mode_data;
4427 	struct pd_mode_data *modep_prime = &port->mode_data_prime;
4428 	int i;
4429 
4430 	for (i = 0; i < modep->altmodes; i++) {
4431 		typec_unregister_altmode(port->partner_altmode[i]);
4432 		port->partner_altmode[i] = NULL;
4433 	}
4434 	for (i = 0; i < modep_prime->altmodes; i++) {
4435 		typec_unregister_altmode(port->plug_prime_altmode[i]);
4436 		port->plug_prime_altmode[i] = NULL;
4437 	}
4438 
4439 	memset(modep, 0, sizeof(*modep));
4440 	memset(modep_prime, 0, sizeof(*modep_prime));
4441 }
4442 
4443 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
4444 {
4445 	tcpm_log(port, "Setting usb_comm capable %s", str_true_false(capable));
4446 
4447 	if (port->tcpc->set_partner_usb_comm_capable)
4448 		port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
4449 }
4450 
4451 static void tcpm_reset_port(struct tcpm_port *port)
4452 {
4453 	tcpm_enable_auto_vbus_discharge(port, false);
4454 	port->in_ams = false;
4455 	port->ams = NONE_AMS;
4456 	port->vdm_sm_running = false;
4457 	tcpm_unregister_altmodes(port);
4458 	tcpm_typec_disconnect(port);
4459 	port->attached = false;
4460 	port->pd_capable = false;
4461 	port->pps_data.supported = false;
4462 	tcpm_set_partner_usb_comm_capable(port, false);
4463 
4464 	/*
4465 	 * First Rx ID should be 0; set this to a sentinel of -1 so that
4466 	 * tcpm_pd_rx_handler() can check whether we have seen it before.
4467 	 */
4468 	port->rx_msgid = -1;
4469 	port->rx_msgid_prime = -1;
4470 
4471 	port->tcpc->set_pd_rx(port->tcpc, false);
4472 	tcpm_init_vbus(port);	/* also disables charging */
4473 	tcpm_init_vconn(port);
4474 	tcpm_set_current_limit(port, 0, 0);
4475 	tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
4476 	tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4477 		     TYPEC_ORIENTATION_NONE);
4478 	tcpm_set_attached_state(port, false);
4479 	port->try_src_count = 0;
4480 	port->try_snk_count = 0;
4481 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
4482 	power_supply_changed(port->psy);
4483 	port->nr_sink_caps = 0;
4484 	port->sink_cap_done = false;
4485 	if (port->tcpc->enable_frs)
4486 		port->tcpc->enable_frs(port->tcpc, false);
4487 
4488 	usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
4489 	port->partner_sink_caps = NULL;
4490 	usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4491 	port->partner_source_caps = NULL;
4492 	usb_power_delivery_unregister(port->partner_pd);
4493 	port->partner_pd = NULL;
4494 }
4495 
4496 static void tcpm_detach(struct tcpm_port *port)
4497 {
4498 	if (tcpm_port_is_disconnected(port))
4499 		port->hard_reset_count = 0;
4500 
4501 	if (!port->attached)
4502 		return;
4503 
4504 	if (port->tcpc->set_bist_data) {
4505 		tcpm_log(port, "disable BIST MODE TESTDATA");
4506 		port->tcpc->set_bist_data(port->tcpc, false);
4507 	}
4508 
4509 	tcpm_reset_port(port);
4510 }
4511 
4512 static void tcpm_src_detach(struct tcpm_port *port)
4513 {
4514 	tcpm_detach(port);
4515 }
4516 
4517 static int tcpm_snk_attach(struct tcpm_port *port)
4518 {
4519 	int ret;
4520 
4521 	if (port->attached)
4522 		return 0;
4523 
4524 	ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
4525 				TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
4526 	if (ret < 0)
4527 		return ret;
4528 
4529 	tcpm_enable_auto_vbus_discharge(port, true);
4530 
4531 	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
4532 	if (ret < 0)
4533 		return ret;
4534 
4535 	port->pd_capable = false;
4536 
4537 	port->partner = NULL;
4538 
4539 	port->attached = true;
4540 	port->send_discover = true;
4541 	port->send_discover_prime = false;
4542 
4543 	return 0;
4544 }
4545 
4546 static void tcpm_snk_detach(struct tcpm_port *port)
4547 {
4548 	tcpm_detach(port);
4549 }
4550 
4551 static int tcpm_acc_attach(struct tcpm_port *port)
4552 {
4553 	int ret;
4554 
4555 	if (port->attached)
4556 		return 0;
4557 
4558 	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
4559 			     tcpm_data_role_for_source(port));
4560 	if (ret < 0)
4561 		return ret;
4562 
4563 	port->partner = NULL;
4564 
4565 	tcpm_typec_connect(port);
4566 
4567 	port->attached = true;
4568 
4569 	return 0;
4570 }
4571 
4572 static void tcpm_acc_detach(struct tcpm_port *port)
4573 {
4574 	tcpm_detach(port);
4575 }
4576 
4577 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
4578 {
4579 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
4580 		return HARD_RESET_SEND;
4581 	if (port->pd_capable)
4582 		return ERROR_RECOVERY;
4583 	if (port->pwr_role == TYPEC_SOURCE)
4584 		return SRC_UNATTACHED;
4585 	if (port->state == SNK_WAIT_CAPABILITIES ||
4586 	    port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
4587 		return SNK_READY;
4588 	return SNK_UNATTACHED;
4589 }
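/*
 * Illustrative outcomes: once PD_N_HARD_RESET_COUNT hard resets have been
 * exhausted, a PD-capable port escalates to ERROR_RECOVERY, whereas a
 * non-PD-capable sink stuck in SNK_WAIT_CAPABILITIES simply settles in
 * SNK_READY instead of detaching.
 */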
4590 
4591 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4592 {
4593 	if (port->port_type == TYPEC_PORT_DRP) {
4594 		if (port->pwr_role == TYPEC_SOURCE)
4595 			return SRC_UNATTACHED;
4596 		else
4597 			return SNK_UNATTACHED;
4598 	} else if (port->port_type == TYPEC_PORT_SRC) {
4599 		return SRC_UNATTACHED;
4600 	}
4601 
4602 	return SNK_UNATTACHED;
4603 }
4604 
4605 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4606 {
4607 	if (port->swap_pending) {
4608 		port->swap_status = result;
4609 		port->swap_pending = false;
4610 		port->non_pd_role_swap = false;
4611 		complete(&port->swap_complete);
4612 	}
4613 }
4614 
4615 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4616 {
4617 	switch (cc) {
4618 	case TYPEC_CC_RP_1_5:
4619 		return TYPEC_PWR_MODE_1_5A;
4620 	case TYPEC_CC_RP_3_0:
4621 		return TYPEC_PWR_MODE_3_0A;
4622 	case TYPEC_CC_RP_DEF:
4623 	default:
4624 		return TYPEC_PWR_MODE_USB;
4625 	}
4626 }
4627 
4628 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4629 {
4630 	switch (opmode) {
4631 	case TYPEC_PWR_MODE_USB:
4632 		return TYPEC_CC_RP_DEF;
4633 	case TYPEC_PWR_MODE_1_5A:
4634 		return TYPEC_CC_RP_1_5;
4635 	case TYPEC_PWR_MODE_3_0A:
4636 	case TYPEC_PWR_MODE_PD:
4637 	default:
4638 		return TYPEC_CC_RP_3_0;
4639 	}
4640 }
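/*
 * Note that the two mappings above are not symmetric: an advertised Rp of
 * default / 1.5 A / 3.0 A maps to the matching Type-C power opmode, while a
 * port already operating in TYPEC_PWR_MODE_PD maps back to advertising
 * Rp 3.0 A on CC.
 */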
4641 
4642 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4643 {
4644 	if (!port->partner)
4645 		return;
4646 
4647 	switch (port->negotiated_rev) {
4648 	case PD_REV30:
4649 		break;
4650 	/*
4651 	 * 6.4.4.2.3 Structured VDM Version
4652 	 * 2.0 states "At this time, there is only one version (1.0) defined.
4653 	 * This field Shall be set to zero to indicate Version 1.0."
4654 	 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4655 	 * To ensure that we follow the Power Delivery revision we are currently
4656 	 * operating on, downgrade the SVDM version to the highest one supported
4657 	 * by the Power Delivery revision.
4658 	 */
4659 	case PD_REV20:
4660 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4661 		break;
4662 	default:
4663 		typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4664 		break;
4665 	}
4666 }
4667 
4668 static void run_state_machine(struct tcpm_port *port)
4669 {
4670 	int ret;
4671 	enum typec_pwr_opmode opmode;
4672 	unsigned int msecs;
4673 	enum tcpm_state upcoming_state;
4674 
4675 	if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4676 		port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4677 						port->state == SRC_UNATTACHED) ||
4678 					       (port->enter_state == SNK_ATTACH_WAIT &&
4679 						port->state == SNK_UNATTACHED) ||
4680 					       (port->enter_state == SNK_DEBOUNCED &&
4681 						port->state == SNK_UNATTACHED));
4682 
4683 	port->enter_state = port->state;
4684 	switch (port->state) {
4685 	case TOGGLING:
4686 		break;
4687 	case CHECK_CONTAMINANT:
4688 		port->tcpc->check_contaminant(port->tcpc);
4689 		break;
4690 	/* SRC states */
4691 	case SRC_UNATTACHED:
4692 		if (!port->non_pd_role_swap)
4693 			tcpm_swap_complete(port, -ENOTCONN);
4694 		tcpm_src_detach(port);
4695 		if (port->potential_contaminant) {
4696 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4697 			break;
4698 		}
4699 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4700 			tcpm_set_state(port, TOGGLING, 0);
4701 			break;
4702 		}
4703 		tcpm_set_cc(port, tcpm_rp_cc(port));
4704 		if (port->port_type == TYPEC_PORT_DRP)
4705 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4706 		break;
4707 	case SRC_ATTACH_WAIT:
4708 		if (tcpm_port_is_debug(port))
4709 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4710 				       port->timings.cc_debounce_time);
4711 		else if (tcpm_port_is_audio(port))
4712 			tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4713 				       port->timings.cc_debounce_time);
4714 		else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4715 			tcpm_set_state(port,
4716 				       tcpm_try_snk(port) ? SNK_TRY
4717 							  : SRC_ATTACHED,
4718 				       port->timings.cc_debounce_time);
4719 		break;
4720 
4721 	case SNK_TRY:
4722 		port->try_snk_count++;
4723 		/*
4724 		 * Requirements:
4725 		 * - Do not drive vconn or vbus
4726 		 * - Terminate CC pins (both) to Rd
4727 		 * Action:
4728 		 * - Wait for tDRPTry (PD_T_DRP_TRY).
4729 		 *   Until then, ignore any state changes.
4730 		 */
4731 		tcpm_set_cc(port, TYPEC_CC_RD);
4732 		tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4733 		break;
4734 	case SNK_TRY_WAIT:
4735 		if (tcpm_port_is_sink(port)) {
4736 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4737 		} else {
4738 			tcpm_set_state(port, SRC_TRYWAIT, 0);
4739 			port->max_wait = 0;
4740 		}
4741 		break;
4742 	case SNK_TRY_WAIT_DEBOUNCE:
4743 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4744 			       PD_T_TRY_CC_DEBOUNCE);
4745 		break;
4746 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4747 		if (port->vbus_present && tcpm_port_is_sink(port))
4748 			tcpm_set_state(port, SNK_ATTACHED, 0);
4749 		else
4750 			port->max_wait = 0;
4751 		break;
4752 	case SRC_TRYWAIT:
4753 		tcpm_set_cc(port, tcpm_rp_cc(port));
4754 		if (port->max_wait == 0) {
4755 			port->max_wait = jiffies +
4756 					 msecs_to_jiffies(PD_T_DRP_TRY);
4757 			tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4758 				       PD_T_DRP_TRY);
4759 		} else {
4760 			if (time_is_after_jiffies(port->max_wait))
4761 				tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4762 					       jiffies_to_msecs(port->max_wait -
4763 								jiffies));
4764 			else
4765 				tcpm_set_state(port, SNK_UNATTACHED, 0);
4766 		}
4767 		break;
4768 	case SRC_TRYWAIT_DEBOUNCE:
4769 		tcpm_set_state(port, SRC_ATTACHED, port->timings.cc_debounce_time);
4770 		break;
4771 	case SRC_TRYWAIT_UNATTACHED:
4772 		tcpm_set_state(port, SNK_UNATTACHED, 0);
4773 		break;
4774 
4775 	case SRC_ATTACHED:
4776 		ret = tcpm_src_attach(port);
4777 		tcpm_set_state(port, SRC_UNATTACHED,
4778 			       ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4779 		break;
4780 	case SRC_STARTUP:
4781 		opmode =  tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4782 		typec_set_pwr_opmode(port->typec_port, opmode);
4783 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
4784 		port->caps_count = 0;
4785 		port->negotiated_rev = PD_MAX_REV;
4786 		port->negotiated_rev_prime = PD_MAX_REV;
4787 		port->message_id = 0;
4788 		port->message_id_prime = 0;
4789 		port->rx_msgid = -1;
4790 		port->rx_msgid_prime = -1;
4791 		port->explicit_contract = false;
4792 		/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4793 		if (port->ams == POWER_ROLE_SWAP ||
4794 		    port->ams == FAST_ROLE_SWAP)
4795 			tcpm_ams_finish(port);
4796 		if (!port->pd_supported) {
4797 			tcpm_set_state(port, SRC_READY, 0);
4798 			break;
4799 		}
4800 		port->upcoming_state = SRC_SEND_CAPABILITIES;
4801 		tcpm_ams_start(port, POWER_NEGOTIATION);
4802 		break;
4803 	case SRC_SEND_CAPABILITIES:
4804 		port->caps_count++;
4805 		if (port->caps_count > PD_N_CAPS_COUNT) {
4806 			tcpm_set_state(port, SRC_READY, 0);
4807 			break;
4808 		}
4809 		ret = tcpm_pd_send_source_caps(port);
4810 		if (ret < 0) {
4811 			if (tcpm_can_communicate_sop_prime(port) &&
4812 			    IS_ERR_OR_NULL(port->cable))
4813 				tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4814 			else
4815 				tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4816 					       PD_T_SEND_SOURCE_CAP);
4817 		} else {
4818 			/*
4819 			 * Per standard, we should clear the reset counter here.
4820 			 * However, that can result in state machine hang-ups.
4821 			 * Reset it only in READY state to improve stability.
4822 			 */
4823 			/* port->hard_reset_count = 0; */
4824 			port->caps_count = 0;
4825 			port->pd_capable = true;
4826 			tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4827 					    PD_T_SENDER_RESPONSE);
4828 		}
4829 		break;
4830 	case SRC_SEND_CAPABILITIES_TIMEOUT:
4831 		/*
4832 		 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4833 		 *
4834 		 * PD 2.0 sinks are supposed to accept src-capabilities with a
4835 		 * 3.0 header and simply ignore any src PDOs which the sink does
4836 		 * not understand such as PPS but some 2.0 sinks instead ignore
4837 		 * the entire PD_DATA_SOURCE_CAP message, causing contract
4838 		 * negotiation to fail.
4839 		 *
4840 		 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4841 		 * sending src-capabilities with a lower PD revision to
4842 		 * make these broken sinks work.
4843 		 */
4844 		if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4845 			tcpm_set_state(port, HARD_RESET_SEND, 0);
4846 		} else if (port->negotiated_rev > PD_REV20) {
4847 			port->negotiated_rev--;
4848 			port->hard_reset_count = 0;
4849 			tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4850 		} else {
4851 			tcpm_set_state(port, hard_reset_state(port), 0);
4852 		}
4853 		break;
4854 	case SRC_NEGOTIATE_CAPABILITIES:
4855 		ret = tcpm_pd_check_request(port);
4856 		if (ret < 0) {
4857 			tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4858 			if (!port->explicit_contract) {
4859 				tcpm_set_state(port,
4860 					       SRC_WAIT_NEW_CAPABILITIES, 0);
4861 			} else {
4862 				tcpm_set_state(port, SRC_READY, 0);
4863 			}
4864 		} else {
4865 			tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4866 			tcpm_set_partner_usb_comm_capable(port,
4867 							  !!(port->sink_request & RDO_USB_COMM));
4868 			tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4869 				       PD_T_SRC_TRANSITION);
4870 		}
4871 		break;
4872 	case SRC_TRANSITION_SUPPLY:
4873 		/* XXX: regulator_set_voltage(vbus, ...) */
4874 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4875 		port->explicit_contract = true;
4876 		typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4877 		port->pwr_opmode = TYPEC_PWR_MODE_PD;
4878 		tcpm_set_state_cond(port, SRC_READY, 0);
4879 		break;
4880 	case SRC_READY:
4881 #if 1
4882 		port->hard_reset_count = 0;
4883 #endif
4884 		port->try_src_count = 0;
4885 
4886 		tcpm_swap_complete(port, 0);
4887 		tcpm_typec_connect(port);
4888 
4889 		if (port->ams != NONE_AMS)
4890 			tcpm_ams_finish(port);
4891 		if (port->next_ams != NONE_AMS) {
4892 			port->ams = port->next_ams;
4893 			port->next_ams = NONE_AMS;
4894 		}
4895 
4896 		/*
4897 		 * If previous AMS is interrupted, switch to the upcoming
4898 		 * state.
4899 		 */
4900 		if (port->upcoming_state != INVALID_STATE) {
4901 			upcoming_state = port->upcoming_state;
4902 			port->upcoming_state = INVALID_STATE;
4903 			tcpm_set_state(port, upcoming_state, 0);
4904 			break;
4905 		}
4906 
4907 		/*
4908 		 * 6.4.4.3.1 Discover Identity
4909 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
4910 		 * Explicit Contract."
4911 		 *
4912 		 * Discover Identity on SOP' should be discovered prior to the
4913 		 * ready state, but if done after a Vconn Swap following Discover
4914 		 * Identity on SOP then the discovery process can be run here
4915 		 * as well.
4916 		 */
4917 		if (port->explicit_contract) {
4918 			if (port->send_discover_prime) {
4919 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
4920 			} else {
4921 				port->tx_sop_type = TCPC_TX_SOP;
4922 				tcpm_set_initial_svdm_version(port);
4923 			}
4924 			mod_send_discover_delayed_work(port, 0);
4925 		} else {
4926 			port->send_discover = false;
4927 			port->send_discover_prime = false;
4928 		}
4929 
4930 		/*
4931 		 * 6.3.5
4932 		 * Sending ping messages is not necessary if
4933 		 * - the source operates at vSafe5V
4934 		 * or
4935 		 * - The system is not operating in PD mode
4936 		 * or
4937 		 * - Both partners are connected using a Type-C connector
4938 		 *
4939 		 * There is no actual need to send PD messages since the local
4940 		 * port is Type-C, and the spec does not clearly say whether PD is
4941 		 * possible when Type-C is connected to Type-A/B.
4942 		 */
4943 		break;
4944 	case SRC_WAIT_NEW_CAPABILITIES:
4945 		/* Nothing to do... */
4946 		break;
4947 
4948 	/* SNK states */
4949 	case SNK_UNATTACHED:
4950 		if (!port->non_pd_role_swap)
4951 			tcpm_swap_complete(port, -ENOTCONN);
4952 		tcpm_pps_complete(port, -ENOTCONN);
4953 		tcpm_snk_detach(port);
4954 		if (port->potential_contaminant) {
4955 			tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4956 			break;
4957 		}
4958 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4959 			tcpm_set_state(port, TOGGLING, 0);
4960 			break;
4961 		}
4962 		tcpm_set_cc(port, TYPEC_CC_RD);
4963 		if (port->port_type == TYPEC_PORT_DRP)
4964 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4965 		break;
4966 	case SNK_ATTACH_WAIT:
4967 		if ((port->cc1 == TYPEC_CC_OPEN &&
4968 		     port->cc2 != TYPEC_CC_OPEN) ||
4969 		    (port->cc1 != TYPEC_CC_OPEN &&
4970 		     port->cc2 == TYPEC_CC_OPEN))
4971 			tcpm_set_state(port, SNK_DEBOUNCED,
4972 				       port->timings.cc_debounce_time);
4973 		else if (tcpm_port_is_disconnected(port))
4974 			tcpm_set_state(port, SNK_UNATTACHED,
4975 				       PD_T_PD_DEBOUNCE);
4976 		break;
4977 	case SNK_DEBOUNCED:
4978 		if (tcpm_port_is_disconnected(port))
4979 			tcpm_set_state(port, SNK_UNATTACHED,
4980 				       PD_T_PD_DEBOUNCE);
4981 		else if (port->vbus_present)
4982 			tcpm_set_state(port,
4983 				       tcpm_try_src(port) ? SRC_TRY
4984 							  : SNK_ATTACHED,
4985 				       0);
4986 		break;
4987 	case SRC_TRY:
4988 		port->try_src_count++;
4989 		tcpm_set_cc(port, tcpm_rp_cc(port));
4990 		port->max_wait = 0;
4991 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
4992 		break;
4993 	case SRC_TRY_WAIT:
4994 		if (port->max_wait == 0) {
4995 			port->max_wait = jiffies +
4996 					 msecs_to_jiffies(PD_T_DRP_TRY);
4997 			msecs = PD_T_DRP_TRY;
4998 		} else {
4999 			if (time_is_after_jiffies(port->max_wait))
5000 				msecs = jiffies_to_msecs(port->max_wait -
5001 							 jiffies);
5002 			else
5003 				msecs = 0;
5004 		}
5005 		tcpm_set_state(port, SNK_TRYWAIT, msecs);
5006 		break;
5007 	case SRC_TRY_DEBOUNCE:
5008 		tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
5009 		break;
5010 	case SNK_TRYWAIT:
5011 		tcpm_set_cc(port, TYPEC_CC_RD);
5012 		tcpm_set_state(port, SNK_TRYWAIT_VBUS, port->timings.cc_debounce_time);
5013 		break;
5014 	case SNK_TRYWAIT_VBUS:
5015 		/*
5016 		 * TCPM stays in this state indefinitely waiting for VBUS,
5017 		 * as long as Rp does not disappear for more than a period
5018 		 * of tPDDebounce.
5019 		 */
5020 		if (port->vbus_present && tcpm_port_is_sink(port)) {
5021 			tcpm_set_state(port, SNK_ATTACHED, 0);
5022 			break;
5023 		}
5024 		if (!tcpm_port_is_sink(port))
5025 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5026 		break;
5027 	case SNK_TRYWAIT_DEBOUNCE:
5028 		tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
5029 		break;
5030 	case SNK_ATTACHED:
5031 		ret = tcpm_snk_attach(port);
5032 		if (ret < 0)
5033 			tcpm_set_state(port, SNK_UNATTACHED, 0);
5034 		else
5035 			/*
5036 			 * For Type C port controllers that use Battery Charging
5037 			 * Detection (based on BCv1.2 spec) to detect USB
5038 			 * charger type, add a delay of "snk_bc12_cmpletion_time"
5039 			 * before transitioning to SNK_STARTUP to allow BC1.2
5040 			 * detection to complete before PD is eventually enabled
5041 			 * in later states.
5042 			 */
5043 			tcpm_set_state(port, SNK_STARTUP,
5044 				       port->timings.snk_bc12_cmpletion_time);
5045 		break;
5046 	case SNK_STARTUP:
5047 		opmode =  tcpm_get_pwr_opmode(port->polarity ?
5048 					      port->cc2 : port->cc1);
5049 		typec_set_pwr_opmode(port->typec_port, opmode);
5050 		port->pwr_opmode = TYPEC_PWR_MODE_USB;
5051 		port->negotiated_rev = PD_MAX_REV;
5052 		port->negotiated_rev_prime = PD_MAX_REV;
5053 		port->message_id = 0;
5054 		port->message_id_prime = 0;
5055 		port->rx_msgid = -1;
5056 		port->rx_msgid_prime = -1;
5057 		port->explicit_contract = false;
5058 
5059 		if (port->ams == POWER_ROLE_SWAP ||
5060 		    port->ams == FAST_ROLE_SWAP)
5061 			/* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
5062 			tcpm_ams_finish(port);
5063 
5064 		tcpm_set_state(port, SNK_DISCOVERY, 0);
5065 		break;
5066 	case SNK_DISCOVERY:
5067 		if (port->vbus_present) {
5068 			u32 current_lim = tcpm_get_current_limit(port);
5069 
5070 			if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
5071 				current_lim = PD_P_SNK_STDBY_MW / 5;
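			/*
			 * Assuming the usual 2500 mW pSnkStdby budget, this
			 * caps the pre-negotiation draw at 500 mA on the 5 V
			 * VBUS configured just below.
			 */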
5072 			tcpm_set_current_limit(port, current_lim, 5000);
5073 			/* Do not sink VBUS if the operational current is 0 mA */
5074 			tcpm_set_charge(port, !port->pd_supported ||
5075 					pdo_max_current(port->snk_pdo[0]));
5076 
5077 			if (!port->pd_supported)
5078 				tcpm_set_state(port, SNK_READY, 0);
5079 			else
5080 				tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5081 			break;
5082 		}
5083 		/*
5084 		 * For DRP, timeouts differ. Also, handling is supposed to be
5085 		 * different and much more complex (dead battery detection;
5086 		 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5087 		 */
5088 		tcpm_set_state(port, hard_reset_state(port),
5089 			       port->port_type == TYPEC_PORT_DRP ?
5090 					PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5091 		break;
5092 	case SNK_DISCOVERY_DEBOUNCE:
5093 		tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5094 			       port->timings.cc_debounce_time);
5095 		break;
5096 	case SNK_DISCOVERY_DEBOUNCE_DONE:
5097 		if (!tcpm_port_is_disconnected(port) &&
5098 		    tcpm_port_is_sink(port) &&
5099 		    ktime_after(port->delayed_runtime, ktime_get())) {
5100 			tcpm_set_state(port, SNK_DISCOVERY,
5101 				       ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5102 			break;
5103 		}
5104 		tcpm_set_state(port, unattached_state(port), 0);
5105 		break;
5106 	case SNK_WAIT_CAPABILITIES:
5107 		ret = port->tcpc->set_pd_rx(port->tcpc, true);
5108 		if (ret < 0) {
5109 			tcpm_set_state(port, SNK_READY, 0);
5110 			break;
5111 		}
5112 		/*
5113 		 * If VBUS has never been low, and we time out waiting
5114 		 * for source cap, try a soft reset first, in case we
5115 		 * were already in a stable contract before this boot.
5116 		 * Do this only once.
5117 		 */
5118 		if (port->vbus_never_low) {
5119 			port->vbus_never_low = false;
5120 			upcoming_state = SNK_SOFT_RESET;
5121 		} else {
5122 			if (!port->self_powered)
5123 				upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
5124 			else
5125 				upcoming_state = hard_reset_state(port);
5126 		}
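		/*
		 * Bus-powered (non self_powered) sinks take the
		 * SNK_WAIT_CAPABILITIES_TIMEOUT detour below so that a
		 * Get_Source_Cap is attempted before a hard reset that could
		 * cut their own power.
		 */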
5127 
5128 		tcpm_set_state(port, upcoming_state,
5129 			       port->timings.sink_wait_cap_time);
5130 		break;
5131 	case SNK_WAIT_CAPABILITIES_TIMEOUT:
5132 		/*
5133 		 * There are some USB PD sources in the field which do not
5134 		 * properly implement the specification and fail to start
5135 		 * sending Source Capabilities messages after a soft reset. The
5136 		 * specification suggests doing a hard reset when no Source
5137 		 * Capabilities message is received within PD_T_SINK_WAIT_CAP,
5138 		 * but that might effectively kill the machine's power source.
5139 		 *
5140 		 * This slightly diverges from the specification and tries to
5141 		 * recover from this by explicitly asking for the capabilities
5142 		 * using the Get_Source_Cap control message before falling back
5143 		 * to a hard reset. The control message should also be supported
5144 		 * and handled by all USB PD source and dual role devices
5145 		 * according to the specification.
5146 		 */
5147 		if (tcpm_pd_send_control(port, PD_CTRL_GET_SOURCE_CAP, TCPC_TX_SOP))
5148 			tcpm_set_state_cond(port, hard_reset_state(port), 0);
5149 		else
5150 			tcpm_set_state(port, hard_reset_state(port),
5151 				       port->timings.sink_wait_cap_time);
5152 		break;
5153 	case SNK_NEGOTIATE_CAPABILITIES:
5154 		port->pd_capable = true;
5155 		tcpm_set_partner_usb_comm_capable(port,
5156 						  !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5157 		port->hard_reset_count = 0;
5158 		ret = tcpm_pd_send_request(port);
5159 		if (ret < 0) {
5160 			/* Restore back to the original state */
5161 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5162 							       port->pps_data.active,
5163 							       port->supply_voltage);
5164 			/* Let the Source send capabilities again. */
5165 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5166 		} else {
5167 			tcpm_set_state_cond(port, hard_reset_state(port),
5168 					    PD_T_SENDER_RESPONSE);
5169 		}
5170 		break;
5171 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
5172 		ret = tcpm_pd_send_pps_request(port);
5173 		if (ret < 0) {
5174 			/* Restore back to the original state */
5175 			tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5176 							       port->pps_data.active,
5177 							       port->supply_voltage);
5178 			port->pps_status = ret;
5179 			/*
5180 			 * If this was called due to updates to sink
5181 			 * capabilities, and pps is no longer valid, we should
5182 			 * safely fall back to a standard PDO.
5183 			 */
5184 			if (port->update_sink_caps)
5185 				tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5186 			else
5187 				tcpm_set_state(port, SNK_READY, 0);
5188 		} else {
5189 			tcpm_set_state_cond(port, hard_reset_state(port),
5190 					    PD_T_SENDER_RESPONSE);
5191 		}
5192 		break;
5193 	case SNK_TRANSITION_SINK:
5194 		/* From the USB PD spec:
5195 		 * "The Sink Shall transition to Sink Standby before a positive or
5196 		 * negative voltage transition of VBUS. During Sink Standby
5197 		 * the Sink Shall reduce its power draw to pSnkStdby."
5198 		 *
5199 		 * This is not applicable to PPS though as the port can continue
5200 		 * to draw negotiated power without switching to standby.
5201 		 */
5202 		if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5203 		    port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5204 			u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5205 
5206 			tcpm_log(port, "Setting standby current %u mV @ %u mA",
5207 				 port->supply_voltage, stdby_ma);
5208 			tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
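			/*
			 * Worked example, assuming PD_P_SNK_STDBY_MW is
			 * pSnkStdby = 2500 mW: for a 9000 mV contract the
			 * standby draw above is 2500000 / 9000, i.e. roughly
			 * 277 mA, until the new VBUS level is ready.
			 */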
5209 		}
5210 		fallthrough;
5211 	case SNK_TRANSITION_SINK_VBUS:
5212 		tcpm_set_state(port, hard_reset_state(port),
5213 			       PD_T_PS_TRANSITION);
5214 		break;
5215 	case SNK_READY:
5216 		port->try_snk_count = 0;
5217 		port->update_sink_caps = false;
5218 		if (port->explicit_contract) {
5219 			typec_set_pwr_opmode(port->typec_port,
5220 					     TYPEC_PWR_MODE_PD);
5221 			port->pwr_opmode = TYPEC_PWR_MODE_PD;
5222 		}
5223 
5224 		if (!port->pd_capable && port->slow_charger_loop)
5225 			tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5226 		tcpm_swap_complete(port, 0);
5227 		tcpm_typec_connect(port);
5228 		if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5229 			mod_enable_frs_delayed_work(port, 0);
5230 		tcpm_pps_complete(port, port->pps_status);
5231 
5232 		if (port->ams != NONE_AMS)
5233 			tcpm_ams_finish(port);
5234 		if (port->next_ams != NONE_AMS) {
5235 			port->ams = port->next_ams;
5236 			port->next_ams = NONE_AMS;
5237 		}
5238 
5239 		/*
5240 		 * If previous AMS is interrupted, switch to the upcoming
5241 		 * state.
5242 		 */
5243 		if (port->upcoming_state != INVALID_STATE) {
5244 			upcoming_state = port->upcoming_state;
5245 			port->upcoming_state = INVALID_STATE;
5246 			tcpm_set_state(port, upcoming_state, 0);
5247 			break;
5248 		}
5249 
5250 		/*
5251 		 * 6.4.4.3.1 Discover Identity
5252 		 * "The Discover Identity Command Shall only be sent to SOP when there is an
5253 		 * Explicit Contract."
5254 		 *
5255 		 * Discover Identity on SOP' should be discovered prior to the
5256 		 * ready state, but if done after a Vconn Swap following Discover
5257 		 * Identity on SOP then the discovery process can be run here
5258 		 * as well.
5259 		 */
5260 		if (port->explicit_contract) {
5261 			if (port->send_discover_prime) {
5262 				port->tx_sop_type = TCPC_TX_SOP_PRIME;
5263 			} else {
5264 				port->tx_sop_type = TCPC_TX_SOP;
5265 				tcpm_set_initial_svdm_version(port);
5266 			}
5267 			mod_send_discover_delayed_work(port, 0);
5268 		} else {
5269 			port->send_discover = false;
5270 			port->send_discover_prime = false;
5271 		}
5272 
5273 		power_supply_changed(port->psy);
5274 		break;
5275 
5276 	/* Accessory states */
5277 	case ACC_UNATTACHED:
5278 		tcpm_acc_detach(port);
5279 		tcpm_set_state(port, SRC_UNATTACHED, 0);
5280 		break;
5281 	case DEBUG_ACC_ATTACHED:
5282 	case AUDIO_ACC_ATTACHED:
5283 		ret = tcpm_acc_attach(port);
5284 		if (ret < 0)
5285 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5286 		break;
5287 	case AUDIO_ACC_DEBOUNCE:
5288 		tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
5289 		break;
5290 
5291 	/* Hard_Reset states */
5292 	case HARD_RESET_SEND:
5293 		if (port->ams != NONE_AMS)
5294 			tcpm_ams_finish(port);
5295 		if (!port->self_powered && port->port_type == TYPEC_PORT_SNK)
5296 			dev_err(port->dev, "Initiating hard-reset, which might result in machine power-loss.\n");
5297 		/*
5298 		 * State machine will be directed to HARD_RESET_START,
5299 		 * thus set upcoming_state to INVALID_STATE.
5300 		 */
5301 		port->upcoming_state = INVALID_STATE;
5302 		tcpm_ams_start(port, HARD_RESET);
5303 		break;
5304 	case HARD_RESET_START:
5305 		port->sink_cap_done = false;
5306 		if (port->tcpc->enable_frs)
5307 			port->tcpc->enable_frs(port->tcpc, false);
5308 		port->hard_reset_count++;
5309 		port->tcpc->set_pd_rx(port->tcpc, false);
5310 		tcpm_unregister_altmodes(port);
5311 		port->nr_sink_caps = 0;
5312 		port->send_discover = true;
5313 		port->send_discover_prime = false;
5314 		if (port->pwr_role == TYPEC_SOURCE)
5315 			tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5316 				       PD_T_PS_HARD_RESET);
5317 		else
5318 			tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5319 		break;
5320 	case SRC_HARD_RESET_VBUS_OFF:
5321 		/*
5322 		 * 7.1.5 Response to Hard Resets
5323 		 * Hard Reset Signaling indicates a communication failure has occurred and the
5324 		 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5325 		 * drive VBUS to vSafe0V as shown in Figure 7-9.
5326 		 */
5327 		tcpm_set_vconn(port, false);
5328 		tcpm_set_vbus(port, false);
5329 		tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
5330 			       tcpm_data_role_for_source(port));
5331 		/*
5332 		 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5333 		 * PD_T_SRC_RECOVER before turning vbus back on.
5334 		 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5335 		 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5336 		 * tells the Device Policy Manager to instruct the power supply to perform a
5337 		 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5338 		 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5339 		 * re-establish communication with the Sink and resume USB Default Operation.
5340 		 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5341 		 */
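		/*
		 * If the TCPC does report vSafe0V, _tcpm_pd_vbus_vsafe0v()
		 * shortens this to just tSrcRecover measured from the point
		 * vSafe0V was reached.
		 */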
5342 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
5343 		break;
5344 	case SRC_HARD_RESET_VBUS_ON:
5345 		tcpm_set_vconn(port, true);
5346 		tcpm_set_vbus(port, true);
5347 		if (port->ams == HARD_RESET)
5348 			tcpm_ams_finish(port);
5349 		if (port->pd_supported)
5350 			port->tcpc->set_pd_rx(port->tcpc, true);
5351 		tcpm_set_attached_state(port, true);
5352 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5353 		break;
5354 	case SNK_HARD_RESET_SINK_OFF:
5355 		/* Do not discharge/disconnect during hard reset */
5356 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5357 		memset(&port->pps_data, 0, sizeof(port->pps_data));
5358 		tcpm_set_vconn(port, false);
5359 		if (port->pd_capable)
5360 			tcpm_set_charge(port, false);
5361 		tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
5362 			       tcpm_data_role_for_sink(port));
5363 		/*
5364 		 * VBUS may or may not toggle, depending on the adapter.
5365 		 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5366 		 * directly after timeout.
5367 		 */
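		/*
		 * If VBUS does drop, _tcpm_pd_vbus_off() redirects to
		 * SNK_HARD_RESET_WAIT_VBUS instead.
		 */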
5368 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5369 		break;
5370 	case SNK_HARD_RESET_WAIT_VBUS:
5371 		if (port->ams == HARD_RESET)
5372 			tcpm_ams_finish(port);
5373 		/* Assume we're disconnected if VBUS doesn't come back. */
5374 		tcpm_set_state(port, SNK_UNATTACHED,
5375 			       PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5376 		break;
5377 	case SNK_HARD_RESET_SINK_ON:
5378 		/* Note: There is no guarantee that VBUS is on in this state */
5379 		/*
5380 		 * XXX:
5381 		 * The specification suggests that dual mode ports in sink
5382 		 * mode should transition to state PE_SRC_Transition_to_default.
5383 		 * See USB power delivery specification chapter 8.3.3.6.1.3.
5384 		 * This would mean to
5385 		 * - turn off VCONN, reset power supply
5386 		 * - request hardware reset
5387 		 * - turn on VCONN
5388 		 * - Transition to state PE_Src_Startup
5389 		 * SNK only ports shall transition to state Snk_Startup
5390 		 * (see chapter 8.3.3.3.8).
5391 		 * Similarly, dual-mode ports in source mode should transition
5392 		 * to PE_SNK_Transition_to_default.
5393 		 */
5394 		if (port->pd_capable) {
5395 			tcpm_set_current_limit(port,
5396 					       tcpm_get_current_limit(port),
5397 					       5000);
5398 			/* Do not sink vbus if the operational current is 0 mA */
5399 			tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5400 		}
5401 		if (port->ams == HARD_RESET)
5402 			tcpm_ams_finish(port);
5403 		tcpm_set_attached_state(port, true);
5404 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5405 		tcpm_set_state(port, SNK_STARTUP, 0);
5406 		break;
5407 
5408 	/* Soft_Reset states */
5409 	case SOFT_RESET:
5410 		port->message_id = 0;
5411 		port->rx_msgid = -1;
5412 		/* remove existing capabilities */
5413 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5414 		port->partner_source_caps = NULL;
5415 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5416 		tcpm_ams_finish(port);
5417 		if (port->pwr_role == TYPEC_SOURCE) {
5418 			port->upcoming_state = SRC_SEND_CAPABILITIES;
5419 			tcpm_ams_start(port, POWER_NEGOTIATION);
5420 		} else {
5421 			tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5422 		}
5423 		break;
5424 	case SRC_SOFT_RESET_WAIT_SNK_TX:
5425 	case SNK_SOFT_RESET:
5426 		if (port->ams != NONE_AMS)
5427 			tcpm_ams_finish(port);
5428 		port->upcoming_state = SOFT_RESET_SEND;
5429 		tcpm_ams_start(port, SOFT_RESET_AMS);
5430 		break;
5431 	case SOFT_RESET_SEND:
5432 		/*
5433 		 * Power Delivery 3.0 Section 6.3.13
5434 		 *
5435 		 * A Soft_Reset Message Shall be targeted at a specific entity
5436 		 * depending on the type of SOP* packet used.
5437 		 */
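		/*
		 * SOP' Soft_Reset targets the cable plug and simply returns to
		 * the ready state on timeout; SOP Soft_Reset targets the port
		 * partner and escalates to a hard reset if it cannot be sent
		 * or goes unanswered within tSenderResponse.
		 */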
5438 		if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5439 			port->message_id_prime = 0;
5440 			port->rx_msgid_prime = -1;
5441 			tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5442 			tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5443 		} else {
5444 			port->message_id = 0;
5445 			port->rx_msgid = -1;
5446 			/* remove existing capabilities */
5447 			usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5448 			port->partner_source_caps = NULL;
5449 			if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5450 				tcpm_set_state_cond(port, hard_reset_state(port), 0);
5451 			else
5452 				tcpm_set_state_cond(port, hard_reset_state(port),
5453 						    PD_T_SENDER_RESPONSE);
5454 		}
5455 		break;
5456 
5457 	/* DR_Swap states */
5458 	case DR_SWAP_SEND:
5459 		tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
5460 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5461 			port->send_discover = true;
5462 			port->send_discover_prime = false;
5463 		}
5464 		tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5465 				    PD_T_SENDER_RESPONSE);
5466 		break;
5467 	case DR_SWAP_ACCEPT:
5468 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5469 		if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5470 			port->send_discover = true;
5471 			port->send_discover_prime = false;
5472 		}
5473 		tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5474 		break;
5475 	case DR_SWAP_SEND_TIMEOUT:
5476 		tcpm_swap_complete(port, -ETIMEDOUT);
5477 		port->send_discover = false;
5478 		port->send_discover_prime = false;
5479 		tcpm_ams_finish(port);
5480 		tcpm_set_state(port, ready_state(port), 0);
5481 		break;
5482 	case DR_SWAP_CHANGE_DR:
5483 		tcpm_unregister_altmodes(port);
5484 		if (port->data_role == TYPEC_HOST)
5485 			tcpm_set_roles(port, true, port->pwr_role,
5486 				       TYPEC_DEVICE);
5487 		else
5488 			tcpm_set_roles(port, true, port->pwr_role,
5489 				       TYPEC_HOST);
5490 		tcpm_ams_finish(port);
5491 		tcpm_set_state(port, ready_state(port), 0);
5492 		break;
5493 
5494 	case FR_SWAP_SEND:
5495 		if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5496 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5497 			break;
5498 		}
5499 		tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5500 		break;
5501 	case FR_SWAP_SEND_TIMEOUT:
5502 		tcpm_set_state(port, ERROR_RECOVERY, 0);
5503 		break;
5504 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5505 		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5506 		break;
5507 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5508 		if (port->vbus_source)
5509 			tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5510 		else
5511 			tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5512 		break;
5513 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5514 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5515 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5516 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5517 			break;
5518 		}
5519 		tcpm_set_cc(port, tcpm_rp_cc(port));
5520 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5521 		break;
5522 
5523 	/* PR_Swap states */
5524 	case PR_SWAP_ACCEPT:
5525 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5526 		tcpm_set_state(port, PR_SWAP_START, 0);
5527 		break;
5528 	case PR_SWAP_SEND:
5529 		tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5530 		tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5531 				    PD_T_SENDER_RESPONSE);
5532 		break;
5533 	case PR_SWAP_SEND_TIMEOUT:
5534 		tcpm_swap_complete(port, -ETIMEDOUT);
5535 		tcpm_set_state(port, ready_state(port), 0);
5536 		break;
5537 	case PR_SWAP_START:
5538 		tcpm_apply_rc(port);
5539 		if (port->pwr_role == TYPEC_SOURCE)
5540 			tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5541 				       PD_T_SRC_TRANSITION);
5542 		else
5543 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5544 		break;
5545 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5546 		/*
5547 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5548 		 * as this is not a disconnect.
5549 		 */
5550 		tcpm_set_vbus(port, false);
5551 		port->explicit_contract = false;
5552 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
5553 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5554 			       PD_T_SRCSWAPSTDBY);
5555 		break;
5556 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5557 		tcpm_set_cc(port, TYPEC_CC_RD);
5558 		/* allow CC debounce */
5559 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5560 			       port->timings.cc_debounce_time);
5561 		break;
5562 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5563 		/*
5564 		 * USB-PD standard, 6.2.1.4, Port Power Role:
5565 		 * "During the Power Role Swap Sequence, for the initial Source
5566 		 * Port, the Port Power Role field shall be set to Sink in the
5567 		 * PS_RDY Message indicating that the initial Source’s power
5568 		 * supply is turned off"
5569 		 */
5570 		tcpm_set_pwr_role(port, TYPEC_SINK);
5571 		if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5572 			tcpm_set_state(port, ERROR_RECOVERY, 0);
5573 			break;
5574 		}
5575 		tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5576 		break;
5577 	case PR_SWAP_SRC_SNK_SINK_ON:
5578 		tcpm_enable_auto_vbus_discharge(port, true);
5579 		/* Set the vbus disconnect threshold for implicit contract */
5580 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5581 		tcpm_set_state(port, SNK_STARTUP, 0);
5582 		break;
5583 	case PR_SWAP_SNK_SRC_SINK_OFF:
5584 		/* will be source, remove existing capabilities */
5585 		usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5586 		port->partner_source_caps = NULL;
5587 		/*
5588 		 * Prevent vbus discharge circuit from turning on during PR_SWAP
5589 		 * as this is not a disconnect.
5590 		 */
5591 		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5592 						       port->pps_data.active, 0);
5593 		tcpm_set_charge(port, false);
5594 		tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5595 		break;
5596 	case PR_SWAP_SNK_SRC_SOURCE_ON:
5597 		tcpm_enable_auto_vbus_discharge(port, true);
5598 		tcpm_set_cc(port, tcpm_rp_cc(port));
5599 		tcpm_set_vbus(port, true);
5600 		/*
5601 		 * Allow time for VBUS ramp-up; must be < tNewSrc.
5602 		 * This window also overlaps with CC debounce, so wait
5603 		 * for the longer of the two, which is PD_T_NEWSRC.
5604 		 */
5605 		tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5606 			       PD_T_NEWSRC);
5607 		break;
5608 	case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5609 		/*
5610 		 * USB PD standard, 6.2.1.4:
5611 		 * "Subsequent Messages initiated by the Policy Engine,
5612 		 * such as the PS_RDY Message sent to indicate that Vbus
5613 		 * is ready, will have the Port Power Role field set to
5614 		 * Source."
5615 		 */
5616 		tcpm_set_pwr_role(port, TYPEC_SOURCE);
5617 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5618 		tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5619 		break;
5620 
5621 	case VCONN_SWAP_ACCEPT:
5622 		tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5623 		tcpm_ams_finish(port);
5624 		tcpm_set_state(port, VCONN_SWAP_START, 0);
5625 		break;
5626 	case VCONN_SWAP_SEND:
5627 		tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5628 		tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5629 			       PD_T_SENDER_RESPONSE);
5630 		break;
5631 	case VCONN_SWAP_SEND_TIMEOUT:
5632 		tcpm_swap_complete(port, -ETIMEDOUT);
5633 		tcpm_set_state(port, ready_state(port), 0);
5634 		break;
5635 	case VCONN_SWAP_START:
5636 		if (port->vconn_role == TYPEC_SOURCE)
5637 			tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5638 		else
5639 			tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5640 		break;
5641 	case VCONN_SWAP_WAIT_FOR_VCONN:
5642 		tcpm_set_state(port, hard_reset_state(port),
5643 			       PD_T_VCONN_SOURCE_ON);
5644 		break;
5645 	case VCONN_SWAP_TURN_ON_VCONN:
5646 		ret = tcpm_set_vconn(port, true);
5647 		tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5648 		/*
5649 		 * USB PD 3.0 Section 6.4.4.3.1
5650 		 *
5651 		 * Note that a Cable Plug or VPD will not be ready for PD
5652 		 * Communication until tVCONNStable after VCONN has been applied
5653 		 */
5654 		if (!ret)
5655 			tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5656 				       PD_T_VCONN_STABLE);
5657 		else
5658 			tcpm_set_state(port, ready_state(port), 0);
5659 		break;
5660 	case VCONN_SWAP_TURN_OFF_VCONN:
5661 		tcpm_set_vconn(port, false);
5662 		tcpm_set_state(port, ready_state(port), 0);
5663 		break;
5664 	case VCONN_SWAP_SEND_SOFT_RESET:
5665 		tcpm_swap_complete(port, port->swap_status);
5666 		if (tcpm_can_communicate_sop_prime(port)) {
5667 			port->tx_sop_type = TCPC_TX_SOP_PRIME;
5668 			port->upcoming_state = SOFT_RESET_SEND;
5669 			tcpm_ams_start(port, SOFT_RESET_AMS);
5670 		} else {
5671 			tcpm_set_state(port, ready_state(port), 0);
5672 		}
5673 		break;
5674 
5675 	case DR_SWAP_CANCEL:
5676 	case PR_SWAP_CANCEL:
5677 	case VCONN_SWAP_CANCEL:
5678 		tcpm_swap_complete(port, port->swap_status);
5679 		if (port->pwr_role == TYPEC_SOURCE)
5680 			tcpm_set_state(port, SRC_READY, 0);
5681 		else
5682 			tcpm_set_state(port, SNK_READY, 0);
5683 		break;
5684 	case FR_SWAP_CANCEL:
5685 		if (port->pwr_role == TYPEC_SOURCE)
5686 			tcpm_set_state(port, SRC_READY, 0);
5687 		else
5688 			tcpm_set_state(port, SNK_READY, 0);
5689 		break;
5690 
5691 	case BIST_RX:
5692 		switch (BDO_MODE_MASK(port->bist_request)) {
5693 		case BDO_MODE_CARRIER2:
5694 			tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5695 			tcpm_set_state(port, unattached_state(port),
5696 				       PD_T_BIST_CONT_MODE);
5697 			break;
5698 		case BDO_MODE_TESTDATA:
5699 			if (port->tcpc->set_bist_data) {
5700 				tcpm_log(port, "Enable BIST MODE TESTDATA");
5701 				port->tcpc->set_bist_data(port->tcpc, true);
5702 			}
5703 			break;
5704 		default:
5705 			break;
5706 		}
5707 		break;
5708 	case GET_STATUS_SEND:
5709 		tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5710 		tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5711 			       PD_T_SENDER_RESPONSE);
5712 		break;
5713 	case GET_STATUS_SEND_TIMEOUT:
5714 		tcpm_set_state(port, ready_state(port), 0);
5715 		break;
5716 	case GET_PPS_STATUS_SEND:
5717 		tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5718 		tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5719 			       PD_T_SENDER_RESPONSE);
5720 		break;
5721 	case GET_PPS_STATUS_SEND_TIMEOUT:
5722 		tcpm_set_state(port, ready_state(port), 0);
5723 		break;
5724 	case GET_SINK_CAP:
5725 		tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5726 		tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5727 		break;
5728 	case GET_SINK_CAP_TIMEOUT:
5729 		port->sink_cap_done = true;
5730 		tcpm_set_state(port, ready_state(port), 0);
5731 		break;
5732 	case ERROR_RECOVERY:
5733 		tcpm_swap_complete(port, -EPROTO);
5734 		tcpm_pps_complete(port, -EPROTO);
5735 		tcpm_set_state(port, PORT_RESET, 0);
5736 		break;
5737 	case PORT_RESET:
5738 		tcpm_reset_port(port);
5739 		if (port->self_powered)
5740 			tcpm_set_cc(port, TYPEC_CC_OPEN);
5741 		else
5742 			tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5743 				    TYPEC_CC_RD : tcpm_rp_cc(port));
5744 		tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5745 			       PD_T_ERROR_RECOVERY);
5746 		break;
5747 	case PORT_RESET_WAIT_OFF:
5748 		tcpm_set_state(port,
5749 			       tcpm_default_state(port),
5750 			       port->vbus_present ? port->timings.ps_src_off_time : 0);
5751 		break;
5752 
5753 	/* AMS intermediate state */
5754 	case AMS_START:
5755 		if (port->upcoming_state == INVALID_STATE) {
5756 			tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5757 				       SRC_READY : SNK_READY, 0);
5758 			break;
5759 		}
5760 
5761 		upcoming_state = port->upcoming_state;
5762 		port->upcoming_state = INVALID_STATE;
5763 		tcpm_set_state(port, upcoming_state, 0);
5764 		break;
5765 
5766 	/* Chunk state */
5767 	case CHUNK_NOT_SUPP:
5768 		tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5769 		tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5770 		break;
5771 
5772 	/* Cable states */
5773 	case SRC_VDM_IDENTITY_REQUEST:
5774 		port->send_discover_prime = true;
5775 		port->tx_sop_type = TCPC_TX_SOP_PRIME;
5776 		mod_send_discover_delayed_work(port, 0);
5777 		port->upcoming_state = SRC_SEND_CAPABILITIES;
5778 		break;
5779 
5780 	default:
5781 		WARN(1, "Unexpected port state %d\n", port->state);
5782 		break;
5783 	}
5784 }
5785 
5786 static void tcpm_state_machine_work(struct kthread_work *work)
5787 {
5788 	struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5789 	enum tcpm_state prev_state;
5790 
5791 	mutex_lock(&port->lock);
5792 	port->state_machine_running = true;
5793 
5794 	if (port->queued_message && tcpm_send_queued_message(port))
5795 		goto done;
5796 
5797 	/* If we were queued due to a delayed state change, update it now */
5798 	if (port->delayed_state) {
5799 		tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5800 			 tcpm_states[port->state],
5801 			 tcpm_states[port->delayed_state], port->delay_ms);
5802 		port->prev_state = port->state;
5803 		port->state = port->delayed_state;
5804 		port->delayed_state = INVALID_STATE;
5805 	}
5806 
5807 	/*
5808 	 * Continue running as long as we have (non-delayed) state changes
5809 	 * to make.
5810 	 */
5811 	do {
5812 		prev_state = port->state;
5813 		run_state_machine(port);
5814 		if (port->queued_message)
5815 			tcpm_send_queued_message(port);
5816 	} while (port->state != prev_state && !port->delayed_state);
5817 
5818 done:
5819 	port->state_machine_running = false;
5820 	mutex_unlock(&port->lock);
5821 }
5822 
5823 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
5824 			    enum typec_cc_status cc2)
5825 {
5826 	enum typec_cc_status old_cc1, old_cc2;
5827 	enum tcpm_state new_state;
5828 
5829 	old_cc1 = port->cc1;
5830 	old_cc2 = port->cc2;
5831 	port->cc1 = cc1;
5832 	port->cc2 = cc2;
5833 
5834 	tcpm_log_force(port,
5835 		       "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5836 		       old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5837 		       port->polarity,
5838 		       tcpm_port_is_disconnected(port) ? "disconnected"
5839 						       : "connected");
5840 
5841 	switch (port->state) {
5842 	case TOGGLING:
5843 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5844 		    tcpm_port_is_source(port))
5845 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5846 		else if (tcpm_port_is_sink(port))
5847 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5848 		break;
5849 	case CHECK_CONTAMINANT:
5850 		/* Wait for Toggling to be resumed */
5851 		break;
5852 	case SRC_UNATTACHED:
5853 	case ACC_UNATTACHED:
5854 		if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5855 		    tcpm_port_is_source(port))
5856 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5857 		break;
5858 	case SRC_ATTACH_WAIT:
5859 		if (tcpm_port_is_disconnected(port) ||
5860 		    tcpm_port_is_audio_detached(port))
5861 			tcpm_set_state(port, SRC_UNATTACHED, 0);
5862 		else if (cc1 != old_cc1 || cc2 != old_cc2)
5863 			tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5864 		break;
5865 	case SRC_ATTACHED:
5866 	case SRC_STARTUP:
5867 	case SRC_SEND_CAPABILITIES:
5868 	case SRC_READY:
5869 		if (tcpm_port_is_disconnected(port) ||
5870 		    !tcpm_port_is_source(port)) {
5871 			if (port->port_type == TYPEC_PORT_SRC)
5872 				tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5873 			else
5874 				tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5875 		}
5876 		break;
5877 	case SNK_UNATTACHED:
5878 		if (tcpm_port_is_sink(port))
5879 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5880 		break;
5881 	case SNK_ATTACH_WAIT:
5882 		if ((port->cc1 == TYPEC_CC_OPEN &&
5883 		     port->cc2 != TYPEC_CC_OPEN) ||
5884 		    (port->cc1 != TYPEC_CC_OPEN &&
5885 		     port->cc2 == TYPEC_CC_OPEN))
5886 			new_state = SNK_DEBOUNCED;
5887 		else if (tcpm_port_is_disconnected(port))
5888 			new_state = SNK_UNATTACHED;
5889 		else
5890 			break;
5891 		if (new_state != port->delayed_state)
5892 			tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5893 		break;
5894 	case SNK_DEBOUNCED:
5895 		if (tcpm_port_is_disconnected(port))
5896 			new_state = SNK_UNATTACHED;
5897 		else if (port->vbus_present)
5898 			new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5899 		else
5900 			new_state = SNK_UNATTACHED;
5901 		if (new_state != port->delayed_state)
5902 			tcpm_set_state(port, SNK_DEBOUNCED, 0);
5903 		break;
5904 	case SNK_READY:
5905 		/*
5906 		 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5907 		 * "A port that has entered into USB PD communications with the Source and
5908 		 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5909 		 * cable disconnect in addition to monitoring VBUS.
5910 		 *
5911 		 * A port that is monitoring the CC voltage for disconnect (but is not in
5912 		 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5913 		 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5914 		 * vRd-USB for tPDDebounce."
5915 		 *
5916 		 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5917 		 * away before vbus decays to disconnect threshold. Allow
5918 		 * disconnect to be driven by vbus disconnect when auto vbus
5919 		 * discharge is enabled.
5920 		 */
5921 		if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5922 			tcpm_set_state(port, unattached_state(port), 0);
5923 		else if (!port->pd_capable &&
5924 			 (cc1 != old_cc1 || cc2 != old_cc2))
5925 			tcpm_set_current_limit(port,
5926 					       tcpm_get_current_limit(port),
5927 					       5000);
5928 		break;
5929 
5930 	case AUDIO_ACC_ATTACHED:
5931 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5932 			tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5933 		break;
5934 	case AUDIO_ACC_DEBOUNCE:
5935 		if (tcpm_port_is_audio(port))
5936 			tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5937 		break;
5938 
5939 	case DEBUG_ACC_ATTACHED:
5940 		if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5941 			tcpm_set_state(port, ACC_UNATTACHED, 0);
5942 		break;
5943 
5944 	case SNK_TRY:
5945 		/* Do nothing, waiting for timeout */
5946 		break;
5947 
5948 	case SNK_DISCOVERY:
5949 		/* CC line is unstable, wait for debounce */
5950 		if (tcpm_port_is_disconnected(port))
5951 			tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5952 		break;
5953 	case SNK_DISCOVERY_DEBOUNCE:
5954 		break;
5955 
5956 	case SRC_TRYWAIT:
5957 		/* Hand over to state machine if needed */
5958 		if (!port->vbus_present && tcpm_port_is_source(port))
5959 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5960 		break;
5961 	case SRC_TRYWAIT_DEBOUNCE:
5962 		if (port->vbus_present || !tcpm_port_is_source(port))
5963 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5964 		break;
5965 	case SNK_TRY_WAIT_DEBOUNCE:
5966 		if (!tcpm_port_is_sink(port)) {
5967 			port->max_wait = 0;
5968 			tcpm_set_state(port, SRC_TRYWAIT, 0);
5969 		}
5970 		break;
5971 	case SRC_TRY_WAIT:
5972 		if (tcpm_port_is_source(port))
5973 			tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5974 		break;
5975 	case SRC_TRY_DEBOUNCE:
5976 		tcpm_set_state(port, SRC_TRY_WAIT, 0);
5977 		break;
5978 	case SNK_TRYWAIT_DEBOUNCE:
5979 		if (tcpm_port_is_sink(port))
5980 			tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5981 		break;
5982 	case SNK_TRYWAIT_VBUS:
5983 		if (!tcpm_port_is_sink(port))
5984 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5985 		break;
5986 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5987 		if (!tcpm_port_is_sink(port))
5988 			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5989 		else
5990 			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5991 		break;
5992 	case SNK_TRYWAIT:
5993 		/* Do nothing, waiting for tCCDebounce */
5994 		break;
5995 	case PR_SWAP_SNK_SRC_SINK_OFF:
5996 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5997 	case PR_SWAP_SRC_SNK_SOURCE_OFF:
5998 	case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5999 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6000 		/*
6001 		 * CC state change is expected in PR_SWAP
6002 		 * Ignore it.
6003 		 */
6004 		break;
6005 	case FR_SWAP_SEND:
6006 	case FR_SWAP_SEND_TIMEOUT:
6007 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6008 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6009 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6010 		/* Do nothing, CC change expected */
6011 		break;
6012 
6013 	case PORT_RESET:
6014 	case PORT_RESET_WAIT_OFF:
6015 		/*
6016 		 * State set back to default mode once the timer completes.
6017 		 * Ignore CC changes here.
6018 		 */
6019 		break;
6020 	default:
6021 		/*
6022 		 * While acting as sink and auto vbus discharge is enabled, allow
6023 		 * the disconnect to be driven by vbus disconnect.
6024 		 */
6025 		if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
6026 							 port->auto_vbus_discharge_enabled))
6027 			tcpm_set_state(port, unattached_state(port), 0);
6028 		break;
6029 	}
6030 }
6031 
6032 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
6033 {
6034 	tcpm_log_force(port, "VBUS on");
6035 	port->vbus_present = true;
6036 	/*
6037 	 * vbus_present being true, i.e. the voltage at VBUS being greater than VSAFE5V,
6038 	 * implies that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
6039 	 */
6040 	port->vbus_vsafe0v = false;
6041 
6042 	switch (port->state) {
6043 	case SNK_TRANSITION_SINK_VBUS:
6044 		port->explicit_contract = true;
6045 		tcpm_set_state(port, SNK_READY, 0);
6046 		break;
6047 	case SNK_DISCOVERY:
6048 		tcpm_set_state(port, SNK_DISCOVERY, 0);
6049 		break;
6050 
6051 	case SNK_DEBOUNCED:
6052 		tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
6053 							: SNK_ATTACHED,
6054 				       0);
6055 		break;
6056 	case SNK_HARD_RESET_WAIT_VBUS:
6057 		tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
6058 		break;
6059 	case SRC_ATTACHED:
6060 		tcpm_set_state(port, SRC_STARTUP, 0);
6061 		break;
6062 	case SRC_HARD_RESET_VBUS_ON:
6063 		tcpm_set_state(port, SRC_STARTUP, 0);
6064 		break;
6065 
6066 	case SNK_TRY:
6067 		/* Do nothing, waiting for timeout */
6068 		break;
6069 	case SRC_TRYWAIT:
6070 		/* Do nothing, waiting for Rd to be detected */
6071 		break;
6072 	case SRC_TRYWAIT_DEBOUNCE:
6073 		tcpm_set_state(port, SRC_TRYWAIT, 0);
6074 		break;
6075 	case SNK_TRY_WAIT_DEBOUNCE:
6076 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6077 		break;
6078 	case SNK_TRYWAIT:
6079 		/* Do nothing, waiting for tCCDebounce */
6080 		break;
6081 	case SNK_TRYWAIT_VBUS:
6082 		if (tcpm_port_is_sink(port))
6083 			tcpm_set_state(port, SNK_ATTACHED, 0);
6084 		break;
6085 	case SNK_TRYWAIT_DEBOUNCE:
6086 		/* Do nothing, waiting for Rp */
6087 		break;
6088 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6089 		if (port->vbus_present && tcpm_port_is_sink(port))
6090 			tcpm_set_state(port, SNK_ATTACHED, 0);
6091 		break;
6092 	case SRC_TRY_WAIT:
6093 	case SRC_TRY_DEBOUNCE:
6094 		/* Do nothing, waiting for sink detection */
6095 		break;
6096 	case FR_SWAP_SEND:
6097 	case FR_SWAP_SEND_TIMEOUT:
6098 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6099 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6100 		if (port->tcpc->frs_sourcing_vbus)
6101 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6102 		break;
6103 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6104 		if (port->tcpc->frs_sourcing_vbus)
6105 			port->tcpc->frs_sourcing_vbus(port->tcpc);
6106 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
6107 		break;
6108 
6109 	case PORT_RESET:
6110 	case PORT_RESET_WAIT_OFF:
6111 		/*
6112 		 * State set back to default mode once the timer completes.
6113 		 * Ignore vbus changes here.
6114 		 */
6115 		break;
6116 
6117 	default:
6118 		break;
6119 	}
6120 }
6121 
6122 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
6123 {
6124 	tcpm_log_force(port, "VBUS off");
6125 	port->vbus_present = false;
6126 	port->vbus_never_low = false;
6127 	switch (port->state) {
6128 	case SNK_HARD_RESET_SINK_OFF:
6129 		tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
6130 		break;
6131 	case HARD_RESET_SEND:
6132 		break;
6133 	case SNK_TRY:
6134 		/* Do nothing, waiting for timeout */
6135 		break;
6136 	case SRC_TRYWAIT:
6137 		/* Hand over to state machine if needed */
6138 		if (tcpm_port_is_source(port))
6139 			tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6140 		break;
6141 	case SNK_TRY_WAIT_DEBOUNCE:
6142 		/* Do nothing, waiting for PD_DEBOUNCE to be done */
6143 		break;
6144 	case SNK_TRYWAIT:
6145 	case SNK_TRYWAIT_VBUS:
6146 	case SNK_TRYWAIT_DEBOUNCE:
6147 		break;
6148 	case SNK_ATTACH_WAIT:
6149 	case SNK_DEBOUNCED:
6150 		/* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
6151 		break;
6152 
6153 	case SNK_NEGOTIATE_CAPABILITIES:
6154 		break;
6155 
6156 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6157 		tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
6158 		break;
6159 
6160 	case PR_SWAP_SNK_SRC_SINK_OFF:
6161 		/* Do nothing, expected */
6162 		break;
6163 
6164 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6165 		/*
6166 		 * Do nothing when vbus off notification is received.
6167 		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
6168 		 * for the vbus source to ramp up.
6169 		 */
6170 		break;
6171 
6172 	case PORT_RESET_WAIT_OFF:
6173 		tcpm_set_state(port, tcpm_default_state(port), 0);
6174 		break;
6175 
6176 	case SRC_TRY_WAIT:
6177 	case SRC_TRY_DEBOUNCE:
6178 		/* Do nothing, waiting for sink detection */
6179 		break;
6180 
6181 	case SRC_STARTUP:
6182 	case SRC_SEND_CAPABILITIES:
6183 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6184 	case SRC_NEGOTIATE_CAPABILITIES:
6185 	case SRC_TRANSITION_SUPPLY:
6186 	case SRC_READY:
6187 	case SRC_WAIT_NEW_CAPABILITIES:
6188 		/*
6189 		 * Force the unattached state to re-initiate the connection.
6190 		 * A DRP port should move to Unattached.SNK instead of Unattached.SRC if
6191 		 * the sink is removed. Although the sink removal here is due to the source's
6192 		 * vbus collapse, treat it the same way for consistency.
6193 		 */
6194 		if (port->port_type == TYPEC_PORT_SRC)
6195 			tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
6196 		else
6197 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6198 		break;
6199 
6200 	case PORT_RESET:
6201 		/*
6202 		 * State set back to default mode once the timer completes.
6203 		 * Ignore vbus changes here.
6204 		 */
6205 		break;
6206 
6207 	case FR_SWAP_SEND:
6208 	case FR_SWAP_SEND_TIMEOUT:
6209 	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6210 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6211 	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6212 		/* Do nothing, vbus drop expected */
6213 		break;
6214 
6215 	case SNK_HARD_RESET_WAIT_VBUS:
6216 		/* Do nothing, it's OK to receive vbus off events */
6217 		break;
6218 
6219 	default:
6220 		if (port->pwr_role == TYPEC_SINK && port->attached)
6221 			tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6222 		break;
6223 	}
6224 }
6225 
6226 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
6227 {
6228 	tcpm_log_force(port, "VBUS VSAFE0V");
6229 	port->vbus_vsafe0v = true;
6230 	switch (port->state) {
6231 	case SRC_HARD_RESET_VBUS_OFF:
6232 		/*
6233 		 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
6234 		 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
6235 		 */
6236 		tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
6237 		break;
6238 	case SRC_ATTACH_WAIT:
6239 		if (tcpm_port_is_source(port))
6240 			tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
6241 				       port->timings.cc_debounce_time);
6242 		break;
6243 	case SRC_STARTUP:
6244 	case SRC_SEND_CAPABILITIES:
6245 	case SRC_SEND_CAPABILITIES_TIMEOUT:
6246 	case SRC_NEGOTIATE_CAPABILITIES:
6247 	case SRC_TRANSITION_SUPPLY:
6248 	case SRC_READY:
6249 	case SRC_WAIT_NEW_CAPABILITIES:
6250 		if (port->auto_vbus_discharge_enabled) {
6251 			if (port->port_type == TYPEC_PORT_SRC)
6252 				tcpm_set_state(port, SRC_UNATTACHED, 0);
6253 			else
6254 				tcpm_set_state(port, SNK_UNATTACHED, 0);
6255 		}
6256 		break;
6257 	case PR_SWAP_SNK_SRC_SINK_OFF:
6258 	case PR_SWAP_SNK_SRC_SOURCE_ON:
6259 		/* Do nothing, vsafe0v is expected during transition */
6260 		break;
6261 	case SNK_ATTACH_WAIT:
6262 	case SNK_DEBOUNCED:
6263 		/* Do nothing, still waiting for VSAFE5V to connect */
6264 		break;
6265 	case SNK_HARD_RESET_WAIT_VBUS:
6266 		/* Do nothing, it's OK to receive vbus off events */
6267 		break;
6268 	default:
6269 		if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
6270 			tcpm_set_state(port, SNK_UNATTACHED, 0);
6271 		break;
6272 	}
6273 }
6274 
6275 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6276 {
6277 	tcpm_log_force(port, "Received hard reset");
6278 	if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6279 		port->tcpc->set_bist_data(port->tcpc, false);
6280 
6281 	switch (port->state) {
6282 	case TOGGLING:
6283 	case ERROR_RECOVERY:
6284 	case PORT_RESET:
6285 	case PORT_RESET_WAIT_OFF:
6286 		return;
6287 	default:
6288 		break;
6289 	}
6290 
6291 	if (port->ams != NONE_AMS)
6292 		port->ams = NONE_AMS;
6293 	if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6294 		port->ams = HARD_RESET;
6295 	/*
6296 	 * If we keep receiving hard reset requests, executing the hard reset
6297 	 * must have failed. Revert to error recovery if that happens.
6298 	 */
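	/*
	 * Assuming PD_N_HARD_RESET_COUNT reflects the spec's
	 * nHardResetCount = 2, the third back-to-back hard reset lands in
	 * ERROR_RECOVERY instead of HARD_RESET_START.
	 */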
6299 	tcpm_set_state(port,
6300 		       port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6301 				HARD_RESET_START : ERROR_RECOVERY,
6302 		       0);
6303 }
6304 
6305 static void tcpm_pd_event_handler(struct kthread_work *work)
6306 {
6307 	struct tcpm_port *port = container_of(work, struct tcpm_port,
6308 					      event_work);
6309 	u32 events;
6310 
6311 	mutex_lock(&port->lock);
6312 
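	/*
	 * Snapshot and clear pd_events under the spinlock, then process the
	 * snapshot with only port->lock held; events raised concurrently via
	 * the tcpm_*_change() helpers are picked up on the next loop pass.
	 */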
6313 	spin_lock(&port->pd_event_lock);
6314 	while (port->pd_events) {
6315 		events = port->pd_events;
6316 		port->pd_events = 0;
6317 		spin_unlock(&port->pd_event_lock);
6318 		if (events & TCPM_RESET_EVENT)
6319 			_tcpm_pd_hard_reset(port);
6320 		if (events & TCPM_VBUS_EVENT) {
6321 			bool vbus;
6322 
6323 			vbus = port->tcpc->get_vbus(port->tcpc);
6324 			if (vbus) {
6325 				_tcpm_pd_vbus_on(port);
6326 			} else {
6327 				_tcpm_pd_vbus_off(port);
6328 				/*
6329 				 * When TCPC does not support detecting vsafe0v voltage level,
6330 				 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
6331 				 * to see if vbus has discharged to VSAFE0V.
6332 				 */
6333 				if (!port->tcpc->is_vbus_vsafe0v ||
6334 				    port->tcpc->is_vbus_vsafe0v(port->tcpc))
6335 					_tcpm_pd_vbus_vsafe0v(port);
6336 			}
6337 		}
6338 		if (events & TCPM_CC_EVENT) {
6339 			enum typec_cc_status cc1, cc2;
6340 
6341 			if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6342 				_tcpm_cc_change(port, cc1, cc2);
6343 		}
6344 		if (events & TCPM_FRS_EVENT) {
6345 			if (port->state == SNK_READY) {
6346 				int ret;
6347 
6348 				port->upcoming_state = FR_SWAP_SEND;
6349 				ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
6350 				if (ret == -EAGAIN)
6351 					port->upcoming_state = INVALID_STATE;
6352 			} else {
6353 				tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
6354 			}
6355 		}
6356 		if (events & TCPM_SOURCING_VBUS) {
6357 			tcpm_log(port, "sourcing vbus");
6358 			/*
6359 			 * In the fast role swap case the TCPC autonomously sources vbus. Set
6360 			 * vbus_source true as TCPM wouldn't have called tcpm_set_vbus.
6361 			 *
6362 			 * When vbus is sourced at the command of TCPM, i.e. TCPM called
6363 			 * tcpm_set_vbus to source vbus, vbus_source would already be true.
6364 			 */
6365 			port->vbus_source = true;
6366 			_tcpm_pd_vbus_on(port);
6367 		}
6368 		if (events & TCPM_PORT_CLEAN) {
6369 			tcpm_log(port, "port clean");
6370 			if (port->state == CHECK_CONTAMINANT) {
6371 				if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
6372 					tcpm_set_state(port, TOGGLING, 0);
6373 				else
6374 					tcpm_set_state(port, tcpm_default_state(port), 0);
6375 			}
6376 		}
6377 		if (events & TCPM_PORT_ERROR) {
6378 			tcpm_log(port, "port triggering error recovery");
6379 			tcpm_set_state(port, ERROR_RECOVERY, 0);
6380 		}
6381 
6382 		spin_lock(&port->pd_event_lock);
6383 	}
6384 	spin_unlock(&port->pd_event_lock);
6385 	mutex_unlock(&port->lock);
6386 }
6387 
6388 void tcpm_cc_change(struct tcpm_port *port)
6389 {
6390 	spin_lock(&port->pd_event_lock);
6391 	port->pd_events |= TCPM_CC_EVENT;
6392 	spin_unlock(&port->pd_event_lock);
6393 	kthread_queue_work(port->wq, &port->event_work);
6394 }
6395 EXPORT_SYMBOL_GPL(tcpm_cc_change);
6396 
6397 void tcpm_vbus_change(struct tcpm_port *port)
6398 {
6399 	spin_lock(&port->pd_event_lock);
6400 	port->pd_events |= TCPM_VBUS_EVENT;
6401 	spin_unlock(&port->pd_event_lock);
6402 	kthread_queue_work(port->wq, &port->event_work);
6403 }
6404 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6405 
6406 void tcpm_pd_hard_reset(struct tcpm_port *port)
6407 {
6408 	spin_lock(&port->pd_event_lock);
6409 	port->pd_events = TCPM_RESET_EVENT;
6410 	spin_unlock(&port->pd_event_lock);
6411 	kthread_queue_work(port->wq, &port->event_work);
6412 }
6413 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6414 
6415 void tcpm_sink_frs(struct tcpm_port *port)
6416 {
6417 	spin_lock(&port->pd_event_lock);
6418 	port->pd_events |= TCPM_FRS_EVENT;
6419 	spin_unlock(&port->pd_event_lock);
6420 	kthread_queue_work(port->wq, &port->event_work);
6421 }
6422 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6423 
6424 void tcpm_sourcing_vbus(struct tcpm_port *port)
6425 {
6426 	spin_lock(&port->pd_event_lock);
6427 	port->pd_events |= TCPM_SOURCING_VBUS;
6428 	spin_unlock(&port->pd_event_lock);
6429 	kthread_queue_work(port->wq, &port->event_work);
6430 }
6431 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6432 
6433 void tcpm_port_clean(struct tcpm_port *port)
6434 {
6435 	spin_lock(&port->pd_event_lock);
6436 	port->pd_events |= TCPM_PORT_CLEAN;
6437 	spin_unlock(&port->pd_event_lock);
6438 	kthread_queue_work(port->wq, &port->event_work);
6439 }
6440 EXPORT_SYMBOL_GPL(tcpm_port_clean);
6441 
6442 bool tcpm_port_is_toggling(struct tcpm_port *port)
6443 {
6444 	return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6445 }
6446 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6447 
6448 void tcpm_port_error_recovery(struct tcpm_port *port)
6449 {
6450 	spin_lock(&port->pd_event_lock);
6451 	port->pd_events |= TCPM_PORT_ERROR;
6452 	spin_unlock(&port->pd_event_lock);
6453 	kthread_queue_work(port->wq, &port->event_work);
6454 }
6455 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
6456 
6457 static void tcpm_enable_frs_work(struct kthread_work *work)
6458 {
6459 	struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
6460 	int ret;
6461 
6462 	mutex_lock(&port->lock);
6463 	/* Not FRS capable */
6464 	if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
6465 	    port->pwr_opmode != TYPEC_PWR_MODE_PD ||
6466 	    !port->tcpc->enable_frs ||
6467 	    /* Sink caps queried */
6468 	    port->sink_cap_done || port->negotiated_rev < PD_REV30)
6469 		goto unlock;
6470 
6471 	/* Send when the state machine is idle */
6472 	if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
6473 	    port->send_discover_prime)
6474 		goto resched;
6475 
6476 	port->upcoming_state = GET_SINK_CAP;
6477 	ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
6478 	if (ret == -EAGAIN) {
6479 		port->upcoming_state = INVALID_STATE;
6480 	} else {
6481 		port->sink_cap_done = true;
6482 		goto unlock;
6483 	}
6484 resched:
6485 	mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
6486 unlock:
6487 	mutex_unlock(&port->lock);
6488 }
6489 
6490 static void tcpm_send_discover_work(struct kthread_work *work)
6491 {
6492 	struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
6493 
6494 	mutex_lock(&port->lock);
6495 	/* No need to send DISCOVER_IDENTITY anymore */
6496 	if (!port->send_discover && !port->send_discover_prime)
6497 		goto unlock;
6498 
6499 	if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
6500 		port->send_discover = false;
6501 		port->send_discover_prime = false;
6502 		goto unlock;
6503 	}
6504 
6505 	/* Retry if the port is not idle */
6506 	if ((port->state != SRC_READY && port->state != SNK_READY &&
6507 	     port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
6508 		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
6509 		goto unlock;
6510 	}
6511 
6512 	tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
6513 
6514 unlock:
6515 	mutex_unlock(&port->lock);
6516 }
6517 
6518 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
6519 {
6520 	struct tcpm_port *port = typec_get_drvdata(p);
6521 	int ret;
6522 
6523 	mutex_lock(&port->swap_lock);
6524 	mutex_lock(&port->lock);
6525 
6526 	if (port->typec_caps.data != TYPEC_PORT_DRD) {
6527 		ret = -EINVAL;
6528 		goto port_unlock;
6529 	}
6530 	if (port->state != SRC_READY && port->state != SNK_READY) {
6531 		ret = -EAGAIN;
6532 		goto port_unlock;
6533 	}
6534 
6535 	if (port->data_role == data) {
6536 		ret = 0;
6537 		goto port_unlock;
6538 	}
6539 
6540 	/*
6541 	 * XXX
6542 	 * 6.3.9: If an alternate mode is active, a request to swap
6543 	 * alternate modes shall trigger a port reset.
6544 	 * Reject data role swap request in this case.
6545 	 */
6546 
6547 	if (!port->pd_capable) {
6548 		/*
6549 		 * If the partner is not PD capable, reset the port to
6550 		 * trigger a role change. This can only work if a preferred
6551 		 * role is configured, and if it matches the requested role.
6552 		 */
6553 		if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
6554 		    port->try_role == port->pwr_role) {
6555 			ret = -EINVAL;
6556 			goto port_unlock;
6557 		}
6558 		port->non_pd_role_swap = true;
6559 		tcpm_set_state(port, PORT_RESET, 0);
6560 	} else {
6561 		port->upcoming_state = DR_SWAP_SEND;
6562 		ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
6563 		if (ret == -EAGAIN) {
6564 			port->upcoming_state = INVALID_STATE;
6565 			goto port_unlock;
6566 		}
6567 	}
6568 
6569 	port->swap_status = 0;
6570 	port->swap_pending = true;
6571 	reinit_completion(&port->swap_complete);
6572 	mutex_unlock(&port->lock);
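	/*
	 * port->lock is dropped here so the state machine can run the swap;
	 * it reports the outcome via tcpm_swap_complete(), after which
	 * swap_status holds the result read below.
	 */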
6573 
6574 	if (!wait_for_completion_timeout(&port->swap_complete,
6575 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6576 		ret = -ETIMEDOUT;
6577 	else
6578 		ret = port->swap_status;
6579 
6580 	port->non_pd_role_swap = false;
6581 	goto swap_unlock;
6582 
6583 port_unlock:
6584 	mutex_unlock(&port->lock);
6585 swap_unlock:
6586 	mutex_unlock(&port->swap_lock);
6587 	return ret;
6588 }
6589 
6590 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
6591 {
6592 	struct tcpm_port *port = typec_get_drvdata(p);
6593 	int ret;
6594 
6595 	mutex_lock(&port->swap_lock);
6596 	mutex_lock(&port->lock);
6597 
6598 	if (port->port_type != TYPEC_PORT_DRP) {
6599 		ret = -EINVAL;
6600 		goto port_unlock;
6601 	}
6602 	if (port->state != SRC_READY && port->state != SNK_READY) {
6603 		ret = -EAGAIN;
6604 		goto port_unlock;
6605 	}
6606 
6607 	if (role == port->pwr_role) {
6608 		ret = 0;
6609 		goto port_unlock;
6610 	}
6611 
6612 	port->upcoming_state = PR_SWAP_SEND;
6613 	ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
6614 	if (ret == -EAGAIN) {
6615 		port->upcoming_state = INVALID_STATE;
6616 		goto port_unlock;
6617 	}
6618 
6619 	port->swap_status = 0;
6620 	port->swap_pending = true;
6621 	reinit_completion(&port->swap_complete);
6622 	mutex_unlock(&port->lock);
6623 
6624 	if (!wait_for_completion_timeout(&port->swap_complete,
6625 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6626 		ret = -ETIMEDOUT;
6627 	else
6628 		ret = port->swap_status;
6629 
6630 	goto swap_unlock;
6631 
6632 port_unlock:
6633 	mutex_unlock(&port->lock);
6634 swap_unlock:
6635 	mutex_unlock(&port->swap_lock);
6636 	return ret;
6637 }
6638 
6639 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
6640 {
6641 	struct tcpm_port *port = typec_get_drvdata(p);
6642 	int ret;
6643 
6644 	mutex_lock(&port->swap_lock);
6645 	mutex_lock(&port->lock);
6646 
6647 	if (port->state != SRC_READY && port->state != SNK_READY) {
6648 		ret = -EAGAIN;
6649 		goto port_unlock;
6650 	}
6651 
6652 	if (role == port->vconn_role) {
6653 		ret = 0;
6654 		goto port_unlock;
6655 	}
6656 
6657 	port->upcoming_state = VCONN_SWAP_SEND;
6658 	ret = tcpm_ams_start(port, VCONN_SWAP);
6659 	if (ret == -EAGAIN) {
6660 		port->upcoming_state = INVALID_STATE;
6661 		goto port_unlock;
6662 	}
6663 
6664 	port->swap_status = 0;
6665 	port->swap_pending = true;
6666 	reinit_completion(&port->swap_complete);
6667 	mutex_unlock(&port->lock);
6668 
6669 	if (!wait_for_completion_timeout(&port->swap_complete,
6670 				msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6671 		ret = -ETIMEDOUT;
6672 	else
6673 		ret = port->swap_status;
6674 
6675 	goto swap_unlock;
6676 
6677 port_unlock:
6678 	mutex_unlock(&port->lock);
6679 swap_unlock:
6680 	mutex_unlock(&port->swap_lock);
6681 	return ret;
6682 }
6683 
6684 static int tcpm_try_role(struct typec_port *p, int role)
6685 {
6686 	struct tcpm_port *port = typec_get_drvdata(p);
6687 	struct tcpc_dev	*tcpc = port->tcpc;
6688 	int ret = 0;
6689 
6690 	mutex_lock(&port->lock);
6691 	if (tcpc->try_role)
6692 		ret = tcpc->try_role(tcpc, role);
6693 	if (!ret)
6694 		port->try_role = role;
6695 	port->try_src_count = 0;
6696 	port->try_snk_count = 0;
6697 	mutex_unlock(&port->lock);
6698 
6699 	return ret;
6700 }
6701 
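/*
 * Renegotiate the PPS operating current. The request is rejected when PPS is
 * not active, when it exceeds the advertised maximum current, or when the
 * resulting power would fall below the configured operating power. The value
 * is rounded down to the PPS current step before the new request is sent.
 */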
6702 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
6703 {
6704 	unsigned int target_mw;
6705 	int ret;
6706 
6707 	mutex_lock(&port->swap_lock);
6708 	mutex_lock(&port->lock);
6709 
6710 	if (!port->pps_data.active) {
6711 		ret = -EOPNOTSUPP;
6712 		goto port_unlock;
6713 	}
6714 
6715 	if (port->state != SNK_READY) {
6716 		ret = -EAGAIN;
6717 		goto port_unlock;
6718 	}
6719 
6720 	if (req_op_curr > port->pps_data.max_curr) {
6721 		ret = -EINVAL;
6722 		goto port_unlock;
6723 	}
6724 
6725 	target_mw = (req_op_curr * port->supply_voltage) / 1000;
6726 	if (target_mw < port->operating_snk_mw) {
6727 		ret = -EINVAL;
6728 		goto port_unlock;
6729 	}
6730 
6731 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6732 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6733 	if (ret == -EAGAIN) {
6734 		port->upcoming_state = INVALID_STATE;
6735 		goto port_unlock;
6736 	}
6737 
6738 	/* Round down operating current to align with PPS valid steps */
6739 	req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
6740 
6741 	reinit_completion(&port->pps_complete);
6742 	port->pps_data.req_op_curr = req_op_curr;
6743 	port->pps_status = 0;
6744 	port->pps_pending = true;
6745 	mutex_unlock(&port->lock);
6746 
6747 	if (!wait_for_completion_timeout(&port->pps_complete,
6748 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6749 		ret = -ETIMEDOUT;
6750 	else
6751 		ret = port->pps_status;
6752 
6753 	goto swap_unlock;
6754 
6755 port_unlock:
6756 	mutex_unlock(&port->lock);
6757 swap_unlock:
6758 	mutex_unlock(&port->swap_lock);
6759 
6760 	return ret;
6761 }
6762 
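/*
 * Renegotiate the PPS output voltage; mirrors tcpm_pps_set_op_curr(), with
 * the requested value rounded down to the PPS voltage step.
 */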
6763 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
6764 {
6765 	unsigned int target_mw;
6766 	int ret;
6767 
6768 	mutex_lock(&port->swap_lock);
6769 	mutex_lock(&port->lock);
6770 
6771 	if (!port->pps_data.active) {
6772 		ret = -EOPNOTSUPP;
6773 		goto port_unlock;
6774 	}
6775 
6776 	if (port->state != SNK_READY) {
6777 		ret = -EAGAIN;
6778 		goto port_unlock;
6779 	}
6780 
6781 	target_mw = (port->current_limit * req_out_volt) / 1000;
6782 	if (target_mw < port->operating_snk_mw) {
6783 		ret = -EINVAL;
6784 		goto port_unlock;
6785 	}
6786 
6787 	port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6788 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6789 	if (ret == -EAGAIN) {
6790 		port->upcoming_state = INVALID_STATE;
6791 		goto port_unlock;
6792 	}
6793 
6794 	/* Round down output voltage to align with PPS valid steps */
6795 	req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
6796 
6797 	reinit_completion(&port->pps_complete);
6798 	port->pps_data.req_out_volt = req_out_volt;
6799 	port->pps_status = 0;
6800 	port->pps_pending = true;
6801 	mutex_unlock(&port->lock);
6802 
6803 	if (!wait_for_completion_timeout(&port->pps_complete,
6804 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6805 		ret = -ETIMEDOUT;
6806 	else
6807 		ret = port->pps_status;
6808 
6809 	goto swap_unlock;
6810 
6811 port_unlock:
6812 	mutex_unlock(&port->lock);
6813 swap_unlock:
6814 	mutex_unlock(&port->swap_lock);
6815 
6816 	return ret;
6817 }
6818 
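/*
 * Enter or leave a PPS contract. Activation re-requests the present supply
 * voltage and current limit as the initial programmable values; deactivation
 * renegotiates a standard fixed-PDO contract.
 */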
6819 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
6820 {
6821 	int ret = 0;
6822 
6823 	mutex_lock(&port->swap_lock);
6824 	mutex_lock(&port->lock);
6825 
6826 	if (!port->pps_data.supported) {
6827 		ret = -EOPNOTSUPP;
6828 		goto port_unlock;
6829 	}
6830 
6831 	/* Trying to deactivate PPS when it's already deactivated, so just bail */
6832 	if (!port->pps_data.active && !activate)
6833 		goto port_unlock;
6834 
6835 	if (port->state != SNK_READY) {
6836 		ret = -EAGAIN;
6837 		goto port_unlock;
6838 	}
6839 
6840 	if (activate)
6841 		port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6842 	else
6843 		port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6844 	ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6845 	if (ret == -EAGAIN) {
6846 		port->upcoming_state = INVALID_STATE;
6847 		goto port_unlock;
6848 	}
6849 
6850 	reinit_completion(&port->pps_complete);
6851 	port->pps_status = 0;
6852 	port->pps_pending = true;
6853 
6854 	/* Trigger PPS request or move back to standard PDO contract */
6855 	if (activate) {
6856 		port->pps_data.req_out_volt = port->supply_voltage;
6857 		port->pps_data.req_op_curr = port->current_limit;
6858 	}
6859 	mutex_unlock(&port->lock);
6860 
6861 	if (!wait_for_completion_timeout(&port->pps_complete,
6862 				msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6863 		ret = -ETIMEDOUT;
6864 	else
6865 		ret = port->pps_status;
6866 
6867 	goto swap_unlock;
6868 
6869 port_unlock:
6870 	mutex_unlock(&port->lock);
6871 swap_unlock:
6872 	mutex_unlock(&port->swap_lock);
6873 
6874 	return ret;
6875 }
6876 
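/*
 * Bring the port and the underlying TCPC to a known state: reset the port,
 * sample VBUS/vSafe0V and the CC lines, enter the default state for the
 * configured port type and finally force a PORT_RESET so any attached
 * partner sees a clean disconnect. Called with port->lock held.
 */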
6877 static void tcpm_init(struct tcpm_port *port)
6878 {
6879 	enum typec_cc_status cc1, cc2;
6880 
6881 	port->tcpc->init(port->tcpc);
6882 
6883 	tcpm_reset_port(port);
6884 
6885 	/*
6886 	 * XXX
6887 	 * Should possibly wait for VBUS to settle if it was enabled locally
6888 	 * since tcpm_reset_port() will disable VBUS.
6889 	 */
6890 	port->vbus_present = port->tcpc->get_vbus(port->tcpc);
6891 	if (port->vbus_present)
6892 		port->vbus_never_low = true;
6893 
6894 	/*
6895 	 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
6896 	 * So implicitly vbus_vsafe0v = false.
6897 	 *
6898 	 * 2. When vbus_present is false and TCPC does NOT support querying
6899 	 * vsafe0v status, then, it's best to assume vbus is at VSAFE0V i.e.
6900 	 * vbus_vsafe0v is true.
6901 	 *
6902 	 * 3. When vbus_present is false and TCPC does support querying vsafe0v,
6903 	 * then, query tcpc for vsafe0v status.
6904 	 */
6905 	if (port->vbus_present)
6906 		port->vbus_vsafe0v = false;
6907 	else if (!port->tcpc->is_vbus_vsafe0v)
6908 		port->vbus_vsafe0v = true;
6909 	else
6910 		port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
6911 
6912 	tcpm_set_state(port, tcpm_default_state(port), 0);
6913 
6914 	if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6915 		_tcpm_cc_change(port, cc1, cc2);
6916 
6917 	/*
6918 	 * Some adapters need a clean slate at startup, and won't recover
6919 	 * otherwise, so do not try to be fancy: force a clean disconnect.
6920 	 */
6921 	tcpm_set_state(port, PORT_RESET, 0);
6922 }
6923 
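/*
 * typec_operations .port_type_set callback: switch between DRP, source-only
 * and sink-only operation. The port is reset unless the present connection
 * already matches the requested type.
 */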
6924 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6925 {
6926 	struct tcpm_port *port = typec_get_drvdata(p);
6927 
6928 	mutex_lock(&port->lock);
6929 	if (type == port->port_type)
6930 		goto port_unlock;
6931 
6932 	port->port_type = type;
6933 
6934 	if (!port->connected) {
6935 		tcpm_set_state(port, PORT_RESET, 0);
6936 	} else if (type == TYPEC_PORT_SNK) {
6937 		if (!(port->pwr_role == TYPEC_SINK &&
6938 		      port->data_role == TYPEC_DEVICE))
6939 			tcpm_set_state(port, PORT_RESET, 0);
6940 	} else if (type == TYPEC_PORT_SRC) {
6941 		if (!(port->pwr_role == TYPEC_SOURCE &&
6942 		      port->data_role == TYPEC_HOST))
6943 			tcpm_set_state(port, PORT_RESET, 0);
6944 	}
6945 
6946 port_unlock:
6947 	mutex_unlock(&port->lock);
6948 	return 0;
6949 }
6950 
6951 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6952 {
6953 	int i;
6954 
6955 	for (i = 0; port->pd_list[i]; i++) {
6956 		if (port->pd_list[i]->pd == pd)
6957 			return port->pd_list[i];
6958 	}
6959 
6960 	return ERR_PTR(-ENODATA);
6961 }
6962 
6963 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
6964 {
6965 	struct tcpm_port *port = typec_get_drvdata(p);
6966 
6967 	return port->pds;
6968 }
6969 
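/*
 * typec_operations .pd_set callback: switch to another registered set of
 * source/sink capabilities and, if a contract is already in place, start a
 * new power negotiation so the partner sees the updated capabilities.
 */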
6970 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6971 {
6972 	struct tcpm_port *port = typec_get_drvdata(p);
6973 	struct pd_data *data;
6974 	int i, ret = 0;
6975 
6976 	mutex_lock(&port->lock);
6977 
6978 	if (port->selected_pd == pd)
6979 		goto unlock;
6980 
6981 	data = tcpm_find_pd_data(port, pd);
6982 	if (IS_ERR(data)) {
6983 		ret = PTR_ERR(data);
6984 		goto unlock;
6985 	}
6986 
6987 	if (data->sink_desc.pdo[0]) {
6988 		for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6989 			port->snk_pdo[i] = data->sink_desc.pdo[i];
6990 		port->nr_snk_pdo = i;
6991 		port->operating_snk_mw = data->operating_snk_mw;
6992 	}
6993 
6994 	if (data->source_desc.pdo[0]) {
6995 		for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
6996 			port->src_pdo[i] = data->source_desc.pdo[i];
6997 		port->nr_src_pdo = i;
6998 	}
6999 
7000 	switch (port->state) {
7001 	case SRC_UNATTACHED:
7002 	case SRC_ATTACH_WAIT:
7003 	case SRC_TRYWAIT:
7004 		tcpm_set_cc(port, tcpm_rp_cc(port));
7005 		break;
7006 	case SRC_SEND_CAPABILITIES:
7007 	case SRC_SEND_CAPABILITIES_TIMEOUT:
7008 	case SRC_NEGOTIATE_CAPABILITIES:
7009 	case SRC_READY:
7010 	case SRC_WAIT_NEW_CAPABILITIES:
7011 		port->caps_count = 0;
7012 		port->upcoming_state = SRC_SEND_CAPABILITIES;
7013 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
7014 		if (ret == -EAGAIN) {
7015 			port->upcoming_state = INVALID_STATE;
7016 			goto unlock;
7017 		}
7018 		break;
7019 	case SNK_NEGOTIATE_CAPABILITIES:
7020 	case SNK_NEGOTIATE_PPS_CAPABILITIES:
7021 	case SNK_READY:
7022 	case SNK_TRANSITION_SINK:
7023 	case SNK_TRANSITION_SINK_VBUS:
7024 		if (port->pps_data.active)
7025 			port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
7026 		else if (port->pd_capable)
7027 			port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
7028 		else
7029 			break;
7030 
7031 		port->update_sink_caps = true;
7032 
7033 		ret = tcpm_ams_start(port, POWER_NEGOTIATION);
7034 		if (ret == -EAGAIN) {
7035 			port->upcoming_state = INVALID_STATE;
7036 			goto unlock;
7037 		}
7038 		break;
7039 	default:
7040 		break;
7041 	}
7042 
7043 	port->port_source_caps = data->source_cap;
7044 	port->port_sink_caps = data->sink_cap;
7045 	typec_port_set_usb_power_delivery(p, NULL);
7046 	port->selected_pd = pd;
7047 	typec_port_set_usb_power_delivery(p, port->selected_pd);
7048 unlock:
7049 	mutex_unlock(&port->lock);
7050 	return ret;
7051 }
7052 
7053 static const struct typec_operations tcpm_ops = {
7054 	.try_role = tcpm_try_role,
7055 	.dr_set = tcpm_dr_set,
7056 	.pr_set = tcpm_pr_set,
7057 	.vconn_set = tcpm_vconn_set,
7058 	.port_type_set = tcpm_port_type_set,
7059 	.pd_get = tcpm_pd_get,
7060 	.pd_set = tcpm_pd_set
7061 };
7062 
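/*
 * tcpm_tcpc_reset - reinitialize a port after the low-level TCPC lost state
 * @port: tcpm port
 *
 * Intended for low-level drivers whose controller was reset (for example
 * after a firmware or chip-level reset) and needs TCPM to rerun its init
 * sequence.
 */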
7063 void tcpm_tcpc_reset(struct tcpm_port *port)
7064 {
7065 	mutex_lock(&port->lock);
7066 	/* XXX: Maintain PD connection if possible? */
7067 	tcpm_init(port);
7068 	mutex_unlock(&port->lock);
7069 }
7070 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
7071 
7072 static void tcpm_port_unregister_pd(struct tcpm_port *port)
7073 {
7074 	int i;
7075 
7076 	port->port_sink_caps = NULL;
7077 	port->port_source_caps = NULL;
7078 	for (i = 0; i < port->pd_count; i++) {
7079 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
7080 		usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
7081 		devm_kfree(port->dev, port->pd_list[i]);
7082 		port->pd_list[i] = NULL;
7083 		usb_power_delivery_unregister(port->pds[i]);
7084 		port->pds[i] = NULL;
7085 	}
7086 }
7087 
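/*
 * Register one usb_power_delivery object (plus its source and sink
 * capabilities) per entry in port->pd_list with the typec class. The first
 * entry becomes the initially selected PD.
 */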
7088 static int tcpm_port_register_pd(struct tcpm_port *port)
7089 {
7090 	u16 pd_revision = port->typec_caps.pd_revision;
7091 	u16 pd_version = port->pd_rev.ver_major << 8 | port->pd_rev.ver_minor;
7092 	struct usb_power_delivery_desc desc = { pd_revision, pd_version };
7093 	struct usb_power_delivery_capabilities *cap;
7094 	int ret, i;
7095 
7096 	if (!port->nr_src_pdo && !port->nr_snk_pdo)
7097 		return 0;
7098 
7099 	for (i = 0; i < port->pd_count; i++) {
7100 		port->pds[i] = usb_power_delivery_register(port->dev, &desc);
7101 		if (IS_ERR(port->pds[i])) {
7102 			ret = PTR_ERR(port->pds[i]);
7103 			goto err_unregister;
7104 		}
7105 		port->pd_list[i]->pd = port->pds[i];
7106 
7107 		if (port->pd_list[i]->source_desc.pdo[0]) {
7108 			cap = usb_power_delivery_register_capabilities(port->pds[i],
7109 								&port->pd_list[i]->source_desc);
7110 			if (IS_ERR(cap)) {
7111 				ret = PTR_ERR(cap);
7112 				goto err_unregister;
7113 			}
7114 			port->pd_list[i]->source_cap = cap;
7115 		}
7116 
7117 		if (port->pd_list[i]->sink_desc.pdo[0]) {
7118 			cap = usb_power_delivery_register_capabilities(port->pds[i],
7119 								&port->pd_list[i]->sink_desc);
7120 			if (IS_ERR(cap)) {
7121 				ret = PTR_ERR(cap);
7122 				goto err_unregister;
7123 			}
7124 			port->pd_list[i]->sink_cap = cap;
7125 		}
7126 	}
7127 
7128 	port->port_source_caps = port->pd_list[0]->source_cap;
7129 	port->port_sink_caps = port->pd_list[0]->sink_cap;
7130 	port->selected_pd = port->pds[0];
7131 	return 0;
7132 
7133 err_unregister:
7134 	tcpm_port_unregister_pd(port);
7135 
7136 	return ret;
7137 }
7138 
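/*
 * Read optional firmware-node overrides for the protocol timers; where a
 * property is absent, the PD specification default assigned below is used.
 */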
7139 static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fwnode)
7140 {
7141 	int ret;
7142 	u32 val;
7143 
7144 	ret = fwnode_property_read_u32(fwnode, "sink-wait-cap-time-ms", &val);
7145 	if (!ret)
7146 		port->timings.sink_wait_cap_time = val;
7147 	else
7148 		port->timings.sink_wait_cap_time = PD_T_SINK_WAIT_CAP;
7149 
7150 	ret = fwnode_property_read_u32(fwnode, "ps-source-off-time-ms", &val);
7151 	if (!ret)
7152 		port->timings.ps_src_off_time = val;
7153 	else
7154 		port->timings.ps_src_off_time = PD_T_PS_SOURCE_OFF;
7155 
7156 	ret = fwnode_property_read_u32(fwnode, "cc-debounce-time-ms", &val);
7157 	if (!ret)
7158 		port->timings.cc_debounce_time = val;
7159 	else
7160 		port->timings.cc_debounce_time = PD_T_CC_DEBOUNCE;
7161 
7162 	ret = fwnode_property_read_u32(fwnode, "sink-bc12-completion-time-ms", &val);
7163 	if (!ret)
7164 		port->timings.snk_bc12_cmpletion_time = val;
7165 }
7166 
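/*
 * Parse the port configuration from the firmware node: port type, accessory
 * modes, Rp level for non-PD ports, FRS current, and one or more sets of
 * source/sink PDOs, taken either directly from the node or from children of
 * an optional "capabilities" node.
 */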
7167 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
7168 {
7169 	struct fwnode_handle *capabilities, *child, *caps = NULL;
7170 	unsigned int nr_src_pdo, nr_snk_pdo;
7171 	const char *opmode_str;
7172 	u32 *src_pdo, *snk_pdo;
7173 	u32 uw, frs_current;
7174 	int ret = 0, i;
7175 	int mode;
7176 
7177 	if (!fwnode)
7178 		return -EINVAL;
7179 
7180 	/*
7181 	 * This fwnode has a "compatible" property, but is never populated as a
7182 	 * struct device. Instead we simply parse it to read the properties.
7183 	 * This breaks fw_devlink=on. To maintain backward compatibility
7184 	 * with existing DT files, we work around this by deleting any
7185 	 * fwnode_links to/from this fwnode.
7186 	 */
7187 	fw_devlink_purge_absent_suppliers(fwnode);
7188 
7189 	ret = typec_get_fw_cap(&port->typec_caps, fwnode);
7190 	if (ret < 0)
7191 		return ret;
7192 
7193 	mode = 0;
7194 
7195 	if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
7196 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
7197 
7198 	if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
7199 		port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
7200 
7201 	port->port_type = port->typec_caps.type;
7202 	port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
7203 	port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
7204 	port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
7205 
7206 	if (!port->pd_supported) {
7207 		ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
7208 		if (ret)
7209 			return ret;
7210 		ret = typec_find_pwr_opmode(opmode_str);
7211 		if (ret < 0)
7212 			return ret;
7213 		port->src_rp = tcpm_pwr_opmode_to_rp(ret);
7214 		return 0;
7215 	}
7216 
7217 	/* The following code is applicable to pd-capable ports, i.e. pd_supported is true. */
7218 
7219 	/* FRS can only be supported by DRP ports */
7220 	if (port->port_type == TYPEC_PORT_DRP) {
7221 		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
7222 					       &frs_current);
7223 		if (!ret && frs_current <= FRS_5V_3A)
7224 			port->new_source_frs_current = frs_current;
7225 
7226 		if (ret)
7227 			ret = 0;
7228 	}
7229 
7230 	/* For backward compatibility, the "capabilities" node is optional. */
7231 	capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
7232 	if (!capabilities) {
7233 		port->pd_count = 1;
7234 	} else {
7235 		fwnode_for_each_child_node(capabilities, child)
7236 			port->pd_count++;
7237 
7238 		if (!port->pd_count) {
7239 			ret = -ENODATA;
7240 			goto put_capabilities;
7241 		}
7242 	}
7243 
7244 	port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
7245 				 GFP_KERNEL);
7246 	if (!port->pds) {
7247 		ret = -ENOMEM;
7248 		goto put_capabilities;
7249 	}
7250 
7251 	port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
7252 				     GFP_KERNEL);
7253 	if (!port->pd_list) {
7254 		ret = -ENOMEM;
7255 		goto put_capabilities;
7256 	}
7257 
7258 	for (i = 0; i < port->pd_count; i++) {
7259 		port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
7260 		if (!port->pd_list[i]) {
7261 			ret = -ENOMEM;
7262 			goto put_capabilities;
7263 		}
7264 
7265 		src_pdo = port->pd_list[i]->source_desc.pdo;
7266 		port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
7267 		snk_pdo = port->pd_list[i]->sink_desc.pdo;
7268 		port->pd_list[i]->sink_desc.role = TYPEC_SINK;
7269 
7270 		/* If "capabilities" is NULL, fall back to single pd cap population. */
7271 		if (!capabilities)
7272 			caps = fwnode;
7273 		else
7274 			caps = fwnode_get_next_child_node(capabilities, caps);
7275 
7276 		if (port->port_type != TYPEC_PORT_SNK) {
7277 			ret = fwnode_property_count_u32(caps, "source-pdos");
7278 			if (ret == 0) {
7279 				ret = -EINVAL;
7280 				goto put_caps;
7281 			}
7282 			if (ret < 0)
7283 				goto put_caps;
7284 
7285 			nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
7286 			ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
7287 							     nr_src_pdo);
7288 			if (ret)
7289 				goto put_caps;
7290 
7291 			ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
7292 			if (ret)
7293 				goto put_caps;
7294 
7295 			if (i == 0) {
7296 				port->nr_src_pdo = nr_src_pdo;
7297 				memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7298 					       port->pd_list[0]->source_desc.pdo,
7299 					       sizeof(u32) * nr_src_pdo,
7300 					       0);
7301 			}
7302 		}
7303 
7304 		if (port->port_type != TYPEC_PORT_SRC) {
7305 			ret = fwnode_property_count_u32(caps, "sink-pdos");
7306 			if (ret == 0) {
7307 				ret = -EINVAL;
7308 				goto put_caps;
7309 			}
7310 
7311 			if (ret < 0)
7312 				goto put_caps;
7313 
7314 			nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
7315 			ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
7316 							     nr_snk_pdo);
7317 			if (ret)
7318 				goto put_caps;
7319 
7320 			ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
7321 			if (ret)
7322 				goto put_caps;
7323 
7324 			if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
7325 				ret = -EINVAL;
7326 				goto put_caps;
7327 			}
7328 
7329 			port->pd_list[i]->operating_snk_mw = uw / 1000;
7330 
7331 			if (i == 0) {
7332 				port->nr_snk_pdo = nr_snk_pdo;
7333 				memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7334 					       port->pd_list[0]->sink_desc.pdo,
7335 					       sizeof(u32) * nr_snk_pdo,
7336 					       0);
7337 				port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
7338 			}
7339 		}
7340 	}
7341 
7342 put_caps:
7343 	if (caps != fwnode)
7344 		fwnode_handle_put(caps);
7345 put_capabilities:
7346 	fwnode_handle_put(capabilities);
7347 	return ret;
7348 }
7349 
7350 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
7351 {
7352 	int ret;
7353 
7354 	/* sink-vdos is optional */
7355 	ret = fwnode_property_count_u32(fwnode, "sink-vdos");
7356 	if (ret < 0)
7357 		return 0;
7358 
7359 	port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
7360 	if (port->nr_snk_vdo) {
7361 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
7362 						     port->snk_vdo,
7363 						     port->nr_snk_vdo);
7364 		if (ret < 0)
7365 			return ret;
7366 	}
7367 
7368 	/* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
7369 	if (port->nr_snk_vdo) {
7370 		ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
7371 		if (ret < 0)
7372 			return ret;
7373 		else if (ret == 0)
7374 			return -ENODATA;
7375 
7376 		port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
7377 		ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
7378 						     port->snk_vdo_v1,
7379 						     port->nr_snk_vdo_v1);
7380 		if (ret < 0)
7381 			return ret;
7382 	}
7383 
7384 	return 0;
7385 }
7386 
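/*
 * Optional "pd-revision" property: a 4-byte array holding the PD revision
 * (major, minor) followed by the version (major, minor). Absence is not an
 * error; tcpm_register_port() then falls back to its default PD revision.
 */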
7387 static void tcpm_fw_get_pd_revision(struct tcpm_port *port, struct fwnode_handle *fwnode)
7388 {
7389 	int ret;
7390 	u8 val[4];
7391 
7392 	ret = fwnode_property_count_u8(fwnode, "pd-revision");
7393 	if (!ret || ret != 4) {
7394 		tcpm_log(port, "Unable to find pd-revision property or incorrect array size");
7395 		return;
7396 	}
7397 
7398 	ret = fwnode_property_read_u8_array(fwnode, "pd-revision", val, 4);
7399 	if (ret) {
7400 		tcpm_log(port, "Failed to parse pd-revision, ret:(%d)", ret);
7401 		return;
7402 	}
7403 
7404 	port->pd_rev.rev_major = val[0];
7405 	port->pd_rev.rev_minor = val[1];
7406 	port->pd_rev.ver_major = val[2];
7407 	port->pd_rev.ver_minor = val[3];
7408 }
7409 
7410 /* Power Supply access to expose source power information */
7411 enum tcpm_psy_online_states {
7412 	TCPM_PSY_OFFLINE = 0,
7413 	TCPM_PSY_FIXED_ONLINE,
7414 	TCPM_PSY_PROG_ONLINE,
7415 };
7416 
7417 static enum power_supply_property tcpm_psy_props[] = {
7418 	POWER_SUPPLY_PROP_USB_TYPE,
7419 	POWER_SUPPLY_PROP_ONLINE,
7420 	POWER_SUPPLY_PROP_VOLTAGE_MIN,
7421 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
7422 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
7423 	POWER_SUPPLY_PROP_CURRENT_MAX,
7424 	POWER_SUPPLY_PROP_CURRENT_NOW,
7425 };
7426 
7427 static int tcpm_psy_get_online(struct tcpm_port *port,
7428 			       union power_supply_propval *val)
7429 {
7430 	if (port->vbus_charge) {
7431 		if (port->pps_data.active)
7432 			val->intval = TCPM_PSY_PROG_ONLINE;
7433 		else
7434 			val->intval = TCPM_PSY_FIXED_ONLINE;
7435 	} else {
7436 		val->intval = TCPM_PSY_OFFLINE;
7437 	}
7438 
7439 	return 0;
7440 }
7441 
7442 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
7443 				    union power_supply_propval *val)
7444 {
7445 	if (port->pps_data.active)
7446 		val->intval = port->pps_data.min_volt * 1000;
7447 	else
7448 		val->intval = port->supply_voltage * 1000;
7449 
7450 	return 0;
7451 }
7452 
7453 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
7454 				    union power_supply_propval *val)
7455 {
7456 	if (port->pps_data.active)
7457 		val->intval = port->pps_data.max_volt * 1000;
7458 	else
7459 		val->intval = port->supply_voltage * 1000;
7460 
7461 	return 0;
7462 }
7463 
7464 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
7465 				    union power_supply_propval *val)
7466 {
7467 	val->intval = port->supply_voltage * 1000;
7468 
7469 	return 0;
7470 }
7471 
7472 static int tcpm_psy_get_current_max(struct tcpm_port *port,
7473 				    union power_supply_propval *val)
7474 {
7475 	if (port->pps_data.active)
7476 		val->intval = port->pps_data.max_curr * 1000;
7477 	else
7478 		val->intval = port->current_limit * 1000;
7479 
7480 	return 0;
7481 }
7482 
7483 static int tcpm_psy_get_current_now(struct tcpm_port *port,
7484 				    union power_supply_propval *val)
7485 {
7486 	val->intval = port->current_limit * 1000;
7487 
7488 	return 0;
7489 }
7490 
7491 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
7492 					  union power_supply_propval *val)
7493 {
7494 	unsigned int src_mv, src_ma, max_src_uw = 0;
7495 	unsigned int i, tmp;
7496 
7497 	for (i = 0; i < port->nr_source_caps; i++) {
7498 		u32 pdo = port->source_caps[i];
7499 
7500 		if (pdo_type(pdo) == PDO_TYPE_FIXED) {
7501 			src_mv = pdo_fixed_voltage(pdo);
7502 			src_ma = pdo_max_current(pdo);
7503 			tmp = src_mv * src_ma;
7504 			max_src_uw = max(tmp, max_src_uw);
7505 		}
7506 	}
7507 
7508 	val->intval = max_src_uw;
7509 	return 0;
7510 }
7511 
7512 static int tcpm_psy_get_prop(struct power_supply *psy,
7513 			     enum power_supply_property psp,
7514 			     union power_supply_propval *val)
7515 {
7516 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7517 	int ret = 0;
7518 
7519 	switch (psp) {
7520 	case POWER_SUPPLY_PROP_USB_TYPE:
7521 		val->intval = port->usb_type;
7522 		break;
7523 	case POWER_SUPPLY_PROP_ONLINE:
7524 		ret = tcpm_psy_get_online(port, val);
7525 		break;
7526 	case POWER_SUPPLY_PROP_VOLTAGE_MIN:
7527 		ret = tcpm_psy_get_voltage_min(port, val);
7528 		break;
7529 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
7530 		ret = tcpm_psy_get_voltage_max(port, val);
7531 		break;
7532 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7533 		ret = tcpm_psy_get_voltage_now(port, val);
7534 		break;
7535 	case POWER_SUPPLY_PROP_CURRENT_MAX:
7536 		ret = tcpm_psy_get_current_max(port, val);
7537 		break;
7538 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7539 		ret = tcpm_psy_get_current_now(port, val);
7540 		break;
7541 	case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
7542 		tcpm_psy_get_input_power_limit(port, val);
7543 		break;
7544 	default:
7545 		ret = -EINVAL;
7546 		break;
7547 	}
7548 
7549 	return ret;
7550 }
7551 
7552 static int tcpm_psy_set_online(struct tcpm_port *port,
7553 			       const union power_supply_propval *val)
7554 {
7555 	int ret;
7556 
7557 	switch (val->intval) {
7558 	case TCPM_PSY_FIXED_ONLINE:
7559 		ret = tcpm_pps_activate(port, false);
7560 		break;
7561 	case TCPM_PSY_PROG_ONLINE:
7562 		ret = tcpm_pps_activate(port, true);
7563 		break;
7564 	default:
7565 		ret = -EINVAL;
7566 		break;
7567 	}
7568 
7569 	return ret;
7570 }
7571 
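/*
 * Writable properties map onto PD operations: ONLINE toggles the PPS
 * contract, while VOLTAGE_NOW and CURRENT_NOW (given in uV/uA) renegotiate
 * the programmable output in mV/mA units.
 */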
7572 static int tcpm_psy_set_prop(struct power_supply *psy,
7573 			     enum power_supply_property psp,
7574 			     const union power_supply_propval *val)
7575 {
7576 	struct tcpm_port *port = power_supply_get_drvdata(psy);
7577 	int ret;
7578 
7579 	/*
7580 	 * All the properties below are related to USB PD. The check needs to be
7581 	 * property specific when a non-pd related property is added.
7582 	 */
7583 	if (!port->pd_supported)
7584 		return -EOPNOTSUPP;
7585 
7586 	switch (psp) {
7587 	case POWER_SUPPLY_PROP_ONLINE:
7588 		ret = tcpm_psy_set_online(port, val);
7589 		break;
7590 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7591 		ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
7592 		break;
7593 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7594 		if (val->intval > port->pps_data.max_curr * 1000)
7595 			ret = -EINVAL;
7596 		else
7597 			ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
7598 		break;
7599 	default:
7600 		ret = -EINVAL;
7601 		break;
7602 	}
7603 	power_supply_changed(port->psy);
7604 	return ret;
7605 }
7606 
7607 static int tcpm_psy_prop_writeable(struct power_supply *psy,
7608 				   enum power_supply_property psp)
7609 {
7610 	switch (psp) {
7611 	case POWER_SUPPLY_PROP_ONLINE:
7612 	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7613 	case POWER_SUPPLY_PROP_CURRENT_NOW:
7614 		return 1;
7615 	default:
7616 		return 0;
7617 	}
7618 }
7619 
7620 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
7621 
7622 static int devm_tcpm_psy_register(struct tcpm_port *port)
7623 {
7624 	struct power_supply_config psy_cfg = {};
7625 	const char *port_dev_name = dev_name(port->dev);
7626 	size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
7627 				     strlen(port_dev_name) + 1;
7628 	char *psy_name;
7629 
7630 	psy_cfg.drv_data = port;
7631 	psy_cfg.fwnode = dev_fwnode(port->dev);
7632 	psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
7633 	if (!psy_name)
7634 		return -ENOMEM;
7635 
7636 	snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
7637 		 port_dev_name);
7638 	port->psy_desc.name = psy_name;
7639 	port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
7640 	port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C)  |
7641 				   BIT(POWER_SUPPLY_USB_TYPE_PD) |
7642 				   BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
7643 	port->psy_desc.properties = tcpm_psy_props;
7644 	port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
7645 	port->psy_desc.get_property = tcpm_psy_get_prop;
7646 	port->psy_desc.set_property = tcpm_psy_set_prop;
7647 	port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
7648 
7649 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
7650 
7651 	port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
7652 					       &psy_cfg);
7653 
7654 	return PTR_ERR_OR_ZERO(port->psy);
7655 }
7656 
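/*
 * The hrtimer handlers below only queue the corresponding kthread work,
 * since the actual state machines must run in process context. The
 * port->registered check avoids queueing work while the port is being
 * unregistered.
 */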
7657 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
7658 {
7659 	struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
7660 
7661 	if (port->registered)
7662 		kthread_queue_work(port->wq, &port->state_machine);
7663 	return HRTIMER_NORESTART;
7664 }
7665 
7666 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
7667 {
7668 	struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
7669 
7670 	if (port->registered)
7671 		kthread_queue_work(port->wq, &port->vdm_state_machine);
7672 	return HRTIMER_NORESTART;
7673 }
7674 
7675 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
7676 {
7677 	struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
7678 
7679 	if (port->registered)
7680 		kthread_queue_work(port->wq, &port->enable_frs);
7681 	return HRTIMER_NORESTART;
7682 }
7683 
7684 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
7685 {
7686 	struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
7687 
7688 	if (port->registered)
7689 		kthread_queue_work(port->wq, &port->send_discover_work);
7690 	return HRTIMER_NORESTART;
7691 }
7692 
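/*
 * tcpm_register_port - register a Type-C port with the port manager
 * @dev: parent device of the low-level TCPC driver
 * @tcpc: low-level operations; get_vbus, set_cc, get_cc, set_polarity,
 *	  set_vconn, set_vbus, set_pd_rx, set_roles and pd_transmit are
 *	  mandatory
 *
 * Returns the new port on success or an ERR_PTR() on failure.
 *
 * Rough sketch of the expected usage from a low-level TCPC driver (the
 * foo_* names are purely illustrative):
 *
 *	static int foo_probe(struct i2c_client *client)
 *	{
 *		struct foo_tcpc *chip;
 *
 *		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		chip->tcpc.fwnode = dev_fwnode(&client->dev);
 *		chip->tcpc.get_vbus = foo_get_vbus;
 *		chip->tcpc.set_cc = foo_set_cc;
 *		chip->tcpc.get_cc = foo_get_cc;
 *		chip->tcpc.set_polarity = foo_set_polarity;
 *		chip->tcpc.set_vconn = foo_set_vconn;
 *		chip->tcpc.set_vbus = foo_set_vbus;
 *		chip->tcpc.set_pd_rx = foo_set_pd_rx;
 *		chip->tcpc.set_roles = foo_set_roles;
 *		chip->tcpc.pd_transmit = foo_pd_transmit;
 *
 *		chip->port = tcpm_register_port(&client->dev, &chip->tcpc);
 *		return PTR_ERR_OR_ZERO(chip->port);
 *	}
 */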
7693 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
7694 {
7695 	struct tcpm_port *port;
7696 	int err;
7697 
7698 	if (!dev || !tcpc ||
7699 	    !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
7700 	    !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
7701 	    !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
7702 		return ERR_PTR(-EINVAL);
7703 
7704 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
7705 	if (!port)
7706 		return ERR_PTR(-ENOMEM);
7707 
7708 	port->dev = dev;
7709 	port->tcpc = tcpc;
7710 
7711 	mutex_init(&port->lock);
7712 	mutex_init(&port->swap_lock);
7713 
7714 	port->wq = kthread_run_worker(0, dev_name(dev));
7715 	if (IS_ERR(port->wq))
7716 		return ERR_CAST(port->wq);
7717 	sched_set_fifo(port->wq->task);
7718 
7719 	kthread_init_work(&port->state_machine, tcpm_state_machine_work);
7720 	kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
7721 	kthread_init_work(&port->event_work, tcpm_pd_event_handler);
7722 	kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
7723 	kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
7724 	hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7725 	port->state_machine_timer.function = state_machine_timer_handler;
7726 	hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7727 	port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
7728 	hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7729 	port->enable_frs_timer.function = enable_frs_timer_handler;
7730 	hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7731 	port->send_discover_timer.function = send_discover_timer_handler;
7732 
7733 	spin_lock_init(&port->pd_event_lock);
7734 
7735 	init_completion(&port->tx_complete);
7736 	init_completion(&port->swap_complete);
7737 	init_completion(&port->pps_complete);
7738 	tcpm_debugfs_init(port);
7739 
7740 	err = tcpm_fw_get_caps(port, tcpc->fwnode);
7741 	if (err < 0)
7742 		goto out_destroy_wq;
7743 	err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
7744 	if (err < 0)
7745 		goto out_destroy_wq;
7746 
7747 	tcpm_fw_get_timings(port, tcpc->fwnode);
7748 	tcpm_fw_get_pd_revision(port, tcpc->fwnode);
7749 
7750 	port->try_role = port->typec_caps.prefer_role;
7751 
7752 	port->typec_caps.revision = 0x0120;	/* Type-C spec release 1.2 */
7753 
7754 	if (port->pd_rev.rev_major)
7755 		port->typec_caps.pd_revision = port->pd_rev.rev_major << 8 |
7756 					       port->pd_rev.rev_minor;
7757 	else
7758 		port->typec_caps.pd_revision = 0x0300;	/* USB-PD spec release 3.0 */
7759 
7760 	port->typec_caps.svdm_version = SVDM_VER_2_0;
7761 	port->typec_caps.driver_data = port;
7762 	port->typec_caps.ops = &tcpm_ops;
7763 	port->typec_caps.orientation_aware = 1;
7764 
7765 	port->partner_desc.identity = &port->partner_ident;
7766 
7767 	port->role_sw = usb_role_switch_get(port->dev);
7768 	if (!port->role_sw)
7769 		port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
7770 	if (IS_ERR(port->role_sw)) {
7771 		err = PTR_ERR(port->role_sw);
7772 		goto out_destroy_wq;
7773 	}
7774 
7775 	err = devm_tcpm_psy_register(port);
7776 	if (err)
7777 		goto out_role_sw_put;
7778 	power_supply_changed(port->psy);
7779 
7780 	err = tcpm_port_register_pd(port);
7781 	if (err)
7782 		goto out_role_sw_put;
7783 
7784 	if (port->pds)
7785 		port->typec_caps.pd = port->pds[0];
7786 
7787 	port->typec_port = typec_register_port(port->dev, &port->typec_caps);
7788 	if (IS_ERR(port->typec_port)) {
7789 		err = PTR_ERR(port->typec_port);
7790 		goto out_unregister_pd;
7791 	}
7792 
7793 	typec_port_register_altmodes(port->typec_port,
7794 				     &tcpm_altmode_ops, port,
7795 				     port->port_altmode, ALTMODE_DISCOVERY_MAX);
7796 	typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
7797 				      &tcpm_cable_ops);
7798 	port->registered = true;
7799 
7800 	mutex_lock(&port->lock);
7801 	tcpm_init(port);
7802 	mutex_unlock(&port->lock);
7803 
7804 	tcpm_log(port, "%s: registered", dev_name(dev));
7805 	return port;
7806 
7807 out_unregister_pd:
7808 	tcpm_port_unregister_pd(port);
7809 out_role_sw_put:
7810 	usb_role_switch_put(port->role_sw);
7811 out_destroy_wq:
7812 	tcpm_debugfs_exit(port);
7813 	kthread_destroy_worker(port->wq);
7814 	return ERR_PTR(err);
7815 }
7816 EXPORT_SYMBOL_GPL(tcpm_register_port);
7817 
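/*
 * tcpm_unregister_port - tear down a port created by tcpm_register_port()
 * @port: port returned by tcpm_register_port()
 *
 * Stops the worker and timers, resets the port and unregisters everything
 * that was registered on the low-level driver's behalf.
 */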
7818 void tcpm_unregister_port(struct tcpm_port *port)
7819 {
7820 	int i;
7821 
7822 	port->registered = false;
7823 	kthread_destroy_worker(port->wq);
7824 
7825 	hrtimer_cancel(&port->send_discover_timer);
7826 	hrtimer_cancel(&port->enable_frs_timer);
7827 	hrtimer_cancel(&port->vdm_state_machine_timer);
7828 	hrtimer_cancel(&port->state_machine_timer);
7829 
7830 	tcpm_reset_port(port);
7831 
7832 	tcpm_port_unregister_pd(port);
7833 
7834 	for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
7835 		typec_unregister_altmode(port->port_altmode[i]);
7836 	typec_unregister_port(port->typec_port);
7837 	usb_role_switch_put(port->role_sw);
7838 	tcpm_debugfs_exit(port);
7839 }
7840 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7841 
7842 MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
7843 MODULE_DESCRIPTION("USB Type-C Port Manager");
7844 MODULE_LICENSE("GPL");
7845