1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Type-C Port Controller Interface.
6 */
7
8 #include <linux/bitfield.h>
9 #include <linux/delay.h>
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/i2c.h>
13 #include <linux/interrupt.h>
14 #include <linux/property.h>
15 #include <linux/regmap.h>
16 #include <linux/usb/pd.h>
17 #include <linux/usb/tcpci.h>
18 #include <linux/usb/tcpm.h>
19 #include <linux/usb/typec.h>
20
/* Message retry counts: nRetryCount is 3 for PD 2.0, 2 for PD 3.0 or later. */
#define PD_RETRY_COUNT_DEFAULT 3
#define PD_RETRY_COUNT_3_0_OR_HIGHER 2
/* Fallback vSinkDisconnect threshold (mV) for non-PD and FRS-capable contracts. */
#define AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV 3500
/* Terms used to derive the PD auto-discharge disconnect threshold. */
#define VSINKPD_MIN_IR_DROP_MV 750
#define VSRC_NEW_MIN_PERCENT 95
#define VSRC_VALID_MIN_MV 500
#define VPPS_NEW_MIN_PERCENT 95
#define VPPS_VALID_MIN_MV 100
#define VSINKDISCONNECT_PD_MIN_PERCENT 90
30
/* Per-port TCPCI state wrapping the tcpc_dev callbacks handed to tcpm. */
struct tcpci {
	struct device *dev;		/* parent device */

	struct tcpm_port *port;		/* handle from tcpm_register_port() */

	struct regmap *regmap;		/* register access, supplied by vendor data */
	unsigned int alert_mask;	/* alerts enabled during tcpci_init() */

	bool controls_vbus;		/* TCPC reports VBUS presence */

	struct tcpc_dev tcpc;		/* callback table registered with tcpm */
	struct tcpci_data *data;	/* vendor-specific data and optional hooks */
};
44
/* Glue for the generic I2C driver: vendor data plus the registered port. */
struct tcpci_chip {
	struct tcpci *tcpci;
	struct tcpci_data data;
};
49
tcpci_get_tcpm_port(struct tcpci * tcpci)50 struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci)
51 {
52 return tcpci->port;
53 }
54 EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port);
55
/* Map a tcpc_dev callback argument back to its containing struct tcpci. */
static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc)
{
	return container_of(tcpc, struct tcpci, tcpc);
}
60
/* Read a 16-bit (little-endian, per TCPCI) register into *val. */
static int tcpci_read16(struct tcpci *tcpci, unsigned int reg, u16 *val)
{
	return regmap_raw_read(tcpci->regmap, reg, val, sizeof(*val));
}
65
/* Write a 16-bit register; the raw write preserves the wire byte order. */
static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
{
	return regmap_raw_write(tcpci->regmap, reg, &val, sizeof(val));
}
70
/*
 * Test whether every capability bit in @mask is advertised in
 * STD_OUTPUT_CAP.  Returns 1 if all are present, 0 if not, or a
 * negative errno on register-read failure.
 */
static int tcpci_check_std_output_cap(struct regmap *regmap, u8 mask)
{
	unsigned int cap;
	int err;

	err = regmap_read(regmap, TCPC_STD_OUTPUT_CAP, &cap);
	if (err < 0)
		return err;

	return (cap & mask) == mask ? 1 : 0;
}
82
/*
 * Program both CC lines in ROLE_CONTROL to the requested termination.
 * If VCONN is currently being sourced, the VCONN-carrying line (given by
 * the orientation bit) must not be driven, so it is forced open instead.
 */
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	bool vconn_pres;
	enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
	unsigned int reg;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
	if (ret < 0)
		return ret;

	vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
	if (vconn_pres) {
		/* The orientation bit tells us which CC line carries VCONN. */
		ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
		if (ret < 0)
			return ret;

		if (reg & TCPC_TCPC_CTRL_ORIENTATION)
			polarity = TYPEC_POLARITY_CC2;
	}

	/* Same termination on both lines; Rp variants also set the current level. */
	switch (cc) {
	case TYPEC_CC_RA:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RA)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RA));
		break;
	case TYPEC_CC_RD:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
		break;
	case TYPEC_CC_RP_DEF:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				    TCPC_ROLE_CTRL_RP_VAL_DEF));
		break;
	case TYPEC_CC_RP_1_5:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				    TCPC_ROLE_CTRL_RP_VAL_1_5));
		break;
	case TYPEC_CC_RP_3_0:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP)
		       | FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				    TCPC_ROLE_CTRL_RP_VAL_3_0));
		break;
	case TYPEC_CC_OPEN:
	default:
		reg = (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN)
		       | FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN));
		break;
	}

	/* Do not drive the line that is currently supplying VCONN. */
	if (vconn_pres) {
		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~TCPC_ROLE_CTRL_CC1;
			reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
		} else {
			reg &= ~TCPC_ROLE_CTRL_CC2;
			reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
		}
	}

	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return 0;
}
155
tcpci_apply_rc(struct tcpc_dev * tcpc,enum typec_cc_status cc,enum typec_cc_polarity polarity)156 static int tcpci_apply_rc(struct tcpc_dev *tcpc, enum typec_cc_status cc,
157 enum typec_cc_polarity polarity)
158 {
159 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
160 unsigned int reg;
161 int ret;
162
163 ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, ®);
164 if (ret < 0)
165 return ret;
166
167 /*
168 * APPLY_RC state is when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2 and vbus autodischarge on
169 * disconnect is disabled. Bail out when ROLE_CONTROL.CC1 != ROLE_CONTROL.CC2.
170 */
171 if (FIELD_GET(TCPC_ROLE_CTRL_CC2, reg) != FIELD_GET(TCPC_ROLE_CTRL_CC1, reg))
172 return 0;
173
174 return regmap_update_bits(tcpci->regmap, TCPC_ROLE_CTRL, polarity == TYPEC_POLARITY_CC1 ?
175 TCPC_ROLE_CTRL_CC2 : TCPC_ROLE_CTRL_CC1,
176 TCPC_ROLE_CTRL_CC_OPEN);
177 }
178
/*
 * Start autonomous DRP toggling with the requested initial termination.
 * Only DRP ports are supported; LOOK4CONNECTION kicks off the toggle.
 */
static int tcpci_start_toggling(struct tcpc_dev *tcpc,
				enum typec_port_type port_type,
				enum typec_cc_status cc)
{
	int ret;
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg = TCPC_ROLE_CTRL_DRP;

	if (port_type != TYPEC_PORT_DRP)
		return -EOPNOTSUPP;

	/* Handle vendor drp toggling */
	if (tcpci->data->start_drp_toggling) {
		ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
		if (ret < 0)
			return ret;
	}

	/* Rp current level advertised while toggling (falls back to default). */
	switch (cc) {
	default:
	case TYPEC_CC_RP_DEF:
		reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				  TCPC_ROLE_CTRL_RP_VAL_DEF);
		break;
	case TYPEC_CC_RP_1_5:
		reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				  TCPC_ROLE_CTRL_RP_VAL_1_5);
		break;
	case TYPEC_CC_RP_3_0:
		reg |= FIELD_PREP(TCPC_ROLE_CTRL_RP_VAL,
				  TCPC_ROLE_CTRL_RP_VAL_3_0);
		break;
	}

	/* Initial terminations presented on both CC lines while toggling. */
	if (cc == TYPEC_CC_RD)
		reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD)
			| FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD));
	else
		reg |= (FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP)
			| FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP));
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;
	return regmap_write(tcpci->regmap, TCPC_COMMAND,
			    TCPC_CMD_LOOK4CONNECTION);
}
225
/*
 * Read the state of both CC lines.  ROLE_CONTROL is read as well because
 * the raw CC status codes are ambiguous between Rp and Rd presentation;
 * tcpc_presenting_rd() disambiguates based on what we are driving.
 */
static int tcpci_get_cc(struct tcpc_dev *tcpc,
			enum typec_cc_status *cc1, enum typec_cc_status *cc2)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg, role_control;
	int ret;

	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
	if (ret < 0)
		return ret;

	ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
	if (ret < 0)
		return ret;

	*cc1 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC1, reg),
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC1));
	*cc2 = tcpci_to_typec_cc(FIELD_GET(TCPC_CC_STATUS_CC2, reg),
				 reg & TCPC_CC_STATUS_TERM ||
				 tcpc_presenting_rd(role_control, CC2));

	return 0;
}
250
/*
 * Commit the resolved polarity: open the unused CC line, keep (or infer,
 * after DRP toggling) the termination on the active one, and program the
 * plug orientation bit.
 */
static int tcpci_set_polarity(struct tcpc_dev *tcpc,
			      enum typec_cc_polarity polarity)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int reg;
	int ret;
	enum typec_cc_status cc1, cc2;

	/* Obtain Rp setting from role control */
	ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &reg);
	if (ret < 0)
		return ret;

	ret = tcpci_get_cc(tcpc, &cc1, &cc2);
	if (ret < 0)
		return ret;

	/*
	 * When port has drp toggling enabled, ROLE_CONTROL would only have the initial
	 * terminations for the toggling and does not indicate the final cc
	 * terminations when ConnectionResult is 0 i.e. drp toggling stops and
	 * the connection is resolved. Infer port role from TCPC_CC_STATUS based on the
	 * terminations seen. The port role is then used to set the cc terminations.
	 */
	if (reg & TCPC_ROLE_CTRL_DRP) {
		/* Disable DRP for the OPEN setting to take effect */
		reg = reg & ~TCPC_ROLE_CTRL_DRP;

		if (polarity == TYPEC_POLARITY_CC2) {
			reg &= ~TCPC_ROLE_CTRL_CC2;
			/* Local port is source */
			if (cc2 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RP);
			else
				reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_RD);
		} else {
			reg &= ~TCPC_ROLE_CTRL_CC1;
			/* Local port is source */
			if (cc1 == TYPEC_CC_RD)
				/* Role control would have the Rp setting when DRP was enabled */
				reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RP);
			else
				reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_RD);
		}
	}

	/* Open the CC line that is not in use. */
	if (polarity == TYPEC_POLARITY_CC2)
		reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC1, TCPC_ROLE_CTRL_CC_OPEN);
	else
		reg |= FIELD_PREP(TCPC_ROLE_CTRL_CC2, TCPC_ROLE_CTRL_CC_OPEN);
	ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
	if (ret < 0)
		return ret;

	return regmap_write(tcpci->regmap, TCPC_TCPC_CTRL,
			    (polarity == TYPEC_POLARITY_CC2) ?
			    TCPC_TCPC_CTRL_ORIENTATION : 0);
}
310
tcpci_set_orientation(struct tcpc_dev * tcpc,enum typec_orientation orientation)311 static int tcpci_set_orientation(struct tcpc_dev *tcpc,
312 enum typec_orientation orientation)
313 {
314 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
315 unsigned int reg;
316
317 switch (orientation) {
318 case TYPEC_ORIENTATION_NONE:
319 /* We can't put a single output into high impedance */
320 fallthrough;
321 case TYPEC_ORIENTATION_NORMAL:
322 reg = TCPC_CONFIG_STD_OUTPUT_ORIENTATION_NORMAL;
323 break;
324 case TYPEC_ORIENTATION_REVERSE:
325 reg = TCPC_CONFIG_STD_OUTPUT_ORIENTATION_FLIPPED;
326 break;
327 }
328
329 return regmap_update_bits(tcpci->regmap, TCPC_CONFIG_STD_OUTPUT,
330 TCPC_CONFIG_STD_OUTPUT_ORIENTATION_MASK, reg);
331 }
332
/* Forward partner USB-comm capability to the optional vendor hook. */
static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	struct tcpci_data *data = tcpci->data;

	if (data->set_partner_usb_comm_capable)
		data->set_partner_usb_comm_capable(tcpci, data, capable);
}
340
/*
 * Switch VCONN on or off.  The vendor glue gets first crack (e.g. to
 * switch an external supply) before the TCPC's own enable bit is set.
 */
static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int err;

	if (tcpci->data->set_vconn) {
		err = tcpci->data->set_vconn(tcpci, tcpci->data, enable);
		if (err < 0)
			return err;
	}

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_VCONN_ENABLE,
				  enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
}
357
/* Toggle the TCPC's automatic VBUS discharge-on-disconnect feature. */
static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int val = enable ? TCPC_POWER_CTRL_AUTO_DISCHARGE : 0;

	return regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL,
				  TCPC_POWER_CTRL_AUTO_DISCHARGE, val);
}
367
/*
 * Program VBUS_SINK_DISCONNECT_THRESH, the VBUS level below which the TCPC
 * treats the port as disconnected and auto-discharges.  A requested
 * voltage of 0 means VBUS is about to go away deliberately, so the
 * threshold is written as 0 and no discharge will trigger.
 */
static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
						   bool pps_active, u32 requested_vbus_voltage_mv)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	unsigned int pwr_ctrl, threshold = 0;
	int ret;

	/*
	 * Indicates that vbus is going to go away due PR_SWAP, hard reset etc.
	 * Do not discharge vbus here.
	 */
	if (requested_vbus_voltage_mv == 0)
		goto write_thresh;

	ret = regmap_read(tcpci->regmap, TCPC_POWER_CTRL, &pwr_ctrl);
	if (ret < 0)
		return ret;

	if (pwr_ctrl & TCPC_FAST_ROLE_SWAP_EN) {
		/* To prevent disconnect when the source is fast role swap is capable. */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	} else if (mode == TYPEC_PWR_MODE_PD) {
		/*
		 * Derive the disconnect threshold from the negotiated
		 * voltage: minimum valid source output, minus the allowed IR
		 * drop, scaled by the minimum detection percentage.
		 */
		if (pps_active)
			threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
		else
			threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) -
				     VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) *
				     VSINKDISCONNECT_PD_MIN_PERCENT / 100;
	} else {
		/* 3.5V for non-pd sink */
		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
	}

	/* Convert mV into the register's LSB granularity. */
	threshold = threshold / TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV;

	if (threshold > TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX)
		return -EINVAL;

write_thresh:
	return tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, threshold);
}
411
/*
 * Enable/disable fast role swap.  While FRS is armed, the sink-disconnect
 * threshold is zeroed so the brief VBUS droop during the swap is not
 * mistaken for a disconnect; on disable it is restored to 0x8c
 * (presumably the hardware reset default, ~3.5V at a 25mV LSB — confirm
 * against the TCPCI spec).
 */
static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(dev);
	int ret;

	/* To prevent disconnect during FRS, set disconnect threshold to 3.5V */
	ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 0 : 0x8c);
	if (ret < 0)
		return ret;

	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ?
				 TCPC_FAST_ROLE_SWAP_EN : 0);

	return ret;
}
427
tcpci_frs_sourcing_vbus(struct tcpc_dev * dev)428 static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
429 {
430 struct tcpci *tcpci = tcpc_to_tcpci(dev);
431
432 if (tcpci->data->frs_sourcing_vbus)
433 tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
434 }
435
tcpci_check_contaminant(struct tcpc_dev * dev)436 static void tcpci_check_contaminant(struct tcpc_dev *dev)
437 {
438 struct tcpci *tcpci = tcpc_to_tcpci(dev);
439
440 if (tcpci->data->check_contaminant)
441 tcpci->data->check_contaminant(tcpci, tcpci->data);
442 }
443
/* Enter or leave BIST test mode via the TCPC_CTRL register. */
static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int val = enable ? TCPC_TCPC_CTRL_BIST_TM : 0;

	return regmap_update_bits(tcpci->regmap, TCPC_TCPC_CTRL,
				  TCPC_TCPC_CTRL_BIST_TM, val);
}
451
/*
 * Program the message-header info register with the current power and
 * data roles.  The spec revision field is always written as PD 2.0 here.
 */
static int tcpci_set_roles(struct tcpc_dev *tcpc, bool attached,
			   enum typec_role role, enum typec_data_role data)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int hdr = FIELD_PREP(TCPC_MSG_HDR_INFO_REV, PD_REV20);

	if (role == TYPEC_SOURCE)
		hdr |= TCPC_MSG_HDR_INFO_PWR_ROLE;
	if (data == TYPEC_HOST)
		hdr |= TCPC_MSG_HDR_INFO_DATA_ROLE;

	return regmap_write(tcpci->regmap, TCPC_MSG_HDR_INFO, hdr);
}
470
/*
 * Enable or disable PD message reception.  When enabling, always accept
 * SOP and hard resets; SOP' is added only if the vendor glue supports
 * cable-plug communication.
 */
static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned int detect = 0;

	if (enable) {
		detect = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
		if (tcpci->data->cable_comm_capable)
			detect |= TCPC_RX_DETECT_SOP1;
	}

	return regmap_write(tcpci->regmap, TCPC_RX_DETECT, detect);
}
488
tcpci_get_vbus(struct tcpc_dev * tcpc)489 static int tcpci_get_vbus(struct tcpc_dev *tcpc)
490 {
491 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
492 unsigned int reg;
493 int ret;
494
495 ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, ®);
496 if (ret < 0)
497 return ret;
498
499 return !!(reg & TCPC_POWER_STATUS_VBUS_PRES);
500 }
501
tcpci_is_vbus_vsafe0v(struct tcpc_dev * tcpc)502 static bool tcpci_is_vbus_vsafe0v(struct tcpc_dev *tcpc)
503 {
504 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
505 unsigned int reg;
506 int ret;
507
508 ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, ®);
509 if (ret < 0)
510 return false;
511
512 return !!(reg & TCPC_EXTENDED_STATUS_VSAFE0V);
513 }
514
/*
 * Switch VBUS sourcing and sinking.  A vendor set_vbus hook may fully
 * handle the request: positive return means "done, skip the generic
 * commands", negative is an error, 0 falls through to the TCPC command
 * interface.  Disables are issued before enables so source and sink are
 * never driven simultaneously.
 */
static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	int ret;

	if (tcpci->data->set_vbus) {
		ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink);
		/* Bypass when ret > 0 */
		if (ret != 0)
			return ret < 0 ? ret : 0;
	}

	/* Disable both source and sink first before enabling anything */

	if (!source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SRC_VBUS);
		if (ret < 0)
			return ret;
	}

	if (!sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_DISABLE_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	if (source) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SRC_VBUS_DEFAULT);
		if (ret < 0)
			return ret;
	}

	if (sink) {
		ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
				   TCPC_CMD_SINK_VBUS);
		if (ret < 0)
			return ret;
	}

	return 0;
}
559
/*
 * Queue a PD message for transmission (@msg may be NULL for frames such
 * as hard reset that carry no payload).  @cnt is the payload size in
 * bytes: 4 bytes per data object from the message header.
 */
static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type,
			     const struct pd_message *msg, unsigned int negotiated_rev)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	u16 header = msg ? le16_to_cpu(msg->header) : 0;
	unsigned int reg, cnt;
	int ret;

	cnt = msg ? pd_header_cnt(header) * 4 : 0;
	/*
	 * TCPCI spec forbids direct access of TCPC_TX_DATA.
	 * But, since some of the chipsets offer this capability,
	 * it's fair to support both.
	 */
	if (tcpci->data->TX_BUF_BYTE_x_hidden) {
		/* One raw burst starting at TX_BYTE_CNT: count, header, payload. */
		u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,};
		u8 pos = 0;

		/* Payload + header + TCPC_TX_BYTE_CNT */
		buf[pos++] = cnt + 2;

		if (msg)
			memcpy(&buf[pos], &msg->header, sizeof(msg->header));

		pos += sizeof(header);

		if (cnt > 0)
			memcpy(&buf[pos], msg->payload, cnt);

		pos += cnt;
		ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos);
		if (ret < 0)
			return ret;
	} else {
		/* Separate writes: byte count, then header, then payload. */
		ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2);
		if (ret < 0)
			return ret;

		ret = tcpci_write16(tcpci, TCPC_TX_HDR, header);
		if (ret < 0)
			return ret;

		if (cnt > 0) {
			ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt);
			if (ret < 0)
				return ret;
		}
	}

	/* nRetryCount is 3 in PD2.0 spec where 2 in PD3.0 spec */
	reg = FIELD_PREP(TCPC_TRANSMIT_RETRY,
			 (negotiated_rev > PD_REV20
			  ? PD_RETRY_COUNT_3_0_OR_HIGHER
			  : PD_RETRY_COUNT_DEFAULT));
	reg |= FIELD_PREP(TCPC_TRANSMIT_TYPE, type);
	ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg);
	if (ret < 0)
		return ret;

	return 0;
}
621
tcpci_cable_comm_capable(struct tcpc_dev * tcpc)622 static bool tcpci_cable_comm_capable(struct tcpc_dev *tcpc)
623 {
624 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
625
626 return tcpci->data->cable_comm_capable;
627 }
628
tcpci_attempt_vconn_swap_discovery(struct tcpc_dev * tcpc)629 static bool tcpci_attempt_vconn_swap_discovery(struct tcpc_dev *tcpc)
630 {
631 struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
632
633 if (tcpci->data->attempt_vconn_swap_discovery)
634 return tcpci->data->attempt_vconn_swap_discovery(tcpci, tcpci->data);
635
636 return false;
637 }
638
/*
 * Bring the TCPC to an operational state: wait for its internal init to
 * finish, reset fault status, run vendor init, clear and then unmask the
 * alerts this driver handles.
 */
static int tcpci_init(struct tcpc_dev *tcpc)
{
	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
	unsigned long timeout = jiffies + msecs_to_jiffies(2000); /* XXX */
	unsigned int reg;
	int ret;

	/* Poll until the TCPC clears its "initialization in progress" flag. */
	while (time_before_eq(jiffies, timeout)) {
		ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
		if (ret < 0)
			return ret;
		if (!(reg & TCPC_POWER_STATUS_UNINIT))
			break;
		usleep_range(10000, 20000);
	}
	if (time_after(jiffies, timeout))
		return -ETIMEDOUT;

	ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
	if (ret < 0)
		return ret;

	/* Handle vendor init */
	if (tcpci->data->init) {
		ret = tcpci->data->init(tcpci, tcpci->data);
		if (ret < 0)
			return ret;
	}

	/* Clear all events */
	ret = tcpci_write16(tcpci, TCPC_ALERT, 0xffff);
	if (ret < 0)
		return ret;

	/* Only watch VBUS-presence changes when this TCPC controls VBUS. */
	if (tcpci->controls_vbus)
		reg = TCPC_POWER_STATUS_VBUS_PRES;
	else
		reg = 0;
	ret = regmap_write(tcpci->regmap, TCPC_POWER_STATUS_MASK, reg);
	if (ret < 0)
		return ret;

	/* Enable Vbus detection */
	ret = regmap_write(tcpci->regmap, TCPC_COMMAND,
			   TCPC_CMD_ENABLE_VBUS_DETECT);
	if (ret < 0)
		return ret;

	reg = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_FAILED |
		TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_RX_STATUS |
		TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_CC_STATUS;
	if (tcpci->controls_vbus)
		reg |= TCPC_ALERT_POWER_STATUS;
	/* Enable VSAFE0V status interrupt when detecting VSAFE0V is supported */
	if (tcpci->data->vbus_vsafe0v) {
		reg |= TCPC_ALERT_EXTENDED_STATUS;
		ret = regmap_write(tcpci->regmap, TCPC_EXTENDED_STATUS_MASK,
				   TCPC_EXTENDED_STATUS_VSAFE0V);
		if (ret < 0)
			return ret;
	}

	/* Remember the mask so the IRQ handler can filter spurious alerts. */
	tcpci->alert_mask = reg;

	return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}
705
tcpci_irq(struct tcpci * tcpci)706 irqreturn_t tcpci_irq(struct tcpci *tcpci)
707 {
708 u16 status;
709 int ret;
710 int irq_ret;
711 unsigned int raw;
712
713 tcpci_read16(tcpci, TCPC_ALERT, &status);
714 irq_ret = status & tcpci->alert_mask;
715
716 process_status:
717 /*
718 * Clear alert status for everything except RX_STATUS, which shouldn't
719 * be cleared until we have successfully retrieved message.
720 */
721 if (status & ~TCPC_ALERT_RX_STATUS)
722 tcpci_write16(tcpci, TCPC_ALERT,
723 status & ~TCPC_ALERT_RX_STATUS);
724
725 if (status & TCPC_ALERT_CC_STATUS)
726 tcpm_cc_change(tcpci->port);
727
728 if (status & TCPC_ALERT_POWER_STATUS) {
729 regmap_read(tcpci->regmap, TCPC_POWER_STATUS_MASK, &raw);
730 /*
731 * If power status mask has been reset, then the TCPC
732 * has reset.
733 */
734 if (raw == 0xff)
735 tcpm_tcpc_reset(tcpci->port);
736 else
737 tcpm_vbus_change(tcpci->port);
738 }
739
740 if (status & TCPC_ALERT_RX_STATUS) {
741 struct pd_message msg;
742 unsigned int cnt, payload_cnt;
743 u16 header;
744
745 regmap_read(tcpci->regmap, TCPC_RX_BYTE_CNT, &cnt);
746 /*
747 * 'cnt' corresponds to READABLE_BYTE_COUNT in section 4.4.14
748 * of the TCPCI spec [Rev 2.0 Ver 1.0 October 2017] and is
749 * defined in table 4-36 as one greater than the number of
750 * bytes received. And that number includes the header. So:
751 */
752 if (cnt > 3)
753 payload_cnt = cnt - (1 + sizeof(msg.header));
754 else
755 payload_cnt = 0;
756
757 tcpci_read16(tcpci, TCPC_RX_HDR, &header);
758 msg.header = cpu_to_le16(header);
759
760 if (WARN_ON(payload_cnt > sizeof(msg.payload)))
761 payload_cnt = sizeof(msg.payload);
762
763 if (payload_cnt > 0)
764 regmap_raw_read(tcpci->regmap, TCPC_RX_DATA,
765 &msg.payload, payload_cnt);
766
767 /* Read complete, clear RX status alert bit */
768 tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
769
770 tcpm_pd_receive(tcpci->port, &msg, TCPC_TX_SOP);
771 }
772
773 if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
774 ret = regmap_read(tcpci->regmap, TCPC_EXTENDED_STATUS, &raw);
775 if (!ret && (raw & TCPC_EXTENDED_STATUS_VSAFE0V))
776 tcpm_vbus_change(tcpci->port);
777 }
778
779 if (status & TCPC_ALERT_RX_HARD_RST)
780 tcpm_pd_hard_reset(tcpci->port);
781
782 if (status & TCPC_ALERT_TX_SUCCESS)
783 tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_SUCCESS);
784 else if (status & TCPC_ALERT_TX_DISCARDED)
785 tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_DISCARDED);
786 else if (status & TCPC_ALERT_TX_FAILED)
787 tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
788
789 tcpci_read16(tcpci, TCPC_ALERT, &status);
790
791 if (status & tcpci->alert_mask)
792 goto process_status;
793
794 return IRQ_RETVAL(irq_ret);
795 }
796 EXPORT_SYMBOL_GPL(tcpci_irq);
797
_tcpci_irq(int irq,void * dev_id)798 static irqreturn_t _tcpci_irq(int irq, void *dev_id)
799 {
800 struct tcpci_chip *chip = dev_id;
801
802 return tcpci_irq(chip->tcpci);
803 }
804
/* TCPCI register map: 8-bit register addresses, 8-bit values. */
static const struct regmap_config tcpci_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,

	.max_register = 0x7F, /* 0x80 .. 0xFF are vendor defined */
};
811
/*
 * Gather per-port configuration from firmware.  Only the mandatory
 * "connector" child node is looked up; VBUS control is hard-coded on
 * (see the XXX).  The returned fwnode reference is dropped in
 * tcpci_unregister_port() or on registration failure.
 */
static int tcpci_parse_config(struct tcpci *tcpci)
{
	tcpci->controls_vbus = true; /* XXX */

	tcpci->tcpc.fwnode = device_get_named_child_node(tcpci->dev,
							 "connector");
	if (!tcpci->tcpc.fwnode) {
		dev_err(tcpci->dev, "Can't find connector node.\n");
		return -EINVAL;
	}

	return 0;
}
825
/**
 * tcpci_register_port - wire up tcpc_dev callbacks and register with tcpm
 * @dev: parent device
 * @data: vendor-provided regmap plus optional hooks; must outlive the port
 *
 * Optional callbacks are installed only when the corresponding vendor
 * capability flag/hook is present.  Returns the tcpci handle or an
 * ERR_PTR() on failure.
 */
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
{
	struct tcpci *tcpci;
	int err;

	tcpci = devm_kzalloc(dev, sizeof(*tcpci), GFP_KERNEL);
	if (!tcpci)
		return ERR_PTR(-ENOMEM);

	tcpci->dev = dev;
	tcpci->data = data;
	tcpci->regmap = data->regmap;

	tcpci->tcpc.init = tcpci_init;
	tcpci->tcpc.get_vbus = tcpci_get_vbus;
	tcpci->tcpc.set_vbus = tcpci_set_vbus;
	tcpci->tcpc.set_cc = tcpci_set_cc;
	tcpci->tcpc.apply_rc = tcpci_apply_rc;
	tcpci->tcpc.get_cc = tcpci_get_cc;
	tcpci->tcpc.set_polarity = tcpci_set_polarity;
	tcpci->tcpc.set_vconn = tcpci_set_vconn;
	tcpci->tcpc.start_toggling = tcpci_start_toggling;

	tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
	tcpci->tcpc.set_roles = tcpci_set_roles;
	tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
	tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
	tcpci->tcpc.enable_frs = tcpci_enable_frs;
	tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
	tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
	tcpci->tcpc.cable_comm_capable = tcpci_cable_comm_capable;
	tcpci->tcpc.attempt_vconn_swap_discovery = tcpci_attempt_vconn_swap_discovery;

	if (tcpci->data->check_contaminant)
		tcpci->tcpc.check_contaminant = tcpci_check_contaminant;

	if (tcpci->data->auto_discharge_disconnect) {
		tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
		tcpci->tcpc.set_auto_vbus_discharge_threshold =
			tcpci_set_auto_vbus_discharge_threshold;
		regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_BLEED_DISCHARGE,
				   TCPC_POWER_CTRL_BLEED_DISCHARGE);
	}

	if (tcpci->data->vbus_vsafe0v)
		tcpci->tcpc.is_vbus_vsafe0v = tcpci_is_vbus_vsafe0v;

	if (tcpci->data->set_orientation)
		tcpci->tcpc.set_orientation = tcpci_set_orientation;

	err = tcpci_parse_config(tcpci);
	if (err < 0)
		return ERR_PTR(err);

	tcpci->port = tcpm_register_port(tcpci->dev, &tcpci->tcpc);
	if (IS_ERR(tcpci->port)) {
		/* Drop the connector fwnode reference taken in parse_config. */
		fwnode_handle_put(tcpci->tcpc.fwnode);
		return ERR_CAST(tcpci->port);
	}

	return tcpci;
}
EXPORT_SYMBOL_GPL(tcpci_register_port);
889
/*
 * Tear down a port created by tcpci_register_port(): unregister from tcpm
 * first (stops callbacks into us), then drop the connector fwnode ref.
 */
void tcpci_unregister_port(struct tcpci *tcpci)
{
	tcpm_unregister_port(tcpci->port);
	fwnode_handle_put(tcpci->tcpc.fwnode);
}
EXPORT_SYMBOL_GPL(tcpci_unregister_port);
896
/*
 * I2C probe: set up the regmap, quiesce the chip, probe the orientation
 * capability, request the (shared, threaded) IRQ and register the port.
 */
static int tcpci_probe(struct i2c_client *client)
{
	struct tcpci_chip *chip;
	int err;
	u16 val = 0;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	chip->data.regmap = devm_regmap_init_i2c(client, &tcpci_regmap_config);
	if (IS_ERR(chip->data.regmap))
		return PTR_ERR(chip->data.regmap);

	i2c_set_clientdata(client, chip);

	/* Disable chip interrupts before requesting irq */
	err = regmap_raw_write(chip->data.regmap, TCPC_ALERT_MASK, &val,
			       sizeof(u16));
	if (err < 0)
		return err;

	/* Returns 1/0 for capability present/absent, negative on I/O error. */
	err = tcpci_check_std_output_cap(chip->data.regmap,
					 TCPC_STD_OUTPUT_CAP_ORIENTATION);
	if (err < 0)
		return err;

	chip->data.set_orientation = err;

	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
					_tcpci_irq,
					IRQF_SHARED | IRQF_ONESHOT,
					dev_name(&client->dev), chip);
	if (err < 0)
		return err;

	/*
	 * Disable irq while registering port. If irq is configured as an edge
	 * irq this allow to keep track and process the irq as soon as it is enabled.
	 */
	disable_irq(client->irq);
	chip->tcpci = tcpci_register_port(&client->dev, &chip->data);
	enable_irq(client->irq);

	return PTR_ERR_OR_ZERO(chip->tcpci);
}
943
/*
 * I2C remove: mask all alerts first so no interrupt fires into a port
 * that is being torn down, then unregister it.
 */
static void tcpci_remove(struct i2c_client *client)
{
	struct tcpci_chip *chip = i2c_get_clientdata(client);
	int err;

	/* Disable chip interrupts before unregistering port */
	err = tcpci_write16(chip->tcpci, TCPC_ALERT_MASK, 0);
	if (err < 0)
		dev_warn(&client->dev, "Failed to disable irqs (%pe)\n", ERR_PTR(err));

	tcpci_unregister_port(chip->tcpci);
}
956
/* I2C device IDs for non-devicetree enumeration. */
static const struct i2c_device_id tcpci_id[] = {
	{ "tcpci" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, tcpci_id);
962
#ifdef CONFIG_OF
/* Devicetree match table. */
static const struct of_device_id tcpci_of_match[] = {
	{ .compatible = "nxp,ptn5110", },
	{ .compatible = "tcpci", },
	{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
#endif
971
/* Generic TCPCI I2C driver registration. */
static struct i2c_driver tcpci_i2c_driver = {
	.driver = {
		.name = "tcpci",
		.of_match_table = of_match_ptr(tcpci_of_match),
	},
	.probe = tcpci_probe,
	.remove = tcpci_remove,
	.id_table = tcpci_id,
};
module_i2c_driver(tcpci_i2c_driver);
982
983 MODULE_DESCRIPTION("USB Type-C Port Controller Interface driver");
984 MODULE_LICENSE("GPL");
985