1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * UCSI driver for Cypress CCGx Type-C controller
4 *
5 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6 * Author: Ajay Gupta <ajayg@nvidia.com>
7 *
8 * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9 */
10 #include <linux/acpi.h>
11 #include <linux/delay.h>
12 #include <linux/firmware.h>
13 #include <linux/hex.h>
14 #include <linux/i2c.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/usb/typec_dp.h>
21
22 #include <linux/unaligned.h>
23 #include "ucsi.h"
24
/* Firmware partition the CCGx controller is currently executing from. */
enum enum_fw_mode {
	BOOT,   /* bootloader */
	FW1,    /* FW partition-1 (contains secondary fw) */
	FW2,    /* FW partition-2 (contains primary fw) */
	FW_INVALID,
};

/* CCGx HPI register addresses and their field/command values. */
#define CCGX_RAB_DEVICE_MODE			0x0000
#define CCGX_RAB_INTR_REG			0x0006
#define  DEV_INT				BIT(0)
#define  PORT0_INT				BIT(1)
#define  PORT1_INT				BIT(2)
#define  UCSI_READ_INT				BIT(7)
#define CCGX_RAB_JUMP_TO_BOOT			0x0007
#define  TO_BOOT				'J'
#define  TO_ALT_FW				'A'
#define CCGX_RAB_RESET_REQ			0x0008
#define  RESET_SIG				'R'
#define  CMD_RESET_I2C				0x0
#define  CMD_RESET_DEV				0x1
#define CCGX_RAB_ENTER_FLASHING			0x000A
#define  FLASH_ENTER_SIG			'P'
#define CCGX_RAB_VALIDATE_FW			0x000B
#define CCGX_RAB_FLASH_ROW_RW			0x000C
#define  FLASH_SIG				'F'
#define  FLASH_RD_CMD				0x0
#define  FLASH_WR_CMD				0x1
#define  FLASH_FWCT1_WR_CMD			0x2
#define  FLASH_FWCT2_WR_CMD			0x3
#define  FLASH_FWCT_SIG_WR_CMD			0x4
#define CCGX_RAB_READ_ALL_VER			0x0010
#define CCGX_RAB_READ_FW2_VER			0x0020
#define CCGX_RAB_UCSI_CONTROL			0x0039
#define  CCGX_RAB_UCSI_CONTROL_START		BIT(0)
#define  CCGX_RAB_UCSI_CONTROL_STOP		BIT(1)
/* UCSI data region is windowed at 0xf000 + offset within the block. */
#define CCGX_RAB_UCSI_DATA_BLOCK(offset)	(0xf000 | ((offset) & 0xff))
#define REG_FLASH_RW_MEM        0x0200
#define DEV_REG_IDX				CCGX_RAB_DEVICE_MODE
#define CCGX_RAB_PDPORT_ENABLE			0x002C
#define  PDPORT_1	BIT(0)
#define  PDPORT_2	BIT(1)
#define CCGX_RAB_RESPONSE			0x007E
#define  ASYNC_EVENT				BIT(7)

/* CCGx events & async msg codes */
#define RESET_COMPLETE		0x80
#define EVENT_INDEX		RESET_COMPLETE
#define PORT_CONNECT_DET	0x84
#define PORT_DISCONNECT_DET	0x85
#define ROLE_SWAP_COMPELETE	0x87

/* ccg firmware */
#define CYACD_LINE_SIZE         527
#define CCG4_ROW_SIZE           256
#define FW1_METADATA_ROW        0x1FF
#define FW2_METADATA_ROW        0x1FE
#define FW_CFG_TABLE_SIG_SIZE	256

/* Oldest secondary-firmware build number that is still acceptable. */
static int secondary_fw_min_ver = 41;

/* Which image a pending flash operation should update, and via what path. */
enum enum_flash_mode {
	SECONDARY_BL, /* update secondary using bootloader */
	PRIMARY, /* update primary using secondary */
	SECONDARY, /* update secondary using primary */
	FLASH_NOT_NEEDED, /* update not required */
	FLASH_INVALID,
};

/* Firmware file names, indexed by enum enum_flash_mode. */
static const char * const ccg_fw_names[] = {
	"ccg_boot.cyacd",
	"ccg_primary.cyacd",
	"ccg_secondary.cyacd"
};
98
/* Layout of the CCGX_RAB_DEVICE_MODE register block. */
struct ccg_dev_info {
#define CCG_DEVINFO_FWMODE_SHIFT (0)
#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
#define CCG_DEVINFO_PDPORTS_SHIFT (2)
#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
	u8 mode;	/* fw mode + PD port count, see masks above */
	u8 bl_mode;
	__le16 silicon_id;
	__le16 bl_last_row;
} __packed;

/* On-wire version record: build number, patch level, major/minor nibbles. */
struct version_format {
	__le16 build;
	u8 patch;
	u8 ver;
#define CCG_VERSION_PATCH(x) ((x) << 16)
#define CCG_VERSION(x)	((x) << 24)
#define CCG_VERSION_MIN_SHIFT (0)
#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
#define CCG_VERSION_MAJ_SHIFT (4)
#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
} __packed;

/*
 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
 * of missing interrupt when a device is connected for runtime resume
 */
#define CCG_FW_BUILD_NVIDIA	(('n' << 8) | 'v')
#define CCG_OLD_FW_VERSION	(CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))

/* Firmware for Tegra doesn't support UCSI ALT command, built
 * for NVIDIA has known issue of reporting wrong capability info
 */
#define CCG_FW_BUILD_NVIDIA_TEGRA	(('g' << 8) | 'n')

/* Altmode offset for NVIDIA Function Test Board (FTB) */
#define NVIDIA_FTB_DP_OFFSET	(2)
#define NVIDIA_FTB_DBG_OFFSET	(3)

struct version_info {
	struct version_format base;	/* base/bootloader stack version */
	struct version_format app;	/* application firmware version */
};

/* Trailing firmware config table ("FWCT") of a signed .cyacd image. */
struct fw_config_table {
	u32 identity;			/* expected to read 'FWCT' */
	u16 table_size;
	u8 fwct_version;
	u8 is_key_change;
	u8 guid[16];
	struct version_format base;
	struct version_format app;
	u8 primary_fw_digest[32];
	u32 key_exp_length;
	u8 key_modulus[256];
	u8 key_exp[4];
};

/* CCGx response codes */
enum ccg_resp_code {
	CMD_NO_RESP             = 0x00,
	CMD_SUCCESS             = 0x02,
	FLASH_DATA_AVAILABLE    = 0x03,
	CMD_INVALID             = 0x05,
	FLASH_UPDATE_FAIL       = 0x07,
	INVALID_FW              = 0x08,
	INVALID_ARG             = 0x09,
	CMD_NOT_SUPPORT         = 0x0A,
	TRANSACTION_FAIL        = 0x0C,
	PD_CMD_FAIL             = 0x0D,
	UNDEF_ERROR             = 0x0F,
	INVALID_RESP            = 0x10,
};

#define CCG_EVENT_MAX	(EVENT_INDEX + 43)

/* One HPI command: target register, payload, length, settle delay. */
struct ccg_cmd {
	u16 reg;
	u32 data;
	int len;
	u32 delay; /* ms delay for cmd timeout */
};

/* Raw response read back from CCGX_RAB_RESPONSE. */
struct ccg_resp {
	u8 code;
	u8 length;
};

/* Driver-side record of one reported alternate mode. */
struct ucsi_ccg_altmode {
	u16 svid;
	u32 mid;
	u8 linked_idx;	/* index of the paired entry in the other table */
	u8 active_idx;	/* currently entered alternate mode */
#define UCSI_MULTI_DP_INDEX	(0xff)
	bool checked;
} __packed;

/* Snapshot of the UCSI CCI and MESSAGE_IN registers, refreshed by the ISR. */
#define CCGX_MESSAGE_IN_MAX 4
struct op_region {
	__le32 cci;
	__le32 message_in[CCGX_MESSAGE_IN_MAX];
};

struct ucsi_ccg {
	struct device *dev;
	struct ucsi *ucsi;
	struct i2c_client *client;

	struct ccg_dev_info info;
	/* version info for boot, primary and secondary */
	struct version_info version[FW2 + 1];
	u32 fw_version;
	/* CCG HPI communication flags */
	unsigned long flags;
#define RESET_PENDING	0
#define DEV_CMD_PENDING	1
	struct ccg_resp dev_resp;
	u8 cmd_resp;
	int port_num;
	int irq;
	struct work_struct work;
	struct mutex lock; /* to sync between user and driver thread */

	/* fw build with vendor information */
	u16 fw_build;
	struct work_struct pm_work;

	bool has_multiple_dp;
	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];

	/*
	 * This spinlock protects op_data which includes CCI and MESSAGE_IN that
	 * will be updated in ISR
	 */
	spinlock_t op_lock;
	struct op_region op_data;
};
237
/*
 * Read @len bytes from HPI register address @rab into @data, splitting
 * the transfer into chunks if the i2c adapter limits read lengths.
 */
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
	unsigned char addr_buf[2];
	struct i2c_msg xfer[] = {
		{
			.addr = client->addr,
			.flags = 0x0,
			.len = sizeof(addr_buf),
			.buf = addr_buf,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.buf = data,
		},
	};
	u32 chunk, remaining = len, max_read_len = len;
	int err;

	/* Honour any max_read_len limitation of the i2c adapter. */
	if (quirks && quirks->max_read_len)
		max_read_len = quirks->max_read_len;

	pm_runtime_get_sync(uc->dev);
	while (remaining > 0) {
		xfer[1].buf = &data[len - remaining];
		chunk = min_t(u16, remaining, max_read_len);
		xfer[1].len = chunk;
		put_unaligned_le16(rab, addr_buf);
		err = i2c_transfer(client->adapter, xfer, ARRAY_SIZE(xfer));
		if (err < 0) {
			dev_err(uc->dev, "i2c_transfer failed %d\n", err);
			pm_runtime_put_sync(uc->dev);
			return err;
		}
		rab += chunk;
		remaining -= chunk;
	}

	pm_runtime_put_sync(uc->dev);
	return 0;
}
282
/*
 * Write @len bytes from @data to HPI register address @rab.
 *
 * The register address is prepended to the payload in a single
 * heap-allocated buffer so the whole write goes out as one message.
 * Returns 0 on success or a negative errno.
 */
static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	unsigned char *buf;
	struct i2c_msg msgs[] = {
		{
			.addr = client->addr,
			.flags = 0x0,
		}
	};
	int status;

	buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	put_unaligned_le16(rab, buf);
	memcpy(buf + sizeof(rab), data, len);

	msgs[0].len = len + sizeof(rab);
	msgs[0].buf = buf;

	pm_runtime_get_sync(uc->dev);
	status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (status < 0)
		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
	else
		status = 0;
	/* Single unified exit: the original duplicated this cleanup. */
	pm_runtime_put_sync(uc->dev);
	kfree(buf);

	return status;
}
318
/*
 * Refresh the cached op-region (CCI + MESSAGE_IN) from the device.
 *
 * MESSAGE_IN is only fetched when the CCI length field says a payload
 * is present; otherwise no buffer is allocated at all (the original
 * code performed a GFP_ATOMIC allocation unconditionally).  The cache
 * update itself happens under op_lock so readers never see a torn
 * CCI/MESSAGE_IN pair.  Returns 0 on success or a negative errno.
 */
static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
{
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
	struct op_region *data = &uc->op_data;
	unsigned char *buf = NULL;
	size_t size = sizeof(data->message_in);

	if (UCSI_CCI_LENGTH(cci)) {
		int ret;

		buf = kzalloc(size, GFP_ATOMIC);
		if (!buf)
			return -ENOMEM;

		/* Read MESSAGE_IN outside the spinlock: i2c may sleep. */
		ret = ccg_read(uc, reg, buf, size);
		if (ret) {
			kfree(buf);
			return ret;
		}
	}

	spin_lock(&uc->op_lock);
	data->cci = cpu_to_le32(cci);
	if (buf)
		memcpy(&data->message_in, buf, size);
	spin_unlock(&uc->op_lock);

	kfree(buf);
	return 0;
}
346
ucsi_ccg_init(struct ucsi_ccg * uc)347 static int ucsi_ccg_init(struct ucsi_ccg *uc)
348 {
349 unsigned int count = 10;
350 u8 data;
351 int status;
352
353 spin_lock_init(&uc->op_lock);
354
355 data = CCGX_RAB_UCSI_CONTROL_STOP;
356 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
357 if (status < 0)
358 return status;
359
360 data = CCGX_RAB_UCSI_CONTROL_START;
361 status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
362 if (status < 0)
363 return status;
364
365 /*
366 * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control
367 * register write will push response which must be cleared.
368 */
369 do {
370 status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
371 if (status < 0)
372 return status;
373
374 if (!(data & DEV_INT))
375 return 0;
376
377 status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
378 if (status < 0)
379 return status;
380
381 usleep_range(10000, 11000);
382 } while (--count);
383
384 return -ETIMEDOUT;
385 }
386
/*
 * Translate a GET_CURRENT_CAM reply from the device's altmode index
 * space back into the driver's merged (de-duplicated) index space, and
 * record which original mode is active for the merged entry.
 */
static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
{
	u8 cam, new_cam;

	cam = data[0];
	/*
	 * The index comes straight from the firmware reply; guard against
	 * an out-of-range value before using it to index uc->orig[]
	 * (the original code indexed unchecked).
	 */
	if (cam >= ARRAY_SIZE(uc->orig))
		return;
	new_cam = uc->orig[cam].linked_idx;
	uc->updated[new_cam].active_idx = cam;
	data[0] = new_cam;
}
396
/*
 * UCSI update_altmodes op, applied only to connector-recipient lists.
 *
 * The controller may report the same DP SVID several times, once per
 * pin-assignment variant.  Merge such duplicates into a single entry
 * whose MID is the OR of the individual MIDs, record the index mapping
 * between the raw list (uc->orig) and the merged list (uc->updated),
 * and rewrite @updated with the merged result.
 *
 * Returns true when at least one duplicate DP mode was merged.
 */
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     u8 recipient,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_ccg_altmode *alt, *new_alt;
	int i, j, k = 0;
	bool found = false;

	if (recipient != UCSI_RECIPIENT_CON)
		return false;

	alt = uc->orig;
	new_alt = uc->updated;
	memset(uc->updated, 0, sizeof(uc->updated));

	/*
	 * Copy original connector altmodes to new structure.
	 * We need this before second loop since second loop
	 * checks for duplicate altmodes.
	 */
	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		alt[i].svid = orig[i].svid;
		alt[i].mid = orig[i].mid;
		if (!alt[i].svid)
			break;
	}

	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		if (!alt[i].svid)
			break;

		/* already checked and considered */
		if (alt[i].checked)
			continue;

		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
			/* Found Non DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
			updated[k].svid = new_alt[k].svid;
			updated[k].mid = new_alt[k].mid;
			k++;
			continue;
		}

		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
			if (alt[i].svid != alt[j].svid ||
			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
				continue;
			} else {
				/* Found duplicate DP mode */
				new_alt[k].svid = alt[i].svid;
				new_alt[k].mid |= alt[i].mid | alt[j].mid;
				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
				alt[i].linked_idx = k;
				alt[j].linked_idx = k;
				alt[j].checked = true;
				found = true;
			}
		}
		/*
		 * NOTE(review): 'found' is never reset inside the outer
		 * loop, so once one duplicate pair has been merged every
		 * later DP entry also takes the 'found' branch and leaves
		 * its new_alt[k] fields zeroed.  Harmless while only one
		 * DP SVID exists per connector — verify that assumption.
		 */
		if (found) {
			uc->has_multiple_dp = true;
		} else {
			/* Didn't find any duplicate DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
		}
		updated[k].svid = new_alt[k].svid;
		updated[k].mid = new_alt[k].mid;
		k++;
	}
	return found;
}
476
/*
 * Rewrite the altmode index inside a SET_NEW_CAM command from the
 * driver's merged index space back into the device's raw index space.
 *
 * For a merged multi-DP entry (linked_idx == UCSI_MULTI_DP_INDEX) the
 * raw index cannot be derived directly: on entry, pick the raw DP mode
 * with the best pin assignment the partner also supports; on exit,
 * reuse the index recorded when the mode was entered.
 */
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
{
	struct ucsi_ccg_altmode *new_port, *port;
	struct typec_altmode *alt = NULL;
	u8 new_cam, cam, pin;
	bool enter_new_mode;
	int i, j, k = 0xff;	/* 0xff = no candidate found yet */

	port = uc->orig;
	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
	if (new_cam >= ARRAY_SIZE(uc->updated))
		return;
	new_port = &uc->updated[new_cam];
	cam = new_port->linked_idx;
	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);

	/*
	 * If CAM is UCSI_MULTI_DP_INDEX then this is DP altmode
	 * with multiple DP mode. Find out CAM for best pin assignment
	 * among all DP mode. Priorite pin E->D->C after making sure
	 * the partner supports that pin.
	 */
	if (cam == UCSI_MULTI_DP_INDEX) {
		if (enter_new_mode) {
			/* Locate the partner altmode with the same SVID. */
			for (i = 0; con->partner_altmode[i]; i++) {
				alt = con->partner_altmode[i];
				if (alt->svid == new_port->svid)
					break;
			}
			/*
			 * alt will always be non NULL since this is
			 * UCSI_SET_NEW_CAM command and so there will be
			 * at least one con->partner_altmode[i] with svid
			 * matching with new_port->svid.
			 */
			for (j = 0; port[j].svid; j++) {
				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
				if (alt && port[j].svid == alt->svid &&
				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
					/* prioritize pin E->D->C */
					if (k == 0xff || (k != 0xff && pin >
					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
					    ) {
						k = j;
					}
				}
			}
			cam = k;
			/* Remember the choice for the matching exit command. */
			new_port->active_idx = cam;
		} else {
			cam = new_port->active_idx;
		}
	}
	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
535
536 /*
537 * Change the order of vdo values of NVIDIA test device FTB
538 * (Function Test Board) which reports altmode list with vdo=0x3
539 * first and then vdo=0x. Current logic to assign mode value is
540 * based on order in altmode list and it causes a mismatch of CON
541 * and SOP altmodes since NVIDIA GPU connector has order of vdo=0x1
542 * first and then vdo=0x3
543 */
/*
 * Change the order of vdo values of NVIDIA test device FTB
 * (Function Test Board) which reports altmode list with vdo=0x3
 * first and then vdo=0x. Current logic to assign mode value is
 * based on order in altmode list and it causes a mismatch of CON
 * and SOP altmodes since NVIDIA GPU connector has order of vdo=0x1
 * first and then vdo=0x3
 */
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
				    struct ucsi_altmode *alt,
				    u64 command)
{
	if (UCSI_ALTMODE_OFFSET(command) == NVIDIA_FTB_DP_OFFSET) {
		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
				DP_CAP_DP_SIGNALLING(0) | DP_CAP_USB |
				DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
	} else if (UCSI_ALTMODE_OFFSET(command) == NVIDIA_FTB_DBG_OFFSET) {
		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
	}
}
563
/* UCSI read_version op: fetch the 16-bit UCSI VERSION word over HPI. */
static int ucsi_ccg_read_version(struct ucsi *ucsi, u16 *version)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_VERSION);

	return ccg_read(uc, reg, (u8 *)version, sizeof(*version));
}
571
/*
 * UCSI read_cci op: return the CCI value cached by the ISR.
 *
 * op_data.cci is stored as __le32 (ccg_op_region_update does
 * cpu_to_le32), so convert it back here; the original assigned the
 * raw __le32 to a u32, which is wrong on big-endian hosts.
 */
static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	spin_lock(&uc->op_lock);
	*cci = le32_to_cpu(uc->op_data.cci);
	spin_unlock(&uc->op_lock);

	return 0;
}
582
/*
 * UCSI read_message_in op: copy the MESSAGE_IN payload cached by the
 * ISR into @val.  @val_len is clamped to the size of the cache so a
 * too-large request can never read past op_data.message_in (the
 * original copied the caller-supplied length unchecked).
 */
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	if (val_len > sizeof(uc->op_data.message_in))
		val_len = sizeof(uc->op_data.message_in);

	spin_lock(&uc->op_lock);
	memcpy(val, uc->op_data.message_in, val_len);
	spin_unlock(&uc->op_lock);

	return 0;
}
593
/* UCSI async_control op: write @command to the UCSI CONTROL register. */
static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CONTROL);

	/*
	 * UCSI may read CCI instantly after async_control,
	 * clear CCI to avoid caller getting wrong data before we get CCI from ISR
	 */
	spin_lock(&uc->op_lock);
	uc->op_data.cci = 0;
	spin_unlock(&uc->op_lock);

	return ccg_write(uc, reg, (u8 *)&command, sizeof(command));
}
609
/*
 * UCSI sync_control op.
 *
 * Wraps ucsi_sync_control_common() with the CCGx-specific index
 * translation: SET_NEW_CAM commands are rewritten into the device's
 * raw altmode index space when duplicate DP modes were merged, and
 * selected replies (GET_CURRENT_CAM, GET_ALTERNATE_MODES,
 * GET_CAPABILITY) are fixed up on the way back.  Serialized against
 * other users via uc->lock, with the device held awake for the
 * duration.
 */
static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
				 void *data, size_t size)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_connector *con;
	int con_index;
	int ret;

	mutex_lock(&uc->lock);
	pm_runtime_get_sync(uc->dev);

	if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
	    uc->has_multiple_dp) {
		/* Connector number is 1-based; 0 is invalid. */
		con_index = (command >> 16) &
			UCSI_CMD_CONNECTOR_MASK;
		if (con_index == 0) {
			ret = -EINVAL;
			goto err_put;
		}
		con = &uc->ucsi->connector[con_index - 1];
		ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
	}

	ret = ucsi_sync_control_common(ucsi, command, cci, data, size);

	/* Post-process replies that carry altmode/capability data. */
	switch (UCSI_COMMAND(command)) {
	case UCSI_GET_CURRENT_CAM:
		if (uc->has_multiple_dp)
			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
		break;
	case UCSI_GET_ALTERNATE_MODES:
		if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
			struct ucsi_altmode *alt = data;

			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
				ucsi_ccg_nvidia_altmode(uc, alt, command);
		}
		break;
	case UCSI_GET_CAPABILITY:
		/* Tegra builds report altmode details incorrectly; hide them. */
		if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
			struct ucsi_capability *cap = data;

			cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
		}
		break;
	default:
		break;
	}

err_put:
	pm_runtime_put_sync(uc->dev);
	mutex_unlock(&uc->lock);

	return ret;
}
665
/* UCSI transport operations backed by the CCGx HPI register window. */
static const struct ucsi_operations ucsi_ccg_ops = {
	.read_version = ucsi_ccg_read_version,
	.read_cci = ucsi_ccg_read_cci,
	.poll_cci = ucsi_ccg_read_cci,
	.read_message_in = ucsi_ccg_read_message_in,
	.sync_control = ucsi_ccg_sync_control,
	.async_control = ucsi_ccg_async_control,
	.update_altmodes = ucsi_ccg_update_altmodes
};
675
ccg_irq_handler(int irq,void * data)676 static irqreturn_t ccg_irq_handler(int irq, void *data)
677 {
678 u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
679 struct ucsi_ccg *uc = data;
680 u8 intr_reg;
681 u32 cci = 0;
682 int ret = 0;
683
684 ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
685 if (ret)
686 return ret;
687
688 if (!intr_reg)
689 return IRQ_HANDLED;
690 else if (!(intr_reg & UCSI_READ_INT))
691 goto err_clear_irq;
692
693 ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
694 if (ret)
695 goto err_clear_irq;
696
697 /*
698 * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN
699 * to the OpRegion before clear the UCSI interrupt
700 */
701 ret = ccg_op_region_update(uc, cci);
702 if (ret)
703 goto err_clear_irq;
704
705 err_clear_irq:
706 ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
707
708 if (!ret)
709 ucsi_notify_common(uc->ucsi, cci);
710
711 return IRQ_HANDLED;
712 }
713
ccg_request_irq(struct ucsi_ccg * uc)714 static int ccg_request_irq(struct ucsi_ccg *uc)
715 {
716 unsigned long flags = IRQF_ONESHOT;
717
718 if (!dev_fwnode(uc->dev))
719 flags |= IRQF_TRIGGER_HIGH;
720
721 return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
722 }
723
/*
 * Runtime-resume workaround: re-run the IRQ handler manually.
 * Presumably for the old NVIDIA builds noted above that can miss the
 * connect interrupt on runtime resume — confirm against scheduling site.
 */
static void ccg_pm_workaround_work(struct work_struct *pm_work)
{
	ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
}
728
get_fw_info(struct ucsi_ccg * uc)729 static int get_fw_info(struct ucsi_ccg *uc)
730 {
731 int err;
732
733 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
734 sizeof(uc->version));
735 if (err < 0)
736 return err;
737
738 uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
739 CCG_VERSION_PATCH(uc->version[FW2].app.patch);
740
741 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
742 sizeof(uc->info));
743 if (err < 0)
744 return err;
745
746 return 0;
747 }
748
invalid_async_evt(int code)749 static inline bool invalid_async_evt(int code)
750 {
751 return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
752 }
753
ccg_process_response(struct ucsi_ccg * uc)754 static void ccg_process_response(struct ucsi_ccg *uc)
755 {
756 struct device *dev = uc->dev;
757
758 if (uc->dev_resp.code & ASYNC_EVENT) {
759 if (uc->dev_resp.code == RESET_COMPLETE) {
760 if (test_bit(RESET_PENDING, &uc->flags))
761 uc->cmd_resp = uc->dev_resp.code;
762 get_fw_info(uc);
763 }
764 if (invalid_async_evt(uc->dev_resp.code))
765 dev_err(dev, "invalid async evt %d\n",
766 uc->dev_resp.code);
767 } else {
768 if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
769 uc->cmd_resp = uc->dev_resp.code;
770 clear_bit(DEV_CMD_PENDING, &uc->flags);
771 } else {
772 dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
773 uc->dev_resp.code);
774 }
775 }
776 }
777
ccg_read_response(struct ucsi_ccg * uc)778 static int ccg_read_response(struct ucsi_ccg *uc)
779 {
780 unsigned long target = jiffies + msecs_to_jiffies(1000);
781 struct device *dev = uc->dev;
782 u8 intval;
783 int status;
784
785 /* wait for interrupt status to get updated */
786 do {
787 status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
788 sizeof(intval));
789 if (status < 0)
790 return status;
791
792 if (intval & DEV_INT)
793 break;
794 usleep_range(500, 600);
795 } while (time_is_after_jiffies(target));
796
797 if (time_is_before_jiffies(target)) {
798 dev_err(dev, "response timeout error\n");
799 return -ETIME;
800 }
801
802 status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
803 sizeof(uc->dev_resp));
804 if (status < 0)
805 return status;
806
807 status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
808 if (status < 0)
809 return status;
810
811 return 0;
812 }
813
814 /* Caller must hold uc->lock */
ccg_send_command(struct ucsi_ccg * uc,struct ccg_cmd * cmd)815 static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
816 {
817 struct device *dev = uc->dev;
818 int ret;
819
820 switch (cmd->reg & 0xF000) {
821 case DEV_REG_IDX:
822 set_bit(DEV_CMD_PENDING, &uc->flags);
823 break;
824 default:
825 dev_err(dev, "invalid cmd register\n");
826 break;
827 }
828
829 ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
830 if (ret < 0)
831 return ret;
832
833 msleep(cmd->delay);
834
835 ret = ccg_read_response(uc);
836 if (ret < 0) {
837 dev_err(dev, "response read error\n");
838 switch (cmd->reg & 0xF000) {
839 case DEV_REG_IDX:
840 clear_bit(DEV_CMD_PENDING, &uc->flags);
841 break;
842 default:
843 dev_err(dev, "invalid cmd register\n");
844 break;
845 }
846 return -EIO;
847 }
848 ccg_process_response(uc);
849
850 return uc->cmd_resp;
851 }
852
ccg_cmd_enter_flashing(struct ucsi_ccg * uc)853 static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
854 {
855 struct ccg_cmd cmd;
856 int ret;
857
858 cmd.reg = CCGX_RAB_ENTER_FLASHING;
859 cmd.data = FLASH_ENTER_SIG;
860 cmd.len = 1;
861 cmd.delay = 50;
862
863 mutex_lock(&uc->lock);
864
865 ret = ccg_send_command(uc, &cmd);
866
867 mutex_unlock(&uc->lock);
868
869 if (ret != CMD_SUCCESS) {
870 dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
871 return ret;
872 }
873
874 return 0;
875 }
876
ccg_cmd_reset(struct ucsi_ccg * uc)877 static int ccg_cmd_reset(struct ucsi_ccg *uc)
878 {
879 struct ccg_cmd cmd;
880 u8 *p;
881 int ret;
882
883 p = (u8 *)&cmd.data;
884 cmd.reg = CCGX_RAB_RESET_REQ;
885 p[0] = RESET_SIG;
886 p[1] = CMD_RESET_DEV;
887 cmd.len = 2;
888 cmd.delay = 5000;
889
890 mutex_lock(&uc->lock);
891
892 set_bit(RESET_PENDING, &uc->flags);
893
894 ret = ccg_send_command(uc, &cmd);
895 if (ret != RESET_COMPLETE)
896 goto err_clear_flag;
897
898 ret = 0;
899
900 err_clear_flag:
901 clear_bit(RESET_PENDING, &uc->flags);
902
903 mutex_unlock(&uc->lock);
904
905 return ret;
906 }
907
/* Enable (for port_num ports) or disable all PD ports. */
static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
{
	struct ccg_cmd cmd = {
		.reg = CCGX_RAB_PDPORT_ENABLE,
		.len = 1,
		.delay = 10,
	};
	int ret;

	if (!enable)
		cmd.data = 0x0;
	else if (uc->port_num == 1)
		cmd.data = PDPORT_1;
	else
		cmd.data = PDPORT_1 | PDPORT_2;

	mutex_lock(&uc->lock);
	ret = ccg_send_command(uc, &cmd);
	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "port control failed ret=%d\n", ret);
		return ret;
	}
	return 0;
}
934
ccg_cmd_jump_boot_mode(struct ucsi_ccg * uc,int bl_mode)935 static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
936 {
937 struct ccg_cmd cmd;
938 int ret;
939
940 cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
941
942 if (bl_mode)
943 cmd.data = TO_BOOT;
944 else
945 cmd.data = TO_ALT_FW;
946
947 cmd.len = 1;
948 cmd.delay = 100;
949
950 mutex_lock(&uc->lock);
951
952 set_bit(RESET_PENDING, &uc->flags);
953
954 ret = ccg_send_command(uc, &cmd);
955 if (ret != RESET_COMPLETE)
956 goto err_clear_flag;
957
958 ret = 0;
959
960 err_clear_flag:
961 clear_bit(RESET_PENDING, &uc->flags);
962
963 mutex_unlock(&uc->lock);
964
965 return ret;
966 }
967
/*
 * Write one CCG4_ROW_SIZE flash row: stage the data in the device's
 * flash read/write memory window, then trigger the row write via the
 * FLASH_ROW_READ_WRITE register.  @fcmd selects the flash operation
 * (normal row write, FWCT part 1/2, or FWCT signature).
 */
static int
ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
			const void *data, u8 fcmd)
{
	struct i2c_client *client = uc->client;
	struct ccg_cmd cmd;
	u8 buf[CCG4_ROW_SIZE + 2];
	u8 *p;
	int ret;

	/* Copy the data into the flash read/write memory. */
	put_unaligned_le16(REG_FLASH_RW_MEM, buf);

	memcpy(buf + 2, data, CCG4_ROW_SIZE);

	mutex_lock(&uc->lock);

	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
	if (ret != CCG4_ROW_SIZE + 2) {
		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
		mutex_unlock(&uc->lock);
		return ret < 0 ? ret : -EIO;
	}

	/* Use the FLASH_ROW_READ_WRITE register to trigger */
	/* writing of data to the desired flash row */
	p = (u8 *)&cmd.data;
	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
	p[0] = FLASH_SIG;
	p[1] = fcmd;
	put_unaligned_le16(row, &p[2]);
	cmd.len = 4;
	/* Signature writes and row 510 need extra settle time. */
	cmd.delay = 50;
	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
		cmd.delay += 400;
	if (row == 510)
		cmd.delay += 220;
	ret = ccg_send_command(uc, &cmd);

	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
1016
ccg_cmd_validate_fw(struct ucsi_ccg * uc,unsigned int fwid)1017 static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
1018 {
1019 struct ccg_cmd cmd;
1020 int ret;
1021
1022 cmd.reg = CCGX_RAB_VALIDATE_FW;
1023 cmd.data = fwid;
1024 cmd.len = 1;
1025 cmd.delay = 500;
1026
1027 mutex_lock(&uc->lock);
1028
1029 ret = ccg_send_command(uc, &cmd);
1030
1031 mutex_unlock(&uc->lock);
1032
1033 if (ret != CMD_SUCCESS)
1034 return ret;
1035
1036 return 0;
1037 }
1038
/*
 * Accept an update only if both the running image (@app) and the
 * candidate image (@fw_cfg) carry the expected vendor build tag
 * (uc->fw_build).  Returns true when both match.
 */
static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
				     struct version_format *app,
				     struct fw_config_table *fw_cfg)
{
	struct device *dev = uc->dev;

	/* Check if the fw build is for supported vendors */
	if (le16_to_cpu(app->build) != uc->fw_build) {
		dev_info(dev, "current fw is not from supported vendor\n");
		return false;
	}

	/* Check if the new fw build is for supported vendors */
	if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
		dev_info(dev, "new fw is not from supported vendor\n");
		return false;
	}
	return true;
}
1058
ccg_check_fw_version(struct ucsi_ccg * uc,const char * fw_name,struct version_format * app)1059 static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
1060 struct version_format *app)
1061 {
1062 const struct firmware *fw = NULL;
1063 struct device *dev = uc->dev;
1064 struct fw_config_table fw_cfg;
1065 u32 cur_version, new_version;
1066 bool is_later = false;
1067
1068 if (request_firmware(&fw, fw_name, dev) != 0) {
1069 dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
1070 return false;
1071 }
1072
1073 /*
1074 * check if signed fw
1075 * last part of fw image is fw cfg table and signature
1076 */
1077 if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
1078 goto out_release_firmware;
1079
1080 memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
1081 sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));
1082
1083 if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
1084 dev_info(dev, "not a signed image\n");
1085 goto out_release_firmware;
1086 }
1087
1088 /* compare input version with FWCT version */
1089 cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
1090 CCG_VERSION(app->ver);
1091
1092 new_version = le16_to_cpu(fw_cfg.app.build) |
1093 CCG_VERSION_PATCH(fw_cfg.app.patch) |
1094 CCG_VERSION(fw_cfg.app.ver);
1095
1096 if (!ccg_check_vendor_version(uc, app, &fw_cfg))
1097 goto out_release_firmware;
1098
1099 if (new_version > cur_version)
1100 is_later = true;
1101
1102 out_release_firmware:
1103 release_firmware(fw);
1104 return is_later;
1105 }
1106
ccg_fw_update_needed(struct ucsi_ccg * uc,enum enum_flash_mode * mode)1107 static int ccg_fw_update_needed(struct ucsi_ccg *uc,
1108 enum enum_flash_mode *mode)
1109 {
1110 struct device *dev = uc->dev;
1111 int err;
1112 struct version_info version[3];
1113
1114 err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1115 sizeof(uc->info));
1116 if (err) {
1117 dev_err(dev, "read device mode failed\n");
1118 return err;
1119 }
1120
1121 err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1122 sizeof(version));
1123 if (err) {
1124 dev_err(dev, "read device mode failed\n");
1125 return err;
1126 }
1127
1128 if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1129 sizeof(struct version_info)) == 0) {
1130 dev_info(dev, "secondary fw is not flashed\n");
1131 *mode = SECONDARY_BL;
1132 } else if (le16_to_cpu(version[FW1].base.build) <
1133 secondary_fw_min_ver) {
1134 dev_info(dev, "secondary fw version is too low (< %d)\n",
1135 secondary_fw_min_ver);
1136 *mode = SECONDARY;
1137 } else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1138 sizeof(struct version_info)) == 0) {
1139 dev_info(dev, "primary fw is not flashed\n");
1140 *mode = PRIMARY;
1141 } else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1142 &version[FW2].app)) {
1143 dev_info(dev, "found primary fw with later version\n");
1144 *mode = PRIMARY;
1145 } else {
1146 dev_info(dev, "secondary and primary fw are the latest\n");
1147 *mode = FLASH_NOT_NEEDED;
1148 }
1149 return 0;
1150 }
1151
/*
 * do_flash() - flash one firmware image selected by @mode.
 * @uc:   driver instance
 * @mode: which image to flash; index into ccg_fw_names[]
 *
 * Requests the .cyacd firmware image, writes the trailing FW config table
 * and signature first if the image is signed, then parses the image line by
 * line and writes each row into device flash.  Validates the written image
 * and resets the device afterwards.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
{
	struct device *dev = uc->dev;
	const struct firmware *fw = NULL;
	const char *p, *s;
	const char *eof;
	int err, row, len, line_sz, line_cnt = 0;
	unsigned long start_time = jiffies;	/* for the flash-time log below */
	struct fw_config_table fw_cfg;
	u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
	u8 *wr_buf;

	err = request_firmware(&fw, ccg_fw_names[mode], dev);
	if (err) {
		dev_err(dev, "request %s failed err=%d\n",
			ccg_fw_names[mode], err);
		return err;
	}

	/*
	 * If the device is currently running from the primary image (FW2),
	 * disable the PD ports and jump to an alternate image so that FW2
	 * can be overwritten.
	 */
	if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
			CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
		err = ccg_cmd_port_control(uc, false);
		if (err < 0)
			goto release_fw;
		err = ccg_cmd_jump_boot_mode(uc, 0);
		if (err < 0)
			goto release_fw;
	}

	eof = fw->data + fw->size;

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
		goto not_signed_fw;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));

	/* "FWCT" magic (little-endian) marks a signed image. */
	if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
		dev_info(dev, "not a signed image\n");
		goto not_signed_fw;
	}
	/* Exclude the trailing config table + signature from row parsing. */
	eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);

	memcpy((uint8_t *)&fw_cfg_sig,
	       fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));

	/* flash fw config table and signature first */
	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
				      FLASH_FWCT1_WR_CMD);
	if (err)
		goto release_fw;

	/* Config table spans two rows; second half starts at CCG4_ROW_SIZE. */
	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
				      FLASH_FWCT2_WR_CMD);
	if (err)
		goto release_fw;

	/*
	 * NOTE(review): &fw_cfg_sig is u8 (*)[FW_CFG_TABLE_SIG_SIZE], not
	 * u8 * — presumably meant to be (u8 *)&fw_cfg_sig; confirm it
	 * compiles cleanly.
	 */
	err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
				      FLASH_FWCT_SIG_WR_CMD);
	if (err)
		goto release_fw;

not_signed_fw:
	/* Row buffer: 2-byte row number + 2-byte length + one row of data. */
	wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
	if (!wr_buf) {
		err = -ENOMEM;
		goto release_fw;
	}

	err = ccg_cmd_enter_flashing(uc);
	if (err)
		goto release_mem;

	/*****************************************************************
	 * CCG firmware image (.cyacd) file line format
	 *
	 * :00rrrrllll[dd....]cc/r/n
	 *
	 * :00 header
	 * rrrr is row number to flash				(4 char)
	 * llll is data len to flash				(4 char)
	 * dd is a data field represents one byte of data	(512 char)
	 * cc is checksum					(2 char)
	 * \r\n newline
	 *
	 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
	 *
	 *****************************************************************/

	/*
	 * NOTE(review): strnchr() may return NULL if the image contains no
	 * ':' at all; the loop below would then dereference p + 1 — verify
	 * that shipped images always begin with a ':' line.
	 */
	p = strnchr(fw->data, fw->size, ':');
	while (p < eof) {
		s = strnchr(p + 1, eof - p - 1, ':');

		if (!s)
			s = eof;

		line_sz = s - p;

		if (line_sz != CYACD_LINE_SIZE) {
			dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
			err = -EINVAL;
			goto release_mem;
		}

		/* p + 3 skips the ":00" header; hex decodes row+len+data. */
		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
			err = -EINVAL;
			goto release_mem;
		}

		row = get_unaligned_be16(wr_buf);
		len = get_unaligned_be16(&wr_buf[2]);

		if (len != CCG4_ROW_SIZE) {
			err = -EINVAL;
			goto release_mem;
		}

		err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
					      FLASH_WR_CMD);
		if (err)
			goto release_mem;

		line_cnt++;
		p = s;
	}

	dev_info(dev, "total %d row flashed. time: %dms\n",
		 line_cnt, jiffies_to_msecs(jiffies - start_time));

	/* Ask the device to validate the image just written. */
	err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 : FW1);
	if (err)
		dev_err(dev, "%s validation failed err=%d\n",
			(mode == PRIMARY) ? "FW2" : "FW1", err);
	else
		dev_info(dev, "%s validated\n",
			 (mode == PRIMARY) ? "FW2" : "FW1");

	/* Reset into the new firmware and re-enable the PD ports. */
	err = ccg_cmd_port_control(uc, false);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_reset(uc);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_port_control(uc, true);
	if (err < 0)
		goto release_mem;

release_mem:
	kfree(wr_buf);

release_fw:
	release_firmware(fw);
	return err;
}
1312
1313 /*******************************************************************************
1314 * CCG4 has two copies of the firmware in addition to the bootloader.
1315 * If the device is running FW1, FW2 can be updated with the new version.
1316 * Dual firmware mode allows the CCG device to stay in a PD contract and support
1317 * USB PD and Type-C functionality while a firmware update is in progress.
1318 ******************************************************************************/
ccg_fw_update(struct ucsi_ccg * uc,enum enum_flash_mode flash_mode)1319 static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1320 {
1321 int err = 0;
1322
1323 while (flash_mode != FLASH_NOT_NEEDED) {
1324 err = do_flash(uc, flash_mode);
1325 if (err < 0)
1326 return err;
1327 err = ccg_fw_update_needed(uc, &flash_mode);
1328 if (err < 0)
1329 return err;
1330 }
1331 dev_info(uc->dev, "CCG FW update successful\n");
1332
1333 return err;
1334 }
1335
ccg_restart(struct ucsi_ccg * uc)1336 static int ccg_restart(struct ucsi_ccg *uc)
1337 {
1338 struct device *dev = uc->dev;
1339 int status;
1340
1341 status = ucsi_ccg_init(uc);
1342 if (status < 0) {
1343 dev_err(dev, "ucsi_ccg_start fail, err=%d\n", status);
1344 return status;
1345 }
1346
1347 status = ccg_request_irq(uc);
1348 if (status < 0) {
1349 dev_err(dev, "request_threaded_irq failed - %d\n", status);
1350 return status;
1351 }
1352
1353 status = ucsi_register(uc->ucsi);
1354 if (status) {
1355 dev_err(uc->dev, "failed to register the interface\n");
1356 return status;
1357 }
1358
1359 pm_runtime_enable(uc->dev);
1360 return 0;
1361 }
1362
ccg_update_firmware(struct work_struct * work)1363 static void ccg_update_firmware(struct work_struct *work)
1364 {
1365 struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1366 enum enum_flash_mode flash_mode;
1367 int status;
1368
1369 status = ccg_fw_update_needed(uc, &flash_mode);
1370 if (status < 0)
1371 return;
1372
1373 if (flash_mode != FLASH_NOT_NEEDED) {
1374 ucsi_unregister(uc->ucsi);
1375 pm_runtime_disable(uc->dev);
1376 free_irq(uc->irq, uc);
1377
1378 ccg_fw_update(uc, flash_mode);
1379 ccg_restart(uc);
1380 }
1381 }
1382
do_flash_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t n)1383 static ssize_t do_flash_store(struct device *dev,
1384 struct device_attribute *attr,
1385 const char *buf, size_t n)
1386 {
1387 struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1388 bool flash;
1389
1390 if (kstrtobool(buf, &flash))
1391 return -EINVAL;
1392
1393 if (!flash)
1394 return n;
1395
1396 schedule_work(&uc->work);
1397 return n;
1398 }
1399
ucsi_ccg_attrs_is_visible(struct kobject * kobj,struct attribute * attr,int idx)1400 static umode_t ucsi_ccg_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
1401 {
1402 struct device *dev = kobj_to_dev(kobj);
1403 struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1404
1405 if (!uc->fw_build)
1406 return 0;
1407
1408 return attr->mode;
1409 }
1410
/* Write-only sysfs attribute; handled by do_flash_store() above. */
static DEVICE_ATTR_WO(do_flash);

static struct attribute *ucsi_ccg_attrs[] = {
	&dev_attr_do_flash.attr,
	NULL,
};
static struct attribute_group ucsi_ccg_attr_group = {
	.attrs = ucsi_ccg_attrs,
	/* Attributes are hidden when no FW build info is available. */
	.is_visible = ucsi_ccg_attrs_is_visible,
};
static const struct attribute_group *ucsi_ccg_groups[] = {
	&ucsi_ccg_attr_group,
	NULL,
};
1425
/*
 * ucsi_ccg_probe() - i2c probe: reset and initialize the CCGx device,
 * read its firmware info, register the UCSI interface and enable
 * runtime PM with autosuspend.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ucsi_ccg_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ucsi_ccg *uc;
	const char *fw_name;
	int status;

	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
	if (!uc)
		return -ENOMEM;

	uc->dev = dev;
	uc->client = client;
	uc->irq = client->irq;
	mutex_init(&uc->lock);
	INIT_WORK(&uc->work, ccg_update_firmware);
	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);

	/* Only fail FW flashing when FW build information is not provided */
	status = device_property_read_string(dev, "firmware-name", &fw_name);
	if (!status) {
		if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
		else if (!strcmp(fw_name, "nvidia,gpu"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA;
		/* Unknown name: warn but continue; only flashing is blocked. */
		if (!uc->fw_build)
			dev_err(uc->dev, "failed to get FW build information\n");
	}

	/* reset ccg device and initialize ucsi */
	status = ucsi_ccg_init(uc);
	if (status < 0) {
		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
		return status;
	}

	status = get_fw_info(uc);
	if (status < 0) {
		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
		return status;
	}

	/* One PD port always; a second if the device-mode register says so. */
	uc->port_num = 1;

	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
		uc->port_num++;

	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
	if (IS_ERR(uc->ucsi))
		return PTR_ERR(uc->ucsi);

	ucsi_set_drvdata(uc->ucsi, uc);

	status = ccg_request_irq(uc);
	if (status < 0) {
		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
		goto out_ucsi_destroy;
	}

	status = ucsi_register(uc->ucsi);
	if (status)
		goto out_free_irq;

	i2c_set_clientdata(client, uc);

	device_disable_async_suspend(uc->dev);

	/* Enable runtime PM with a 5 s autosuspend delay. */
	pm_runtime_set_active(uc->dev);
	pm_runtime_enable(uc->dev);
	pm_runtime_use_autosuspend(uc->dev);
	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
	pm_runtime_idle(uc->dev);

	return 0;

out_free_irq:
	free_irq(uc->irq, uc);
out_ucsi_destroy:
	ucsi_destroy(uc->ucsi);

	return status;
}
1508
/*
 * ucsi_ccg_remove() - i2c remove: tear down in reverse order of probe.
 *
 * Work items are flushed first so no FW-update or PM-workaround work can
 * run against a half-torn-down device.
 */
static void ucsi_ccg_remove(struct i2c_client *client)
{
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	cancel_work_sync(&uc->pm_work);
	cancel_work_sync(&uc->work);
	pm_runtime_disable(uc->dev);
	ucsi_unregister(uc->ucsi);
	ucsi_destroy(uc->ucsi);
	free_irq(uc->irq, uc);
}
1520
/* Device-tree match: Cypress CYPD4226 Type-C controller. */
static const struct of_device_id ucsi_ccg_of_match_table[] = {
		{ .compatible = "cypress,cypd4226", },
		{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);

/* Plain i2c device-id match. */
static const struct i2c_device_id ucsi_ccg_device_id[] = {
	{ "ccgx-ucsi" },
	{}
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);

/* ACPI match: AMD platforms exposing the controller as AMDI0042. */
static const struct acpi_device_id amd_i2c_ucsi_match[] = {
	{"AMDI0042"},
	{}
};
MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1538
ucsi_ccg_resume(struct device * dev)1539 static int ucsi_ccg_resume(struct device *dev)
1540 {
1541 struct i2c_client *client = to_i2c_client(dev);
1542 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1543
1544 return ucsi_resume(uc->ucsi);
1545 }
1546
/*
 * Runtime suspend callback: nothing to do for the device itself; the
 * always-zero return lets the core runtime-suspend the i2c device.
 */
static int ucsi_ccg_runtime_suspend(struct device *dev)
{
	return 0;
}
1551
ucsi_ccg_runtime_resume(struct device * dev)1552 static int ucsi_ccg_runtime_resume(struct device *dev)
1553 {
1554 struct i2c_client *client = to_i2c_client(dev);
1555 struct ucsi_ccg *uc = i2c_get_clientdata(client);
1556
1557 /*
1558 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
1559 * of missing interrupt when a device is connected for runtime resume.
1560 * Schedule a work to call ISR as a workaround.
1561 */
1562 if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1563 uc->fw_version <= CCG_OLD_FW_VERSION)
1564 schedule_work(&uc->pm_work);
1565
1566 return 0;
1567 }
1568
/* PM callbacks: system resume plus runtime PM hooks defined above. */
static const struct dev_pm_ops ucsi_ccg_pm = {
	.resume = ucsi_ccg_resume,
	.runtime_suspend = ucsi_ccg_runtime_suspend,
	.runtime_resume = ucsi_ccg_runtime_resume,
};

/* i2c driver matching via DT, ACPI or plain i2c id. */
static struct i2c_driver ucsi_ccg_driver = {
	.driver = {
		.name = "ucsi_ccg",
		.pm = &ucsi_ccg_pm,
		.dev_groups = ucsi_ccg_groups,
		.acpi_match_table = amd_i2c_ucsi_match,
		.of_match_table = ucsi_ccg_of_match_table,
	},
	.probe = ucsi_ccg_probe,
	.remove = ucsi_ccg_remove,
	.id_table = ucsi_ccg_device_id,
};

module_i2c_driver(ucsi_ccg_driver);

MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
MODULE_LICENSE("GPL v2");
1593