xref: /linux/drivers/usb/typec/ucsi/ucsi_ccg.c (revision c89756bcf406af313d191cfe3709e7c175c5b0cd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * UCSI driver for Cypress CCGx Type-C controller
4  *
5  * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
6  * Author: Ajay Gupta <ajayg@nvidia.com>
7  *
8  * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
9  */
10 #include <linux/acpi.h>
11 #include <linux/delay.h>
12 #include <linux/firmware.h>
13 #include <linux/i2c.h>
14 #include <linux/module.h>
15 #include <linux/pci.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm.h>
18 #include <linux/pm_runtime.h>
19 #include <linux/usb/typec_dp.h>
20 
21 #include <linux/unaligned.h>
22 #include "ucsi.h"
23 
/* Firmware partition the controller is currently executing from */
enum enum_fw_mode {
	BOOT,   /* bootloader */
	FW1,    /* FW partition-1 (contains secondary fw) */
	FW2,    /* FW partition-2 (contains primary fw) */
	FW_INVALID,
};
30 
31 #define CCGX_RAB_DEVICE_MODE			0x0000
32 #define CCGX_RAB_INTR_REG			0x0006
33 #define  DEV_INT				BIT(0)
34 #define  PORT0_INT				BIT(1)
35 #define  PORT1_INT				BIT(2)
36 #define  UCSI_READ_INT				BIT(7)
37 #define CCGX_RAB_JUMP_TO_BOOT			0x0007
38 #define  TO_BOOT				'J'
39 #define  TO_ALT_FW				'A'
40 #define CCGX_RAB_RESET_REQ			0x0008
41 #define  RESET_SIG				'R'
42 #define  CMD_RESET_I2C				0x0
43 #define  CMD_RESET_DEV				0x1
44 #define CCGX_RAB_ENTER_FLASHING			0x000A
45 #define  FLASH_ENTER_SIG			'P'
46 #define CCGX_RAB_VALIDATE_FW			0x000B
47 #define CCGX_RAB_FLASH_ROW_RW			0x000C
48 #define  FLASH_SIG				'F'
49 #define  FLASH_RD_CMD				0x0
50 #define  FLASH_WR_CMD				0x1
51 #define  FLASH_FWCT1_WR_CMD			0x2
52 #define  FLASH_FWCT2_WR_CMD			0x3
53 #define  FLASH_FWCT_SIG_WR_CMD			0x4
54 #define CCGX_RAB_READ_ALL_VER			0x0010
55 #define CCGX_RAB_READ_FW2_VER			0x0020
56 #define CCGX_RAB_UCSI_CONTROL			0x0039
57 #define CCGX_RAB_UCSI_CONTROL_START		BIT(0)
58 #define CCGX_RAB_UCSI_CONTROL_STOP		BIT(1)
59 #define CCGX_RAB_UCSI_DATA_BLOCK(offset)	(0xf000 | ((offset) & 0xff))
60 #define REG_FLASH_RW_MEM        0x0200
61 #define DEV_REG_IDX				CCGX_RAB_DEVICE_MODE
62 #define CCGX_RAB_PDPORT_ENABLE			0x002C
63 #define  PDPORT_1		BIT(0)
64 #define  PDPORT_2		BIT(1)
65 #define CCGX_RAB_RESPONSE			0x007E
66 #define  ASYNC_EVENT				BIT(7)
67 
68 /* CCGx events & async msg codes */
69 #define RESET_COMPLETE		0x80
70 #define EVENT_INDEX		RESET_COMPLETE
71 #define PORT_CONNECT_DET	0x84
72 #define PORT_DISCONNECT_DET	0x85
73 #define ROLE_SWAP_COMPELETE	0x87
74 
75 /* ccg firmware */
76 #define CYACD_LINE_SIZE         527
77 #define CCG4_ROW_SIZE           256
78 #define FW1_METADATA_ROW        0x1FF
79 #define FW2_METADATA_ROW        0x1FE
80 #define FW_CFG_TABLE_SIG_SIZE	256
81 
82 static int secondary_fw_min_ver = 41;
83 
/* Which image to flash, and which running image performs the update */
enum enum_flash_mode {
	SECONDARY_BL,	/* update secondary using bootloader */
	PRIMARY,	/* update primary using secondary */
	SECONDARY,	/* update secondary using primary */
	FLASH_NOT_NEEDED,	/* update not required */
	FLASH_INVALID,
};
91 
/* Firmware file per flash mode; indexed by enum enum_flash_mode */
static const char * const ccg_fw_names[] = {
	"ccg_boot.cyacd",
	"ccg_primary.cyacd",
	"ccg_secondary.cyacd"
};
97 
/* Layout of the CCGX_RAB_DEVICE_MODE register block, read over I2C */
struct ccg_dev_info {
#define CCG_DEVINFO_FWMODE_SHIFT (0)
#define CCG_DEVINFO_FWMODE_MASK (0x3 << CCG_DEVINFO_FWMODE_SHIFT)
#define CCG_DEVINFO_PDPORTS_SHIFT (2)
#define CCG_DEVINFO_PDPORTS_MASK (0x3 << CCG_DEVINFO_PDPORTS_SHIFT)
	u8 mode;	/* fw mode + PD port count, see bit-fields above */
	u8 bl_mode;
	__le16 silicon_id;
	__le16 bl_last_row;
} __packed;
108 
/* On-device version record: little-endian build number plus patch/ver */
struct version_format {
	__le16 build;
	u8 patch;
	u8 ver;		/* major/minor packed in high/low nibbles (masks below) */
#define CCG_VERSION_PATCH(x) ((x) << 16)
#define CCG_VERSION(x)	((x) << 24)
#define CCG_VERSION_MIN_SHIFT (0)
#define CCG_VERSION_MIN_MASK (0xf << CCG_VERSION_MIN_SHIFT)
#define CCG_VERSION_MAJ_SHIFT (4)
#define CCG_VERSION_MAJ_MASK (0xf << CCG_VERSION_MAJ_SHIFT)
} __packed;
120 
121 /*
122  * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
123  * of missing interrupt when a device is connected for runtime resume
124  */
125 #define CCG_FW_BUILD_NVIDIA	(('n' << 8) | 'v')
126 #define CCG_OLD_FW_VERSION	(CCG_VERSION(0x31) | CCG_VERSION_PATCH(10))
127 
128 /* Firmware for Tegra doesn't support UCSI ALT command, built
129  * for NVIDIA has known issue of reporting wrong capability info
130  */
131 #define CCG_FW_BUILD_NVIDIA_TEGRA	(('g' << 8) | 'n')
132 
133 /* Altmode offset for NVIDIA Function Test Board (FTB) */
134 #define NVIDIA_FTB_DP_OFFSET	(2)
135 #define NVIDIA_FTB_DBG_OFFSET	(3)
136 
/* Base (bootloader) and application version records for one partition */
struct version_info {
	struct version_format base;
	struct version_format app;
};
141 
/*
 * Firmware configuration table (FWCT) found at the tail of signed
 * .cyacd images, followed by a FW_CFG_TABLE_SIG_SIZE byte signature.
 */
struct fw_config_table {
	u32 identity;		/* 'FWCT' magic */
	u16 table_size;
	u8 fwct_version;
	u8 is_key_change;
	u8 guid[16];
	struct version_format base;
	struct version_format app;
	u8 primary_fw_digest[32];
	u32 key_exp_length;
	u8 key_modulus[256];
	u8 key_exp[4];
};
155 
/* CCGx response codes (synchronous results latched in ccg_resp.code) */
enum ccg_resp_code {
	CMD_NO_RESP             = 0x00,
	CMD_SUCCESS             = 0x02,
	FLASH_DATA_AVAILABLE    = 0x03,
	CMD_INVALID             = 0x05,
	FLASH_UPDATE_FAIL       = 0x07,
	INVALID_FW              = 0x08,
	INVALID_ARG             = 0x09,
	CMD_NOT_SUPPORT         = 0x0A,
	TRANSACTION_FAIL        = 0x0C,
	PD_CMD_FAIL             = 0x0D,
	UNDEF_ERROR             = 0x0F,
	INVALID_RESP		= 0x10,
};
171 
172 #define CCG_EVENT_MAX	(EVENT_INDEX + 43)
173 
/* One HPI command: target register, raw payload and post-write wait */
struct ccg_cmd {
	u16 reg;
	u32 data;	/* payload bytes; only the first 'len' are written */
	int len;
	u32 delay; /* ms delay for cmd timeout  */
};
180 
/* Response read from CCGX_RAB_RESPONSE */
struct ccg_resp {
	u8 code;	/* ccg_resp_code, or async event when bit 7 is set */
	u8 length;
};
185 
/* Driver-side altmode bookkeeping used to merge duplicate DP altmodes */
struct ucsi_ccg_altmode {
	u16 svid;
	u32 mid;
	u8 linked_idx;	/* index into the other list, or UCSI_MULTI_DP_INDEX */
	u8 active_idx;	/* original index of the DP mode last entered */
#define UCSI_MULTI_DP_INDEX	(0xff)
	bool checked;	/* already merged into an earlier entry */
} __packed;
194 
#define CCGX_MESSAGE_IN_MAX 4
/* Cached UCSI OpRegion snapshot (CCI + MESSAGE_IN), filled from the ISR */
struct op_region {
	__le32 cci;
	__le32 message_in[CCGX_MESSAGE_IN_MAX];
};
200 
/* Per-device driver state */
struct ucsi_ccg {
	struct device *dev;
	struct ucsi *ucsi;
	struct i2c_client *client;

	struct ccg_dev_info info;
	/* version info for boot, primary and secondary */
	struct version_info version[FW2 + 1];
	/* running app version, CCG_VERSION() | CCG_VERSION_PATCH() encoded */
	u32 fw_version;
	/* CCG HPI communication flags */
	unsigned long flags;
#define RESET_PENDING	0
#define DEV_CMD_PENDING	1
	struct ccg_resp dev_resp;	/* last response read from the device */
	u8 cmd_resp;			/* result code of the pending command */
	int port_num;
	int irq;
	struct work_struct work;
	struct mutex lock; /* to sync between user and driver thread */

	/* fw build with vendor information */
	u16 fw_build;
	struct work_struct pm_work;

	/* true once duplicate DP altmodes were merged into a single entry */
	bool has_multiple_dp;
	struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];	/* as reported */
	struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];	/* squashed */

	/*
	 * This spinlock protects op_data which includes CCI and MESSAGE_IN that
	 * will be updated in ISR
	 */
	spinlock_t op_lock;
	struct op_region op_data;
};
236 
/*
 * Read @len bytes from CCGx register address @rab into @data.
 *
 * The transfer is chunked when the i2c adapter advertises a
 * max_read_len quirk; the register address advances per chunk.
 * The device is held runtime-resumed for the whole transfer.
 *
 * Returns 0 on success or a negative i2c_transfer() error code.
 */
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
	unsigned char buf[2];
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags  = 0x0,
			.len	= sizeof(buf),
			.buf	= buf,
		},
		{
			.addr	= client->addr,
			.flags  = I2C_M_RD,
			.buf	= data,
		},
	};
	u32 rlen, rem_len = len, max_read_len = len;
	int status = 0;

	/* check any max_read_len limitation on i2c adapter */
	if (quirks && quirks->max_read_len)
		max_read_len = quirks->max_read_len;

	pm_runtime_get_sync(uc->dev);
	while (rem_len > 0) {
		msgs[1].buf = &data[len - rem_len];
		/* i2c_msg.len is u16, so min_t(u16, ...) matches its range */
		rlen = min_t(u16, rem_len, max_read_len);
		msgs[1].len = rlen;
		put_unaligned_le16(rab, buf);
		status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
		if (status < 0) {
			dev_err(uc->dev, "i2c_transfer failed %d\n", status);
			goto out_put;	/* single runtime-PM release path */
		}
		rab += rlen;
		rem_len -= rlen;
	}
	/* i2c_transfer() returns the message count on success; report 0 */
	status = 0;

out_put:
	pm_runtime_put_sync(uc->dev);
	return status;
}
281 
/*
 * Write @len bytes from @data to CCGx register address @rab.
 *
 * A temporary buffer is used to prepend the little-endian register
 * address to the payload, as the device expects one combined message.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or a negative
 * i2c_transfer() error code.
 */
static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
{
	struct i2c_client *client = uc->client;
	unsigned char *buf;
	struct i2c_msg msgs[] = {
		{
			.addr	= client->addr,
			.flags  = 0x0,
		}
	};
	int status;

	buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* message layout: [rab lo][rab hi][payload...] */
	put_unaligned_le16(rab, buf);
	memcpy(buf + sizeof(rab), data, len);

	msgs[0].len = len + sizeof(rab);
	msgs[0].buf = buf;

	pm_runtime_get_sync(uc->dev);
	status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	pm_runtime_put_sync(uc->dev);
	kfree(buf);	/* single cleanup path for success and failure */

	if (status < 0) {
		dev_err(uc->dev, "i2c_transfer failed %d\n", status);
		return status;
	}

	return 0;
}
317 
/*
 * Refresh the cached OpRegion: store @cci and, when it reports payload
 * bytes, re-read MESSAGE_IN from the device first.
 *
 * The device read happens outside op_lock (it sleeps); only the final
 * publication into uc->op_data is done under the lock.  A small on-stack
 * staging buffer is used instead of a GFP_ATOMIC allocation, which
 * removes a needless failure path on this 16-byte copy.
 *
 * Returns 0 on success or the ccg_read() error.
 */
static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
{
	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
	struct op_region *data = &uc->op_data;
	__le32 buf[CCGX_MESSAGE_IN_MAX] = {};

	if (UCSI_CCI_LENGTH(cci)) {
		int ret = ccg_read(uc, reg, (u8 *)buf, sizeof(buf));

		if (ret)
			return ret;
	}

	spin_lock(&uc->op_lock);
	data->cci = cpu_to_le32(cci);
	if (UCSI_CCI_LENGTH(cci))
		memcpy(&data->message_in, buf, sizeof(buf));
	spin_unlock(&uc->op_lock);

	return 0;
}
345 
ucsi_ccg_init(struct ucsi_ccg * uc)346 static int ucsi_ccg_init(struct ucsi_ccg *uc)
347 {
348 	unsigned int count = 10;
349 	u8 data;
350 	int status;
351 
352 	spin_lock_init(&uc->op_lock);
353 
354 	data = CCGX_RAB_UCSI_CONTROL_STOP;
355 	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
356 	if (status < 0)
357 		return status;
358 
359 	data = CCGX_RAB_UCSI_CONTROL_START;
360 	status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
361 	if (status < 0)
362 		return status;
363 
364 	/*
365 	 * Flush CCGx RESPONSE queue by acking interrupts. Above ucsi control
366 	 * register write will push response which must be cleared.
367 	 */
368 	do {
369 		status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
370 		if (status < 0)
371 			return status;
372 
373 		if (!(data & DEV_INT))
374 			return 0;
375 
376 		status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
377 		if (status < 0)
378 			return status;
379 
380 		usleep_range(10000, 11000);
381 	} while (--count);
382 
383 	return -ETIMEDOUT;
384 }
385 
/*
 * Remap the altmode index returned by GET_CURRENT_CAM from the device's
 * original list into the squashed list presented to the OS, remembering
 * which original mode is active.
 */
static void ucsi_ccg_update_get_current_cam_cmd(struct ucsi_ccg *uc, u8 *data)
{
	u8 reported = data[0];
	u8 remapped = uc->orig[reported].linked_idx;

	uc->updated[remapped].active_idx = reported;
	data[0] = remapped;
}
395 
/*
 * Rebuild the connector's altmode list so that several DP altmodes that
 * differ only in pin assignment collapse into one entry whose mid is the
 * OR of all of them.  Both the original (uc->orig) and squashed
 * (uc->updated) lists are cached so SET_NEW_CAM / GET_CURRENT_CAM can
 * translate between the two index spaces later.
 *
 * Returns true when at least one duplicate DP altmode was merged.
 */
static bool ucsi_ccg_update_altmodes(struct ucsi *ucsi,
				     struct ucsi_altmode *orig,
				     struct ucsi_altmode *updated)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_ccg_altmode *alt, *new_alt;
	int i, j, k = 0;	/* k indexes the squashed list being built */
	bool found = false;

	alt = uc->orig;
	new_alt = uc->updated;
	memset(uc->updated, 0, sizeof(uc->updated));

	/*
	 * Copy original connector altmodes to new structure.
	 * We need this before second loop since second loop
	 * checks for duplicate altmodes.
	 */
	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		alt[i].svid = orig[i].svid;
		alt[i].mid = orig[i].mid;
		if (!alt[i].svid)
			break;	/* a zero SVID terminates the list */
	}

	for (i = 0; i < UCSI_MAX_ALTMODES; i++) {
		if (!alt[i].svid)
			break;

		/* already checked and considered */
		if (alt[i].checked)
			continue;

		if (!DP_CONF_GET_PIN_ASSIGN(alt[i].mid)) {
			/* Found Non DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
			updated[k].svid = new_alt[k].svid;
			updated[k].mid = new_alt[k].mid;
			k++;
			continue;
		}

		/* DP altmode: fold every later same-SVID DP entry into it */
		for (j = i + 1; j < UCSI_MAX_ALTMODES; j++) {
			if (alt[i].svid != alt[j].svid ||
			    !DP_CONF_GET_PIN_ASSIGN(alt[j].mid)) {
				continue;
			} else {
				/* Found duplicate DP mode */
				new_alt[k].svid = alt[i].svid;
				new_alt[k].mid |= alt[i].mid | alt[j].mid;
				new_alt[k].linked_idx = UCSI_MULTI_DP_INDEX;
				alt[i].linked_idx = k;
				alt[j].linked_idx = k;
				alt[j].checked = true;
				found = true;
			}
		}
		if (found) {
			uc->has_multiple_dp = true;
		} else {
			/* Didn't find any duplicate DP altmode */
			new_alt[k].svid = alt[i].svid;
			new_alt[k].mid |= alt[i].mid;
			new_alt[k].linked_idx = i;
			alt[i].linked_idx = k;
		}
		updated[k].svid = new_alt[k].svid;
		updated[k].mid = new_alt[k].mid;
		k++;
	}
	return found;
}
471 
/*
 * Translate the altmode index of a SET_NEW_CAM command from the squashed
 * list the OS saw (uc->updated) back into the device's original index
 * space (uc->orig).  For a merged multi-DP entry, pick the mode with the
 * best pin assignment supported by both port and partner.
 */
static void ucsi_ccg_update_set_new_cam_cmd(struct ucsi_ccg *uc,
					    struct ucsi_connector *con,
					    u64 *cmd)
{
	struct ucsi_ccg_altmode *new_port, *port;
	struct typec_altmode *alt = NULL;
	u8 new_cam, cam, pin;
	bool enter_new_mode;
	int i, j, k = 0xff;	/* 0xff = no candidate chosen yet */

	port = uc->orig;
	new_cam = UCSI_SET_NEW_CAM_GET_AM(*cmd);
	if (new_cam >= ARRAY_SIZE(uc->updated))
		return;		/* index out of range: leave the command untouched */
	new_port = &uc->updated[new_cam];
	cam = new_port->linked_idx;
	enter_new_mode = UCSI_SET_NEW_CAM_ENTER(*cmd);

	/*
	 * If CAM is UCSI_MULTI_DP_INDEX then this is DP altmode
	 * with multiple DP mode. Find out CAM for best pin assignment
	 * among all DP mode. Priorite pin E->D->C after making sure
	 * the partner supports that pin.
	 */
	if (cam == UCSI_MULTI_DP_INDEX) {
		if (enter_new_mode) {
			/* find the partner altmode matching this SVID */
			for (i = 0; con->partner_altmode[i]; i++) {
				alt = con->partner_altmode[i];
				if (alt->svid == new_port->svid)
					break;
			}
			/*
			 * alt will always be non NULL since this is
			 * UCSI_SET_NEW_CAM command and so there will be
			 * at least one con->partner_altmode[i] with svid
			 * matching with new_port->svid.
			 */
			for (j = 0; port[j].svid; j++) {
				pin = DP_CONF_GET_PIN_ASSIGN(port[j].mid);
				if (alt && port[j].svid == alt->svid &&
				    (pin & DP_CONF_GET_PIN_ASSIGN(alt->vdo))) {
					/* prioritize pin E->D->C */
					if (k == 0xff || (k != 0xff && pin >
					    DP_CONF_GET_PIN_ASSIGN(port[k].mid))
					    ) {
						k = j;
					}
				}
			}
			cam = k;
			new_port->active_idx = cam;
		} else {
			/* exiting: reuse the index chosen when entering */
			cam = new_port->active_idx;
		}
	}
	/* rewrite the altmode index field with the translated value */
	*cmd &= ~UCSI_SET_NEW_CAM_AM_MASK;
	*cmd |= UCSI_SET_NEW_CAM_SET_AM(cam);
}
530 
531 /*
532  * Change the order of vdo values of NVIDIA test device FTB
533  * (Function Test Board) which reports altmode list with vdo=0x3
534  * first and then vdo=0x. Current logic to assign mode value is
535  * based on order in altmode list and it causes a mismatch of CON
536  * and SOP altmodes since NVIDIA GPU connector has order of vdo=0x1
537  * first and then vdo=0x3
538  */
static void ucsi_ccg_nvidia_altmode(struct ucsi_ccg *uc,
				    struct ucsi_altmode *alt,
				    u64 command)
{
	u64 offset = UCSI_ALTMODE_OFFSET(command);

	if (offset == NVIDIA_FTB_DP_OFFSET) {
		/* DP slot reporting the DBG vdo: substitute the DP vdo */
		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DBG_VDO)
			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DP_VDO |
				     DP_CAP_DP_SIGNALLING(0) | DP_CAP_USB |
				     DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_E));
	} else if (offset == NVIDIA_FTB_DBG_OFFSET) {
		/* DBG slot reporting the DP vdo: substitute the DBG vdo */
		if (alt[0].mid == USB_TYPEC_NVIDIA_VLINK_DP_VDO)
			alt[0].mid = USB_TYPEC_NVIDIA_VLINK_DBG_VDO;
	}
}
558 
/* Fetch the UCSI VERSION register through the CCGx data block window */
static int ucsi_ccg_read_version(struct ucsi *ucsi, u16 *version)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	return ccg_read(uc, CCGX_RAB_UCSI_DATA_BLOCK(UCSI_VERSION),
			(u8 *)version, sizeof(*version));
}
566 
/*
 * Return the CCI value cached by the interrupt handler.
 *
 * op_data.cci is stored as __le32 (cpu_to_le32() in
 * ccg_op_region_update()), so convert it back for the caller; the
 * original plain assignment was only correct on little-endian hosts.
 */
static int ucsi_ccg_read_cci(struct ucsi *ucsi, u32 *cci)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	spin_lock(&uc->op_lock);
	*cci = le32_to_cpu(uc->op_data.cci);
	spin_unlock(&uc->op_lock);

	return 0;
}
577 
/*
 * Copy the cached MESSAGE_IN bytes to the caller.
 *
 * The copy length is clamped to the size of the cached buffer so an
 * oversized @val_len can never read past op_data.message_in.
 */
static int ucsi_ccg_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	val_len = min(val_len, sizeof(uc->op_data.message_in));

	spin_lock(&uc->op_lock);
	memcpy(val, uc->op_data.message_in, val_len);
	spin_unlock(&uc->op_lock);

	return 0;
}
588 
/* Fire-and-forget UCSI command write; completion arrives via interrupt */
static int ucsi_ccg_async_control(struct ucsi *ucsi, u64 command)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);

	/*
	 * UCSI may read CCI instantly after async_control,
	 * clear CCI to avoid caller getting wrong data before we get CCI from ISR
	 */
	spin_lock(&uc->op_lock);
	uc->op_data.cci = 0;
	spin_unlock(&uc->op_lock);

	return ccg_write(uc, CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CONTROL),
			 (u8 *)&command, sizeof(command));
}
604 
/*
 * Synchronous UCSI command execution with CCGx-specific fixups:
 * SET_NEW_CAM indices are translated for merged DP altmodes before the
 * command is sent, and GET_CURRENT_CAM / GET_ALTERNATE_MODES /
 * GET_CAPABILITY results are patched afterwards.  Serialized by
 * uc->lock and keeps the device runtime-resumed for the duration.
 */
static int ucsi_ccg_sync_control(struct ucsi *ucsi, u64 command, u32 *cci,
				 void *data, size_t size)
{
	struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
	struct ucsi_connector *con;
	int con_index;
	int ret;

	mutex_lock(&uc->lock);
	pm_runtime_get_sync(uc->dev);

	if (UCSI_COMMAND(command) == UCSI_SET_NEW_CAM &&
	    uc->has_multiple_dp) {
		/* connector number lives in bits 16.. of the command */
		con_index = (command >> 16) &
			UCSI_CMD_CONNECTOR_MASK;
		if (con_index == 0) {
			ret = -EINVAL;
			goto err_put;
		}
		con = &uc->ucsi->connector[con_index - 1];
		ucsi_ccg_update_set_new_cam_cmd(uc, con, &command);
	}

	ret = ucsi_sync_control_common(ucsi, command, cci, data, size);

	switch (UCSI_COMMAND(command)) {
	case UCSI_GET_CURRENT_CAM:
		/* map the device index back into the squashed list */
		if (uc->has_multiple_dp)
			ucsi_ccg_update_get_current_cam_cmd(uc, (u8 *)data);
		break;
	case UCSI_GET_ALTERNATE_MODES:
		/* fix up NVIDIA FTB vdo ordering, see ucsi_ccg_nvidia_altmode() */
		if (UCSI_ALTMODE_RECIPIENT(command) == UCSI_RECIPIENT_SOP) {
			struct ucsi_altmode *alt = data;

			if (alt[0].svid == USB_TYPEC_NVIDIA_VLINK_SID)
				ucsi_ccg_nvidia_altmode(uc, alt, command);
		}
		break;
	case UCSI_GET_CAPABILITY:
		/* Tegra fw advertises altmode details it cannot deliver */
		if (uc->fw_build == CCG_FW_BUILD_NVIDIA_TEGRA) {
			struct ucsi_capability *cap = data;

			cap->features &= ~UCSI_CAP_ALT_MODE_DETAILS;
		}
		break;
	default:
		break;
	}

err_put:
	pm_runtime_put_sync(uc->dev);
	mutex_unlock(&uc->lock);

	return ret;
}
660 
/* UCSI transport operations implemented on top of the CCGx I2C interface */
static const struct ucsi_operations ucsi_ccg_ops = {
	.read_version = ucsi_ccg_read_version,
	.read_cci = ucsi_ccg_read_cci,
	/* no dedicated polling path: reuse the cached-CCI reader */
	.poll_cci = ucsi_ccg_read_cci,
	.read_message_in = ucsi_ccg_read_message_in,
	.sync_control = ucsi_ccg_sync_control,
	.async_control = ucsi_ccg_async_control,
	.update_altmodes = ucsi_ccg_update_altmodes
};
670 
ccg_irq_handler(int irq,void * data)671 static irqreturn_t ccg_irq_handler(int irq, void *data)
672 {
673 	u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
674 	struct ucsi_ccg *uc = data;
675 	u8 intr_reg;
676 	u32 cci = 0;
677 	int ret = 0;
678 
679 	ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
680 	if (ret)
681 		return ret;
682 
683 	if (!intr_reg)
684 		return IRQ_HANDLED;
685 	else if (!(intr_reg & UCSI_READ_INT))
686 		goto err_clear_irq;
687 
688 	ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
689 	if (ret)
690 		goto err_clear_irq;
691 
692 	/*
693 	 * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN
694 	 * to the OpRegion before clear the UCSI interrupt
695 	 */
696 	ret = ccg_op_region_update(uc, cci);
697 	if (ret)
698 		goto err_clear_irq;
699 
700 err_clear_irq:
701 	ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
702 
703 	if (!ret)
704 		ucsi_notify_common(uc->ucsi, cci);
705 
706 	return IRQ_HANDLED;
707 }
708 
ccg_request_irq(struct ucsi_ccg * uc)709 static int ccg_request_irq(struct ucsi_ccg *uc)
710 {
711 	unsigned long flags = IRQF_ONESHOT;
712 
713 	if (!dev_fwnode(uc->dev))
714 		flags |= IRQF_TRIGGER_HIGH;
715 
716 	return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
717 }
718 
ccg_pm_workaround_work(struct work_struct * pm_work)719 static void ccg_pm_workaround_work(struct work_struct *pm_work)
720 {
721 	ccg_irq_handler(0, container_of(pm_work, struct ucsi_ccg, pm_work));
722 }
723 
get_fw_info(struct ucsi_ccg * uc)724 static int get_fw_info(struct ucsi_ccg *uc)
725 {
726 	int err;
727 
728 	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)(&uc->version),
729 		       sizeof(uc->version));
730 	if (err < 0)
731 		return err;
732 
733 	uc->fw_version = CCG_VERSION(uc->version[FW2].app.ver) |
734 			CCG_VERSION_PATCH(uc->version[FW2].app.patch);
735 
736 	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
737 		       sizeof(uc->info));
738 	if (err < 0)
739 		return err;
740 
741 	return 0;
742 }
743 
invalid_async_evt(int code)744 static inline bool invalid_async_evt(int code)
745 {
746 	return (code >= CCG_EVENT_MAX) || (code < EVENT_INDEX);
747 }
748 
/*
 * Classify the response latched in uc->dev_resp.  Async events (bit 7
 * set) either signal RESET_COMPLETE - which refreshes the cached fw
 * info and completes a pending reset - or are only range-checked.
 * Synchronous responses complete a pending device command by publishing
 * the code in uc->cmd_resp.
 */
static void ccg_process_response(struct ucsi_ccg *uc)
{
	struct device *dev = uc->dev;

	if (uc->dev_resp.code & ASYNC_EVENT) {
		if (uc->dev_resp.code == RESET_COMPLETE) {
			if (test_bit(RESET_PENDING, &uc->flags))
				uc->cmd_resp = uc->dev_resp.code;
			/* device was reset: re-read versions and mode */
			get_fw_info(uc);
		}
		if (invalid_async_evt(uc->dev_resp.code))
			dev_err(dev, "invalid async evt %d\n",
				uc->dev_resp.code);
	} else {
		if (test_bit(DEV_CMD_PENDING, &uc->flags)) {
			uc->cmd_resp = uc->dev_resp.code;
			clear_bit(DEV_CMD_PENDING, &uc->flags);
		} else {
			dev_err(dev, "dev resp 0x%04x but no cmd pending\n",
				uc->dev_resp.code);
		}
	}
}
772 
ccg_read_response(struct ucsi_ccg * uc)773 static int ccg_read_response(struct ucsi_ccg *uc)
774 {
775 	unsigned long target = jiffies + msecs_to_jiffies(1000);
776 	struct device *dev = uc->dev;
777 	u8 intval;
778 	int status;
779 
780 	/* wait for interrupt status to get updated */
781 	do {
782 		status = ccg_read(uc, CCGX_RAB_INTR_REG, &intval,
783 				  sizeof(intval));
784 		if (status < 0)
785 			return status;
786 
787 		if (intval & DEV_INT)
788 			break;
789 		usleep_range(500, 600);
790 	} while (time_is_after_jiffies(target));
791 
792 	if (time_is_before_jiffies(target)) {
793 		dev_err(dev, "response timeout error\n");
794 		return -ETIME;
795 	}
796 
797 	status = ccg_read(uc, CCGX_RAB_RESPONSE, (u8 *)&uc->dev_resp,
798 			  sizeof(uc->dev_resp));
799 	if (status < 0)
800 		return status;
801 
802 	status = ccg_write(uc, CCGX_RAB_INTR_REG, &intval, sizeof(intval));
803 	if (status < 0)
804 		return status;
805 
806 	return 0;
807 }
808 
/* Caller must hold uc->lock */
static int ccg_send_command(struct ucsi_ccg *uc, struct ccg_cmd *cmd)
{
	struct device *dev = uc->dev;
	int ret;

	/* flag a pending device command so the response path can match it */
	switch (cmd->reg & 0xF000) {
	case DEV_REG_IDX:
		set_bit(DEV_CMD_PENDING, &uc->flags);
		break;
	default:
		dev_err(dev, "invalid cmd register\n");
		break;
	}

	ret = ccg_write(uc, cmd->reg, (u8 *)&cmd->data, cmd->len);
	if (ret < 0)
		return ret;

	/* give the firmware its per-command processing time */
	msleep(cmd->delay);

	ret = ccg_read_response(uc);
	if (ret < 0) {
		dev_err(dev, "response read error\n");
		/* undo the pending flag set above */
		switch (cmd->reg & 0xF000) {
		case DEV_REG_IDX:
			clear_bit(DEV_CMD_PENDING, &uc->flags);
			break;
		default:
			dev_err(dev, "invalid cmd register\n");
			break;
		}
		return -EIO;
	}
	ccg_process_response(uc);

	/* cmd_resp carries the ccg_resp_code published by the response path */
	return uc->cmd_resp;
}
847 
ccg_cmd_enter_flashing(struct ucsi_ccg * uc)848 static int ccg_cmd_enter_flashing(struct ucsi_ccg *uc)
849 {
850 	struct ccg_cmd cmd;
851 	int ret;
852 
853 	cmd.reg = CCGX_RAB_ENTER_FLASHING;
854 	cmd.data = FLASH_ENTER_SIG;
855 	cmd.len = 1;
856 	cmd.delay = 50;
857 
858 	mutex_lock(&uc->lock);
859 
860 	ret = ccg_send_command(uc, &cmd);
861 
862 	mutex_unlock(&uc->lock);
863 
864 	if (ret != CMD_SUCCESS) {
865 		dev_err(uc->dev, "enter flashing failed ret=%d\n", ret);
866 		return ret;
867 	}
868 
869 	return 0;
870 }
871 
ccg_cmd_reset(struct ucsi_ccg * uc)872 static int ccg_cmd_reset(struct ucsi_ccg *uc)
873 {
874 	struct ccg_cmd cmd;
875 	u8 *p;
876 	int ret;
877 
878 	p = (u8 *)&cmd.data;
879 	cmd.reg = CCGX_RAB_RESET_REQ;
880 	p[0] = RESET_SIG;
881 	p[1] = CMD_RESET_DEV;
882 	cmd.len = 2;
883 	cmd.delay = 5000;
884 
885 	mutex_lock(&uc->lock);
886 
887 	set_bit(RESET_PENDING, &uc->flags);
888 
889 	ret = ccg_send_command(uc, &cmd);
890 	if (ret != RESET_COMPLETE)
891 		goto err_clear_flag;
892 
893 	ret = 0;
894 
895 err_clear_flag:
896 	clear_bit(RESET_PENDING, &uc->flags);
897 
898 	mutex_unlock(&uc->lock);
899 
900 	return ret;
901 }
902 
/* Enable or disable the PD port(s); returns 0 or the response code */
static int ccg_cmd_port_control(struct ucsi_ccg *uc, bool enable)
{
	struct ccg_cmd cmd;
	int ret;

	cmd.reg = CCGX_RAB_PDPORT_ENABLE;
	cmd.data = 0x0;
	if (enable)
		cmd.data = uc->port_num == 1 ? PDPORT_1
					     : (PDPORT_1 | PDPORT_2);
	cmd.len = 1;
	cmd.delay = 10;

	mutex_lock(&uc->lock);
	ret = ccg_send_command(uc, &cmd);
	mutex_unlock(&uc->lock);

	if (ret == CMD_SUCCESS)
		return 0;

	dev_err(uc->dev, "port control failed ret=%d\n", ret);
	return ret;
}
929 
ccg_cmd_jump_boot_mode(struct ucsi_ccg * uc,int bl_mode)930 static int ccg_cmd_jump_boot_mode(struct ucsi_ccg *uc, int bl_mode)
931 {
932 	struct ccg_cmd cmd;
933 	int ret;
934 
935 	cmd.reg = CCGX_RAB_JUMP_TO_BOOT;
936 
937 	if (bl_mode)
938 		cmd.data = TO_BOOT;
939 	else
940 		cmd.data = TO_ALT_FW;
941 
942 	cmd.len = 1;
943 	cmd.delay = 100;
944 
945 	mutex_lock(&uc->lock);
946 
947 	set_bit(RESET_PENDING, &uc->flags);
948 
949 	ret = ccg_send_command(uc, &cmd);
950 	if (ret != RESET_COMPLETE)
951 		goto err_clear_flag;
952 
953 	ret = 0;
954 
955 err_clear_flag:
956 	clear_bit(RESET_PENDING, &uc->flags);
957 
958 	mutex_unlock(&uc->lock);
959 
960 	return ret;
961 }
962 
/*
 * Write one CCG4_ROW_SIZE flash row.  The data is first staged in the
 * device's flash read/write memory (REG_FLASH_RW_MEM) via a raw i2c
 * write, then a FLASH_ROW_RW command commits it to row @row.  @fcmd
 * selects the flash operation (plain row write, FWCT or signature).
 *
 * Returns 0 on success, a negative errno on i2c failure, or the CCGx
 * response code when the command did not return CMD_SUCCESS.
 */
static int
ccg_cmd_write_flash_row(struct ucsi_ccg *uc, u16 row,
			const void *data, u8 fcmd)
{
	struct i2c_client *client = uc->client;
	struct ccg_cmd cmd;
	u8 buf[CCG4_ROW_SIZE + 2];
	u8 *p;
	int ret;

	/* Copy the data into the flash read/write memory. */
	put_unaligned_le16(REG_FLASH_RW_MEM, buf);

	memcpy(buf + 2, data, CCG4_ROW_SIZE);

	mutex_lock(&uc->lock);

	ret = i2c_master_send(client, buf, CCG4_ROW_SIZE + 2);
	if (ret != CCG4_ROW_SIZE + 2) {
		dev_err(uc->dev, "REG_FLASH_RW_MEM write fail %d\n", ret);
		mutex_unlock(&uc->lock);
		return ret < 0 ? ret : -EIO;
	}

	/* Use the FLASH_ROW_READ_WRITE register to trigger */
	/* writing of data to the desired flash row */
	p = (u8 *)&cmd.data;
	cmd.reg = CCGX_RAB_FLASH_ROW_RW;
	p[0] = FLASH_SIG;
	p[1] = fcmd;
	put_unaligned_le16(row, &p[2]);
	cmd.len = 4;
	cmd.delay = 50;
	/* signature writes take longer to verify */
	if (fcmd == FLASH_FWCT_SIG_WR_CMD)
		cmd.delay += 400;
	/* NOTE(review): row 510 presumably a slow metadata row - confirm */
	if (row == 510)
		cmd.delay += 220;
	ret = ccg_send_command(uc, &cmd);

	mutex_unlock(&uc->lock);

	if (ret != CMD_SUCCESS) {
		dev_err(uc->dev, "write flash row failed ret=%d\n", ret);
		return ret;
	}

	return 0;
}
1011 
ccg_cmd_validate_fw(struct ucsi_ccg * uc,unsigned int fwid)1012 static int ccg_cmd_validate_fw(struct ucsi_ccg *uc, unsigned int fwid)
1013 {
1014 	struct ccg_cmd cmd;
1015 	int ret;
1016 
1017 	cmd.reg = CCGX_RAB_VALIDATE_FW;
1018 	cmd.data = fwid;
1019 	cmd.len = 1;
1020 	cmd.delay = 500;
1021 
1022 	mutex_lock(&uc->lock);
1023 
1024 	ret = ccg_send_command(uc, &cmd);
1025 
1026 	mutex_unlock(&uc->lock);
1027 
1028 	if (ret != CMD_SUCCESS)
1029 		return ret;
1030 
1031 	return 0;
1032 }
1033 
ccg_check_vendor_version(struct ucsi_ccg * uc,struct version_format * app,struct fw_config_table * fw_cfg)1034 static bool ccg_check_vendor_version(struct ucsi_ccg *uc,
1035 				     struct version_format *app,
1036 				     struct fw_config_table *fw_cfg)
1037 {
1038 	struct device *dev = uc->dev;
1039 
1040 	/* Check if the fw build is for supported vendors */
1041 	if (le16_to_cpu(app->build) != uc->fw_build) {
1042 		dev_info(dev, "current fw is not from supported vendor\n");
1043 		return false;
1044 	}
1045 
1046 	/* Check if the new fw build is for supported vendors */
1047 	if (le16_to_cpu(fw_cfg->app.build) != uc->fw_build) {
1048 		dev_info(dev, "new fw is not from supported vendor\n");
1049 		return false;
1050 	}
1051 	return true;
1052 }
1053 
/*
 * Compare the version embedded in the .cyacd file @fw_name against the
 * running version @app.  Returns true only when the file is a signed
 * image (carries a trailing fw_config_table), both builds are from the
 * expected vendor, and the file's version is strictly newer.  Every
 * failure path returns false.
 */
static bool ccg_check_fw_version(struct ucsi_ccg *uc, const char *fw_name,
				 struct version_format *app)
{
	const struct firmware *fw = NULL;
	struct device *dev = uc->dev;
	struct fw_config_table fw_cfg;
	u32 cur_version, new_version;
	bool is_later = false;

	if (request_firmware(&fw, fw_name, dev) != 0) {
		dev_err(dev, "error: Failed to open cyacd file %s\n", fw_name);
		return false;
	}

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + FW_CFG_TABLE_SIG_SIZE)
		goto out_release_firmware;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - FW_CFG_TABLE_SIG_SIZE, sizeof(fw_cfg));

	/* 'FWCT' magic; NOTE(review): raw u32 compare assumes LE host */
	if (fw_cfg.identity != ('F' | 'W' << 8 | 'C' << 16 | 'T' << 24)) {
		dev_info(dev, "not a signed image\n");
		goto out_release_firmware;
	}

	/* compare input version with FWCT version */
	cur_version = le16_to_cpu(app->build) | CCG_VERSION_PATCH(app->patch) |
			CCG_VERSION(app->ver);

	new_version = le16_to_cpu(fw_cfg.app.build) |
			CCG_VERSION_PATCH(fw_cfg.app.patch) |
			CCG_VERSION(fw_cfg.app.ver);

	if (!ccg_check_vendor_version(uc, app, &fw_cfg))
		goto out_release_firmware;

	if (new_version > cur_version)
		is_later = true;

out_release_firmware:
	release_firmware(fw);
	return is_later;
}
1101 
ccg_fw_update_needed(struct ucsi_ccg * uc,enum enum_flash_mode * mode)1102 static int ccg_fw_update_needed(struct ucsi_ccg *uc,
1103 				enum enum_flash_mode *mode)
1104 {
1105 	struct device *dev = uc->dev;
1106 	int err;
1107 	struct version_info version[3];
1108 
1109 	err = ccg_read(uc, CCGX_RAB_DEVICE_MODE, (u8 *)(&uc->info),
1110 		       sizeof(uc->info));
1111 	if (err) {
1112 		dev_err(dev, "read device mode failed\n");
1113 		return err;
1114 	}
1115 
1116 	err = ccg_read(uc, CCGX_RAB_READ_ALL_VER, (u8 *)version,
1117 		       sizeof(version));
1118 	if (err) {
1119 		dev_err(dev, "read device mode failed\n");
1120 		return err;
1121 	}
1122 
1123 	if (memcmp(&version[FW1], "\0\0\0\0\0\0\0\0",
1124 		   sizeof(struct version_info)) == 0) {
1125 		dev_info(dev, "secondary fw is not flashed\n");
1126 		*mode = SECONDARY_BL;
1127 	} else if (le16_to_cpu(version[FW1].base.build) <
1128 		secondary_fw_min_ver) {
1129 		dev_info(dev, "secondary fw version is too low (< %d)\n",
1130 			 secondary_fw_min_ver);
1131 		*mode = SECONDARY;
1132 	} else if (memcmp(&version[FW2], "\0\0\0\0\0\0\0\0",
1133 		   sizeof(struct version_info)) == 0) {
1134 		dev_info(dev, "primary fw is not flashed\n");
1135 		*mode = PRIMARY;
1136 	} else if (ccg_check_fw_version(uc, ccg_fw_names[PRIMARY],
1137 		   &version[FW2].app)) {
1138 		dev_info(dev, "found primary fw with later version\n");
1139 		*mode = PRIMARY;
1140 	} else {
1141 		dev_info(dev, "secondary and primary fw are the latest\n");
1142 		*mode = FLASH_NOT_NEEDED;
1143 	}
1144 	return 0;
1145 }
1146 
/*
 * do_flash() - flash one firmware image (selected by @mode) onto the CCGx.
 *
 * Parses a Cypress .cyacd firmware file and writes it to device flash
 * row by row.  If the image is signed (fw config table + signature
 * appended to the file), the table and signature are flashed first and
 * excluded from the row parsing.  After flashing, the new image is
 * validated and the device is reset to pick it up.
 *
 * Returns 0 on success or a negative error code.
 */
static int do_flash(struct ucsi_ccg *uc, enum enum_flash_mode mode)
{
	struct device *dev = uc->dev;
	const struct firmware *fw = NULL;
	const char *p, *s;
	const char *eof;
	int err, row, len, line_sz, line_cnt = 0;
	unsigned long start_time = jiffies;
	struct fw_config_table  fw_cfg;
	u8 fw_cfg_sig[FW_CFG_TABLE_SIG_SIZE];
	u8 *wr_buf;

	err = request_firmware(&fw, ccg_fw_names[mode], dev);
	if (err) {
		dev_err(dev, "request %s failed err=%d\n",
			ccg_fw_names[mode], err);
		return err;
	}

	/*
	 * If currently running the primary image (FW2), stop port
	 * operation and jump to the alternate image before writing.
	 */
	if (((uc->info.mode & CCG_DEVINFO_FWMODE_MASK) >>
			CCG_DEVINFO_FWMODE_SHIFT) == FW2) {
		err = ccg_cmd_port_control(uc, false);
		if (err < 0)
			goto release_fw;
		err = ccg_cmd_jump_boot_mode(uc, 0);
		if (err < 0)
			goto release_fw;
	}

	eof = fw->data + fw->size;

	/*
	 * check if signed fw
	 * last part of fw image is fw cfg table and signature
	 */
	if (fw->size < sizeof(fw_cfg) + sizeof(fw_cfg_sig))
		goto not_signed_fw;

	memcpy((uint8_t *)&fw_cfg, fw->data + fw->size -
	       sizeof(fw_cfg) - sizeof(fw_cfg_sig), sizeof(fw_cfg));

	if (fw_cfg.identity != ('F' | ('W' << 8) | ('C' << 16) | ('T' << 24))) {
		dev_info(dev, "not a signed image\n");
		goto not_signed_fw;
	}
	/* exclude the trailing cfg table + signature from .cyacd parsing */
	eof = fw->data + fw->size - sizeof(fw_cfg) - sizeof(fw_cfg_sig);

	memcpy((uint8_t *)&fw_cfg_sig,
	       fw->data + fw->size - sizeof(fw_cfg_sig), sizeof(fw_cfg_sig));

	/* flash fw config table and signature first */
	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg,
				      FLASH_FWCT1_WR_CMD);
	if (err)
		goto release_fw;

	/* the table spans two flash rows; second half at CCG4_ROW_SIZE */
	err = ccg_cmd_write_flash_row(uc, 0, (u8 *)&fw_cfg + CCG4_ROW_SIZE,
				      FLASH_FWCT2_WR_CMD);
	if (err)
		goto release_fw;

	err = ccg_cmd_write_flash_row(uc, 0, &fw_cfg_sig,
				      FLASH_FWCT_SIG_WR_CMD);
	if (err)
		goto release_fw;

not_signed_fw:
	/* row buffer: 2 bytes row number + 2 bytes length + row data */
	wr_buf = kzalloc(CCG4_ROW_SIZE + 4, GFP_KERNEL);
	if (!wr_buf) {
		err = -ENOMEM;
		goto release_fw;
	}

	err = ccg_cmd_enter_flashing(uc);
	if (err)
		goto release_mem;

	/*****************************************************************
	 * CCG firmware image (.cyacd) file line format
	 *
	 * :00rrrrllll[dd....]cc/r/n
	 *
	 * :00   header
	 * rrrr is row number to flash				(4 char)
	 * llll is data len to flash				(4 char)
	 * dd   is a data field represents one byte of data	(512 char)
	 * cc   is checksum					(2 char)
	 * \r\n newline
	 *
	 * Total length: 3 + 4 + 4 + 512 + 2 + 2 = 527
	 *
	 *****************************************************************/

	p = strnchr(fw->data, fw->size, ':');
	while (p < eof) {
		/* each line starts with ':'; the next ':' (or eof) ends it */
		s = strnchr(p + 1, eof - p - 1, ':');

		if (!s)
			s = eof;

		line_sz = s - p;

		if (line_sz != CYACD_LINE_SIZE) {
			dev_err(dev, "Bad FW format line_sz=%d\n", line_sz);
			err =  -EINVAL;
			goto release_mem;
		}

		/* skip ":00" header, decode rrrr+llll+data hex into wr_buf */
		if (hex2bin(wr_buf, p + 3, CCG4_ROW_SIZE + 4)) {
			err =  -EINVAL;
			goto release_mem;
		}

		row = get_unaligned_be16(wr_buf);
		len = get_unaligned_be16(&wr_buf[2]);

		if (len != CCG4_ROW_SIZE) {
			err =  -EINVAL;
			goto release_mem;
		}

		err = ccg_cmd_write_flash_row(uc, row, wr_buf + 4,
					      FLASH_WR_CMD);
		if (err)
			goto release_mem;

		line_cnt++;
		p = s;
	}

	dev_info(dev, "total %d row flashed. time: %dms\n",
		 line_cnt, jiffies_to_msecs(jiffies - start_time));

	/* PRIMARY images land in FW2, SECONDARY ones in FW1 */
	err = ccg_cmd_validate_fw(uc, (mode == PRIMARY) ? FW2 :  FW1);
	if (err)
		dev_err(dev, "%s validation failed err=%d\n",
			(mode == PRIMARY) ? "FW2" :  "FW1", err);
	else
		dev_info(dev, "%s validated\n",
			 (mode == PRIMARY) ? "FW2" :  "FW1");

	/* restart the device on the freshly written image */
	err = ccg_cmd_port_control(uc, false);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_reset(uc);
	if (err < 0)
		goto release_mem;

	err = ccg_cmd_port_control(uc, true);
	if (err < 0)
		goto release_mem;

release_mem:
	kfree(wr_buf);

release_fw:
	release_firmware(fw);
	return err;
}
1307 
1308 /*******************************************************************************
1309  * CCG4 has two copies of the firmware in addition to the bootloader.
1310  * If the device is running FW1, FW2 can be updated with the new version.
1311  * Dual firmware mode allows the CCG device to stay in a PD contract and support
1312  * USB PD and Type-C functionality while a firmware update is in progress.
1313  ******************************************************************************/
ccg_fw_update(struct ucsi_ccg * uc,enum enum_flash_mode flash_mode)1314 static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
1315 {
1316 	int err = 0;
1317 
1318 	while (flash_mode != FLASH_NOT_NEEDED) {
1319 		err = do_flash(uc, flash_mode);
1320 		if (err < 0)
1321 			return err;
1322 		err = ccg_fw_update_needed(uc, &flash_mode);
1323 		if (err < 0)
1324 			return err;
1325 	}
1326 	dev_info(uc->dev, "CCG FW update successful\n");
1327 
1328 	return err;
1329 }
1330 
ccg_restart(struct ucsi_ccg * uc)1331 static int ccg_restart(struct ucsi_ccg *uc)
1332 {
1333 	struct device *dev = uc->dev;
1334 	int status;
1335 
1336 	status = ucsi_ccg_init(uc);
1337 	if (status < 0) {
1338 		dev_err(dev, "ucsi_ccg_start fail, err=%d\n", status);
1339 		return status;
1340 	}
1341 
1342 	status = ccg_request_irq(uc);
1343 	if (status < 0) {
1344 		dev_err(dev, "request_threaded_irq failed - %d\n", status);
1345 		return status;
1346 	}
1347 
1348 	status = ucsi_register(uc->ucsi);
1349 	if (status) {
1350 		dev_err(uc->dev, "failed to register the interface\n");
1351 		return status;
1352 	}
1353 
1354 	pm_runtime_enable(uc->dev);
1355 	return 0;
1356 }
1357 
ccg_update_firmware(struct work_struct * work)1358 static void ccg_update_firmware(struct work_struct *work)
1359 {
1360 	struct ucsi_ccg *uc = container_of(work, struct ucsi_ccg, work);
1361 	enum enum_flash_mode flash_mode;
1362 	int status;
1363 
1364 	status = ccg_fw_update_needed(uc, &flash_mode);
1365 	if (status < 0)
1366 		return;
1367 
1368 	if (flash_mode != FLASH_NOT_NEEDED) {
1369 		ucsi_unregister(uc->ucsi);
1370 		pm_runtime_disable(uc->dev);
1371 		free_irq(uc->irq, uc);
1372 
1373 		ccg_fw_update(uc, flash_mode);
1374 		ccg_restart(uc);
1375 	}
1376 }
1377 
do_flash_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t n)1378 static ssize_t do_flash_store(struct device *dev,
1379 			      struct device_attribute *attr,
1380 			      const char *buf, size_t n)
1381 {
1382 	struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1383 	bool flash;
1384 
1385 	if (kstrtobool(buf, &flash))
1386 		return -EINVAL;
1387 
1388 	if (!flash)
1389 		return n;
1390 
1391 	schedule_work(&uc->work);
1392 	return n;
1393 }
1394 
ucsi_ccg_attrs_is_visible(struct kobject * kobj,struct attribute * attr,int idx)1395 static umode_t ucsi_ccg_attrs_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
1396 {
1397 	struct device *dev = kobj_to_dev(kobj);
1398 	struct ucsi_ccg *uc = i2c_get_clientdata(to_i2c_client(dev));
1399 
1400 	if (!uc->fw_build)
1401 		return 0;
1402 
1403 	return attr->mode;
1404 }
1405 
/* Write-only sysfs attribute that triggers a firmware update. */
static DEVICE_ATTR_WO(do_flash);

static struct attribute *ucsi_ccg_attrs[] = {
	&dev_attr_do_flash.attr,
	NULL,
};
static struct attribute_group ucsi_ccg_attr_group = {
	.attrs = ucsi_ccg_attrs,
	/* hidden when uc->fw_build is unset — see ucsi_ccg_attrs_is_visible() */
	.is_visible = ucsi_ccg_attrs_is_visible,
};
static const struct attribute_group *ucsi_ccg_groups[] = {
	&ucsi_ccg_attr_group,
	NULL,
};
1420 
/*
 * ucsi_ccg_probe() - i2c probe: reset and initialize the CCGx, read its
 * firmware info, create and register the UCSI interface, and enable
 * runtime PM.
 *
 * Returns 0 on success or a negative error code.
 */
static int ucsi_ccg_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct ucsi_ccg *uc;
	const char *fw_name;
	int status;

	uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
	if (!uc)
		return -ENOMEM;

	uc->dev = dev;
	uc->client = client;
	uc->irq = client->irq;
	mutex_init(&uc->lock);
	INIT_WORK(&uc->work, ccg_update_firmware);
	INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);

	/* Only fail FW flashing when FW build information is not provided */
	status = device_property_read_string(dev, "firmware-name", &fw_name);
	if (!status) {
		/* map the firmware-name property onto a known FW build id */
		if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
		else if (!strcmp(fw_name, "nvidia,gpu"))
			uc->fw_build = CCG_FW_BUILD_NVIDIA;
		if (!uc->fw_build)
			dev_err(uc->dev, "failed to get FW build information\n");
	}

	/* reset ccg device and initialize ucsi */
	status = ucsi_ccg_init(uc);
	if (status < 0) {
		dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
		return status;
	}

	status = get_fw_info(uc);
	if (status < 0) {
		dev_err(uc->dev, "get_fw_info failed - %d\n", status);
		return status;
	}

	/* at least one port; a second one if the PDPORTS field is set */
	uc->port_num = 1;

	if (uc->info.mode & CCG_DEVINFO_PDPORTS_MASK)
		uc->port_num++;

	uc->ucsi = ucsi_create(dev, &ucsi_ccg_ops);
	if (IS_ERR(uc->ucsi))
		return PTR_ERR(uc->ucsi);

	ucsi_set_drvdata(uc->ucsi, uc);

	status = ccg_request_irq(uc);
	if (status < 0) {
		dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
		goto out_ucsi_destroy;
	}

	status = ucsi_register(uc->ucsi);
	if (status)
		goto out_free_irq;

	i2c_set_clientdata(client, uc);

	device_disable_async_suspend(uc->dev);

	/* runtime PM with a 5 second autosuspend delay */
	pm_runtime_set_active(uc->dev);
	pm_runtime_enable(uc->dev);
	pm_runtime_use_autosuspend(uc->dev);
	pm_runtime_set_autosuspend_delay(uc->dev, 5000);
	pm_runtime_idle(uc->dev);

	return 0;

out_free_irq:
	free_irq(uc->irq, uc);
out_ucsi_destroy:
	ucsi_destroy(uc->ucsi);

	return status;
}
1503 
/*
 * ucsi_ccg_remove() - i2c remove: undo probe.
 *
 * Cancel the pending work items first so the firmware-update and PM
 * workaround workers cannot run against a half-torn-down device.
 */
static void ucsi_ccg_remove(struct i2c_client *client)
{
	struct ucsi_ccg *uc = i2c_get_clientdata(client);

	cancel_work_sync(&uc->pm_work);
	cancel_work_sync(&uc->work);
	pm_runtime_disable(uc->dev);
	ucsi_unregister(uc->ucsi);
	ucsi_destroy(uc->ucsi);
	free_irq(uc->irq, uc);
}
1515 
/* OF match: Cypress CYPD4226 Type-C controller. */
static const struct of_device_id ucsi_ccg_of_match_table[] = {
		{ .compatible = "cypress,cypd4226", },
		{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);

static const struct i2c_device_id ucsi_ccg_device_id[] = {
	{ "ccgx-ucsi" },
	{}
};
MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);

/* ACPI match: platforms exposing the controller as AMDI0042. */
static const struct acpi_device_id amd_i2c_ucsi_match[] = {
	{"AMDI0042"},
	{}
};
MODULE_DEVICE_TABLE(acpi, amd_i2c_ucsi_match);
1533 
ucsi_ccg_resume(struct device * dev)1534 static int ucsi_ccg_resume(struct device *dev)
1535 {
1536 	struct i2c_client *client = to_i2c_client(dev);
1537 	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1538 
1539 	return ucsi_resume(uc->ucsi);
1540 }
1541 
/* Runtime suspend: nothing to save or quiesce here; always succeeds. */
static int ucsi_ccg_runtime_suspend(struct device *dev)
{
	return 0;
}
1546 
ucsi_ccg_runtime_resume(struct device * dev)1547 static int ucsi_ccg_runtime_resume(struct device *dev)
1548 {
1549 	struct i2c_client *client = to_i2c_client(dev);
1550 	struct ucsi_ccg *uc = i2c_get_clientdata(client);
1551 
1552 	/*
1553 	 * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
1554 	 * of missing interrupt when a device is connected for runtime resume.
1555 	 * Schedule a work to call ISR as a workaround.
1556 	 */
1557 	if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
1558 	    uc->fw_version <= CCG_OLD_FW_VERSION)
1559 		schedule_work(&uc->pm_work);
1560 
1561 	return 0;
1562 }
1563 
static const struct dev_pm_ops ucsi_ccg_pm = {
	.resume = ucsi_ccg_resume,
	.runtime_suspend = ucsi_ccg_runtime_suspend,
	.runtime_resume = ucsi_ccg_runtime_resume,
};

static struct i2c_driver ucsi_ccg_driver = {
	.driver = {
		.name = "ucsi_ccg",
		.pm = &ucsi_ccg_pm,
		/* do_flash sysfs group; hidden unless uc->fw_build is set */
		.dev_groups = ucsi_ccg_groups,
		.acpi_match_table = amd_i2c_ucsi_match,
		.of_match_table = ucsi_ccg_of_match_table,
	},
	.probe = ucsi_ccg_probe,
	.remove = ucsi_ccg_remove,
	.id_table = ucsi_ccg_device_id,
};

/* Generates module init/exit that register/unregister the i2c driver. */
module_i2c_driver(ucsi_ccg_driver);

MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
MODULE_LICENSE("GPL v2");
1588