// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt driver - eeprom access
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/crc32.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"

/**
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

/**
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
}

enum tb_eeprom_transfer {
	TB_EEPROM_IN,
	TB_EEPROM_OUT,
};

/**
 * tb_eeprom_active() - enable/disable access to the switch EEPROM
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
 * fail to reprobe.
 */
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
	struct tb_eeprom_ctl ctl;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
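	/*
	 * The access bits appear to require this particular toggle order:
	 * raise access_high before clearing access_low when enabling, and
	 * the reverse when disabling.
	 */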
	if (enable) {
		ctl.access_high = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_low = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	} else {
		ctl.access_low = 1;
		res = tb_eeprom_ctl_write(sw, &ctl);
		if (res)
			return res;
		ctl.access_high = 0;
		return tb_eeprom_ctl_write(sw, &ctl);
	}
}

/**
 * tb_eeprom_transfer() - transfer one bit
 *
 * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
 * If TB_EEPROM_OUT is passed, then the bit in ctl->data_out is written to the
 * bus.
 */
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
			      enum tb_eeprom_transfer direction)
{
	int res;
	if (direction == TB_EEPROM_OUT) {
		res = tb_eeprom_ctl_write(sw, ctl);
		if (res)
			return res;
	}
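	/*
	 * Pulse the clock: data_out is presented before the rising edge,
	 * data_in is sampled while the clock is high.
	 */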
	ctl->clock = 1;
	res = tb_eeprom_ctl_write(sw, ctl);
	if (res)
		return res;
	if (direction == TB_EEPROM_IN) {
		res = tb_eeprom_ctl_read(sw, ctl);
		if (res)
			return res;
	}
	ctl->clock = 0;
	return tb_eeprom_ctl_write(sw, ctl);
}

/**
 * tb_eeprom_out() - write one byte to the bus
 */
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
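	/* Shift the byte out most significant bit first. */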
	for (i = 0; i < 8; i++) {
		ctl.data_out = val & 0x80;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
		if (res)
			return res;
		val <<= 1;
	}
	return 0;
}

/**
 * tb_eeprom_in() - read one byte from the bus
 */
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
	struct tb_eeprom_ctl ctl;
	int i;
	int res = tb_eeprom_ctl_read(sw, &ctl);
	if (res)
		return res;
	*val = 0;
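	/* Clock the byte in most significant bit first. */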
	for (i = 0; i < 8; i++) {
		*val <<= 1;
		res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
		if (res)
			return res;
		*val |= ctl.data_in;
	}
	return 0;
}

/**
 * tb_eeprom_read_n() - read count bytes from offset into val
 */
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
		size_t count)
{
	int i, res;
	res = tb_eeprom_active(sw, true);
	if (res)
		return res;
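	/*
	 * Opcode 3 matches the standard SPI EEPROM READ command; it is
	 * followed by the 16-bit offset, most significant byte first.
	 */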
	res = tb_eeprom_out(sw, 3);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset >> 8);
	if (res)
		return res;
	res = tb_eeprom_out(sw, offset);
	if (res)
		return res;
	for (i = 0; i < count; i++) {
		res = tb_eeprom_in(sw, val + i);
		if (res)
			return res;
	}
	return tb_eeprom_active(sw, false);
}

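/*
 * CRC-8 as used for the DROM UID: polynomial 0x07 (x^8 + x^2 + x + 1),
 * initial value 0xff, no final XOR.
 */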
static u8 tb_crc8(u8 *data, int len)
{
	int i, j;
	u8 val = 0xff;
	for (i = 0; i < len; i++) {
		val ^= data[i];
		for (j = 0; j < 8; j++)
			val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
	}
	return val;
}

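/* CRC-32C (Castagnoli) with the usual initial and final inversion. */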
static u32 tb_crc32(void *data, size_t len)
{
	return ~__crc32c_le(~0, data, len);
}

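/*
 * The DROM is a 22 byte header (below) followed by a list of variable
 * length entries. data_len counts everything from byte 13 to the end of
 * the DROM, which is also the region covered by data_crc32.
 */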
#define TB_DROM_DATA_START 13
struct tb_drom_header {
	/* BYTE 0 */
	u8 uid_crc8; /* checksum for uid */
	/* BYTES 1-8 */
	u64 uid;
	/* BYTES 9-12 */
	u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
	/* BYTE 13 */
	u8 device_rom_revision; /* should be <= 1 */
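	/* BYTES 14-15 */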
	u16 data_len:10;
	u8 __unknown1:6;
	/* BYTES 16-21 */
	u16 vendor_id;
	u16 model_id;
	u8 model_rev;
	u8 eeprom_rev;
} __packed;

enum tb_drom_entry_type {
	/* force unsigned to prevent "one-bit signed bitfield" warning */
	TB_DROM_ENTRY_GENERIC = 0U,
	TB_DROM_ENTRY_PORT,
};

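/*
 * Common header of every DROM entry; len is the total entry length in
 * bytes, including these two header bytes.
 */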
struct tb_drom_entry_header {
	u8 len;
	u8 index:6;
	bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
	enum tb_drom_entry_type type:1;
} __packed;

struct tb_drom_entry_generic {
	struct tb_drom_entry_header header;
	u8 data[0];
} __packed;

struct tb_drom_entry_port {
	/* BYTES 0-1 */
	struct tb_drom_entry_header header;
	/* BYTE 2 */
	u8 dual_link_port_rid:4;
	u8 link_nr:1;
	u8 unknown1:2;
	bool has_dual_link_port:1;

	/* BYTE 3 */
	u8 dual_link_port_nr:6;
	u8 unknown2:2;

	/* BYTES 4-5, TODO: decode */
	u8 micro2:4;
	u8 micro1:4;
	u8 micro3;

	/* BYTES 6-7, TODO: verify (find hardware that has these set) */
	u8 peer_port_rid:4;
	u8 unknown3:3;
	bool has_peer_port:1;
	u8 peer_port_nr:6;
	u8 unknown4:2;
} __packed;

/**
 * tb_eeprom_get_drom_offset() - get drom offset within eeprom
 */
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
	struct tb_cap_plug_events cap;
	int res;
	if (!sw->cap_plug_events) {
		tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
		return -ENOSYS;
	}
	res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
			     sizeof(cap) / 4);
	if (res)
		return res;

	if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
		tb_sw_warn(sw, "no NVM\n");
		return -ENOSYS;
	}

	if (cap.drom_offset > 0xffff) {
		tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
				cap.drom_offset);
		return -ENXIO;
	}
	*offset = cap.drom_offset;
	return 0;
}

/**
 * tb_drom_read_uid_only() - read uid directly from drom
 *
 * Does not use the cached copy in sw->drom. Used during resume to check switch
 * identity.
 */
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
	u8 data[9];
	u16 drom_offset;
	u8 crc;
	int res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

	if (drom_offset == 0)
		return -ENODEV;

	/* read uid */
	res = tb_eeprom_read_n(sw, drom_offset, data, 9);
	if (res)
		return res;

	crc = tb_crc8(data + 1, 8);
	if (crc != data[0]) {
		tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
				data[0], crc);
		return -EIO;
	}

	*uid = *(u64 *)(data+1);
	return 0;
}

static int tb_drom_parse_entry_generic(struct tb_switch *sw,
		struct tb_drom_entry_header *header)
{
	const struct tb_drom_entry_generic *entry =
		(const struct tb_drom_entry_generic *)header;

	switch (header->index) {
	case 1:
		/* Length includes the 2 byte header, so subtract it before copying */
		sw->vendor_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->vendor_name)
			return -ENOMEM;
		break;

	case 2:
		sw->device_name = kstrndup(entry->data,
			header->len - sizeof(*header), GFP_KERNEL);
		if (!sw->device_name)
			return -ENOMEM;
		break;
	}

	return 0;
}

static int tb_drom_parse_entry_port(struct tb_switch *sw,
				    struct tb_drom_entry_header *header)
{
	struct tb_port *port;
	int res;
	enum tb_port_type type;

	/*
	 * Some DROMs list more ports than the controller actually has
	 * so we skip those but allow the parser to continue.
	 */
	if (header->index > sw->config.max_port_number) {
		dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
		return 0;
	}

	port = &sw->ports[header->index];
	port->disabled = header->port_disabled;
	if (port->disabled)
		return 0;

	res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
	if (res)
		return res;
	type &= 0xffffff;

	if (type == TB_TYPE_PORT) {
		struct tb_drom_entry_port *entry = (void *) header;
		if (header->len != sizeof(*entry)) {
			tb_sw_warn(sw,
				"port entry has size %#x (expected %#zx)\n",
				header->len, sizeof(struct tb_drom_entry_port));
			return -EIO;
		}
		port->link_nr = entry->link_nr;
		if (entry->has_dual_link_port)
			port->dual_link_port =
				&port->sw->ports[entry->dual_link_port_nr];
	}
	return 0;
}

/**
 * tb_drom_parse_entries() - parse the list of drom entries
 *
 * The DROM must have been copied to sw->drom.
 */
static int tb_drom_parse_entries(struct tb_switch *sw)
{
	struct tb_drom_header *header = (void *) sw->drom;
	u16 pos = sizeof(*header);
	u16 drom_size = header->data_len + TB_DROM_DATA_START;
	int res;

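	/*
	 * Walk the flat list of length prefixed entries until drom_size
	 * bytes have been consumed, bailing out on truncated or zero
	 * length entries.
	 */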
	while (pos < drom_size) {
		struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
		if (pos + 1 == drom_size || pos + entry->len > drom_size
				|| !entry->len) {
			tb_sw_warn(sw, "drom buffer overrun, aborting\n");
			return -EIO;
		}

		switch (entry->type) {
		case TB_DROM_ENTRY_GENERIC:
			res = tb_drom_parse_entry_generic(sw, entry);
			break;
		case TB_DROM_ENTRY_PORT:
			res = tb_drom_parse_entry_port(sw, entry);
			break;
		}
		if (res)
			return res;

		pos += entry->len;
	}
	return 0;
}

/**
 * tb_drom_copy_efi() - copy drom supplied by EFI to sw->drom if present
 */
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
	struct device *dev = &sw->tb->nhi->pdev->dev;
	int len, res;

	len = device_property_read_u8_array(dev, "ThunderboltDROM", NULL, 0);
	if (len < 0 || len < sizeof(struct tb_drom_header))
		return -EINVAL;

	sw->drom = kmalloc(len, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
									len);
	if (res)
		goto err;

	*size = ((struct tb_drom_header *)sw->drom)->data_len +
							  TB_DROM_DATA_START;
	if (*size > len)
		goto err;

	return 0;

err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EINVAL;
}

static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

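	/*
	 * The 16-bit length field lives at bytes 14-15 of the DROM; read it
	 * to find out how much to copy.
	 */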
	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}

/**
 * tb_drom_read() - copy drom to sw->drom and parse it
 */
int tb_drom_read(struct tb_switch *sw)
{
	u16 drom_offset;
	u16 size;
	u32 crc;
	struct tb_drom_header *header;
	int res;
	if (sw->drom)
		return 0;

	if (tb_route(sw) == 0) {
		/*
		 * Apple's NHI EFI driver supplies a DROM for the root switch
		 * in a device property. Use it if available.
		 */
		if (tb_drom_copy_efi(sw, &size) == 0)
			goto parse;

		/* Non-Apple hardware has the DROM as part of NVM */
		if (tb_drom_copy_nvm(sw, &size) == 0)
			goto parse;

		/*
		 * The root switch contains only a dummy drom (header only,
		 * no entries). Hardcode the configuration here.
		 */
		tb_drom_read_uid_only(sw, &sw->uid);

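		/*
		 * Ports 1 and 2 form one dual-link (lane) pair, ports 3 and 4
		 * the other.
		 */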
		sw->ports[1].link_nr = 0;
		sw->ports[2].link_nr = 1;
		sw->ports[1].dual_link_port = &sw->ports[2];
		sw->ports[2].dual_link_port = &sw->ports[1];

		sw->ports[3].link_nr = 0;
		sw->ports[4].link_nr = 1;
		sw->ports[3].dual_link_port = &sw->ports[4];
		sw->ports[4].dual_link_port = &sw->ports[3];

		/* Port 5 is inaccessible on this gen 1 controller */
		if (sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE)
			sw->ports[5].disabled = true;

		return 0;
	}

	res = tb_eeprom_get_drom_offset(sw, &drom_offset);
	if (res)
		return res;

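	/* data_len occupies the low 10 bits of bytes 14-15 of the DROM */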
	res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2);
	if (res)
		return res;
	size &= 0x3ff;
	size += TB_DROM_DATA_START;
	tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
	if (size < sizeof(*header)) {
		tb_sw_warn(sw, "drom too small, aborting\n");
		return -EIO;
	}

	sw->drom = kzalloc(size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;
	res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size);
	if (res)
		goto err;

parse:
	header = (void *) sw->drom;

	if (header->data_len + TB_DROM_DATA_START != size) {
		tb_sw_warn(sw, "drom size mismatch, aborting\n");
		goto err;
	}

	crc = tb_crc8((u8 *) &header->uid, 8);
	if (crc != header->uid_crc8) {
		tb_sw_warn(sw,
			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
			header->uid_crc8, crc);
		goto err;
	}
	if (!sw->uid)
		sw->uid = header->uid;
	sw->vendor = header->vendor_id;
	sw->device = header->model_id;

	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
	if (crc != header->data_crc32) {
		tb_sw_warn(sw,
			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
			header->data_crc32, crc);
	}

	if (header->device_rom_revision > 2)
		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
			header->device_rom_revision);

	return tb_drom_parse_entries(sw);
err:
	kfree(sw->drom);
	sw->drom = NULL;
	return -EIO;
}