xref: /freebsd/sys/dev/bhnd/bcma/bcma_erom.c (revision d96700a6da2afa88607fbd7405ade439424d10d9)
1 /*-
2  * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 #include <sys/systm.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include "bcma_eromreg.h"
43 #include "bcma_eromvar.h"
44 
45 /*
46  * BCMA Enumeration ROM (EROM) Table
47  *
48  * Provides auto-discovery of BCMA cores on Broadcom's HND SoCs.
49  *
50  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
51  * ChipCommon registers. The table itself is composed of 32-bit
52  * type-tagged entries, organized into an array of variable-length
53  * core descriptor records.
54  *
55  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
56  * marker.
57  */
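
/*
 * Example (illustrative sketch only; not compiled): enumerating all cores
 * with the reader API defined below, assuming `r' is an active resource
 * mapping the EROM core at `offset':
 *
 *	struct bcma_erom	erom;
 *	struct bcma_erom_core	core;
 *	int			error;
 *
 *	if ((error = bcma_erom_open(&erom, r, offset)))
 *		return (error);
 *
 *	while ((error = bcma_erom_seek_next_core(&erom)) == 0) {
 *		if ((error = bcma_erom_parse_core(&erom, &core)))
 *			return (error);
 *		...	core.vendor, core.device, and core.rev now describe
 *			the parsed core.
 *	}
 *	if (error != ENOENT)
 *		return (error);
 */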
58 
59 static const char	*erom_entry_type_name(uint8_t entry);
60 static int		 erom_read32(struct bcma_erom *erom, uint32_t *entry);
61 static int		 erom_skip32(struct bcma_erom *erom);
62 
63 static int		 erom_skip_core(struct bcma_erom *erom);
64 static int		 erom_skip_mport(struct bcma_erom *erom);
65 static int		 erom_skip_sport_region(struct bcma_erom *erom);
66 
67 static int		 erom_seek_next(struct bcma_erom *erom, uint8_t etype);
68 static int		 erom_region_to_port_type(struct bcma_erom *erom,
69 			    uint8_t region_type, bhnd_port_type *port_type);
70 
71 #define	EROM_LOG(erom, fmt, ...)	do {				\
72 	if ((erom)->dev != NULL) {					\
73 		device_printf((erom)->dev, "erom[0x%llx]: " fmt,	\
74 		    (unsigned long long) ((erom)->offset), ##__VA_ARGS__);\
75 	} else {							\
76 		printf("erom[0x%llx]: " fmt,				\
77 		    (unsigned long long) ((erom)->offset), ##__VA_ARGS__);\
78 	}								\
79 } while (0)
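
/*
 * For example (hypothetical values), EROM_LOG(erom, "bad entry 0x%x\n",
 * entry) evaluated at table offset 0x1c would emit "erom[0x1c]: bad entry
 * 0x..." via device_printf() when erom->dev is set, and via printf()
 * otherwise.
 */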
80 
81 /**
82  * Open an EROM table for reading.
83  *
84  * @param[out] erom On success, will be populated with a valid EROM
85  * read state.
86  * @param r An active resource mapping the EROM core.
87  * @param offset Offset of the EROM core within @p r.
88  *
89  * @retval 0 success
90  * @retval non-zero if the erom table could not be opened.
91  */
92 int
93 bcma_erom_open(struct bcma_erom *erom, struct resource *r,
94     bus_size_t offset)
95 {
96 	return (bhnd_erom_bus_space_open(erom, rman_get_device(r),
97 	    rman_get_bustag(r), rman_get_bushandle(r), offset));
100 }
101 
102 /**
103  * Open an EROM table for reading using the provided bus space tag and
104  * handle.
105  *
106  * @param[out] erom On success, will be populated with a valid EROM
107  * read state.
108  * @param dev The owning device, or NULL if none.
109  * @param bst EROM table bus space tag.
110  * @param bsh EROM table bus space handle.
111  * @param offset Offset of the EROM core within the bus space mapped by @p bsh.
112  *
113  * @retval 0 success
114  * @retval non-zero if the erom table could not be opened.
115  */
116 int
117 bhnd_erom_bus_space_open(struct bcma_erom *erom, device_t dev,
118     bus_space_tag_t bst, bus_space_handle_t bsh, bus_size_t offset)
119 {
120 	/* Initialize the EROM reader */
121 	erom->dev = dev;
122 	erom->bst = bst;
123 	erom->bsh = bsh;
124 	erom->start = offset + BCMA_EROM_TABLE_START;
125 	erom->offset = 0;
126 
127 	return (0);
128 }
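
/*
 * Illustrative sketch (the names `bst' and `bsh' are assumed to be provided
 * by the caller): when no struct resource is available, the reader may be
 * initialized directly over a bus space mapping of the EROM core:
 *
 *	struct bcma_erom	erom;
 *	int			error;
 *
 *	error = bhnd_erom_bus_space_open(&erom, NULL, bst, bsh, 0);
 *
 * (the offset of 0 assumes the mapping begins at the EROM core itself)
 */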
129 
130 /** Return the type name for an EROM entry */
131 static const char *
132 erom_entry_type_name(uint8_t entry)
133 {
134 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
135 	case BCMA_EROM_ENTRY_TYPE_CORE:
136 		return "core";
137 	case BCMA_EROM_ENTRY_TYPE_MPORT:
138 		return "mport";
139 	case BCMA_EROM_ENTRY_TYPE_REGION:
140 		return "region";
141 	default:
142 		return "unknown";
143 	}
144 }
145 
146 /**
147  * Return the current read position.
148  */
149 bus_size_t
150 bcma_erom_tell(struct bcma_erom *erom)
151 {
152 	return (erom->offset);
153 }
154 
155 /**
156  * Seek to an absolute read position.
157  */
158 void
159 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
160 {
161 	erom->offset = offset;
162 }
163 
164 /**
165  * Read a 32-bit entry value from the EROM table without advancing the
166  * read position.
167  *
168  * @param erom EROM read state.
169  * @param entry Will contain the read result on success.
170  * @retval 0 success
171  * @retval ENOENT The end of the EROM table was reached.
172  * @retval non-zero The read could not be completed.
173  */
174 int
175 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
176 {
177 	if (erom->offset >= BCMA_EROM_TABLE_SIZE) {
178 		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
179 		return (EINVAL);
180 	}
181 
182 	*entry = bus_space_read_4(erom->bst, erom->bsh,
183 	    erom->start + erom->offset);
184 	return (0);
185 }
186 
187 /**
188  * Read a 32-bit entry value from the EROM table.
189  *
190  * @param erom EROM read state.
191  * @param entry Will contain the read result on success.
192  * @retval 0 success
193  * @retval ENOENT The end of the EROM table was reached.
194  * @retval non-zero The read could not be completed.
195  */
196 static int
197 erom_read32(struct bcma_erom *erom, uint32_t *entry)
198 {
199 	int error;
200 
201 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
202 		erom->offset += 4;
203 
204 	return (error);
205 }
206 
207 /**
208  * Read and discard a 32-bit entry value from the EROM table.
209  *
210  * @param erom EROM read state.
211  * @retval 0 success
212  * @retval ENOENT The end of the EROM table was reached.
213  * @retval non-zero The read could not be completed.
214  */
215 static int
216 erom_skip32(struct bcma_erom *erom)
217 {
218 	uint32_t	entry;
219 
220 	return (erom_read32(erom, &entry));
221 }
222 
223 /**
224  * Read and discard a core descriptor from the EROM table.
225  *
226  * @param erom EROM read state.
227  * @retval 0 success
228  * @retval ENOENT The end of the EROM table was reached.
229  * @retval non-zero The read could not be completed.
230  */
231 static int
232 erom_skip_core(struct bcma_erom *erom)
233 {
234 	struct bcma_erom_core core;
235 	return (bcma_erom_parse_core(erom, &core));
236 }
237 
238 /**
239  * Read and discard a master port descriptor from the EROM table.
240  *
241  * @param erom EROM read state.
242  * @retval 0 success
243  * @retval ENOENT The end of the EROM table was reached.
244  * @retval non-zero The read could not be completed.
245  */
246 static int
247 erom_skip_mport(struct bcma_erom *erom)
248 {
249 	struct bcma_erom_mport mp;
250 	return (bcma_erom_parse_mport(erom, &mp));
251 }
252 
253 /**
254  * Read and discard a port region descriptor from the EROM table.
255  *
256  * @param erom EROM read state.
257  * @retval 0 success
258  * @retval ENOENT The end of the EROM table was reached.
259  * @retval non-zero The read could not be completed.
260  */
261 static int
262 erom_skip_sport_region(struct bcma_erom *erom)
263 {
264 	struct bcma_erom_sport_region r;
265 	return (bcma_erom_parse_sport_region(erom, &r));
266 }
267 
268 /**
269  * Seek to the next entry matching the given EROM entry type.
270  *
271  * @param erom EROM read state.
272  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
273  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
274  * @retval 0 success
275  * @retval ENOENT The end of the EROM table was reached.
276  * @retval non-zero Reading or parsing the descriptor failed.
277  */
278 static int
279 erom_seek_next(struct bcma_erom *erom, uint8_t etype)
280 {
281 	uint32_t			entry;
282 	int				error;
283 
284 	/* Iterate until we hit an entry matching the requested type. */
285 	while (!(error = bcma_erom_peek32(erom, &entry))) {
286 		/* Handle EOF */
287 		if (entry == BCMA_EROM_TABLE_EOF)
288 			return (ENOENT);
289 
290 		/* Invalid entry */
291 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
292 			return (EINVAL);
293 
294 		/* Entry type matches? */
295 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
296 			return (0);
297 
298 		/* Skip non-matching entry types. */
299 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
300 		case BCMA_EROM_ENTRY_TYPE_CORE:
301 			if ((error = erom_skip_core(erom)))
302 				return (error);
303 
304 			break;
305 
306 		case BCMA_EROM_ENTRY_TYPE_MPORT:
307 			if ((error = erom_skip_mport(erom)))
308 				return (error);
309 
310 			break;
311 
312 		case BCMA_EROM_ENTRY_TYPE_REGION:
313 			if ((error = erom_skip_sport_region(erom)))
314 				return (error);
315 			break;
316 
317 		default:
318 			/* Unknown entry type! */
319 			return (EINVAL);
320 		}
321 	}
322 
323 	return (error);
324 }
325 
326 /**
327  * Return the read position to the start of the EROM table.
328  *
329  * @param erom EROM read state.
330  */
331 void
332 bcma_erom_reset(struct bcma_erom *erom)
333 {
334 	erom->offset = 0;
335 }
336 
337 /**
338  * Seek to the next core entry.
339  *
340  * @param erom EROM read state.
341  * @retval 0 success
342  * @retval ENOENT The end of the EROM table was reached.
343  * @retval non-zero Reading or parsing failed.
344  */
345 int
346 bcma_erom_seek_next_core(struct bcma_erom *erom)
347 {
348 	return (erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE));
349 }
350 
351 /**
352  * Seek to the requested core entry.
353  *
354  * @param erom EROM read state.
355  * @param core_index Index of the core to seek to.
356  * @retval 0 success
357  * @retval ENOENT The end of the EROM table was reached before @p core_index
358  * was found.
359  * @retval non-zero Reading or parsing failed.
360  */
361 int
362 bcma_erom_seek_core_index(struct bcma_erom *erom, u_int core_index)
363 {
364 	int error;
365 
366 	/* Start search at top of EROM */
367 	bcma_erom_reset(erom);
368 
369 	/* Skip core descriptors till we hit the requested entry */
370 	for (u_int i = 0; i < core_index; i++) {
371 		struct bcma_erom_core core;
372 
373 		/* Read past the core descriptor */
374 		if ((error = bcma_erom_parse_core(erom, &core)))
375 			return (error);
376 
377 		/* Seek to the next readable core entry */
378 		error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
379 		if (error)
380 			return (error);
381 	}
382 
383 	return (0);
384 }
385 
387 /**
388  * Read the next core descriptor from the EROM table.
389  *
390  * @param erom EROM read state.
391  * @param[out] core On success, will be populated with the parsed core
392  * descriptor data.
393  * @retval 0 success
394  * @retval ENOENT The end of the EROM table was reached.
395  * @retval non-zero Reading or parsing the core descriptor failed.
396  */
397 int
398 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
399 {
400 	uint32_t	entry;
401 	int		error;
402 
403 	/* Parse CoreDescA */
404 	if ((error = erom_read32(erom, &entry)))
405 		return (error);
406 
407 	/* Handle EOF */
408 	if (entry == BCMA_EROM_TABLE_EOF)
409 		return (ENOENT);
410 
411 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
412 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
413 		    entry, erom_entry_type_name(entry));
414 
415 		return (EINVAL);
416 	}
417 
418 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
419 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
420 
421 	/* Parse CoreDescB */
422 	if ((error = erom_read32(erom, &entry)))
423 		return (error);
424 
425 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
426 		return (EINVAL);
427 	}
428 
429 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
430 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
431 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
432 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
433 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
434 
435 	return (0);
436 }
437 
438 /**
439  * Seek to a region record associated with @p core_index.
440  *
441  * @param erom EROM read state.
442  * @param core_index The index of the core record to be searched.
443  * @param port_type The port type to search for.
444  * @param port_num The port number to search for.
445  * @param region_num The region number to search for.
446  * @retval 0 success
447  * @retval ENOENT The requested region was not found.
448  * @retval non-zero Reading or parsing failed.
449  */
450 int
451 bcma_erom_seek_core_sport_region(struct bcma_erom *erom, u_int core_index,
452     bhnd_port_type port_type, u_int port_num, u_int region_num)
453 {
454 	struct bcma_erom_core	core;
455 	uint32_t		entry;
456 	uint8_t			region_port, region_type;
457 	bool			found;
458 	int			error;
459 
460 	if ((error = bcma_erom_seek_core_index(erom, core_index)))
461 		return (error);
462 
463 	if ((error = bcma_erom_parse_core(erom, &core)))
464 		return (error);
465 
466 	/* Skip master ports */
467 	for (u_long i = 0; i < core.num_mport; i++) {
468 		if ((error = erom_skip_mport(erom)))
469 			return (error);
470 	}
471 
472 	/* Seek to the region block for the given port type */
473 	found = false;
474 	while (1) {
475 		bhnd_port_type	p_type;
476 		uint8_t		r_type;
477 
478 		if ((error = bcma_erom_peek32(erom, &entry)))
479 			return (error);
480 
481 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
482 			return (ENOENT);
483 
484 		/* Expected region type? */
485 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
486 		if ((error = erom_region_to_port_type(erom, r_type, &p_type)))
487 			return (error);
488 
489 		if (p_type == port_type) {
490 			found = true;
491 			break;
492 		}
493 
494 		/* Skip to next entry */
495 		if ((error = erom_skip_sport_region(erom)))
496 			return (error);
497 	}
498 
499 	if (!found)
500 		return (ENOENT);
501 
502 	/* Found the appropriate port type block; now find the region records
503 	 * for the given port number */
504 	found = false;
505 	for (u_int i = 0; i <= port_num; i++) {
506 		bhnd_port_type	p_type;
507 
508 		if ((error = bcma_erom_peek32(erom, &entry)))
509 			return (error);
510 
511 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
512 			return (ENOENT);
513 
514 		/* Fetch the type/port of the first region entry */
515 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
516 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
517 
518 		/* Have we found the region entries for the desired port? */
519 		if (i == port_num) {
520 			error = erom_region_to_port_type(erom, region_type,
521 			    &p_type);
522 			if (error)
523 				return (error);
524 
525 			if (p_type == port_type)
526 				found = true;
527 
528 			break;
529 		}
530 
531 		/* Otherwise, seek to next block of region records */
532 		while (1) {
533 			uint8_t	next_type, next_port;
534 
535 			if ((error = erom_skip_sport_region(erom)))
536 				return (error);
537 
538 			if ((error = bcma_erom_peek32(erom, &entry)))
539 				return (error);
540 
541 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
542 				return (ENOENT);
543 
544 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
545 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
546 
547 			if (next_type != region_type ||
548 			    next_port != region_port)
549 				break;
550 		}
551 	}
552 
553 	if (!found)
554 		return (ENOENT);
555 
556 	/* Finally, search for the requested region number */
557 	for (u_int i = 0; i <= region_num; i++) {
558 		uint8_t	next_port, next_type;
559 
560 		if ((error = bcma_erom_peek32(erom, &entry)))
561 			return (error);
562 
563 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
564 			return (ENOENT);
565 
566 		/* Check for the end of the region block */
567 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
568 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
569 
570 		if (next_type != region_type ||
571 		    next_port != region_port)
572 			break;
573 
574 		if (i == region_num)
575 			return (0);
576 
577 		if ((error = erom_skip_sport_region(erom)))
578 			return (error);
579 	}
580 
581 	/* Not found */
582 	return (ENOENT);
583 }
584 
585 /**
586  * Read the next master port descriptor from the EROM table.
587  *
588  * @param erom EROM read state.
589  * @param[out] mport On success, will be populated with the parsed
590  * descriptor data.
591  * @retval 0 success
592  * @retval non-zero Reading or parsing the descriptor failed.
593  */
594 int
595 bcma_erom_parse_mport(struct bcma_erom *erom,
596     struct bcma_erom_mport *mport)
597 {
598 	uint32_t	entry;
599 	int		error;
600 
601 	/* Parse the master port descriptor */
602 	if ((error = erom_read32(erom, &entry)))
603 		return (error);
604 
605 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
606 		return (EINVAL);
607 
608 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
609 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
610 
611 	return (0);
612 }
613 
614 /**
615  * Read the next slave port region descriptor from the EROM table.
616  *
617  * @param erom EROM read state.
618  * @param[out] region On success, will be populated with the parsed
619  * descriptor data.
620  * @retval 0 success
621  * @retval ENOENT The end of the region descriptor table was reached.
622  * @retval non-zero Reading or parsing the descriptor failed.
623  */
624 int
625 bcma_erom_parse_sport_region(struct bcma_erom *erom,
626     struct bcma_erom_sport_region *region)
627 {
628 	uint32_t	entry;
629 	uint8_t		size_type;
630 	int		error;
631 
632 	/* Peek at the region descriptor */
633 	if (bcma_erom_peek32(erom, &entry))
634 		return (EINVAL);
635 
636 	/* A non-region entry signals the end of the region table */
637 	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
638 		return (ENOENT);
639 	} else {
640 		erom_skip32(erom);
641 	}
642 
643 	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
644 	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
645 	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
646 	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
647 
648 	/* If region address is 64-bit, fetch the high bits. */
649 	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
650 		if ((error = erom_read32(erom, &entry)))
651 			return (error);
652 
653 		region->base_addr |= ((bhnd_addr_t) entry << 32);
654 	}
655 
656 	/* Parse the region size; it's either encoded as the binary logarithm
657 	 * of the number of 4K pages (i.e. log2 n), or it's encoded as a
658 	 * 32-bit/64-bit literal value directly following the current entry. */
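	/* For example, under the log2 encoding a size_type of 0 denotes a
	 * single 4K page (BCMA_EROM_REGION_SIZE_BASE) and a size_type of 4
	 * denotes 64KB; only BCMA_EROM_REGION_SIZE_OTHER selects a literal
	 * size value read from the following entry (or entries). */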
659 	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
660 		if ((error = erom_read32(erom, &entry)))
661 			return (error);
662 
663 		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
664 
665 		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
666 			if ((error = erom_read32(erom, &entry)))
667 				return (error);
668 			region->size |= ((bhnd_size_t) entry << 32);
669 		}
670 	} else {
671 		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
672 	}
673 
674 	/* Verify that addr+size does not overflow. */
675 	if (region->size != 0 &&
676 	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
677 	{
678 		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
679 		    erom_entry_type_name(region->region_type),
680 		    region->region_port,
681 		    (unsigned long long) region->base_addr,
682 		    (unsigned long long) region->size);
683 
684 		return (EINVAL);
685 	}
686 
687 	return (0);
688 }
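
/*
 * Illustrative sketch: a port's region descriptors may be read back-to-back
 * until ENOENT signals the end of the region block:
 *
 *	struct bcma_erom_sport_region	region;
 *	int				error;
 *
 *	while ((error = bcma_erom_parse_sport_region(&erom, &region)) == 0) {
 *		...	region.base_addr, region.size, region.region_type,
 *			and region.region_port are now valid.
 *	}
 *	if (error != ENOENT)
 *		return (error);
 */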
689 
690 /**
691  * Convert a bcma_erom_core record to its bhnd_core_info representation.
692  *
693  * @param core EROM core record to convert.
694  * @param core_idx The core index of @p core.
695  * @param core_unit The core unit of @p core.
696  * @param[out] info The populated bhnd_core_info representation.
697  */
698 void
699 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
700     int core_unit, struct bhnd_core_info *info)
701 {
702 	info->vendor = core->vendor;
703 	info->device = core->device;
704 	info->hwrev = core->rev;
705 	info->core_idx = core_idx;
706 	info->unit = core_unit;
707 }
708 
709 /**
710  * Parse all core descriptors from @p erom and return the array
711  * in @p cores and the count in @p num_cores. The current EROM read position
712  * is left unmodified.
713  *
714  * The memory allocated for the table should be freed using
715  * `free(*cores, M_BHND)`. @p cores and @p num_cores are not changed
716  * when an error is returned.
717  *
718  * @param erom EROM read state.
719  * @param[out] cores the table of parsed core descriptors.
720  * @param[out] num_cores the number of core records in @p cores.
721  */
722 int
723 bcma_erom_get_core_info(struct bcma_erom *erom,
724     struct bhnd_core_info **cores,
725     u_int *num_cores)
726 {
727 	struct bhnd_core_info	*buffer;
728 	bus_size_t		 initial_offset;
729 	u_int			 count;
730 	int			 error;
731 
732 	buffer = NULL;
733 	initial_offset = bcma_erom_tell(erom);
734 
735 	/* Determine the core count */
736 	bcma_erom_reset(erom);
737 	for (count = 0, error = 0; !error; count++) {
738 		struct bcma_erom_core core;
739 
740 		/* Seek to the first readable core entry */
741 		error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
742 		if (error == ENOENT)
743 			break;
744 		else if (error)
745 			goto cleanup;
746 
747 		/* Read past the core descriptor */
748 		if ((error = bcma_erom_parse_core(erom, &core)))
749 			goto cleanup;
750 	}
751 
752 	/* Allocate our output buffer */
753 	buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
754 	    M_NOWAIT);
755 	if (buffer == NULL) {
756 		error = ENOMEM;
757 		goto cleanup;
758 	}
759 
760 	/* Parse all core descriptors */
761 	bcma_erom_reset(erom);
762 	for (u_int i = 0; i < count; i++) {
763 		struct bcma_erom_core	core;
764 		int			unit;
765 
766 		/* Parse the core */
767 		error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
768 		if (error)
769 			goto cleanup;
770 
771 		error = bcma_erom_parse_core(erom, &core);
772 		if (error)
773 			goto cleanup;
774 
775 		/* Determine the unit number */
776 		unit = 0;
777 		for (u_int j = 0; j < i; j++) {
778 			if (core.vendor == buffer[j].vendor &&
779 			    core.device == buffer[j].device)
780 				unit++;
781 		}
782 
783 		/* Convert to a bhnd info record */
784 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
785 	}
786 
787 cleanup:
788 	if (!error) {
789 		*cores = buffer;
790 		*num_cores = count;
791 	} else {
792 		if (buffer != NULL)
793 			free(buffer, M_BHND);
794 	}
795 
796 	/* Restore the initial position */
797 	bcma_erom_seek(erom, initial_offset);
798 	return (error);
799 }
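
/*
 * Illustrative usage sketch, assuming `erom' has been opened as above:
 *
 *	struct bhnd_core_info	*cores;
 *	u_int			 num_cores;
 *	int			 error;
 *
 *	if ((error = bcma_erom_get_core_info(&erom, &cores, &num_cores)))
 *		return (error);
 *	...
 *	free(cores, M_BHND);
 */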
800 
801 /**
802  * Map an EROM region type to its corresponding port type.
803  *
804  * @param region_type Region type value.
805  * @param[out] port_type On success, the corresponding port type.
806  */
807 static int
808 erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
809     bhnd_port_type *port_type)
810 {
811 	switch (region_type) {
812 	case BCMA_EROM_REGION_TYPE_DEVICE:
813 		*port_type = BHND_PORT_DEVICE;
814 		return (0);
815 	case BCMA_EROM_REGION_TYPE_BRIDGE:
816 		*port_type = BHND_PORT_BRIDGE;
817 		return (0);
818 	case BCMA_EROM_REGION_TYPE_MWRAP:
819 	case BCMA_EROM_REGION_TYPE_SWRAP:
820 		*port_type = BHND_PORT_AGENT;
821 		return (0);
822 	default:
823 		EROM_LOG(erom, "unsupported region type %hhx\n",
824 			region_type);
825 		return (EINVAL);
826 	}
827 }
828 
829 /**
830  * Register all MMIO region descriptors for the given slave port.
831  *
832  * @param erom EROM read state.
833  * @param corecfg Core info to be populated with the scanned port regions.
834  * @param port_num Port index for which regions will be parsed.
835  * @param region_type The region type to be parsed.
836  * @retval 0 success
837  * @retval non-zero reading or parsing the port's region descriptors failed.
838  */
839 static int
840 erom_corecfg_fill_port_regions(struct bcma_erom *erom,
841     struct bcma_corecfg *corecfg, bcma_pid_t port_num,
842     uint8_t region_type)
843 {
844 	struct bcma_sport	*sport;
845 	struct bcma_sport_list	*sports;
846 	bus_size_t		 entry_offset;
847 	int			 error;
848 	bhnd_port_type		 port_type;
849 
850 	error = 0;
851 
852 	/* Determine the port type for this region type. */
853 	if ((error = erom_region_to_port_type(erom, region_type, &port_type)))
854 		return (error);
855 
856 	/* Fetch the list to be populated */
857 	sports = bcma_corecfg_get_port_list(corecfg, port_type);
858 
859 	/* Allocate a new port descriptor */
860 	sport = bcma_alloc_sport(port_num, port_type);
861 	if (sport == NULL)
862 		return (ENOMEM);
863 
864 	/* Read all address regions defined for this port */
865 	for (bcma_rmid_t region_num = 0;; region_num++) {
866 		struct bcma_map			*map;
867 		struct bcma_erom_sport_region	 spr;
868 
869 		/* No valid port definition should come anywhere near
870 		 * BCMA_RMID_MAX. */
871 		if (region_num == BCMA_RMID_MAX) {
872 			EROM_LOG(erom, "core%u %s%u: region count reached "
873 			    "upper limit of %u\n",
874 			    corecfg->core_info.core_idx,
875 			    bhnd_port_type_name(port_type),
876 			    port_num, BCMA_RMID_MAX);
877 
878 			error = EINVAL;
879 			goto cleanup;
880 		}
881 
882 		/* Parse the next region entry. */
883 		entry_offset = bcma_erom_tell(erom);
884 		error = bcma_erom_parse_sport_region(erom, &spr);
885 		if (error && error != ENOENT) {
886 			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
887 			    "address region\n",
888 			    corecfg->core_info.core_idx,
889 			    bhnd_port_type_name(port_type),
890 			    port_num, region_num);
891 			goto cleanup;
892 		}
893 
894 		/* ENOENT signals no further region entries */
895 		if (error == ENOENT) {
896 			/* No further entries */
897 			error = 0;
898 			break;
899 		}
900 
901 		/* A port or region type mismatch also signals no further region
902 		 * entries */
903 		if (spr.region_port != port_num ||
904 		    spr.region_type != region_type)
905 		{
906 			/* We don't want to consume this entry */
907 			bcma_erom_seek(erom, entry_offset);
908 
909 			error = 0;
910 			goto cleanup;
911 		}
912 
913 		/*
914 		 * Create the map entry.
915 		 */
916 		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
917 		if (map == NULL) {
918 			error = ENOMEM;
919 			goto cleanup;
920 		}
921 
922 		map->m_region_num = region_num;
923 		map->m_base = spr.base_addr;
924 		map->m_size = spr.size;
925 		map->m_rid = -1;
926 
927 		/* Add the region map to the port */
928 		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
929 		sport->sp_num_maps++;
930 	}
931 
932 cleanup:
933 	/* Append the new port descriptor on success, or deallocate the
934 	 * partially parsed descriptor on failure. */
935 	if (error == 0) {
936 		STAILQ_INSERT_TAIL(sports, sport, sp_link);
937 	} else if (sport != NULL) {
938 		bcma_free_sport(sport);
939 	}
940 
941 	return (error);
942 }
943 
944 /**
945  * Parse the next core entry from the EROM table and produce a bcma_corecfg
946  * to be owned by the caller.
947  *
948  * @param erom EROM read state.
949  * @param[out] result On success, the core's device info. The caller inherits
950  * ownership of this allocation.
951  *
952  * @return If successful, returns 0. If the end of the EROM table is hit,
953  * ENOENT will be returned. On error, returns a non-zero error value.
954  */
955 int
956 bcma_erom_parse_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
957 {
958 	struct bcma_corecfg	*cfg;
959 	struct bcma_erom_core	 core;
960 	uint8_t			 first_region_type;
961 	bus_size_t		 initial_offset;
962 	u_int			 core_index;
963 	int			 core_unit;
964 	int			 error;
965 
966 	cfg = NULL;
967 	initial_offset = bcma_erom_tell(erom);
968 
969 	/* Parse the next core entry */
970 	if ((error = bcma_erom_parse_core(erom, &core)))
971 		return (error);
972 
973 	/* Determine the core's index and unit numbers */
974 	bcma_erom_reset(erom);
975 	core_unit = 0;
976 	core_index = 0;
977 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
978 		struct bcma_erom_core prev_core;
979 
980 		/* Parse next core */
981 		if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
982 			return (error);
983 
984 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
985 			return (error);
986 
987 		/* Is this an earlier unit of the same core type? */
988 		if (core.vendor == prev_core.vendor &&
989 		    core.device == prev_core.device)
990 		{
991 			core_unit++;
992 		}
993 
994 		/* Seek to next core */
995 		if ((error = erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE)))
996 			return (error);
997 	}
998 
999 	/* We already parsed the core descriptor */
1000 	if ((error = erom_skip_core(erom)))
1001 		return (error);
1002 
1003 	/* Allocate our corecfg */
1004 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1005 	    core.device, core.rev);
1006 	if (cfg == NULL)
1007 		return (ENOMEM);
1008 
1009 	/* These are 5-bit values in the EROM table, and should never be able
1010 	 * to overflow BCMA_PID_MAX. */
1011 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1012 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1013 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1014 	    ("unsupported wport count"));
1015 
1016 	if (bootverbose) {
1017 		EROM_LOG(erom,
1018 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1019 		    core_index,
1020 		    bhnd_vendor_name(core.vendor),
1021 		    bhnd_find_core_name(core.vendor, core.device),
1022 		    core.device, core.rev, core_unit);
1023 	}
1024 
1025 	cfg->num_master_ports = core.num_mport;
1026 	cfg->num_dev_ports = 0;		/* determined below */
1027 	cfg->num_bridge_ports = 0;	/* determined below */
1028 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1029 
1030 	/* Parse Master Port Descriptors */
1031 	for (uint8_t i = 0; i < core.num_mport; i++) {
1032 		struct bcma_mport	*mport;
1033 		struct bcma_erom_mport	 mpd;
1034 
1035 		/* Parse the master port descriptor */
1036 		error = bcma_erom_parse_mport(erom, &mpd);
1037 		if (error)
1038 			goto failed;
1039 
1040 		/* Initialize a new bus mport structure */
1041 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1042 		if (mport == NULL) {
1043 			error = ENOMEM;
1044 			goto failed;
1045 		}
1046 
1047 		mport->mp_vid = mpd.port_vid;
1048 		mport->mp_num = mpd.port_num;
1049 
1050 		/* Update dinfo */
1051 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1052 	}
1053 
1054 
1055 	/*
1056 	 * Determine whether this is a bridge device; if so, we can
1057 	 * expect the first sequence of address region descriptors to
1058 	 * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
1059 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1060 	 *
1061 	 * It's unclear whether this is the correct mechanism by which we
1062 	 * should detect/handle bridge devices, but this approach matches
1063 	 * that of (some of) Broadcom's published drivers.
1064 	 */
1065 	if (core.num_dport > 0) {
1066 		uint32_t entry;
1067 
1068 		if ((error = bcma_erom_peek32(erom, &entry)))
1069 			goto failed;
1070 
1071 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1072 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1073 		{
1074 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1075 			cfg->num_dev_ports = 0;
1076 			cfg->num_bridge_ports = core.num_dport;
1077 		} else {
1078 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1079 			cfg->num_dev_ports = core.num_dport;
1080 			cfg->num_bridge_ports = 0;
1081 		}
1082 	}
1083 
1084 	/* Device/bridge port descriptors */
1085 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1086 		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1087 		    first_region_type);
1088 
1089 		if (error)
1090 			goto failed;
1091 	}
1092 
1093 	/* Wrapper (aka device management) descriptors (for master ports). */
1094 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1095 		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1096 		    BCMA_EROM_REGION_TYPE_MWRAP);
1097 
1098 		if (error)
1099 			goto failed;
1100 	}
1101 
1103 	/* Wrapper (aka device management) descriptors (for slave ports). */
1104 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1105 		/* Slave wrapper ports are not numbered distinctly from master
1106 		 * wrapper ports. */
1107 
1108 		/*
1109 		 * The slave wrapper port number cannot be derived directly
1110 		 * from core.num_mwrap; mismatches were observed with:
1111 		 *
1112 		 * Broadcom DDR1/DDR2 Memory Controller
1113 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1) ->
1114 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1115 		 *
1116 		 * ARM BP135 AMBA3 AXI to APB Bridge
1117 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1) ->
1118 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1119 		 *
1120 		 * The port number below is therefore derived from (core.num_mwrap > 0) ?
1121 		 *   core.num_mwrap : ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1122 		 */
1123 		uint8_t sp_num;
1124 		sp_num = (core.num_mwrap > 0) ?
1125 				core.num_mwrap :
1126 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1127 		error = erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1128 		    BCMA_EROM_REGION_TYPE_SWRAP);
1129 
1130 		if (error)
1131 			goto failed;
1132 	}
1133 
1134 	*result = cfg;
1135 	return (0);
1136 
1137 failed:
1138 	if (cfg != NULL)
1139 		bcma_free_corecfg(cfg);
1140 
1141 	return (error);
1142 }
1143