1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
5  * Copyright (c) 2017 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Landon Fuller
9  * under sponsorship from the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19  *    redistribution must be conditioned upon including a substantially
20  *    similar Disclaimer requirement for further binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGES.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/systm.h>
44 
45 #include <machine/bus.h>
46 #include <machine/resource.h>
47 
48 #include <dev/bhnd/bhnd_eromvar.h>
49 
50 #include "bcma_eromreg.h"
51 #include "bcma_eromvar.h"
52 
53 /*
54  * BCMA Enumeration ROM (EROM) Table
55  *
56  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
57  *
 * The address of the EROM table is exposed via the BCMA_CC_EROM_ADDR
 * register within the ChipCommon register block. The table itself consists
 * of 32-bit type-tagged entries, organized into an array of variable-length
 * core descriptor records.
62  *
63  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
64  * marker.
65  */
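
/*
 * Each core descriptor record consists of:
 *
 *   - Two core entries (CoreDescA/CoreDescB) identifying the core's
 *     designer, part number, revision, and port counts.
 *   - Zero or more master port descriptors.
 *   - Zero or more slave port address region descriptors, grouped by
 *     port number and region type (device, bridge, or agent/wrapper).
 *
 * See bcma_erom_parse_core() and bcma_erom_next_corecfg() below.
 *
 * Illustrative usage (a sketch only; bhnd_erom_alloc(), bhnd_erom_free(),
 * and the bhnd_erom_*() method wrappers are provided by the generic
 * bhnd_erom code, and the `cid' and `eio' arguments are assumed to be
 * supplied by the parent bus):
 *
 *	bhnd_erom_t		*erom;
 *	struct bhnd_core_info	*cores;
 *	u_int			 ncores;
 *
 *	erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio);
 *	if (erom == NULL)
 *		return (ENXIO);
 *
 *	if (bhnd_erom_get_core_table(erom, &cores, &ncores) == 0) {
 *		... use the core table ...
 *		bhnd_erom_free_core_table(erom, cores);
 *	}
 *
 *	bhnd_erom_free(erom);
 */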
66 
67 static const char	*bcma_erom_entry_type_name (uint8_t entry);
68 
69 static int		 bcma_erom_read32(struct bcma_erom *erom,
70 			     uint32_t *entry);
71 static int		 bcma_erom_skip32(struct bcma_erom *erom);
72 
73 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
74 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
75 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
76 
77 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
78 			     uint8_t etype);
79 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
80 			     uint8_t region_type, bhnd_port_type *port_type);
81 
82 static int		 bcma_erom_peek32(struct bcma_erom *erom,
83 			     uint32_t *entry);
84 
85 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
86 static void		 bcma_erom_seek(struct bcma_erom *erom,
87 			     bus_size_t offset);
88 static void		 bcma_erom_reset(struct bcma_erom *erom);
89 
90 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
91 			     const struct bhnd_core_match *desc,
92 			     struct bhnd_core_info *core);
93 
94 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
95 			     struct bcma_erom_core *core);
96 
97 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
98 			     struct bcma_erom_mport *mport);
99 
100 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
101 			     struct bcma_erom_sport_region *region);
102 
103 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
104 			     u_int core_idx, int core_unit,
105 			     struct bhnd_core_info *info);
106 
107 /**
108  * BCMA EROM per-instance state.
109  */
110 struct bcma_erom {
	struct bhnd_erom	 obj;		/**< bhnd_erom base state; must be first */
112 	device_t	 	 dev;		/**< parent device, or NULL if none. */
113 	struct bhnd_erom_io	*eio;		/**< bus I/O callbacks */
114 	bhnd_size_t	 	 offset;	/**< current read offset */
115 };
116 
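/**
 * Log a formatted message, prefixed with the caller's function name and the
 * current EROM read offset.
 */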
117 #define	EROM_LOG(erom, fmt, ...)	do {			\
118 	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
119 	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
120 } while(0)
121 
122 /** Return the type name for an EROM entry */
123 static const char *
124 bcma_erom_entry_type_name (uint8_t entry)
125 {
126 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
127 	case BCMA_EROM_ENTRY_TYPE_CORE:
128 		return "core";
129 	case BCMA_EROM_ENTRY_TYPE_MPORT:
130 		return "mport";
131 	case BCMA_EROM_ENTRY_TYPE_REGION:
132 		return "region";
133 	default:
134 		return "unknown";
135 	}
136 }
137 
138 /* BCMA implementation of BHND_EROM_INIT() */
139 static int
140 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
141     struct bhnd_erom_io *eio)
142 {
143 	struct bcma_erom	*sc;
144 	bhnd_addr_t		 table_addr;
145 	int			 error;
146 
147 	sc = (struct bcma_erom *)erom;
148 	sc->eio = eio;
149 	sc->offset = 0;
150 
151 	/* Determine erom table address */
152 	if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
153 		return (ENXIO); /* would overflow */
154 
155 	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
156 
157 	/* Try to map the erom table */
158 	error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
159 	if (error)
160 		return (error);
161 
162 	return (0);
163 }
164 
165 /* BCMA implementation of BHND_EROM_PROBE() */
166 static int
167 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
168     const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
169 {
170 	int error;
171 
172 	/* Hints aren't supported; all BCMA devices have a ChipCommon
173 	 * core */
174 	if (hint != NULL)
175 		return (EINVAL);
176 
177 	/* Read and parse chip identification */
178 	if ((error = bhnd_erom_read_chipid(eio, cid)))
179 		return (error);
180 
181 	/* Verify chip type */
182 	switch (cid->chip_type) {
183 		case BHND_CHIPTYPE_BCMA:
184 			return (BUS_PROBE_DEFAULT);
185 
186 		case BHND_CHIPTYPE_BCMA_ALT:
187 		case BHND_CHIPTYPE_UBUS:
188 			return (BUS_PROBE_GENERIC);
189 
190 		default:
191 			return (ENXIO);
192 	}
193 }
194 
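/* BCMA implementation of BHND_EROM_FINI() */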
195 static void
196 bcma_erom_fini(bhnd_erom_t *erom)
197 {
198 	struct bcma_erom *sc = (struct bcma_erom *)erom;
199 
200 	bhnd_erom_io_fini(sc->eio);
201 }
202 
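/* BCMA implementation of BHND_EROM_LOOKUP_CORE() */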
203 static int
204 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
205     struct bhnd_core_info *core)
206 {
207 	struct bcma_erom *sc = (struct bcma_erom *)erom;
208 
209 	/* Search for the first matching core */
210 	return (bcma_erom_seek_matching_core(sc, desc, core));
211 }
212 
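/* BCMA implementation of BHND_EROM_LOOKUP_CORE_ADDR() */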
213 static int
214 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
215     bhnd_port_type port_type, u_int port_num, u_int region_num,
216     struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
217 {
218 	struct bcma_erom	*sc;
219 	struct bcma_erom_core	 ec;
220 	uint32_t		 entry;
221 	uint8_t			 region_port, region_type;
222 	bool			 found;
223 	int			 error;
224 
225 	sc = (struct bcma_erom *)erom;
226 
227 	/* Seek to the first matching core and provide the core info
228 	 * to the caller */
229 	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
230 		return (error);
231 
232 	if ((error = bcma_erom_parse_core(sc, &ec)))
233 		return (error);
234 
235 	/* Skip master ports */
236 	for (u_long i = 0; i < ec.num_mport; i++) {
237 		if ((error = bcma_erom_skip_mport(sc)))
238 			return (error);
239 	}
240 
241 	/* Seek to the region block for the given port type */
242 	found = false;
243 	while (1) {
244 		bhnd_port_type	p_type;
245 		uint8_t		r_type;
246 
247 		if ((error = bcma_erom_peek32(sc, &entry)))
248 			return (error);
249 
250 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
251 			return (ENOENT);
252 
253 		/* Expected region type? */
254 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
255 		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
256 		if (error)
257 			return (error);
258 
259 		if (p_type == port_type) {
260 			found = true;
261 			break;
262 		}
263 
264 		/* Skip to next entry */
265 		if ((error = bcma_erom_skip_sport_region(sc)))
266 			return (error);
267 	}
268 
269 	if (!found)
270 		return (ENOENT);
271 
272 	/* Found the appropriate port type block; now find the region records
273 	 * for the given port number */
274 	found = false;
275 	for (u_int i = 0; i <= port_num; i++) {
276 		bhnd_port_type	p_type;
277 
278 		if ((error = bcma_erom_peek32(sc, &entry)))
279 			return (error);
280 
281 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
282 			return (ENOENT);
283 
284 		/* Fetch the type/port of the first region entry */
285 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
286 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
287 
288 		/* Have we found the region entries for the desired port? */
289 		if (i == port_num) {
290 			error = bcma_erom_region_to_port_type(sc, region_type,
291 			    &p_type);
292 			if (error)
293 				return (error);
294 
295 			if (p_type == port_type)
296 				found = true;
297 
298 			break;
299 		}
300 
301 		/* Otherwise, seek to next block of region records */
302 		while (1) {
303 			uint8_t	next_type, next_port;
304 
305 			if ((error = bcma_erom_skip_sport_region(sc)))
306 				return (error);
307 
308 			if ((error = bcma_erom_peek32(sc, &entry)))
309 				return (error);
310 
311 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
312 				return (ENOENT);
313 
314 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
315 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
316 
317 			if (next_type != region_type ||
318 			    next_port != region_port)
319 				break;
320 		}
321 	}
322 
323 	if (!found)
324 		return (ENOENT);
325 
326 	/* Finally, search for the requested region number */
327 	for (u_int i = 0; i <= region_num; i++) {
328 		struct bcma_erom_sport_region	region;
329 		uint8_t				next_port, next_type;
330 
331 		if ((error = bcma_erom_peek32(sc, &entry)))
332 			return (error);
333 
334 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
335 			return (ENOENT);
336 
337 		/* Check for the end of the region block */
338 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
339 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
340 
341 		if (next_type != region_type ||
342 		    next_port != region_port)
343 			break;
344 
345 		/* Parse the region */
346 		if ((error = bcma_erom_parse_sport_region(sc, &region)))
347 			return (error);
348 
349 		/* Is this our target region_num? */
350 		if (i == region_num) {
351 			/* Found */
352 			*addr = region.base_addr;
353 			*size = region.size;
354 			return (0);
355 		}
356 	}
357 
358 	/* Not found */
359 	return (ENOENT);
}
361 
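/* BCMA implementation of BHND_EROM_GET_CORE_TABLE() */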
362 static int
363 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
364     u_int *num_cores)
365 {
366 	struct bcma_erom	*sc;
367 	struct bhnd_core_info	*buffer;
368 	bus_size_t		 initial_offset;
369 	u_int			 count;
370 	int			 error;
371 
372 	sc = (struct bcma_erom *)erom;
373 
374 	buffer = NULL;
375 	initial_offset = bcma_erom_tell(sc);
376 
377 	/* Determine the core count */
378 	bcma_erom_reset(sc);
379 	for (count = 0, error = 0; !error; count++) {
380 		struct bcma_erom_core core;
381 
382 		/* Seek to the first readable core entry */
383 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
384 		if (error == ENOENT)
385 			break;
386 		else if (error)
387 			goto cleanup;
388 
389 		/* Read past the core descriptor */
390 		if ((error = bcma_erom_parse_core(sc, &core)))
391 			goto cleanup;
392 	}
393 
394 	/* Allocate our output buffer */
395 	buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
396 	    M_NOWAIT);
397 	if (buffer == NULL) {
398 		error = ENOMEM;
399 		goto cleanup;
400 	}
401 
402 	/* Parse all core descriptors */
403 	bcma_erom_reset(sc);
404 	for (u_int i = 0; i < count; i++) {
405 		struct bcma_erom_core	core;
406 		int			unit;
407 
408 		/* Parse the core */
409 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
410 		if (error)
411 			goto cleanup;
412 
413 		error = bcma_erom_parse_core(sc, &core);
414 		if (error)
415 			goto cleanup;
416 
417 		/* Determine the unit number */
418 		unit = 0;
419 		for (u_int j = 0; j < i; j++) {
			if (core.vendor == buffer[j].vendor &&
			    core.device == buffer[j].device)
422 				unit++;
423 		}
424 
425 		/* Convert to a bhnd info record */
426 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
427 	}
428 
429 cleanup:
430 	if (!error) {
431 		*cores = buffer;
432 		*num_cores = count;
433 	} else {
434 		if (buffer != NULL)
435 			free(buffer, M_BHND);
436 	}
437 
438 	/* Restore the initial position */
439 	bcma_erom_seek(sc, initial_offset);
440 	return (error);
441 }
442 
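/* BCMA implementation of BHND_EROM_FREE_CORE_TABLE() */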
443 static void
444 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
445 {
446 	free(cores, M_BHND);
447 }
448 
449 /**
450  * Return the current read position.
451  */
452 static bus_size_t
453 bcma_erom_tell(struct bcma_erom *erom)
454 {
455 	return (erom->offset);
456 }
457 
458 /**
459  * Seek to an absolute read position.
460  */
461 static void
462 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
463 {
464 	erom->offset = offset;
465 }
466 
467 /**
468  * Read a 32-bit entry value from the EROM table without advancing the
469  * read position.
470  *
471  * @param erom EROM read state.
472  * @param entry Will contain the read result on success.
473  * @retval 0 success
474  * @retval ENOENT The end of the EROM table was reached.
475  * @retval non-zero The read could not be completed.
476  */
477 static int
478 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
479 {
480 	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
481 		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
482 		return (EINVAL);
483 	}
484 
485 	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
486 	return (0);
487 }
488 
489 /**
490  * Read a 32-bit entry value from the EROM table.
491  *
492  * @param erom EROM read state.
493  * @param entry Will contain the read result on success.
494  * @retval 0 success
495  * @retval ENOENT The end of the EROM table was reached.
496  * @retval non-zero The read could not be completed.
497  */
498 static int
499 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
500 {
501 	int error;
502 
503 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
504 		erom->offset += 4;
505 
506 	return (error);
507 }
508 
509 /**
510  * Read and discard 32-bit entry value from the EROM table.
511  *
512  * @param erom EROM read state.
513  * @retval 0 success
514  * @retval ENOENT The end of the EROM table was reached.
515  * @retval non-zero The read could not be completed.
516  */
517 static int
518 bcma_erom_skip32(struct bcma_erom *erom)
519 {
520 	uint32_t	entry;
521 
	return (bcma_erom_read32(erom, &entry));
523 }
524 
525 /**
526  * Read and discard a core descriptor from the EROM table.
527  *
528  * @param erom EROM read state.
529  * @retval 0 success
530  * @retval ENOENT The end of the EROM table was reached.
531  * @retval non-zero The read could not be completed.
532  */
533 static int
534 bcma_erom_skip_core(struct bcma_erom *erom)
535 {
536 	struct bcma_erom_core core;
537 	return (bcma_erom_parse_core(erom, &core));
538 }
539 
540 /**
541  * Read and discard a master port descriptor from the EROM table.
542  *
543  * @param erom EROM read state.
544  * @retval 0 success
545  * @retval ENOENT The end of the EROM table was reached.
546  * @retval non-zero The read could not be completed.
547  */
548 static int
549 bcma_erom_skip_mport(struct bcma_erom *erom)
550 {
551 	struct bcma_erom_mport mp;
552 	return (bcma_erom_parse_mport(erom, &mp));
553 }
554 
555 /**
556  * Read and discard a port region descriptor from the EROM table.
557  *
558  * @param erom EROM read state.
559  * @retval 0 success
560  * @retval ENOENT The end of the EROM table was reached.
561  * @retval non-zero The read could not be completed.
562  */
563 static int
564 bcma_erom_skip_sport_region(struct bcma_erom *erom)
565 {
566 	struct bcma_erom_sport_region r;
567 	return (bcma_erom_parse_sport_region(erom, &r));
568 }
569 
570 /**
571  * Seek to the next entry matching the given EROM entry type.
572  *
573  * @param erom EROM read state.
574  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
575  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
576  * @retval 0 success
577  * @retval ENOENT The end of the EROM table was reached.
578  * @retval non-zero Reading or parsing the descriptor failed.
579  */
580 static int
581 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
582 {
583 	uint32_t			entry;
584 	int				error;
585 
586 	/* Iterate until we hit an entry matching the requested type. */
587 	while (!(error = bcma_erom_peek32(erom, &entry))) {
588 		/* Handle EOF */
589 		if (entry == BCMA_EROM_TABLE_EOF)
590 			return (ENOENT);
591 
592 		/* Invalid entry */
593 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
594 			return (EINVAL);
595 
596 		/* Entry type matches? */
597 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
598 			return (0);
599 
600 		/* Skip non-matching entry types. */
601 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
602 		case BCMA_EROM_ENTRY_TYPE_CORE:
603 			if ((error = bcma_erom_skip_core(erom)))
604 				return (error);
605 
606 			break;
607 
608 		case BCMA_EROM_ENTRY_TYPE_MPORT:
609 			if ((error = bcma_erom_skip_mport(erom)))
610 				return (error);
611 
612 			break;
613 
614 		case BCMA_EROM_ENTRY_TYPE_REGION:
615 			if ((error = bcma_erom_skip_sport_region(erom)))
616 				return (error);
617 			break;
618 
619 		default:
620 			/* Unknown entry type! */
621 			return (EINVAL);
622 		}
623 	}
624 
625 	return (error);
626 }
627 
628 /**
629  * Return the read position to the start of the EROM table.
630  *
631  * @param erom EROM read state.
632  */
633 static void
634 bcma_erom_reset(struct bcma_erom *erom)
635 {
636 	erom->offset = 0;
637 }
638 
639 /**
640  * Seek to the first core entry matching @p desc.
641  *
 * @param sc EROM read state.
643  * @param desc The core match descriptor.
644  * @param[out] core On success, the matching core info. If the core info
645  * is not desired, a NULL pointer may be provided.
646  * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before a matching
 * core was found.
649  * @retval non-zero Reading or parsing failed.
650  */
651 static int
652 bcma_erom_seek_matching_core(struct bcma_erom *sc,
653     const struct bhnd_core_match *desc, struct bhnd_core_info *core)
654 {
655 	struct bhnd_core_match	 imatch;
656 	bus_size_t		 core_offset, next_offset;
657 	int			 error;
658 
659 	/* Seek to table start. */
660 	bcma_erom_reset(sc);
661 
662 	/* We can't determine a core's unit number during the initial scan. */
663 	imatch = *desc;
664 	imatch.m.match.core_unit = 0;
665 
666 	/* Locate the first matching core */
667 	for (u_int i = 0; i < UINT_MAX; i++) {
668 		struct bcma_erom_core	ec;
669 		struct bhnd_core_info	ci;
670 
671 		/* Seek to the next core */
672 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
673 		if (error)
674 			return (error);
675 
676 		/* Save the core offset */
677 		core_offset = bcma_erom_tell(sc);
678 
679 		/* Parse the core */
680 		if ((error = bcma_erom_parse_core(sc, &ec)))
681 			return (error);
682 
683 		bcma_erom_to_core_info(&ec, i, 0, &ci);
684 
685 		/* Check for initial match */
686 		if (!bhnd_core_matches(&ci, &imatch))
687 			continue;
688 
689 		/* Re-scan preceding cores to determine the unit number. */
690 		next_offset = bcma_erom_tell(sc);
691 		bcma_erom_reset(sc);
692 		for (u_int j = 0; j < i; j++) {
693 			/* Parse the core */
694 			error = bcma_erom_seek_next(sc,
695 			    BCMA_EROM_ENTRY_TYPE_CORE);
696 			if (error)
697 				return (error);
698 
699 			if ((error = bcma_erom_parse_core(sc, &ec)))
700 				return (error);
701 
702 			/* Bump the unit number? */
703 			if (ec.vendor == ci.vendor && ec.device == ci.device)
704 				ci.unit++;
705 		}
706 
707 		/* Check for full match against now-valid unit number */
708 		if (!bhnd_core_matches(&ci, desc)) {
709 			/* Reposition to allow reading the next core */
710 			bcma_erom_seek(sc, next_offset);
711 			continue;
712 		}
713 
714 		/* Found; seek to the core's initial offset and provide
715 		 * the core info to the caller */
716 		bcma_erom_seek(sc, core_offset);
717 		if (core != NULL)
718 			*core = ci;
719 
720 		return (0);
721 	}
722 
	/* Not found, or a parse error occurred */
724 	return (error);
725 }
726 
727 /**
728  * Read the next core descriptor from the EROM table.
729  *
730  * @param erom EROM read state.
731  * @param[out] core On success, will be populated with the parsed core
732  * descriptor data.
733  * @retval 0 success
734  * @retval ENOENT The end of the EROM table was reached.
735  * @retval non-zero Reading or parsing the core descriptor failed.
736  */
737 static int
738 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
739 {
740 	uint32_t	entry;
741 	int		error;
742 
743 	/* Parse CoreDescA */
744 	if ((error = bcma_erom_read32(erom, &entry)))
745 		return (error);
746 
747 	/* Handle EOF */
748 	if (entry == BCMA_EROM_TABLE_EOF)
749 		return (ENOENT);
750 
751 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
752 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
		    entry, bcma_erom_entry_type_name(entry));
754 
755 		return (EINVAL);
756 	}
757 
758 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
759 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
760 
761 	/* Parse CoreDescB */
762 	if ((error = bcma_erom_read32(erom, &entry)))
763 		return (error);
764 
765 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
766 		return (EINVAL);
767 	}
768 
769 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
770 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
771 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
772 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
773 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
774 
775 	return (0);
776 }
777 
778 /**
779  * Read the next master port descriptor from the EROM table.
780  *
781  * @param erom EROM read state.
782  * @param[out] mport On success, will be populated with the parsed
783  * descriptor data.
784  * @retval 0 success
785  * @retval non-zero Reading or parsing the descriptor failed.
786  */
787 static int
788 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
789 {
790 	uint32_t	entry;
791 	int		error;
792 
793 	/* Parse the master port descriptor */
794 	if ((error = bcma_erom_read32(erom, &entry)))
795 		return (error);
796 
797 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
798 		return (EINVAL);
799 
800 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
801 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
802 
803 	return (0);
804 }
805 
806 /**
807  * Read the next slave port region descriptor from the EROM table.
808  *
809  * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * region descriptor data.
812  * @retval 0 success
813  * @retval ENOENT The end of the region descriptor table was reached.
814  * @retval non-zero Reading or parsing the descriptor failed.
815  */
816 static int
817 bcma_erom_parse_sport_region(struct bcma_erom *erom,
818     struct bcma_erom_sport_region *region)
819 {
820 	uint32_t	entry;
821 	uint8_t		size_type;
822 	int		error;
823 
824 	/* Peek at the region descriptor */
825 	if (bcma_erom_peek32(erom, &entry))
826 		return (EINVAL);
827 
828 	/* A non-region entry signals the end of the region table */
829 	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
830 		return (ENOENT);
831 	} else {
832 		bcma_erom_skip32(erom);
833 	}
834 
835 	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
836 	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
837 	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
838 	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
839 
840 	/* If region address is 64-bit, fetch the high bits. */
841 	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
842 		if ((error = bcma_erom_read32(erom, &entry)))
843 			return (error);
844 
845 		region->base_addr |= ((bhnd_addr_t) entry << 32);
846 	}
847 
848 	/* Parse the region size; it's either encoded as the binary logarithm
	 * of the number of 4K pages (i.e. log2 n), or it's encoded as a
850 	 * 32-bit/64-bit literal value directly following the current entry. */
851 	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
852 		if ((error = bcma_erom_read32(erom, &entry)))
853 			return (error);
854 
855 		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
856 
857 		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
858 			if ((error = bcma_erom_read32(erom, &entry)))
859 				return (error);
860 			region->size |= ((bhnd_size_t) entry << 32);
861 		}
862 	} else {
863 		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
864 	}
865 
866 	/* Verify that addr+size does not overflow. */
867 	if (region->size != 0 &&
868 	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
869 	{
870 		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
871 		    bcma_erom_entry_type_name(region->region_type),
872 		    region->region_port,
873 		    (unsigned long long) region->base_addr,
874 		    (unsigned long long) region->size);
875 
876 		return (EINVAL);
877 	}
878 
879 	return (0);
880 }
881 
882 /**
883  * Convert a bcma_erom_core record to its bhnd_core_info representation.
884  *
885  * @param core EROM core record to convert.
886  * @param core_idx The core index of @p core.
887  * @param core_unit The core unit of @p core.
888  * @param[out] info The populated bhnd_core_info representation.
889  */
890 static void
891 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
892     int core_unit, struct bhnd_core_info *info)
893 {
894 	info->vendor = core->vendor;
895 	info->device = core->device;
896 	info->hwrev = core->rev;
897 	info->core_idx = core_idx;
898 	info->unit = core_unit;
899 }
900 
901 /**
902  * Map an EROM region type to its corresponding port type.
903  *
 * @param erom EROM read state; used for error logging.
 * @param region_type Region type value.
 * @param[out] port_type On success, the corresponding port type.
 * @retval 0 success
 * @retval EINVAL The region type is unsupported.
906  */
907 static int
908 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
909     bhnd_port_type *port_type)
910 {
911 	switch (region_type) {
912 	case BCMA_EROM_REGION_TYPE_DEVICE:
913 		*port_type = BHND_PORT_DEVICE;
914 		return (0);
915 	case BCMA_EROM_REGION_TYPE_BRIDGE:
916 		*port_type = BHND_PORT_BRIDGE;
917 		return (0);
918 	case BCMA_EROM_REGION_TYPE_MWRAP:
919 	case BCMA_EROM_REGION_TYPE_SWRAP:
920 		*port_type = BHND_PORT_AGENT;
921 		return (0);
922 	default:
923 		EROM_LOG(erom, "unsupported region type %hhx\n",
924 			region_type);
925 		return (EINVAL);
926 	}
927 }
928 
929 /**
930  * Register all MMIO region descriptors for the given slave port.
931  *
932  * @param erom EROM read state.
933  * @param corecfg Core info to be populated with the scanned port regions.
934  * @param port_num Port index for which regions will be parsed.
935  * @param region_type The region type to be parsed.
 * On success, the EROM read position is left at the first entry following
 * the parsed region descriptors.
 *
 * @retval 0 success
 * @retval non-zero Reading or parsing the region descriptors failed.
938  */
939 static int
940 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
941     struct bcma_corecfg *corecfg, bcma_pid_t port_num,
942     uint8_t region_type)
943 {
944 	struct bcma_sport	*sport;
945 	struct bcma_sport_list	*sports;
946 	bus_size_t		 entry_offset;
947 	int			 error;
948 	bhnd_port_type		 port_type;
949 
950 	error = 0;
951 
952 	/* Determine the port type for this region type. */
953 	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
954 	if (error)
955 		return (error);
956 
957 	/* Fetch the list to be populated */
958 	sports = bcma_corecfg_get_port_list(corecfg, port_type);
959 
960 	/* Allocate a new port descriptor */
961 	sport = bcma_alloc_sport(port_num, port_type);
962 	if (sport == NULL)
963 		return (ENOMEM);
964 
965 	/* Read all address regions defined for this port */
966 	for (bcma_rmid_t region_num = 0;; region_num++) {
967 		struct bcma_map			*map;
968 		struct bcma_erom_sport_region	 spr;
969 
970 		/* No valid port definition should come anywhere near
971 		 * BCMA_RMID_MAX. */
972 		if (region_num == BCMA_RMID_MAX) {
973 			EROM_LOG(erom, "core%u %s%u: region count reached "
974 			    "upper limit of %u\n",
975 			    corecfg->core_info.core_idx,
976 			    bhnd_port_type_name(port_type),
977 			    port_num, BCMA_RMID_MAX);
978 
979 			error = EINVAL;
980 			goto cleanup;
981 		}
982 
983 		/* Parse the next region entry. */
984 		entry_offset = bcma_erom_tell(erom);
985 		error = bcma_erom_parse_sport_region(erom, &spr);
986 		if (error && error != ENOENT) {
987 			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
988 			    "address region\n",
989 			    corecfg->core_info.core_idx,
990 			    bhnd_port_type_name(port_type),
991 			    port_num, region_num);
992 			goto cleanup;
993 		}
994 
995 		/* ENOENT signals no further region entries */
996 		if (error == ENOENT) {
997 			/* No further entries */
998 			error = 0;
999 			break;
1000 		}
1001 
1002 		/* A region or type mismatch also signals no further region
1003 		 * entries */
1004 		if (spr.region_port != port_num ||
1005 		    spr.region_type != region_type)
1006 		{
1007 			/* We don't want to consume this entry */
1008 			bcma_erom_seek(erom, entry_offset);
1009 
1010 			error = 0;
1011 			goto cleanup;
1012 		}
1013 
1014 		/*
1015 		 * Create the map entry.
1016 		 */
1017 		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1018 		if (map == NULL) {
1019 			error = ENOMEM;
1020 			goto cleanup;
1021 		}
1022 
1023 		map->m_region_num = region_num;
1024 		map->m_base = spr.base_addr;
1025 		map->m_size = spr.size;
1026 		map->m_rid = -1;
1027 
1028 		/* Add the region map to the port */
1029 		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1030 		sport->sp_num_maps++;
1031 	}
1032 
1033 cleanup:
1034 	/* Append the new port descriptor on success, or deallocate the
1035 	 * partially parsed descriptor on failure. */
1036 	if (error == 0) {
1037 		STAILQ_INSERT_TAIL(sports, sport, sp_link);
1038 	} else if (sport != NULL) {
1039 		bcma_free_sport(sport);
1040 	}
1041 
	return (error);
1043 }
1044 
1045 /**
1046  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1047  * to be owned by the caller.
1048  *
1049  * @param erom A bcma EROM instance.
1050  * @param[out] result On success, the core's device info. The caller inherits
1051  * ownership of this allocation.
1052  *
1053  * @return If successful, returns 0. If the end of the EROM table is hit,
1054  * ENOENT will be returned. On error, returns a non-zero error value.
1055  */
1056 int
1057 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1058 {
1059 	struct bcma_corecfg	*cfg;
1060 	struct bcma_erom_core	 core;
1061 	uint8_t			 first_region_type;
1062 	bus_size_t		 initial_offset;
1063 	u_int			 core_index;
1064 	int			 core_unit;
1065 	int			 error;
1066 
1067 	cfg = NULL;
1068 	initial_offset = bcma_erom_tell(erom);
1069 
1070 	/* Parse the next core entry */
1071 	if ((error = bcma_erom_parse_core(erom, &core)))
1072 		return (error);
1073 
1074 	/* Determine the core's index and unit numbers */
1075 	bcma_erom_reset(erom);
1076 	core_unit = 0;
1077 	core_index = 0;
1078 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1079 		struct bcma_erom_core prev_core;
1080 
1081 		/* Parse next core */
1082 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1083 		if (error)
1084 			return (error);
1085 
1086 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1087 			return (error);
1088 
		/* Earlier unit of the same vendor/device? */
1090 		if (core.vendor == prev_core.vendor &&
1091 		    core.device == prev_core.device)
1092 		{
1093 			core_unit++;
1094 		}
1095 
1096 		/* Seek to next core */
1097 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1098 		if (error)
1099 			return (error);
1100 	}
1101 
	/* Skip the core descriptor we already parsed above */
1103 	if ((error = bcma_erom_skip_core(erom)))
1104 		return (error);
1105 
1106 	/* Allocate our corecfg */
1107 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1108 	    core.device, core.rev);
1109 	if (cfg == NULL)
1110 		return (ENOMEM);
1111 
1112 	/* These are 5-bit values in the EROM table, and should never be able
1113 	 * to overflow BCMA_PID_MAX. */
1114 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1115 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1116 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1117 	    ("unsupported wport count"));
1118 
1119 	if (bootverbose) {
1120 		EROM_LOG(erom,
1121 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1122 		    core_index,
1123 		    bhnd_vendor_name(core.vendor),
1124 		    bhnd_find_core_name(core.vendor, core.device),
1125 		    core.device, core.rev, core_unit);
1126 	}
1127 
1128 	cfg->num_master_ports = core.num_mport;
1129 	cfg->num_dev_ports = 0;		/* determined below */
	cfg->num_bridge_ports = 0;	/* determined below */
1131 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1132 
1133 	/* Parse Master Port Descriptors */
1134 	for (uint8_t i = 0; i < core.num_mport; i++) {
1135 		struct bcma_mport	*mport;
1136 		struct bcma_erom_mport	 mpd;
1137 
1138 		/* Parse the master port descriptor */
1139 		error = bcma_erom_parse_mport(erom, &mpd);
1140 		if (error)
1141 			goto failed;
1142 
1143 		/* Initialize a new bus mport structure */
1144 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1145 		if (mport == NULL) {
1146 			error = ENOMEM;
1147 			goto failed;
1148 		}
1149 
1150 		mport->mp_vid = mpd.port_vid;
1151 		mport->mp_num = mpd.port_num;
1152 
1153 		/* Update dinfo */
1154 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1155 	}
1156 
1157 	/*
1158 	 * Determine whether this is a bridge device; if so, we can
1159 	 * expect the first sequence of address region descriptors to
	 * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
1161 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1162 	 *
1163 	 * It's unclear whether this is the correct mechanism by which we
1164 	 * should detect/handle bridge devices, but this approach matches
1165 	 * that of (some of) Broadcom's published drivers.
1166 	 */
1167 	if (core.num_dport > 0) {
1168 		uint32_t entry;
1169 
1170 		if ((error = bcma_erom_peek32(erom, &entry)))
1171 			goto failed;
1172 
1173 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1174 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1175 		{
1176 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1177 			cfg->num_dev_ports = 0;
1178 			cfg->num_bridge_ports = core.num_dport;
1179 		} else {
1180 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1181 			cfg->num_dev_ports = core.num_dport;
1182 			cfg->num_bridge_ports = 0;
1183 		}
1184 	}
1185 
1186 	/* Device/bridge port descriptors */
1187 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1188 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1189 		    first_region_type);
1190 
1191 		if (error)
1192 			goto failed;
1193 	}
1194 
1195 	/* Wrapper (aka device management) descriptors (for master ports). */
1196 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1197 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1198 		    BCMA_EROM_REGION_TYPE_MWRAP);
1199 
1200 		if (error)
1201 			goto failed;
1202 	}
1203 
1204 	/* Wrapper (aka device management) descriptors (for slave ports). */
1205 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1206 		/* Slave wrapper ports are not numbered distinctly from master
1207 		 * wrapper ports. */
1208 
1209 		/*
1210 		 * Broadcom DDR1/DDR2 Memory Controller
1211 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1212 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1213 		 *
1214 		 * ARM BP135 AMBA3 AXI to APB Bridge
1215 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1216 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1217 		 *
1218 		 * core.num_mwrap
1219 		 * ===>
1220 		 * (core.num_mwrap > 0) ?
1221 		 *           core.num_mwrap :
1222 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1223 		 */
1224 		uint8_t sp_num;
		sp_num = ((core.num_mwrap > 0) ?
				core.num_mwrap :
				((core.vendor == BHND_MFGID_BCM) ? 1 : 0)) + i;
1228 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1229 		    BCMA_EROM_REGION_TYPE_SWRAP);
1230 
1231 		if (error)
1232 			goto failed;
1233 	}
1234 
1235 	/*
1236 	 * Seek to the next core entry (if any), skipping any dangling/invalid
1237 	 * region entries.
1238 	 *
1239 	 * On the BCM4706, the EROM entry for the memory controller core
1240 	 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1241 	 * descriptor.
1242 	 */
1243 	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1244 		if (error != ENOENT)
1245 			goto failed;
1246 	}
1247 
1248 	*result = cfg;
1249 	return (0);
1250 
1251 failed:
1252 	if (cfg != NULL)
1253 		bcma_free_corecfg(cfg);
1254 
	return (error);
1256 }
1257 
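/* BCMA implementation of BHND_EROM_DUMP() */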
1258 static int
1259 bcma_erom_dump(bhnd_erom_t *erom)
1260 {
1261 	struct bcma_erom	*sc;
1262 	uint32_t		entry;
1263 	int			error;
1264 
1265 	sc = (struct bcma_erom *)erom;
1266 
1267 	bcma_erom_reset(sc);
1268 
1269 	while (!(error = bcma_erom_read32(sc, &entry))) {
1270 		/* Handle EOF */
1271 		if (entry == BCMA_EROM_TABLE_EOF) {
1272 			EROM_LOG(sc, "EOF\n");
1273 			return (0);
1274 		}
1275 
1276 		/* Invalid entry */
1277 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1278 			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1279 			return (EINVAL);
1280 		}
1281 
1282 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1283 		case BCMA_EROM_ENTRY_TYPE_CORE: {
1284 			/* CoreDescA */
1285 			EROM_LOG(sc, "coreA (0x%x)\n", entry);
1286 			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1287 			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1288 			EROM_LOG(sc, "\tid:\t\t0x%x\n",
1289 			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
1290 			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1291 			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1292 
1293 			/* CoreDescB */
1294 			if ((error = bcma_erom_read32(sc, &entry))) {
1295 				EROM_LOG(sc, "error reading CoreDescB: %d\n",
1296 				    error);
1297 				return (error);
1298 			}
1299 
1300 			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1301 				EROM_LOG(sc, "invalid core descriptor; found "
1302 				    "unexpected entry %#x (type=%s)\n",
1303 				    entry, bcma_erom_entry_type_name(entry));
1304 				return (EINVAL);
1305 			}
1306 
1307 			EROM_LOG(sc, "coreB (0x%x)\n", entry);
1308 			EROM_LOG(sc, "\trev:\t0x%x\n",
1309 			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
1310 			EROM_LOG(sc, "\tnummp:\t0x%x\n",
1311 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1312 			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1313 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1314 			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1315 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1316 			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP));
1318 
1319 			break;
1320 		}
1321 		case BCMA_EROM_ENTRY_TYPE_MPORT:
1322 			EROM_LOG(sc, "\tmport 0x%x\n", entry);
1323 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1324 			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1325 			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1326 			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1327 			break;
1328 
1329 		case BCMA_EROM_ENTRY_TYPE_REGION: {
1330 			bool	addr64;
1331 			uint8_t	size_type;
1332 
1333 			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1334 			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1335 
1336 			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1337 			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1338 			    addr64 ? "baselo" : "base",
1339 			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1340 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1341 			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1342 			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1343 			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1344 			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1345 
1346 			/* Read the base address high bits */
1347 			if (addr64) {
1348 				if ((error = bcma_erom_read32(sc, &entry))) {
1349 					EROM_LOG(sc, "error reading region "
1350 					    "base address high bits %d\n",
1351 					    error);
1352 					return (error);
1353 				}
1354 
1355 				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1356 			}
1357 
1358 			/* Read extended size descriptor */
1359 			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1360 				bool size64;
1361 
1362 				if ((error = bcma_erom_read32(sc, &entry))) {
1363 					EROM_LOG(sc, "error reading region "
1364 					    "size descriptor %d\n",
1365 					    error);
1366 					return (error);
1367 				}
1368 
1369 				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1370 					size64 = true;
1371 				else
1372 					size64 = false;
1373 
1374 				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1375 				    size64 ? "sizelo" : "size",
1376 				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1377 
1378 				if (size64) {
1379 					error = bcma_erom_read32(sc, &entry);
1380 					if (error) {
1381 						EROM_LOG(sc, "error reading "
1382 						    "region size high bits: "
1383 						    "%d\n", error);
1384 						return (error);
1385 					}
1386 
1387 					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1388 					    entry);
1389 				}
1390 			}
1391 			break;
1392 		}
1393 
1394 		default:
1395 			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1396 			    entry, bcma_erom_entry_type_name(entry));
1397 			return (EINVAL);
1398 		}
1399 	}
1400 
1401 	if (error == ENOENT)
1402 		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1403 	else if (error)
1404 		EROM_LOG(sc, "EROM read failed: %d\n", error);
1405 
1406 	return (error);
1407 }
1408 
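/* BHND EROM interface method table */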
1409 static kobj_method_t bcma_erom_methods[] = {
1410 	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
1411 	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
1412 	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
1413 	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
1414 	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
1415 	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
1416 	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
1417 	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),
1418 
1419 	KOBJMETHOD_END
1420 };
1421 
1422 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1423