1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
5  * Copyright (c) 2017 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Landon Fuller
9  * under sponsorship from the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19  *    redistribution must be conditioned upon including a substantially
20  *    similar Disclaimer requirement for further binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGES.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/bus.h>
41 #include <sys/kernel.h>
42 #include <sys/limits.h>
43 #include <sys/systm.h>
44 
45 #include <machine/bus.h>
46 #include <machine/resource.h>
47 
48 #include <dev/bhnd/bhnd_eromvar.h>
49 
50 #include "bcma_eromreg.h"
51 #include "bcma_eromvar.h"
52 
53 /*
54  * BCMA Enumeration ROM (EROM) Table
55  *
56  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
57  *
58  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
59  * ChipCommon registers. The table itself is comprised of 32-bit
60  * type-tagged entries, organized into an array of variable-length
61  * core descriptor records.
62  *
63  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
64  * marker.
65  */
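
/*
 * Each core descriptor record consists of two type-tagged core entries
 * (CoreDescA and CoreDescB), followed by the core's master port
 * descriptors and its slave/bridge/wrapper port region descriptors, as
 * parsed by the routines below.
 *
 * Illustrative sketch (not compiled): a simplified walk of the type-tagged
 * entry stream over an already-mapped table, using the accessor macros and
 * the bhnd_erom_io_read() callback employed throughout this file. The real
 * parsers below additionally consume the untagged 64-bit address/size
 * words that may follow a region entry.
 *
 *	bus_size_t	offset;
 *	uint32_t	entry;
 *
 *	for (offset = 0; offset < BCMA_EROM_TABLE_SIZE; offset += 4) {
 *		entry = bhnd_erom_io_read(eio, offset, 4);
 *
 *		if (entry == BCMA_EROM_TABLE_EOF)
 *			break;
 *		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
 *			break;
 *
 *		printf("entry at %#x: %s\n", (unsigned)offset,
 *		    bcma_erom_entry_type_name(entry));
 *	}
 */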
66 
67 static const char	*bcma_erom_entry_type_name(uint8_t entry);
68 
69 static int		 bcma_erom_read32(struct bcma_erom *erom,
70 			     uint32_t *entry);
71 static int		 bcma_erom_skip32(struct bcma_erom *erom);
72 
73 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
74 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
75 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
76 
77 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
78 			     uint8_t etype);
79 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
80 			     uint8_t region_type, bhnd_port_type *port_type);
81 
82 
83 static int		 bcma_erom_peek32(struct bcma_erom *erom,
84 			     uint32_t *entry);
85 
86 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
87 static void		 bcma_erom_seek(struct bcma_erom *erom,
88 			     bus_size_t offset);
89 static void		 bcma_erom_reset(struct bcma_erom *erom);
90 
91 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
92 			     const struct bhnd_core_match *desc,
93 			     struct bhnd_core_info *core);
94 
95 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
96 			     struct bcma_erom_core *core);
97 
98 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
99 			     struct bcma_erom_mport *mport);
100 
101 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
102 			     struct bcma_erom_sport_region *region);
103 
104 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
105 			     u_int core_idx, int core_unit,
106 			     struct bhnd_core_info *info);
107 
108 /**
109  * BCMA EROM per-instance state.
110  */
111 struct bcma_erom {
112 	struct bhnd_erom	 obj;
113 	device_t	 	 dev;		/**< parent device, or NULL if none. */
114 	struct bhnd_erom_io	*eio;		/**< bus I/O callbacks */
115 	bhnd_size_t	 	 offset;	/**< current read offset */
116 };
117 
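/**
 * Log an EROM diagnostic message, prefixed with the calling function name
 * and the current EROM read offset.
 */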
118 #define	EROM_LOG(erom, fmt, ...)	do {			\
119 	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
120 	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
121 } while(0)
122 
123 /** Return the type name for an EROM entry */
124 static const char *
125 bcma_erom_entry_type_name(uint8_t entry)
126 {
127 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
128 	case BCMA_EROM_ENTRY_TYPE_CORE:
129 		return "core";
130 	case BCMA_EROM_ENTRY_TYPE_MPORT:
131 		return "mport";
132 	case BCMA_EROM_ENTRY_TYPE_REGION:
133 		return "region";
134 	default:
135 		return "unknown";
136 	}
137 }
138 
139 /* BCMA implementation of BHND_EROM_INIT() */
140 static int
141 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
142     struct bhnd_erom_io *eio)
143 {
144 	struct bcma_erom	*sc;
145 	bhnd_addr_t		 table_addr;
146 	int			 error;
147 
148 	sc = (struct bcma_erom *)erom;
149 	sc->eio = eio;
150 	sc->offset = 0;
151 
152 	/* Determine erom table address */
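	/* (The sum enum_addr + BCMA_EROM_TABLE_START wraps past BHND_ADDR_MAX
	 * exactly when BHND_ADDR_MAX - BCMA_EROM_TABLE_START < enum_addr,
	 * letting us reject the overflow without computing the wrapping
	 * sum itself.) */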
153 	if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
154 		return (ENXIO); /* would overflow */
155 
156 	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
157 
158 	/* Try to map the erom table */
159 	error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
160 	if (error)
161 		return (error);
162 
163 	return (0);
164 }
165 
166 /* BCMA implementation of BHND_EROM_PROBE() */
167 static int
168 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
169     const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
170 {
171 	int error;
172 
173 	/* Hints aren't supported; all BCMA devices have a ChipCommon
174 	 * core */
175 	if (hint != NULL)
176 		return (EINVAL);
177 
178 	/* Read and parse chip identification */
179 	if ((error = bhnd_erom_read_chipid(eio, cid)))
180 		return (error);
181 
182 	/* Verify chip type */
183 	switch (cid->chip_type) {
184 		case BHND_CHIPTYPE_BCMA:
185 			return (BUS_PROBE_DEFAULT);
186 
187 		case BHND_CHIPTYPE_BCMA_ALT:
188 		case BHND_CHIPTYPE_UBUS:
189 			return (BUS_PROBE_GENERIC);
190 
191 		default:
192 			return (ENXIO);
193 	}
194 }
195 
196 static void
197 bcma_erom_fini(bhnd_erom_t *erom)
198 {
199 	struct bcma_erom *sc = (struct bcma_erom *)erom;
200 
201 	bhnd_erom_io_fini(sc->eio);
202 }
203 
204 static int
205 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
206     struct bhnd_core_info *core)
207 {
208 	struct bcma_erom *sc = (struct bcma_erom *)erom;
209 
210 	/* Search for the first matching core */
211 	return (bcma_erom_seek_matching_core(sc, desc, core));
212 }
213 
214 static int
215 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
216     bhnd_port_type port_type, u_int port_num, u_int region_num,
217     struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
218 {
219 	struct bcma_erom	*sc;
220 	struct bcma_erom_core	 ec;
221 	uint32_t		 entry;
222 	uint8_t			 region_port, region_type;
223 	bool			 found;
224 	int			 error;
225 
226 	sc = (struct bcma_erom *)erom;
227 
228 	/* Seek to the first matching core and provide the core info
229 	 * to the caller */
230 	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
231 		return (error);
232 
233 	if ((error = bcma_erom_parse_core(sc, &ec)))
234 		return (error);
235 
236 	/* Skip master ports */
237 	for (u_long i = 0; i < ec.num_mport; i++) {
238 		if ((error = bcma_erom_skip_mport(sc)))
239 			return (error);
240 	}
241 
242 	/* Seek to the region block for the given port type */
243 	found = false;
244 	while (1) {
245 		bhnd_port_type	p_type;
246 		uint8_t		r_type;
247 
248 		if ((error = bcma_erom_peek32(sc, &entry)))
249 			return (error);
250 
251 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
252 			return (ENOENT);
253 
254 		/* Expected region type? */
255 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
256 		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
257 		if (error)
258 			return (error);
259 
260 		if (p_type == port_type) {
261 			found = true;
262 			break;
263 		}
264 
265 		/* Skip to next entry */
266 		if ((error = bcma_erom_skip_sport_region(sc)))
267 			return (error);
268 	}
269 
270 	if (!found)
271 		return (ENOENT);
272 
273 	/* Found the appropriate port type block; now find the region records
274 	 * for the given port number */
275 	found = false;
276 	for (u_int i = 0; i <= port_num; i++) {
277 		bhnd_port_type	p_type;
278 
279 		if ((error = bcma_erom_peek32(sc, &entry)))
280 			return (error);
281 
282 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
283 			return (ENOENT);
284 
285 		/* Fetch the type/port of the first region entry */
286 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
287 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
288 
289 		/* Have we found the region entries for the desired port? */
290 		if (i == port_num) {
291 			error = bcma_erom_region_to_port_type(sc, region_type,
292 			    &p_type);
293 			if (error)
294 				return (error);
295 
296 			if (p_type == port_type)
297 				found = true;
298 
299 			break;
300 		}
301 
302 		/* Otherwise, seek to next block of region records */
303 		while (1) {
304 			uint8_t	next_type, next_port;
305 
306 			if ((error = bcma_erom_skip_sport_region(sc)))
307 				return (error);
308 
309 			if ((error = bcma_erom_peek32(sc, &entry)))
310 				return (error);
311 
312 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
313 				return (ENOENT);
314 
315 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
316 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
317 
318 			if (next_type != region_type ||
319 			    next_port != region_port)
320 				break;
321 		}
322 	}
323 
324 	if (!found)
325 		return (ENOENT);
326 
327 	/* Finally, search for the requested region number */
328 	for (u_int i = 0; i <= region_num; i++) {
329 		struct bcma_erom_sport_region	region;
330 		uint8_t				next_port, next_type;
331 
332 		if ((error = bcma_erom_peek32(sc, &entry)))
333 			return (error);
334 
335 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
336 			return (ENOENT);
337 
338 		/* Check for the end of the region block */
339 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
340 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
341 
342 		if (next_type != region_type ||
343 		    next_port != region_port)
344 			break;
345 
346 		/* Parse the region */
347 		if ((error = bcma_erom_parse_sport_region(sc, &region)))
348 			return (error);
349 
350 		/* Is this our target region_num? */
351 		if (i == region_num) {
352 			/* Found */
353 			*addr = region.base_addr;
354 			*size = region.size;
355 			return (0);
356 		}
357 	}
358 
359 	/* Not found */
360 	return (ENOENT);
361 }
362 
363 static int
364 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
365     u_int *num_cores)
366 {
367 	struct bcma_erom	*sc;
368 	struct bhnd_core_info	*buffer;
369 	bus_size_t		 initial_offset;
370 	u_int			 count;
371 	int			 error;
372 
373 	sc = (struct bcma_erom *)erom;
374 
375 	buffer = NULL;
376 	initial_offset = bcma_erom_tell(sc);
377 
378 	/* Determine the core count */
379 	bcma_erom_reset(sc);
380 	for (count = 0, error = 0; !error; count++) {
381 		struct bcma_erom_core core;
382 
383 		/* Seek to the first readable core entry */
384 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
385 		if (error == ENOENT)
386 			break;
387 		else if (error)
388 			goto cleanup;
389 
390 		/* Read past the core descriptor */
391 		if ((error = bcma_erom_parse_core(sc, &core)))
392 			goto cleanup;
393 	}
394 
395 	/* Allocate our output buffer */
396 	buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
397 	    M_NOWAIT);
398 	if (buffer == NULL) {
399 		error = ENOMEM;
400 		goto cleanup;
401 	}
402 
403 	/* Parse all core descriptors */
404 	bcma_erom_reset(sc);
405 	for (u_int i = 0; i < count; i++) {
406 		struct bcma_erom_core	core;
407 		int			unit;
408 
409 		/* Parse the core */
410 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
411 		if (error)
412 			goto cleanup;
413 
414 		error = bcma_erom_parse_core(sc, &core);
415 		if (error)
416 			goto cleanup;
417 
418 		/* Determine the unit number */
419 		unit = 0;
420 		for (u_int j = 0; j < i; j++) {
421 			if (core.vendor == buffer[j].vendor &&
422 			    core.device == buffer[j].device)
423 				unit++;
424 		}
425 
426 		/* Convert to a bhnd info record */
427 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
428 	}
429 
430 cleanup:
431 	if (!error) {
432 		*cores = buffer;
433 		*num_cores = count;
434 	} else {
435 		if (buffer != NULL)
436 			free(buffer, M_BHND);
437 	}
438 
439 	/* Restore the initial position */
440 	bcma_erom_seek(sc, initial_offset);
441 	return (error);
442 }
443 
444 static void
445 bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
446 {
447 	free(cores, M_BHND);
448 }
449 
450 /**
451  * Return the current read position.
452  */
453 static bus_size_t
454 bcma_erom_tell(struct bcma_erom *erom)
455 {
456 	return (erom->offset);
457 }
458 
459 /**
460  * Seek to an absolute read position.
461  */
462 static void
463 bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
464 {
465 	erom->offset = offset;
466 }
467 
468 /**
469  * Read a 32-bit entry value from the EROM table without advancing the
470  * read position.
471  *
472  * @param erom EROM read state.
473  * @param entry Will contain the read result on success.
474  * @retval 0 success
475  * @retval ENOENT The end of the EROM table was reached.
476  * @retval non-zero The read could not be completed.
477  */
478 static int
479 bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
480 {
481 	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
482 		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
483 		return (EINVAL);
484 	}
485 
486 	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
487 	return (0);
488 }
489 
490 /**
491  * Read a 32-bit entry value from the EROM table.
492  *
493  * @param erom EROM read state.
494  * @param entry Will contain the read result on success.
495  * @retval 0 success
496  * @retval ENOENT The end of the EROM table was reached.
497  * @retval non-zero The read could not be completed.
498  */
499 static int
500 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
501 {
502 	int error;
503 
504 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
505 		erom->offset += 4;
506 
507 	return (error);
508 }
509 
510 /**
511  * Read and discard 32-bit entry value from the EROM table.
512  *
513  * @param erom EROM read state.
514  * @retval 0 success
515  * @retval ENOENT The end of the EROM table was reached.
516  * @retval non-zero The read could not be completed.
517  */
518 static int
519 bcma_erom_skip32(struct bcma_erom *erom)
520 {
521 	uint32_t	entry;
522 
523 	return (bcma_erom_read32(erom, &entry));
524 }
525 
526 /**
527  * Read and discard a core descriptor from the EROM table.
528  *
529  * @param erom EROM read state.
530  * @retval 0 success
531  * @retval ENOENT The end of the EROM table was reached.
532  * @retval non-zero The read could not be completed.
533  */
534 static int
535 bcma_erom_skip_core(struct bcma_erom *erom)
536 {
537 	struct bcma_erom_core core;
538 	return (bcma_erom_parse_core(erom, &core));
539 }
540 
541 /**
542  * Read and discard a master port descriptor from the EROM table.
543  *
544  * @param erom EROM read state.
545  * @retval 0 success
546  * @retval ENOENT The end of the EROM table was reached.
547  * @retval non-zero The read could not be completed.
548  */
549 static int
550 bcma_erom_skip_mport(struct bcma_erom *erom)
551 {
552 	struct bcma_erom_mport mp;
553 	return (bcma_erom_parse_mport(erom, &mp));
554 }
555 
556 /**
557  * Read and discard a port region descriptor from the EROM table.
558  *
559  * @param erom EROM read state.
560  * @retval 0 success
561  * @retval ENOENT The end of the EROM table was reached.
562  * @retval non-zero The read could not be completed.
563  */
564 static int
565 bcma_erom_skip_sport_region(struct bcma_erom *erom)
566 {
567 	struct bcma_erom_sport_region r;
568 	return (bcma_erom_parse_sport_region(erom, &r));
569 }
570 
571 /**
572  * Seek to the next entry matching the given EROM entry type.
573  *
574  * @param erom EROM read state.
575  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
576  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
577  * @retval 0 success
578  * @retval ENOENT The end of the EROM table was reached.
579  * @retval non-zero Reading or parsing the descriptor failed.
580  */
581 static int
582 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
583 {
584 	uint32_t			entry;
585 	int				error;
586 
587 	/* Iterate until we hit an entry matching the requested type. */
588 	while (!(error = bcma_erom_peek32(erom, &entry))) {
589 		/* Handle EOF */
590 		if (entry == BCMA_EROM_TABLE_EOF)
591 			return (ENOENT);
592 
593 		/* Invalid entry */
594 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
595 			return (EINVAL);
596 
597 		/* Entry type matches? */
598 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
599 			return (0);
600 
601 		/* Skip non-matching entry types. */
602 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
603 		case BCMA_EROM_ENTRY_TYPE_CORE:
604 			if ((error = bcma_erom_skip_core(erom)))
605 				return (error);
606 
607 			break;
608 
609 		case BCMA_EROM_ENTRY_TYPE_MPORT:
610 			if ((error = bcma_erom_skip_mport(erom)))
611 				return (error);
612 
613 			break;
614 
615 		case BCMA_EROM_ENTRY_TYPE_REGION:
616 			if ((error = bcma_erom_skip_sport_region(erom)))
617 				return (error);
618 			break;
619 
620 		default:
621 			/* Unknown entry type! */
622 			return (EINVAL);
623 		}
624 	}
625 
626 	return (error);
627 }
628 
629 /**
630  * Return the read position to the start of the EROM table.
631  *
632  * @param erom EROM read state.
633  */
634 static void
635 bcma_erom_reset(struct bcma_erom *erom)
636 {
637 	erom->offset = 0;
638 }
639 
640 /**
641  * Seek to the first core entry matching @p desc.
642  *
643  * @param erom EROM read state.
644  * @param desc The core match descriptor.
645  * @param[out] core On success, the matching core info. If the core info
646  * is not desired, a NULL pointer may be provided.
647  * @retval 0 success
648  * @retval ENOENT The end of the EROM table was reached before a core
649  * matching @p desc was found.
650  * @retval non-zero Reading or parsing failed.
651  */
652 static int
653 bcma_erom_seek_matching_core(struct bcma_erom *sc,
654     const struct bhnd_core_match *desc, struct bhnd_core_info *core)
655 {
656 	struct bhnd_core_match	 imatch;
657 	bus_size_t		 core_offset, next_offset;
658 	int			 error;
659 
660 	/* Seek to table start. */
661 	bcma_erom_reset(sc);
662 
663 	/* We can't determine a core's unit number during the initial scan. */
664 	imatch = *desc;
665 	imatch.m.match.core_unit = 0;
666 
667 	/* Locate the first matching core */
668 	for (u_int i = 0; i < UINT_MAX; i++) {
669 		struct bcma_erom_core	ec;
670 		struct bhnd_core_info	ci;
671 
672 		/* Seek to the next core */
673 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
674 		if (error)
675 			return (error);
676 
677 		/* Save the core offset */
678 		core_offset = bcma_erom_tell(sc);
679 
680 		/* Parse the core */
681 		if ((error = bcma_erom_parse_core(sc, &ec)))
682 			return (error);
683 
684 		bcma_erom_to_core_info(&ec, i, 0, &ci);
685 
686 		/* Check for initial match */
687 		if (!bhnd_core_matches(&ci, &imatch))
688 			continue;
689 
690 		/* Re-scan preceding cores to determine the unit number. */
691 		next_offset = bcma_erom_tell(sc);
692 		bcma_erom_reset(sc);
693 		for (u_int j = 0; j < i; j++) {
694 			/* Parse the core */
695 			error = bcma_erom_seek_next(sc,
696 			    BCMA_EROM_ENTRY_TYPE_CORE);
697 			if (error)
698 				return (error);
699 
700 			if ((error = bcma_erom_parse_core(sc, &ec)))
701 				return (error);
702 
703 			/* Bump the unit number? */
704 			if (ec.vendor == ci.vendor && ec.device == ci.device)
705 				ci.unit++;
706 		}
707 
708 		/* Check for full match against now-valid unit number */
709 		if (!bhnd_core_matches(&ci, desc)) {
710 			/* Reposition to allow reading the next core */
711 			bcma_erom_seek(sc, next_offset);
712 			continue;
713 		}
714 
715 		/* Found; seek to the core's initial offset and provide
716 		 * the core info to the caller */
717 		bcma_erom_seek(sc, core_offset);
718 		if (core != NULL)
719 			*core = ci;
720 
721 		return (0);
722 	}
723 
724 	/* Not found, or a parse error occurred */
725 	return (error);
726 }
727 
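/*
 * Illustrative sketch (not compiled) of how a match descriptor drives the
 * lookup above. A zero-filled bhnd_core_match leaves all of its match flags
 * clear and should therefore act as a wildcard, returning the first core in
 * the table; specific matches set the desired field along with its
 * corresponding m.match flag, as the imatch.m.match.core_unit handling above
 * suggests.
 *
 *	struct bhnd_core_match	any;
 *	struct bhnd_core_info	ci;
 *
 *	memset(&any, 0, sizeof(any));	// no match flags set => match any core
 *	if (bcma_erom_seek_matching_core(sc, &any, &ci) == 0)
 *		printf("first core: vendor=0x%hx device=0x%hx unit=%d\n",
 *		    ci.vendor, ci.device, ci.unit);
 */
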
728 /**
729  * Read the next core descriptor from the EROM table.
730  *
731  * @param erom EROM read state.
732  * @param[out] core On success, will be populated with the parsed core
733  * descriptor data.
734  * @retval 0 success
735  * @retval ENOENT The end of the EROM table was reached.
736  * @retval non-zero Reading or parsing the core descriptor failed.
737  */
738 static int
739 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
740 {
741 	uint32_t	entry;
742 	int		error;
743 
744 	/* Parse CoreDescA */
745 	if ((error = bcma_erom_read32(erom, &entry)))
746 		return (error);
747 
748 	/* Handle EOF */
749 	if (entry == BCMA_EROM_TABLE_EOF)
750 		return (ENOENT);
751 
752 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
753 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
754                    entry, bcma_erom_entry_type_name(entry));
755 
756 		return (EINVAL);
757 	}
758 
759 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
760 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
761 
762 	/* Parse CoreDescB */
763 	if ((error = bcma_erom_read32(erom, &entry)))
764 		return (error);
765 
766 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
767 		return (EINVAL);
768 	}
769 
770 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
771 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
772 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
773 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
774 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
775 
776 	return (0);
777 }
778 
779 /**
780  * Read the next master port descriptor from the EROM table.
781  *
782  * @param erom EROM read state.
783  * @param[out] mport On success, will be populated with the parsed
784  * descriptor data.
785  * @retval 0 success
786  * @retval non-zero Reading or parsing the descriptor failed.
787  */
788 static int
789 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
790 {
791 	uint32_t	entry;
792 	int		error;
793 
794 	/* Parse the master port descriptor */
795 	if ((error = bcma_erom_read32(erom, &entry)))
796 		return (error);
797 
798 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
799 		return (EINVAL);
800 
801 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
802 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
803 
804 	return (0);
805 }
806 
807 /**
808  * Read the next slave port region descriptor from the EROM table.
809  *
810  * @param erom EROM read state.
811  * @param[out] region On success, will be populated with the parsed
812  * descriptor data.
813  * @retval 0 success
814  * @retval ENOENT The end of the region descriptor table was reached.
815  * @retval non-zero Reading or parsing the descriptor failed.
816  */
817 static int
818 bcma_erom_parse_sport_region(struct bcma_erom *erom,
819     struct bcma_erom_sport_region *region)
820 {
821 	uint32_t	entry;
822 	uint8_t		size_type;
823 	int		error;
824 
825 	/* Peek at the region descriptor */
826 	if (bcma_erom_peek32(erom, &entry))
827 		return (EINVAL);
828 
829 	/* A non-region entry signals the end of the region table */
830 	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
831 		return (ENOENT);
832 	} else {
833 		bcma_erom_skip32(erom);
834 	}
835 
836 	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
837 	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
838 	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
839 	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
840 
841 	/* If region address is 64-bit, fetch the high bits. */
842 	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
843 		if ((error = bcma_erom_read32(erom, &entry)))
844 			return (error);
845 
846 		region->base_addr |= ((bhnd_addr_t) entry << 32);
847 	}
848 
849 	/* Parse the region size; it's either encoded as the binary logarithm
850 	 * of the number of 4K pages (i.e. log2 n), or it's encoded as a
851 	 * 32-bit/64-bit literal value directly following the current entry. */
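	/* For example, assuming BCMA_EROM_REGION_SIZE_BASE is the 4K page
	 * size implied above, a size_type of 0 encodes a 4K region, 1
	 * encodes 8K, and 4 encodes 64K: BCMA_EROM_REGION_SIZE_BASE <<
	 * size_type. */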
852 	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
853 		if ((error = bcma_erom_read32(erom, &entry)))
854 			return (error);
855 
856 		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);
857 
858 		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
859 			if ((error = bcma_erom_read32(erom, &entry)))
860 				return (error);
861 			region->size |= ((bhnd_size_t) entry << 32);
862 		}
863 	} else {
864 		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
865 	}
866 
867 	/* Verify that addr+size does not overflow. */
868 	if (region->size != 0 &&
869 	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
870 	{
871 		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
872 		    bcma_erom_entry_type_name(region->region_type),
873 		    region->region_port,
874 		    (unsigned long long) region->base_addr,
875 		    (unsigned long long) region->size);
876 
877 		return (EINVAL);
878 	}
879 
880 	return (0);
881 }
882 
883 /**
884  * Convert a bcma_erom_core record to its bhnd_core_info representation.
885  *
886  * @param core EROM core record to convert.
887  * @param core_idx The core index of @p core.
888  * @param core_unit The core unit of @p core.
889  * @param[out] info The populated bhnd_core_info representation.
890  */
891 static void
892 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
893     int core_unit, struct bhnd_core_info *info)
894 {
895 	info->vendor = core->vendor;
896 	info->device = core->device;
897 	info->hwrev = core->rev;
898 	info->core_idx = core_idx;
899 	info->unit = core_unit;
900 }
901 
902 /**
903  * Map an EROM region type to its corresponding port type.
904  *
905  * @param region_type Region type value.
906  * @param[out] port_type On success, the corresponding port type.
907  */
908 static int
909 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
910     bhnd_port_type *port_type)
911 {
912 	switch (region_type) {
913 	case BCMA_EROM_REGION_TYPE_DEVICE:
914 		*port_type = BHND_PORT_DEVICE;
915 		return (0);
916 	case BCMA_EROM_REGION_TYPE_BRIDGE:
917 		*port_type = BHND_PORT_BRIDGE;
918 		return (0);
919 	case BCMA_EROM_REGION_TYPE_MWRAP:
920 	case BCMA_EROM_REGION_TYPE_SWRAP:
921 		*port_type = BHND_PORT_AGENT;
922 		return (0);
923 	default:
924 		EROM_LOG(erom, "unsupported region type %hhx\n",
925 			region_type);
926 		return (EINVAL);
927 	}
928 }
929 
930 /**
931  * Register all MMIO region descriptors for the given slave port.
932  *
933  * @param erom EROM read state.
934  * @param corecfg Core info to be populated with the scanned port regions.
935  * @param port_num Port index for which regions will be parsed.
936  * @param region_type The region type to be parsed.
937  * @retval 0 success
938  * @retval non-zero Reading or parsing a region descriptor failed.
939  */
940 static int
941 bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
942     struct bcma_corecfg *corecfg, bcma_pid_t port_num,
943     uint8_t region_type)
944 {
945 	struct bcma_sport	*sport;
946 	struct bcma_sport_list	*sports;
947 	bus_size_t		 entry_offset;
948 	int			 error;
949 	bhnd_port_type		 port_type;
950 
951 	error = 0;
952 
953 	/* Determine the port type for this region type. */
954 	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
955 	if (error)
956 		return (error);
957 
958 	/* Fetch the list to be populated */
959 	sports = bcma_corecfg_get_port_list(corecfg, port_type);
960 
961 	/* Allocate a new port descriptor */
962 	sport = bcma_alloc_sport(port_num, port_type);
963 	if (sport == NULL)
964 		return (ENOMEM);
965 
966 	/* Read all address regions defined for this port */
967 	for (bcma_rmid_t region_num = 0;; region_num++) {
968 		struct bcma_map			*map;
969 		struct bcma_erom_sport_region	 spr;
970 
971 		/* No valid port definition should come anywhere near
972 		 * BCMA_RMID_MAX. */
973 		if (region_num == BCMA_RMID_MAX) {
974 			EROM_LOG(erom, "core%u %s%u: region count reached "
975 			    "upper limit of %u\n",
976 			    corecfg->core_info.core_idx,
977 			    bhnd_port_type_name(port_type),
978 			    port_num, BCMA_RMID_MAX);
979 
980 			error = EINVAL;
981 			goto cleanup;
982 		}
983 
984 		/* Parse the next region entry. */
985 		entry_offset = bcma_erom_tell(erom);
986 		error = bcma_erom_parse_sport_region(erom, &spr);
987 		if (error && error != ENOENT) {
988 			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
989 			    "address region\n",
990 			    corecfg->core_info.core_idx,
991 			    bhnd_port_type_name(port_type),
992 			    port_num, region_num);
993 			goto cleanup;
994 		}
995 
996 		/* ENOENT signals no further region entries */
997 		if (error == ENOENT) {
998 			/* No further entries */
999 			error = 0;
1000 			break;
1001 		}
1002 
1003 		/* A mismatched region port or type also signals no further
1004 		 * region entries */
1005 		if (spr.region_port != port_num ||
1006 		    spr.region_type != region_type)
1007 		{
1008 			/* We don't want to consume this entry */
1009 			bcma_erom_seek(erom, entry_offset);
1010 
1011 			error = 0;
1012 			goto cleanup;
1013 		}
1014 
1015 		/*
1016 		 * Create the map entry.
1017 		 */
1018 		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
1019 		if (map == NULL) {
1020 			error = ENOMEM;
1021 			goto cleanup;
1022 		}
1023 
1024 		map->m_region_num = region_num;
1025 		map->m_base = spr.base_addr;
1026 		map->m_size = spr.size;
1027 		map->m_rid = -1;
1028 
1029 		/* Add the region map to the port */
1030 		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
1031 		sport->sp_num_maps++;
1032 	}
1033 
1034 cleanup:
1035 	/* Append the new port descriptor on success, or deallocate the
1036 	 * partially parsed descriptor on failure. */
1037 	if (error == 0) {
1038 		STAILQ_INSERT_TAIL(sports, sport, sp_link);
1039 	} else if (sport != NULL) {
1040 		bcma_free_sport(sport);
1041 	}
1042 
1043 	return (error);
1044 }
1045 
1046 /**
1047  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1048  * to be owned by the caller.
1049  *
1050  * @param erom A bcma EROM instance.
1051  * @param[out] result On success, the core's device info. The caller inherits
1052  * ownership of this allocation.
1053  *
1054  * @return If successful, returns 0. If the end of the EROM table is hit,
1055  * ENOENT will be returned. On error, returns a non-zero error value.
1056  */
1057 int
1058 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1059 {
1060 	struct bcma_corecfg	*cfg;
1061 	struct bcma_erom_core	 core;
1062 	uint8_t			 first_region_type;
1063 	bus_size_t		 initial_offset;
1064 	u_int			 core_index;
1065 	int			 core_unit;
1066 	int			 error;
1067 
1068 	cfg = NULL;
1069 	initial_offset = bcma_erom_tell(erom);
1070 
1071 	/* Parse the next core entry */
1072 	if ((error = bcma_erom_parse_core(erom, &core)))
1073 		return (error);
1074 
1075 	/* Determine the core's index and unit numbers */
1076 	bcma_erom_reset(erom);
1077 	core_unit = 0;
1078 	core_index = 0;
1079 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1080 		struct bcma_erom_core prev_core;
1081 
1082 		/* Parse next core */
1083 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1084 		if (error)
1085 			return (error);
1086 
1087 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1088 			return (error);
1089 
1090 		/* Earlier unit of the same core type? */
1091 		if (core.vendor == prev_core.vendor &&
1092 		    core.device == prev_core.device)
1093 		{
1094 			core_unit++;
1095 		}
1096 
1097 		/* Seek to next core */
1098 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1099 		if (error)
1100 			return (error);
1101 	}
1102 
1103 	/* Skip past the core descriptor we already parsed above */
1104 	if ((error = bcma_erom_skip_core(erom)))
1105 		return (error);
1106 
1107 	/* Allocate our corecfg */
1108 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1109 	    core.device, core.rev);
1110 	if (cfg == NULL)
1111 		return (ENOMEM);
1112 
1113 	/* These are 5-bit values in the EROM table, and should never be able
1114 	 * to overflow BCMA_PID_MAX. */
1115 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1116 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1117 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1118 	    ("unsupported wport count"));
1119 
1120 	if (bootverbose) {
1121 		EROM_LOG(erom,
1122 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1123 		    core_index,
1124 		    bhnd_vendor_name(core.vendor),
1125 		    bhnd_find_core_name(core.vendor, core.device),
1126 		    core.device, core.rev, core_unit);
1127 	}
1128 
1129 	cfg->num_master_ports = core.num_mport;
1130 	cfg->num_dev_ports = 0;		/* determined below */
1131 	cfg->num_bridge_ports = 0;	/* determined below */
1132 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1133 
1134 	/* Parse Master Port Descriptors */
1135 	for (uint8_t i = 0; i < core.num_mport; i++) {
1136 		struct bcma_mport	*mport;
1137 		struct bcma_erom_mport	 mpd;
1138 
1139 		/* Parse the master port descriptor */
1140 		error = bcma_erom_parse_mport(erom, &mpd);
1141 		if (error)
1142 			goto failed;
1143 
1144 		/* Initialize a new bus mport structure */
1145 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1146 		if (mport == NULL) {
1147 			error = ENOMEM;
1148 			goto failed;
1149 		}
1150 
1151 		mport->mp_vid = mpd.port_vid;
1152 		mport->mp_num = mpd.port_num;
1153 
1154 		/* Update dinfo */
1155 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1156 	}
1157 
1158 
1159 	/*
1160 	 * Determine whether this is a bridge device; if so, we can
1161 	 * expect the first sequence of address region descriptors to
1162 	 * be of BCMA_EROM_REGION_TYPE_BRIDGE instead of
1163 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1164 	 *
1165 	 * It's unclear whether this is the correct mechanism by which we
1166 	 * should detect/handle bridge devices, but this approach matches
1167 	 * that of (some of) Broadcom's published drivers.
1168 	 */
1169 	if (core.num_dport > 0) {
1170 		uint32_t entry;
1171 
1172 		if ((error = bcma_erom_peek32(erom, &entry)))
1173 			goto failed;
1174 
1175 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1176 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1177 		{
1178 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1179 			cfg->num_dev_ports = 0;
1180 			cfg->num_bridge_ports = core.num_dport;
1181 		} else {
1182 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1183 			cfg->num_dev_ports = core.num_dport;
1184 			cfg->num_bridge_ports = 0;
1185 		}
1186 	}
1187 
1188 	/* Device/bridge port descriptors */
1189 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1190 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1191 		    first_region_type);
1192 
1193 		if (error)
1194 			goto failed;
1195 	}
1196 
1197 	/* Wrapper (aka device management) descriptors (for master ports). */
1198 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1199 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1200 		    BCMA_EROM_REGION_TYPE_MWRAP);
1201 
1202 		if (error)
1203 			goto failed;
1204 	}
1205 
1206 
1207 	/* Wrapper (aka device management) descriptors (for slave ports). */
1208 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1209 		/* Slave wrapper ports are not numbered distinctly from master
1210 		 * wrapper ports. */
1211 
1212 		/*
1213 		 * Broadcom DDR1/DDR2 Memory Controller
1214 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1215 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1216 		 *
1217 		 * ARM BP135 AMBA3 AXI to APB Bridge
1218 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1219 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1220 		 *
1221 		 * core.num_mwrap
1222 		 * ===>
1223 		 * (core.num_mwrap > 0) ?
1224 		 *           core.num_mwrap :
1225 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1226 		 */
1227 		uint8_t sp_num;
1228 		sp_num = (core.num_mwrap > 0) ?
1229 				core.num_mwrap :
1230 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1231 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1232 		    BCMA_EROM_REGION_TYPE_SWRAP);
1233 
1234 		if (error)
1235 			goto failed;
1236 	}
1237 
1238 	/*
1239 	 * Seek to the next core entry (if any), skipping any dangling/invalid
1240 	 * region entries.
1241 	 *
1242 	 * On the BCM4706, the EROM entry for the memory controller core
1243 	 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1244 	 * descriptor.
1245 	 */
1246 	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1247 		if (error != ENOENT)
1248 			goto failed;
1249 	}
1250 
1251 	*result = cfg;
1252 	return (0);
1253 
1254 failed:
1255 	if (cfg != NULL)
1256 		bcma_free_corecfg(cfg);
1257 
1258 	return (error);
1259 }
1260 
1261 static int
1262 bcma_erom_dump(bhnd_erom_t *erom)
1263 {
1264 	struct bcma_erom	*sc;
1265 	uint32_t		entry;
1266 	int			error;
1267 
1268 	sc = (struct bcma_erom *)erom;
1269 
1270 	bcma_erom_reset(sc);
1271 
1272 	while (!(error = bcma_erom_read32(sc, &entry))) {
1273 		/* Handle EOF */
1274 		if (entry == BCMA_EROM_TABLE_EOF) {
1275 			EROM_LOG(sc, "EOF\n");
1276 			return (0);
1277 		}
1278 
1279 		/* Invalid entry */
1280 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1281 			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1282 			return (EINVAL);
1283 		}
1284 
1285 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1286 		case BCMA_EROM_ENTRY_TYPE_CORE: {
1287 			/* CoreDescA */
1288 			EROM_LOG(sc, "coreA (0x%x)\n", entry);
1289 			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1290 			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1291 			EROM_LOG(sc, "\tid:\t\t0x%x\n",
1292 			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
1293 			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1294 			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1295 
1296 			/* CoreDescB */
1297 			if ((error = bcma_erom_read32(sc, &entry))) {
1298 				EROM_LOG(sc, "error reading CoreDescB: %d\n",
1299 				    error);
1300 				return (error);
1301 			}
1302 
1303 			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1304 				EROM_LOG(sc, "invalid core descriptor; found "
1305 				    "unexpected entry %#x (type=%s)\n",
1306 				    entry, bcma_erom_entry_type_name(entry));
1307 				return (EINVAL);
1308 			}
1309 
1310 			EROM_LOG(sc, "coreB (0x%x)\n", entry);
1311 			EROM_LOG(sc, "\trev:\t0x%x\n",
1312 			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
1313 			EROM_LOG(sc, "\tnummp:\t0x%x\n",
1314 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1315 			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1316 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1317 			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1318 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1319 			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
1320 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP));
1321 
1322 			break;
1323 		}
1324 		case BCMA_EROM_ENTRY_TYPE_MPORT:
1325 			EROM_LOG(sc, "\tmport 0x%x\n", entry);
1326 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1327 			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1328 			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1329 			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1330 			break;
1331 
1332 		case BCMA_EROM_ENTRY_TYPE_REGION: {
1333 			bool	addr64;
1334 			uint8_t	size_type;
1335 
1336 			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1337 			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1338 
1339 			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1340 			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1341 			    addr64 ? "baselo" : "base",
1342 			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1343 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1344 			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1345 			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1346 			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1347 			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1348 
1349 			/* Read the base address high bits */
1350 			if (addr64) {
1351 				if ((error = bcma_erom_read32(sc, &entry))) {
1352 					EROM_LOG(sc, "error reading region "
1353 					    "base address high bits %d\n",
1354 					    error);
1355 					return (error);
1356 				}
1357 
1358 				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1359 			}
1360 
1361 			/* Read extended size descriptor */
1362 			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1363 				bool size64;
1364 
1365 				if ((error = bcma_erom_read32(sc, &entry))) {
1366 					EROM_LOG(sc, "error reading region "
1367 					    "size descriptor %d\n",
1368 					    error);
1369 					return (error);
1370 				}
1371 
1372 				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1373 					size64 = true;
1374 				else
1375 					size64 = false;
1376 
1377 				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1378 				    size64 ? "sizelo" : "size",
1379 				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1380 
1381 				if (size64) {
1382 					error = bcma_erom_read32(sc, &entry);
1383 					if (error) {
1384 						EROM_LOG(sc, "error reading "
1385 						    "region size high bits: "
1386 						    "%d\n", error);
1387 						return (error);
1388 					}
1389 
1390 					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1391 					    entry);
1392 				}
1393 			}
1394 			break;
1395 		}
1396 
1397 		default:
1398 			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1399 			    entry, bcma_erom_entry_type_name(entry));
1400 			return (EINVAL);
1401 		}
1402 	}
1403 
1404 	if (error == ENOENT)
1405 		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1406 	else if (error)
1407 		EROM_LOG(sc, "EROM read failed: %d\n", error);
1408 
1409 	return (error);
1410 }
1411 
1412 static kobj_method_t bcma_erom_methods[] = {
1413 	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
1414 	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
1415 	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
1416 	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
1417 	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
1418 	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
1419 	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
1420 	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),
1421 
1422 	KOBJMETHOD_END
1423 };
1424 
1425 BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1426
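
/*
 * Illustrative sketch (not compiled) of consuming this parser via the
 * generic bhnd_erom(9) interface. The wrapper names and signatures used
 * here (bhnd_erom_alloc(), bhnd_erom_get_core_table(),
 * bhnd_erom_free_core_table(), bhnd_erom_free()) are assumptions to be
 * checked against <dev/bhnd/bhnd_erom.h>:
 *
 *	bhnd_erom_t		*erom;
 *	struct bhnd_core_info	*cores;
 *	u_int			 ncores;
 *
 *	if ((erom = bhnd_erom_alloc(&bcma_erom_parser, cid, eio)) == NULL)
 *		return (ENXIO);
 *
 *	if (bhnd_erom_get_core_table(erom, &cores, &ncores) == 0) {
 *		// ... inspect the enumerated cores ...
 *		bhnd_erom_free_core_table(erom, cores);
 *	}
 *
 *	bhnd_erom_free(erom);
 */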