xref: /freebsd/sys/dev/bhnd/bcma/bcma_erom.c (revision 5dae51da3da0cc94d17bd67b308fad304ebec7e0)
1 /*-
2  * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <sys/param.h>
34 #include <sys/bus.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 #include <sys/systm.h>
38 
39 #include <machine/bus.h>
40 #include <machine/resource.h>
41 
42 #include <dev/bhnd/cores/chipc/chipcreg.h>
43 
44 #include "bcma_eromreg.h"
45 #include "bcma_eromvar.h"
46 
47 /*
48  * BCMA Enumeration ROM (EROM) Table
49  *
50  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
51  *
52  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
53  * ChipCommon registers. The table itself is comprised of 32-bit
54  * type-tagged entries, organized into an array of variable-length
55  * core descriptor records.
56  *
57  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
58  * marker.
59  */
60 
61 struct bcma_erom_io;
62 
63 static const char	*bcma_erom_entry_type_name (uint8_t entry);
64 
65 static uint32_t		 bcma_eio_read4(struct bcma_erom_io *io,
66 			     bus_size_t offset);
67 
68 static int		 bcma_erom_read32(struct bcma_erom *erom,
69 			     uint32_t *entry);
70 static int		 bcma_erom_skip32(struct bcma_erom *erom);
71 
72 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
73 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
74 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
75 
76 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
77 			     uint8_t etype);
78 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
79 			     uint8_t region_type, bhnd_port_type *port_type);
80 
81 
82 static int		 bcma_erom_peek32(struct bcma_erom *erom,
83 			     uint32_t *entry);
84 
85 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
86 static void		 bcma_erom_seek(struct bcma_erom *erom,
87 			     bus_size_t offset);
88 static void		 bcma_erom_reset(struct bcma_erom *erom);
89 
90 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
91 			     const struct bhnd_core_match *desc,
92 			     struct bhnd_core_info *core);
93 
94 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
95 			     struct bcma_erom_core *core);
96 
97 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
98 			     struct bcma_erom_mport *mport);
99 
100 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
101 			     struct bcma_erom_sport_region *region);
102 
103 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
104 			     u_int core_idx, int core_unit,
105 			     struct bhnd_core_info *info);
106 
/**
 * BCMA EROM generic I/O context.
 *
 * Reads are routed through one of two backings: a bhnd resource
 * (when @p res is non-NULL), or a raw bus space tag/handle pair
 * (when @p res is NULL). See bcma_eio_read4().
 */
struct bcma_erom_io {
	struct bhnd_resource	*res;		/**< memory resource, or NULL if initialized
						     with bus space tag and handle */
	int			 rid;		/**< memory resource id, or -1 */

	bus_space_tag_t		 bst;		/**< bus space tag, if any */
	bus_space_handle_t	 bsh;		/**< bus space handle, if any */

	bus_size_t	 	 start;		/**< base read offset, added to all
						     per-read offsets */
};
120 
/**
 * BCMA EROM per-instance state.
 *
 * Wraps the generic bhnd_erom object with the I/O context and a table
 * read cursor (offset), which is advanced by the read/skip routines.
 */
struct bcma_erom {
	struct bhnd_erom	obj;	/**< common bhnd_erom state; must be first */
	device_t	 	dev;	/**< parent device, or NULL if none. */
	struct bcma_erom_io	io;	/**< I/O context */
	bus_size_t	 	offset;	/**< current read offset */
};
130 
/**
 * Log an EROM parsing message, prefixed with the current read offset.
 * Logs via the parent device when one was provided at initialization,
 * falling back on plain printf() otherwise.
 */
#define	EROM_LOG(erom, fmt, ...)	do {				\
	if (erom->dev != NULL) {					\
		device_printf(erom->dev, "erom[0x%llx]: " fmt,	\
		    (unsigned long long) (erom->offset), ##__VA_ARGS__);\
	} else {							\
		printf("erom[0x%llx]: " fmt,				\
		    (unsigned long long) (erom->offset), ##__VA_ARGS__);\
	}								\
} while(0)
140 
141 /** Return the type name for an EROM entry */
142 static const char *
143 bcma_erom_entry_type_name (uint8_t entry)
144 {
145 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
146 	case BCMA_EROM_ENTRY_TYPE_CORE:
147 		return "core";
148 	case BCMA_EROM_ENTRY_TYPE_MPORT:
149 		return "mport";
150 	case BCMA_EROM_ENTRY_TYPE_REGION:
151 		return "region";
152 	default:
153 		return "unknown";
154 	}
155 }
156 
157 
158 /**
159  * Read a 32-bit value from an EROM I/O context.
160  *
161  * @param io EROM I/O context.
162  * @param offset Read offset.
163  */
164 static uint32_t
165 bcma_eio_read4(struct bcma_erom_io *io, bus_size_t offset)
166 {
167 	bus_size_t read_off;
168 
169 	read_off = io->start + offset;
170 	if (io->res != NULL)
171 		return (bhnd_bus_read_4(io->res, read_off));
172 	else
173 		return (bus_space_read_4(io->bst, io->bsh, read_off));
174 }
175 
176 /* Initialize bcma_erom resource I/O context */
177 static void
178 bcma_eio_init(struct bcma_erom_io *io, struct bhnd_resource *res, int rid,
179     bus_size_t offset)
180 {
181 	io->res = res;
182 	io->rid = rid;
183 	io->start = offset;
184 }
185 
186 /* Initialize bcma_erom bus space I/O context */
187 static void
188 bcma_eio_init_static(struct bcma_erom_io *io, bus_space_tag_t bst,
189     bus_space_handle_t bsh, bus_size_t offset)
190 {
191 	io->res = NULL;
192 	io->rid = -1;
193 	io->bst = bst;
194 	io->bsh = bsh;
195 	io->start = offset;
196 }
197 
198 /* BCMA implementation of BHND_EROM_INIT() */
199 static int
200 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
201     device_t parent, int rid)
202 {
203 	struct bcma_erom	*sc;
204 	struct bhnd_resource	*res;
205 
206 	sc = (struct bcma_erom *)erom;
207 	sc->dev = parent;
208 	sc->offset = 0;
209 
210 	res = bhnd_alloc_resource(parent, SYS_RES_MEMORY, &rid, cid->enum_addr,
211 	    cid->enum_addr + BCMA_EROM_TABLE_SIZE - 1, BCMA_EROM_TABLE_SIZE,
212 	    RF_ACTIVE|RF_SHAREABLE);
213 
214 	if (res == NULL)
215 		return (ENOMEM);
216 
217 	bcma_eio_init(&sc->io, res, rid, BCMA_EROM_TABLE_START);
218 
219 	return (0);
220 }
221 
222 /* BCMA implementation of BHND_EROM_INIT_STATIC() */
223 static int
224 bcma_erom_init_static(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
225     bus_space_tag_t bst, bus_space_handle_t bsh)
226 {
227 	struct bcma_erom	*sc;
228 
229 	sc = (struct bcma_erom *)erom;
230 	sc->dev = NULL;
231 	sc->offset = 0;
232 
233 	bcma_eio_init_static(&sc->io, bst, bsh, BCMA_EROM_TABLE_START);
234 
235 	return (0);
236 }
237 
238 /* Common implementation of BHND_EROM_PROBE/BHND_EROM_PROBE_STATIC */
239 static int
240 bcma_erom_probe_common(struct bcma_erom_io *io, const struct bhnd_chipid *hint,
241     struct bhnd_chipid *cid)
242 {
243 	uint32_t	idreg, eromptr;
244 
245 	/* Hints aren't supported; all BCMA devices have a ChipCommon
246 	 * core */
247 	if (hint != NULL)
248 		return (EINVAL);
249 
250 	/* Confirm CHIPC_EROMPTR availability */
251 	idreg = bcma_eio_read4(io, CHIPC_ID);
252 	if (!BHND_CHIPTYPE_HAS_EROM(CHIPC_GET_BITS(idreg, CHIPC_ID_BUS)))
253 		return (ENXIO);
254 
255 	/* Fetch EROM address */
256 	eromptr = bcma_eio_read4(io, CHIPC_EROMPTR);
257 
258 	/* Parse chip identifier */
259 	*cid = bhnd_parse_chipid(idreg, eromptr);
260 
261 	/* Verify chip type */
262 	switch (cid->chip_type) {
263 		case BHND_CHIPTYPE_BCMA:
264 			return (BUS_PROBE_DEFAULT);
265 
266 		case BHND_CHIPTYPE_BCMA_ALT:
267 		case BHND_CHIPTYPE_UBUS:
268 			return (BUS_PROBE_GENERIC);
269 
270 		default:
271 			return (ENXIO);
272 	}
273 }
274 
275 static int
276 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_resource *res,
277     bus_size_t offset, const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
278 {
279 	struct bcma_erom_io io;
280 
281 	bcma_eio_init(&io, res, rman_get_rid(res->res),
282 	    offset + BCMA_EROM_TABLE_START);
283 
284 	return (bcma_erom_probe_common(&io, hint, cid));
285 }
286 
287 static int
288 bcma_erom_probe_static(bhnd_erom_class_t *cls, bus_space_tag_t bst,
289      bus_space_handle_t bsh, bus_addr_t paddr, const struct bhnd_chipid *hint,
290      struct bhnd_chipid *cid)
291 {
292 	struct bcma_erom_io io;
293 
294 	bcma_eio_init_static(&io, bst, bsh, BCMA_EROM_TABLE_START);
295 	return (bcma_erom_probe_common(&io, hint, cid));
296 }
297 
298 
299 static void
300 bcma_erom_fini(bhnd_erom_t *erom)
301 {
302 	struct bcma_erom *sc = (struct bcma_erom *)erom;
303 
304 	if (sc->io.res != NULL) {
305 		bhnd_release_resource(sc->dev, SYS_RES_MEMORY, sc->io.rid,
306 		    sc->io.res);
307 
308 		sc->io.res = NULL;
309 		sc->io.rid = -1;
310 	}
311 }
312 
313 static int
314 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
315     struct bhnd_core_info *core)
316 {
317 	struct bcma_erom *sc = (struct bcma_erom *)erom;
318 
319 	/* Search for the first matching core */
320 	return (bcma_erom_seek_matching_core(sc, desc, core));
321 }
322 
323 static int
324 bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
325     bhnd_port_type port_type, u_int port_num, u_int region_num,
326     struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
327 {
328 	struct bcma_erom	*sc;
329 	struct bcma_erom_core	 ec;
330 	uint32_t		 entry;
331 	uint8_t			 region_port, region_type;
332 	bool			 found;
333 	int			 error;
334 
335 	sc = (struct bcma_erom *)erom;
336 
337 	/* Seek to the first matching core and provide the core info
338 	 * to the caller */
339 	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
340 		return (error);
341 
342 	if ((error = bcma_erom_parse_core(sc, &ec)))
343 		return (error);
344 
345 	/* Skip master ports */
346 	for (u_long i = 0; i < ec.num_mport; i++) {
347 		if ((error = bcma_erom_skip_mport(sc)))
348 			return (error);
349 	}
350 
351 	/* Seek to the region block for the given port type */
352 	found = false;
353 	while (1) {
354 		bhnd_port_type	p_type;
355 		uint8_t		r_type;
356 
357 		if ((error = bcma_erom_peek32(sc, &entry)))
358 			return (error);
359 
360 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
361 			return (ENOENT);
362 
363 		/* Expected region type? */
364 		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
365 		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
366 		if (error)
367 			return (error);
368 
369 		if (p_type == port_type) {
370 			found = true;
371 			break;
372 		}
373 
374 		/* Skip to next entry */
375 		if ((error = bcma_erom_skip_sport_region(sc)))
376 			return (error);
377 	}
378 
379 	if (!found)
380 		return (ENOENT);
381 
382 	/* Found the appropriate port type block; now find the region records
383 	 * for the given port number */
384 	found = false;
385 	for (u_int i = 0; i <= port_num; i++) {
386 		bhnd_port_type	p_type;
387 
388 		if ((error = bcma_erom_peek32(sc, &entry)))
389 			return (error);
390 
391 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
392 			return (ENOENT);
393 
394 		/* Fetch the type/port of the first region entry */
395 		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
396 		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
397 
398 		/* Have we found the region entries for the desired port? */
399 		if (i == port_num) {
400 			error = bcma_erom_region_to_port_type(sc, region_type,
401 			    &p_type);
402 			if (error)
403 				return (error);
404 
405 			if (p_type == port_type)
406 				found = true;
407 
408 			break;
409 		}
410 
411 		/* Otherwise, seek to next block of region records */
412 		while (1) {
413 			uint8_t	next_type, next_port;
414 
415 			if ((error = bcma_erom_skip_sport_region(sc)))
416 				return (error);
417 
418 			if ((error = bcma_erom_peek32(sc, &entry)))
419 				return (error);
420 
421 			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
422 				return (ENOENT);
423 
424 			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
425 			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
426 
427 			if (next_type != region_type ||
428 			    next_port != region_port)
429 				break;
430 		}
431 	}
432 
433 	if (!found)
434 		return (ENOENT);
435 
436 	/* Finally, search for the requested region number */
437 	for (u_int i = 0; i <= region_num; i++) {
438 		struct bcma_erom_sport_region	region;
439 		uint8_t				next_port, next_type;
440 
441 		if ((error = bcma_erom_peek32(sc, &entry)))
442 			return (error);
443 
444 		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
445 			return (ENOENT);
446 
447 		/* Check for the end of the region block */
448 		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
449 		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
450 
451 		if (next_type != region_type ||
452 		    next_port != region_port)
453 			break;
454 
455 		/* Parse the region */
456 		if ((error = bcma_erom_parse_sport_region(sc, &region)))
457 			return (error);
458 
459 		/* Is this our target region_num? */
460 		if (i == region_num) {
461 			/* Found */
462 			*addr = region.base_addr;
463 			*size = region.size;
464 			return (0);
465 		}
466 	}
467 
468 	/* Not found */
469 	return (ENOENT);
470 };
471 
472 static int
473 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
474     u_int *num_cores)
475 {
476 	struct bcma_erom	*sc;
477 	struct bhnd_core_info	*buffer;
478 	bus_size_t		 initial_offset;
479 	u_int			 count;
480 	int			 error;
481 
482 	sc = (struct bcma_erom *)erom;
483 
484 	buffer = NULL;
485 	initial_offset = bcma_erom_tell(sc);
486 
487 	/* Determine the core count */
488 	bcma_erom_reset(sc);
489 	for (count = 0, error = 0; !error; count++) {
490 		struct bcma_erom_core core;
491 
492 		/* Seek to the first readable core entry */
493 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
494 		if (error == ENOENT)
495 			break;
496 		else if (error)
497 			goto cleanup;
498 
499 		/* Read past the core descriptor */
500 		if ((error = bcma_erom_parse_core(sc, &core)))
501 			goto cleanup;
502 	}
503 
504 	/* Allocate our output buffer */
505 	buffer = malloc(sizeof(struct bhnd_core_info) * count, M_BHND,
506 	    M_NOWAIT);
507 	if (buffer == NULL) {
508 		error = ENOMEM;
509 		goto cleanup;
510 	}
511 
512 	/* Parse all core descriptors */
513 	bcma_erom_reset(sc);
514 	for (u_int i = 0; i < count; i++) {
515 		struct bcma_erom_core	core;
516 		int			unit;
517 
518 		/* Parse the core */
519 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
520 		if (error)
521 			goto cleanup;
522 
523 		error = bcma_erom_parse_core(sc, &core);
524 		if (error)
525 			goto cleanup;
526 
527 		/* Determine the unit number */
528 		unit = 0;
529 		for (u_int j = 0; j < i; j++) {
530 			if (buffer[i].vendor == buffer[j].vendor &&
531 			    buffer[i].device == buffer[j].device)
532 				unit++;
533 		}
534 
535 		/* Convert to a bhnd info record */
536 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
537 	}
538 
539 cleanup:
540 	if (!error) {
541 		*cores = buffer;
542 		*num_cores = count;
543 	} else {
544 		if (buffer != NULL)
545 			free(buffer, M_BHND);
546 	}
547 
548 	/* Restore the initial position */
549 	bcma_erom_seek(sc, initial_offset);
550 	return (error);
551 }
552 
/* Free a core table allocated by bcma_erom_get_core_table() (M_BHND) */
static void
bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
{
	free(cores, M_BHND);
}
558 
/**
 * Return the current read position.
 *
 * @param erom EROM read state.
 * @return The current table read offset, suitable for a later
 * bcma_erom_seek().
 */
static bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
	return (erom->offset);
}
567 
/**
 * Seek to an absolute read position.
 *
 * @param erom EROM read state.
 * @param offset New table read offset; no validation is performed here —
 * out-of-range offsets are rejected by the next bcma_erom_peek32().
 */
static void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
	erom->offset = offset;
}
576 
/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 * @retval 0 success
 * @retval EINVAL The read offset falls within (or beyond) the final word
 * of the table, implying the terminating EOF marker was never seen.
 */
static int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
	/* A well-formed table must terminate with an EOF entry before the
	 * final table word is reached */
	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
		return (EINVAL);
	}

	*entry = bcma_eio_read4(&erom->io, erom->offset);
	return (0);
}
598 
599 /**
600  * Read a 32-bit entry value from the EROM table.
601  *
602  * @param erom EROM read state.
603  * @param entry Will contain the read result on success.
604  * @retval 0 success
605  * @retval ENOENT The end of the EROM table was reached.
606  * @retval non-zero The read could not be completed.
607  */
608 static int
609 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
610 {
611 	int error;
612 
613 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
614 		erom->offset += 4;
615 
616 	return (error);
617 }
618 
619 /**
620  * Read and discard 32-bit entry value from the EROM table.
621  *
622  * @param erom EROM read state.
623  * @retval 0 success
624  * @retval ENOENT The end of the EROM table was reached.
625  * @retval non-zero The read could not be completed.
626  */
627 static int
628 bcma_erom_skip32(struct bcma_erom *erom)
629 {
630 	uint32_t	entry;
631 
632 	return bcma_erom_read32(erom, &entry);
633 }
634 
635 /**
636  * Read and discard a core descriptor from the EROM table.
637  *
638  * @param erom EROM read state.
639  * @retval 0 success
640  * @retval ENOENT The end of the EROM table was reached.
641  * @retval non-zero The read could not be completed.
642  */
643 static int
644 bcma_erom_skip_core(struct bcma_erom *erom)
645 {
646 	struct bcma_erom_core core;
647 	return (bcma_erom_parse_core(erom, &core));
648 }
649 
650 /**
651  * Read and discard a master port descriptor from the EROM table.
652  *
653  * @param erom EROM read state.
654  * @retval 0 success
655  * @retval ENOENT The end of the EROM table was reached.
656  * @retval non-zero The read could not be completed.
657  */
658 static int
659 bcma_erom_skip_mport(struct bcma_erom *erom)
660 {
661 	struct bcma_erom_mport mp;
662 	return (bcma_erom_parse_mport(erom, &mp));
663 }
664 
665 /**
666  * Read and discard a port region descriptor from the EROM table.
667  *
668  * @param erom EROM read state.
669  * @retval 0 success
670  * @retval ENOENT The end of the EROM table was reached.
671  * @retval non-zero The read could not be completed.
672  */
673 static int
674 bcma_erom_skip_sport_region(struct bcma_erom *erom)
675 {
676 	struct bcma_erom_sport_region r;
677 	return (bcma_erom_parse_sport_region(erom, &r));
678 }
679 
680 /**
681  * Seek to the next entry matching the given EROM entry type.
682  *
683  * @param erom EROM read state.
684  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
685  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
686  * @retval 0 success
687  * @retval ENOENT The end of the EROM table was reached.
688  * @retval non-zero Reading or parsing the descriptor failed.
689  */
690 static int
691 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
692 {
693 	uint32_t			entry;
694 	int				error;
695 
696 	/* Iterate until we hit an entry matching the requested type. */
697 	while (!(error = bcma_erom_peek32(erom, &entry))) {
698 		/* Handle EOF */
699 		if (entry == BCMA_EROM_TABLE_EOF)
700 			return (ENOENT);
701 
702 		/* Invalid entry */
703 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
704 			return (EINVAL);
705 
706 		/* Entry type matches? */
707 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
708 			return (0);
709 
710 		/* Skip non-matching entry types. */
711 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
712 		case BCMA_EROM_ENTRY_TYPE_CORE:
713 			if ((error = bcma_erom_skip_core(erom)))
714 				return (error);
715 
716 			break;
717 
718 		case BCMA_EROM_ENTRY_TYPE_MPORT:
719 			if ((error = bcma_erom_skip_mport(erom)))
720 				return (error);
721 
722 			break;
723 
724 		case BCMA_EROM_ENTRY_TYPE_REGION:
725 			if ((error = bcma_erom_skip_sport_region(erom)))
726 				return (error);
727 			break;
728 
729 		default:
730 			/* Unknown entry type! */
731 			return (EINVAL);
732 		}
733 	}
734 
735 	return (error);
736 }
737 
/**
 * Return the read position to the start of the EROM table.
 *
 * @param erom EROM read state.
 */
static void
bcma_erom_reset(struct bcma_erom *erom)
{
	bcma_erom_seek(erom, 0);
}
748 
/**
 * Seek to the first core entry matching @p desc, leaving the read
 * position at the start of the matching core's descriptor.
 *
 * @param sc EROM read state.
 * @param desc The core match descriptor.
 * @param[out] core On success, the matching core info. If the core info
 * is not desired, a NULL pointer may be provided.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before a matching
 * core was found.
 * @retval non-zero Reading or parsing failed.
 */
static int
bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc, struct bhnd_core_info *core)
{
	struct bhnd_core_match	 imatch;
	bus_size_t		 core_offset, next_offset;
	int			 error;

	/* Seek to table start. */
	bcma_erom_reset(sc);

	/* We can't determine a core's unit number during the initial scan,
	 * so first match with the unit constrained to 0. */
	imatch = *desc;
	imatch.m.match.core_unit = 0;

	/* Locate the first matching core */
	for (u_int i = 0; i < UINT_MAX; i++) {
		struct bcma_erom_core	ec;
		struct bhnd_core_info	ci;

		/* Seek to the next core */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);

		/* Save the core offset */
		core_offset = bcma_erom_tell(sc);

		/* Parse the core */
		if ((error = bcma_erom_parse_core(sc, &ec)))
			return (error);

		bcma_erom_to_core_info(&ec, i, 0, &ci);

		/* Check for initial match (unit number still unknown) */
		if (!bhnd_core_matches(&ci, &imatch))
			continue;

		/* Re-scan preceding cores to determine the unit number. */
		next_offset = bcma_erom_tell(sc);
		bcma_erom_reset(sc);
		for (u_int j = 0; j < i; j++) {
			/* Parse the core */
			error = bcma_erom_seek_next(sc,
			    BCMA_EROM_ENTRY_TYPE_CORE);
			if (error)
				return (error);

			if ((error = bcma_erom_parse_core(sc, &ec)))
				return (error);

			/* Bump the unit number? */
			if (ec.vendor == ci.vendor && ec.device == ci.device)
				ci.unit++;
		}

		/* Check for full match against now-valid unit number */
		if (!bhnd_core_matches(&ci, desc)) {
			/* Reposition to allow reading the next core */
			bcma_erom_seek(sc, next_offset);
			continue;
		}

		/* Found; seek to the core's initial offset and provide
		 * the core info to the caller */
		bcma_erom_seek(sc, core_offset);
		if (core != NULL)
			*core = ci;

		return (0);
	}

	/* Not found, or a parse error occurred */
	return (error);
}
836 
/**
 * Read the next core descriptor from the EROM table, advancing the read
 * position past both 32-bit descriptor words (CoreDescA/CoreDescB).
 *
 * @param erom EROM read state.
 * @param[out] core On success, will be populated with the parsed core
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval EINVAL An entry of an unexpected type was encountered.
 * @retval non-zero Reading or parsing the core descriptor failed.
 */
static int
bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
{
	uint32_t	entry;
	int		error;

	/* Parse CoreDescA */
	if ((error = bcma_erom_read32(erom, &entry)))
		return (error);

	/* Handle EOF */
	if (entry == BCMA_EROM_TABLE_EOF)
		return (ENOENT);

	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
                   entry, bcma_erom_entry_type_name(entry));

		return (EINVAL);
	}

	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);

	/* Parse CoreDescB; must immediately follow CoreDescA */
	if ((error = bcma_erom_read32(erom, &entry)))
		return (error);

	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
		return (EINVAL);
	}

	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);

	return (0);
}
887 
888 /**
889  * Read the next master port descriptor from the EROM table.
890  *
891  * @param erom EROM read state.
892  * @param[out] mport On success, will be populated with the parsed
893  * descriptor data.
894  * @retval 0 success
895  * @retval non-zero Reading or parsing the descriptor failed.
896  */
897 static int
898 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
899 {
900 	uint32_t	entry;
901 	int		error;
902 
903 	/* Parse the master port descriptor */
904 	if ((error = bcma_erom_read32(erom, &entry)))
905 		return (error);
906 
907 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
908 		return (EINVAL);
909 
910 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
911 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
912 
913 	return (0);
914 }
915 
/**
 * Read the next slave port region descriptor from the EROM table,
 * advancing the read position past it (including any additional 64-bit
 * address or size words).
 *
 * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval EINVAL The parsed address map overflows, or the initial peek
 * failed.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
	uint32_t	entry;
	uint8_t		size_type;
	int		error;

	/* Peek at the region descriptor */
	if (bcma_erom_peek32(erom, &entry))
		return (EINVAL);

	/* A non-region entry signals the end of the region table; only
	 * consume the entry once we know it belongs to us */
	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
		return (ENOENT);
	} else {
		bcma_erom_skip32(erom);
	}

	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

	/* If region address is 64-bit, fetch the high bits. */
	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->base_addr |= ((bhnd_addr_t) entry << 32);
	}

	/* Parse the region size; it's either encoded as the binary logarithm
	 * of the number of 4K pages (i.e. log2 n), or its encoded as a
	 * 32-bit/64-bit literal value directly following the current entry. */
	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
			if ((error = bcma_erom_read32(erom, &entry)))
				return (error);
			region->size |= ((bhnd_size_t) entry << 32);
		}
	} else {
		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
	}

	/* Verify that addr+size does not overflow. */
	if (region->size != 0 &&
	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
	{
		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
		    bcma_erom_entry_type_name(region->region_type),
		    region->region_port,
		    (unsigned long long) region->base_addr,
		    (unsigned long long) region->size);

		return (EINVAL);
	}

	return (0);
}
991 
992 /**
993  * Convert a bcma_erom_core record to its bhnd_core_info representation.
994  *
995  * @param core EROM core record to convert.
996  * @param core_idx The core index of @p core.
997  * @param core_unit The core unit of @p core.
998  * @param[out] info The populated bhnd_core_info representation.
999  */
1000 static void
1001 bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
1002     int core_unit, struct bhnd_core_info *info)
1003 {
1004 	info->vendor = core->vendor;
1005 	info->device = core->device;
1006 	info->hwrev = core->rev;
1007 	info->core_idx = core_idx;
1008 	info->unit = core_unit;
1009 }
1010 
1011 /**
1012  * Map an EROM region type to its corresponding port type.
1013  *
1014  * @param region_type Region type value.
1015  * @param[out] port_type On success, the corresponding port type.
1016  */
1017 static int
1018 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
1019     bhnd_port_type *port_type)
1020 {
1021 	switch (region_type) {
1022 	case BCMA_EROM_REGION_TYPE_DEVICE:
1023 		*port_type = BHND_PORT_DEVICE;
1024 		return (0);
1025 	case BCMA_EROM_REGION_TYPE_BRIDGE:
1026 		*port_type = BHND_PORT_BRIDGE;
1027 		return (0);
1028 	case BCMA_EROM_REGION_TYPE_MWRAP:
1029 	case BCMA_EROM_REGION_TYPE_SWRAP:
1030 		*port_type = BHND_PORT_AGENT;
1031 		return (0);
1032 	default:
1033 		EROM_LOG(erom, "unsupported region type %hhx\n",
1034 			region_type);
1035 		return (EINVAL);
1036 	}
1037 }
1038 
/**
 * Register all MMIO region descriptors for the given slave port.
 *
 * Region entries are read from the EROM table starting at @p erom's current
 * read position. Parsing stops at the first entry that is not a region
 * descriptor matching @p port_num and @p region_type; the read position is
 * left pointing at that entry.
 *
 * @param erom EROM read state.
 * @param corecfg Core info to be populated with the scanned port regions.
 * @param port_num Port index for which regions will be parsed.
 * @param region_type The region type to be parsed.
 *
 * @retval 0 success (including the no-further-regions case).
 * @retval ENOMEM if allocation fails.
 * @retval EINVAL if @p region_type is unsupported, or the region count
 * reaches BCMA_RMID_MAX.
 * @retval non-zero if parsing the EROM table otherwise fails.
 */
static int
bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
	struct bcma_sport	*sport;
	struct bcma_sport_list	*sports;
	bus_size_t		 entry_offset;
	int			 error;
	bhnd_port_type		 port_type;

	error = 0;

	/* Determine the port type for this region type. */
	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
	if (error)
		return (error);

	/* Fetch the list to be populated */
	sports = bcma_corecfg_get_port_list(corecfg, port_type);

	/* Allocate a new port descriptor */
	sport = bcma_alloc_sport(port_num, port_type);
	if (sport == NULL)
		return (ENOMEM);

	/* Read all address regions defined for this port */
	for (bcma_rmid_t region_num = 0;; region_num++) {
		struct bcma_map			*map;
		struct bcma_erom_sport_region	 spr;

		/* No valid port definition should come anywhere near
		 * BCMA_RMID_MAX. */
		if (region_num == BCMA_RMID_MAX) {
			EROM_LOG(erom, "core%u %s%u: region count reached "
			    "upper limit of %u\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, BCMA_RMID_MAX);

			error = EINVAL;
			goto cleanup;
		}

		/* Parse the next region entry. Record the entry's offset
		 * first so we can rewind if it belongs to another port. */
		entry_offset = bcma_erom_tell(erom);
		error = bcma_erom_parse_sport_region(erom, &spr);
		if (error && error != ENOENT) {
			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
			    "address region\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, region_num);
			goto cleanup;
		}

		/* ENOENT signals no further region entries */
		if (error == ENOENT) {
			/* No further entries */
			error = 0;
			break;
		}

		/* A region or type mismatch also signals no further region
		 * entries */
		if (spr.region_port != port_num ||
		    spr.region_type != region_type)
		{
			/* We don't want to consume this entry */
			bcma_erom_seek(erom, entry_offset);

			error = 0;
			goto cleanup;
		}

		/*
		 * Create the map entry.
		 */
		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
		if (map == NULL) {
			error = ENOMEM;
			goto cleanup;
		}

		/* m_rid of -1 marks the resource ID as not yet assigned */
		map->m_region_num = region_num;
		map->m_base = spr.base_addr;
		map->m_size = spr.size;
		map->m_rid = -1;

		/* Add the region map to the port */
		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
		sport->sp_num_maps++;
	}

cleanup:
	/* Append the new port descriptor on success, or deallocate the
	 * partially parsed descriptor on failure. */
	if (error == 0) {
		STAILQ_INSERT_TAIL(sports, sport, sp_link);
	} else if (sport != NULL) {
		bcma_free_sport(sport);
	}

	return error;
}
1154 
1155 /**
1156  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1157  * to be owned by the caller.
1158  *
1159  * @param erom A bcma EROM instance.
1160  * @param[out] result On success, the core's device info. The caller inherits
1161  * ownership of this allocation.
1162  *
1163  * @return If successful, returns 0. If the end of the EROM table is hit,
1164  * ENOENT will be returned. On error, returns a non-zero error value.
1165  */
1166 int
1167 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1168 {
1169 	struct bcma_corecfg	*cfg;
1170 	struct bcma_erom_core	 core;
1171 	uint8_t			 first_region_type;
1172 	bus_size_t		 initial_offset;
1173 	u_int			 core_index;
1174 	int			 core_unit;
1175 	int			 error;
1176 
1177 	cfg = NULL;
1178 	initial_offset = bcma_erom_tell(erom);
1179 
1180 	/* Parse the next core entry */
1181 	if ((error = bcma_erom_parse_core(erom, &core)))
1182 		return (error);
1183 
1184 	/* Determine the core's index and unit numbers */
1185 	bcma_erom_reset(erom);
1186 	core_unit = 0;
1187 	core_index = 0;
1188 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1189 		struct bcma_erom_core prev_core;
1190 
1191 		/* Parse next core */
1192 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1193 		if (error)
1194 			return (error);
1195 
1196 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1197 			return (error);
1198 
1199 		/* Is earlier unit? */
1200 		if (core.vendor == prev_core.vendor &&
1201 		    core.device == prev_core.device)
1202 		{
1203 			core_unit++;
1204 		}
1205 
1206 		/* Seek to next core */
1207 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1208 		if (error)
1209 			return (error);
1210 	}
1211 
1212 	/* We already parsed the core descriptor */
1213 	if ((error = bcma_erom_skip_core(erom)))
1214 		return (error);
1215 
1216 	/* Allocate our corecfg */
1217 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1218 	    core.device, core.rev);
1219 	if (cfg == NULL)
1220 		return (ENOMEM);
1221 
1222 	/* These are 5-bit values in the EROM table, and should never be able
1223 	 * to overflow BCMA_PID_MAX. */
1224 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1225 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1226 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1227 	    ("unsupported wport count"));
1228 
1229 	if (bootverbose) {
1230 		EROM_LOG(erom,
1231 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1232 		    core_index,
1233 		    bhnd_vendor_name(core.vendor),
1234 		    bhnd_find_core_name(core.vendor, core.device),
1235 		    core.device, core.rev, core_unit);
1236 	}
1237 
1238 	cfg->num_master_ports = core.num_mport;
1239 	cfg->num_dev_ports = 0;		/* determined below */
1240 	cfg->num_bridge_ports = 0;	/* determined blow */
1241 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1242 
1243 	/* Parse Master Port Descriptors */
1244 	for (uint8_t i = 0; i < core.num_mport; i++) {
1245 		struct bcma_mport	*mport;
1246 		struct bcma_erom_mport	 mpd;
1247 
1248 		/* Parse the master port descriptor */
1249 		error = bcma_erom_parse_mport(erom, &mpd);
1250 		if (error)
1251 			goto failed;
1252 
1253 		/* Initialize a new bus mport structure */
1254 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1255 		if (mport == NULL) {
1256 			error = ENOMEM;
1257 			goto failed;
1258 		}
1259 
1260 		mport->mp_vid = mpd.port_vid;
1261 		mport->mp_num = mpd.port_num;
1262 
1263 		/* Update dinfo */
1264 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1265 	}
1266 
1267 
1268 	/*
1269 	 * Determine whether this is a bridge device; if so, we can
1270 	 * expect the first sequence of address region descriptors to
1271 	 * be of EROM_REGION_TYPE_BRIDGE instead of
1272 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1273 	 *
1274 	 * It's unclear whether this is the correct mechanism by which we
1275 	 * should detect/handle bridge devices, but this approach matches
1276 	 * that of (some of) Broadcom's published drivers.
1277 	 */
1278 	if (core.num_dport > 0) {
1279 		uint32_t entry;
1280 
1281 		if ((error = bcma_erom_peek32(erom, &entry)))
1282 			goto failed;
1283 
1284 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1285 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1286 		{
1287 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1288 			cfg->num_dev_ports = 0;
1289 			cfg->num_bridge_ports = core.num_dport;
1290 		} else {
1291 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1292 			cfg->num_dev_ports = core.num_dport;
1293 			cfg->num_bridge_ports = 0;
1294 		}
1295 	}
1296 
1297 	/* Device/bridge port descriptors */
1298 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1299 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1300 		    first_region_type);
1301 
1302 		if (error)
1303 			goto failed;
1304 	}
1305 
1306 	/* Wrapper (aka device management) descriptors (for master ports). */
1307 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1308 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1309 		    BCMA_EROM_REGION_TYPE_MWRAP);
1310 
1311 		if (error)
1312 			goto failed;
1313 	}
1314 
1315 
1316 	/* Wrapper (aka device management) descriptors (for slave ports). */
1317 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1318 		/* Slave wrapper ports are not numbered distinctly from master
1319 		 * wrapper ports. */
1320 
1321 		/*
1322 		 * Broadcom DDR1/DDR2 Memory Controller
1323 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1324 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1325 		 *
1326 		 * ARM BP135 AMBA3 AXI to APB Bridge
1327 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1328 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1329 		 *
1330 		 * core.num_mwrap
1331 		 * ===>
1332 		 * (core.num_mwrap > 0) ?
1333 		 *           core.num_mwrap :
1334 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1335 		 */
1336 		uint8_t sp_num;
1337 		sp_num = (core.num_mwrap > 0) ?
1338 				core.num_mwrap :
1339 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1340 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1341 		    BCMA_EROM_REGION_TYPE_SWRAP);
1342 
1343 		if (error)
1344 			goto failed;
1345 	}
1346 
1347 	*result = cfg;
1348 	return (0);
1349 
1350 failed:
1351 	if (cfg != NULL)
1352 		bcma_free_corecfg(cfg);
1353 
1354 	return error;
1355 }
1356 
/* bhnd_erom_if method table binding the bcma EROM parser implementation. */
static kobj_method_t bcma_erom_methods[] = {
	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
	KOBJMETHOD(bhnd_erom_probe_static,	bcma_erom_probe_static),
	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
	KOBJMETHOD(bhnd_erom_init_static,	bcma_erom_init_static),
	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),

	KOBJMETHOD_END
};

/* Register the parser class; instances are sized to hold the full parser
 * state (struct bcma_erom). */
BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1372