xref: /freebsd/sys/dev/bhnd/bcma/bcma_erom.c (revision fdafd315ad0d0f28a11b9fb4476a9ab059c62b92)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2015-2017 Landon Fuller <landonf@landonf.org>
5  * Copyright (c) 2017 The FreeBSD Foundation
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Landon Fuller
9  * under sponsorship from the FreeBSD Foundation.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer,
16  *    without modification.
17  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19  *    redistribution must be conditioned upon including a substantially
20  *    similar Disclaimer requirement for further binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
26  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
27  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
28  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
31  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
32  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
33  * THE POSSIBILITY OF SUCH DAMAGES.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/bus.h>
38 #include <sys/kernel.h>
39 #include <sys/limits.h>
40 #include <sys/systm.h>
41 
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 
45 #include <dev/bhnd/bhnd_eromvar.h>
46 
47 #include "bcma_eromreg.h"
48 #include "bcma_eromvar.h"
49 
50 /*
51  * BCMA Enumeration ROM (EROM) Table
52  *
53  * Provides auto-discovery of BCMA cores on Broadcom's HND SoC.
54  *
55  * The EROM core address can be found at BCMA_CC_EROM_ADDR within the
56  * ChipCommon registers. The table itself is comprised of 32-bit
57  * type-tagged entries, organized into an array of variable-length
58  * core descriptor records.
59  *
60  * The final core descriptor is followed by a 32-bit BCMA_EROM_TABLE_EOF (0xF)
61  * marker.
62  */
63 
64 static const char	*bcma_erom_entry_type_name (uint8_t entry);
65 
66 static int		 bcma_erom_read32(struct bcma_erom *erom,
67 			     uint32_t *entry);
68 static int		 bcma_erom_skip32(struct bcma_erom *erom);
69 
70 static int		 bcma_erom_skip_core(struct bcma_erom *erom);
71 static int		 bcma_erom_skip_mport(struct bcma_erom *erom);
72 static int		 bcma_erom_skip_sport_region(struct bcma_erom *erom);
73 
74 static int		 bcma_erom_seek_next(struct bcma_erom *erom,
75 			     uint8_t etype);
76 static int		 bcma_erom_region_to_port_type(struct bcma_erom *erom,
77 			     uint8_t region_type, bhnd_port_type *port_type);
78 
79 static int		 bcma_erom_peek32(struct bcma_erom *erom,
80 			     uint32_t *entry);
81 
82 static bus_size_t	 bcma_erom_tell(struct bcma_erom *erom);
83 static void		 bcma_erom_seek(struct bcma_erom *erom,
84 			     bus_size_t offset);
85 static void		 bcma_erom_reset(struct bcma_erom *erom);
86 
87 static int		 bcma_erom_seek_matching_core(struct bcma_erom *sc,
88 			     const struct bhnd_core_match *desc,
89 			     struct bhnd_core_info *core);
90 
91 static int		 bcma_erom_parse_core(struct bcma_erom *erom,
92 			     struct bcma_erom_core *core);
93 
94 static int		 bcma_erom_parse_mport(struct bcma_erom *erom,
95 			     struct bcma_erom_mport *mport);
96 
97 static int		 bcma_erom_parse_sport_region(struct bcma_erom *erom,
98 			     struct bcma_erom_sport_region *region);
99 
100 static void		 bcma_erom_to_core_info(const struct bcma_erom_core *core,
101 			     u_int core_idx, int core_unit,
102 			     struct bhnd_core_info *info);
103 
/**
 * BCMA EROM per-instance state.
 *
 * The embedded bhnd_erom header is the first member, permitting the
 * bhnd_erom_t <-> bcma_erom casts used throughout this file.
 */
struct bcma_erom {
	struct bhnd_erom	 obj;		/**< common bhnd_erom state; must be first */
	device_t	 	 dev;		/**< parent device, or NULL if none. */
	struct bhnd_erom_io	*eio;		/**< bus I/O callbacks */
	bhnd_size_t	 	 offset;	/**< current read offset */
};
113 
/**
 * Log a diagnostic message, prefixed with the calling function's name and
 * the EROM instance's current read offset (e.g. "foo erom[0x100]: ...").
 */
#define	EROM_LOG(erom, fmt, ...)	do {			\
	printf("%s erom[0x%llx]: " fmt, __FUNCTION__,		\
	    (unsigned long long)(erom->offset), ##__VA_ARGS__);	\
} while(0)
118 
119 /** Return the type name for an EROM entry */
120 static const char *
bcma_erom_entry_type_name(uint8_t entry)121 bcma_erom_entry_type_name (uint8_t entry)
122 {
123 	switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
124 	case BCMA_EROM_ENTRY_TYPE_CORE:
125 		return "core";
126 	case BCMA_EROM_ENTRY_TYPE_MPORT:
127 		return "mport";
128 	case BCMA_EROM_ENTRY_TYPE_REGION:
129 		return "region";
130 	default:
131 		return "unknown";
132 	}
133 }
134 
135 /* BCMA implementation of BHND_EROM_INIT() */
136 static int
bcma_erom_init(bhnd_erom_t * erom,const struct bhnd_chipid * cid,struct bhnd_erom_io * eio)137 bcma_erom_init(bhnd_erom_t *erom, const struct bhnd_chipid *cid,
138     struct bhnd_erom_io *eio)
139 {
140 	struct bcma_erom	*sc;
141 	bhnd_addr_t		 table_addr;
142 	int			 error;
143 
144 	sc = (struct bcma_erom *)erom;
145 	sc->eio = eio;
146 	sc->offset = 0;
147 
148 	/* Determine erom table address */
149 	if (BHND_ADDR_MAX - BCMA_EROM_TABLE_START < cid->enum_addr)
150 		return (ENXIO); /* would overflow */
151 
152 	table_addr = cid->enum_addr + BCMA_EROM_TABLE_START;
153 
154 	/* Try to map the erom table */
155 	error = bhnd_erom_io_map(sc->eio, table_addr, BCMA_EROM_TABLE_SIZE);
156 	if (error)
157 		return (error);
158 
159 	return (0);
160 }
161 
162 /* BCMA implementation of BHND_EROM_PROBE() */
163 static int
bcma_erom_probe(bhnd_erom_class_t * cls,struct bhnd_erom_io * eio,const struct bhnd_chipid * hint,struct bhnd_chipid * cid)164 bcma_erom_probe(bhnd_erom_class_t *cls, struct bhnd_erom_io *eio,
165     const struct bhnd_chipid *hint, struct bhnd_chipid *cid)
166 {
167 	int error;
168 
169 	/* Hints aren't supported; all BCMA devices have a ChipCommon
170 	 * core */
171 	if (hint != NULL)
172 		return (EINVAL);
173 
174 	/* Read and parse chip identification */
175 	if ((error = bhnd_erom_read_chipid(eio, cid)))
176 		return (error);
177 
178 	/* Verify chip type */
179 	switch (cid->chip_type) {
180 		case BHND_CHIPTYPE_BCMA:
181 			return (BUS_PROBE_DEFAULT);
182 
183 		case BHND_CHIPTYPE_BCMA_ALT:
184 		case BHND_CHIPTYPE_UBUS:
185 			return (BUS_PROBE_GENERIC);
186 
187 		default:
188 			return (ENXIO);
189 	}
190 }
191 
192 static void
bcma_erom_fini(bhnd_erom_t * erom)193 bcma_erom_fini(bhnd_erom_t *erom)
194 {
195 	struct bcma_erom *sc = (struct bcma_erom *)erom;
196 
197 	bhnd_erom_io_fini(sc->eio);
198 }
199 
200 static int
bcma_erom_lookup_core(bhnd_erom_t * erom,const struct bhnd_core_match * desc,struct bhnd_core_info * core)201 bcma_erom_lookup_core(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
202     struct bhnd_core_info *core)
203 {
204 	struct bcma_erom *sc = (struct bcma_erom *)erom;
205 
206 	/* Search for the first matching core */
207 	return (bcma_erom_seek_matching_core(sc, desc, core));
208 }
209 
/* BCMA implementation of BHND_EROM_LOOKUP_CORE_ADDR().
 *
 * Seeks to the first core matching @p desc, then walks its slave port
 * region descriptors to locate region @p region_num of port @p port_num
 * with type @p port_type, returning the region's base address and size
 * via @p addr and @p size. Returns ENOENT if no matching region exists.
 */
static int
bcma_erom_lookup_core_addr(bhnd_erom_t *erom, const struct bhnd_core_match *desc,
    bhnd_port_type port_type, u_int port_num, u_int region_num,
    struct bhnd_core_info *core, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct bcma_erom	*sc;
	struct bcma_erom_core	 ec;
	uint32_t		 entry;
	uint8_t			 region_port, region_type;
	bool			 found;
	int			 error;

	sc = (struct bcma_erom *)erom;

	/* Seek to the first matching core and provide the core info
	 * to the caller */
	if ((error = bcma_erom_seek_matching_core(sc, desc, core)))
		return (error);

	/* Consume the core descriptor; the read position now points at the
	 * core's port/region entries */
	if ((error = bcma_erom_parse_core(sc, &ec)))
		return (error);

	/* Skip master ports */
	for (u_long i = 0; i < ec.num_mport; i++) {
		if ((error = bcma_erom_skip_mport(sc)))
			return (error);
	}

	/* Seek to the region block for the given port type */
	found = false;
	while (1) {
		bhnd_port_type	p_type;
		uint8_t		r_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		/* A non-region entry terminates this core's region table */
		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		/* Expected region type? */
		r_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		error = bcma_erom_region_to_port_type(sc, r_type, &p_type);
		if (error)
			return (error);

		if (p_type == port_type) {
			found = true;
			break;
		}

		/* Skip to next entry */
		if ((error = bcma_erom_skip_sport_region(sc)))
			return (error);
	}

	if (!found)
		return (ENOENT);

	/* Found the appropriate port type block; now find the region records
	 * for the given port number */
	found = false;
	for (u_int i = 0; i <= port_num; i++) {
		bhnd_port_type	p_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		/* Fetch the type/port of the first region entry */
		region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

		/* Have we found the region entries for the desired port? */
		if (i == port_num) {
			error = bcma_erom_region_to_port_type(sc, region_type,
			    &p_type);
			if (error)
				return (error);

			if (p_type == port_type)
				found = true;

			break;
		}

		/* Otherwise, seek to next block of region records; a block
		 * ends when the region type or port id changes */
		while (1) {
			uint8_t	next_type, next_port;

			if ((error = bcma_erom_skip_sport_region(sc)))
				return (error);

			if ((error = bcma_erom_peek32(sc, &entry)))
				return (error);

			if (!BCMA_EROM_ENTRY_IS(entry, REGION))
				return (ENOENT);

			next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
			next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

			if (next_type != region_type ||
			    next_port != region_port)
				break;
		}
	}

	if (!found)
		return (ENOENT);

	/* Finally, search for the requested region number */
	for (u_int i = 0; i <= region_num; i++) {
		struct bcma_erom_sport_region	region;
		uint8_t				next_port, next_type;

		if ((error = bcma_erom_peek32(sc, &entry)))
			return (error);

		if (!BCMA_EROM_ENTRY_IS(entry, REGION))
			return (ENOENT);

		/* Check for the end of the region block */
		next_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
		next_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);

		if (next_type != region_type ||
		    next_port != region_port)
			break;

		/* Parse the region */
		if ((error = bcma_erom_parse_sport_region(sc, &region)))
			return (error);

		/* Is this our target region_num? */
		if (i == region_num) {
			/* Found */
			*addr = region.base_addr;
			*size = region.size;
			return (0);
		}
	}

	/* Not found */
	return (ENOENT);
};
358 
359 static int
bcma_erom_get_core_table(bhnd_erom_t * erom,struct bhnd_core_info ** cores,u_int * num_cores)360 bcma_erom_get_core_table(bhnd_erom_t *erom, struct bhnd_core_info **cores,
361     u_int *num_cores)
362 {
363 	struct bcma_erom	*sc;
364 	struct bhnd_core_info	*buffer;
365 	bus_size_t		 initial_offset;
366 	u_int			 count;
367 	int			 error;
368 
369 	sc = (struct bcma_erom *)erom;
370 
371 	buffer = NULL;
372 	initial_offset = bcma_erom_tell(sc);
373 
374 	/* Determine the core count */
375 	bcma_erom_reset(sc);
376 	for (count = 0, error = 0; !error; count++) {
377 		struct bcma_erom_core core;
378 
379 		/* Seek to the first readable core entry */
380 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
381 		if (error == ENOENT)
382 			break;
383 		else if (error)
384 			goto cleanup;
385 
386 		/* Read past the core descriptor */
387 		if ((error = bcma_erom_parse_core(sc, &core)))
388 			goto cleanup;
389 	}
390 
391 	/* Allocate our output buffer */
392 	buffer = mallocarray(count, sizeof(struct bhnd_core_info), M_BHND,
393 	    M_NOWAIT);
394 	if (buffer == NULL) {
395 		error = ENOMEM;
396 		goto cleanup;
397 	}
398 
399 	/* Parse all core descriptors */
400 	bcma_erom_reset(sc);
401 	for (u_int i = 0; i < count; i++) {
402 		struct bcma_erom_core	core;
403 		int			unit;
404 
405 		/* Parse the core */
406 		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
407 		if (error)
408 			goto cleanup;
409 
410 		error = bcma_erom_parse_core(sc, &core);
411 		if (error)
412 			goto cleanup;
413 
414 		/* Determine the unit number */
415 		unit = 0;
416 		for (u_int j = 0; j < i; j++) {
417 			if (buffer[i].vendor == buffer[j].vendor &&
418 			    buffer[i].device == buffer[j].device)
419 				unit++;
420 		}
421 
422 		/* Convert to a bhnd info record */
423 		bcma_erom_to_core_info(&core, i, unit, &buffer[i]);
424 	}
425 
426 cleanup:
427 	if (!error) {
428 		*cores = buffer;
429 		*num_cores = count;
430 	} else {
431 		if (buffer != NULL)
432 			free(buffer, M_BHND);
433 	}
434 
435 	/* Restore the initial position */
436 	bcma_erom_seek(sc, initial_offset);
437 	return (error);
438 }
439 
/* BCMA implementation of BHND_EROM_FREE_CORE_TABLE(); releases a core
 * table previously allocated by bcma_erom_get_core_table(). */
static void
bcma_erom_free_core_table(bhnd_erom_t *erom, struct bhnd_core_info *cores)
{
	free(cores, M_BHND);
}
445 
/**
 * Return the current read position.
 *
 * @param erom EROM read state.
 * @return The current byte offset into the mapped EROM table.
 */
static bus_size_t
bcma_erom_tell(struct bcma_erom *erom)
{
	return (erom->offset);
}
454 
/**
 * Seek to an absolute read position.
 *
 * @param erom EROM read state.
 * @param offset New byte offset into the mapped EROM table; no bounds
 * checking is performed here (bcma_erom_peek32() validates on read).
 */
static void
bcma_erom_seek(struct bcma_erom *erom, bus_size_t offset)
{
	erom->offset = offset;
}
463 
/**
 * Read a 32-bit entry value from the EROM table without advancing the
 * read position.
 *
 * @param erom EROM read state.
 * @param entry Will contain the read result on success.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached.
 * @retval non-zero The read could not be completed.
 */
static int
bcma_erom_peek32(struct bcma_erom *erom, uint32_t *entry)
{
	/* Refuse to read past the mapped table; a well-formed table must
	 * terminate with an EOF marker before this point.
	 * NOTE(review): '>=' also rejects a read of the final 4-byte slot
	 * itself -- confirm this boundary is intentional. */
	if (erom->offset >= (BCMA_EROM_TABLE_SIZE - sizeof(uint32_t))) {
		EROM_LOG(erom, "BCMA EROM table missing terminating EOF\n");
		return (EINVAL);
	}

	*entry = bhnd_erom_io_read(erom->eio, erom->offset, 4);
	return (0);
}
485 
486 /**
487  * Read a 32-bit entry value from the EROM table.
488  *
489  * @param erom EROM read state.
490  * @param entry Will contain the read result on success.
491  * @retval 0 success
492  * @retval ENOENT The end of the EROM table was reached.
493  * @retval non-zero The read could not be completed.
494  */
495 static int
bcma_erom_read32(struct bcma_erom * erom,uint32_t * entry)496 bcma_erom_read32(struct bcma_erom *erom, uint32_t *entry)
497 {
498 	int error;
499 
500 	if ((error = bcma_erom_peek32(erom, entry)) == 0)
501 		erom->offset += 4;
502 
503 	return (error);
504 }
505 
506 /**
507  * Read and discard 32-bit entry value from the EROM table.
508  *
509  * @param erom EROM read state.
510  * @retval 0 success
511  * @retval ENOENT The end of the EROM table was reached.
512  * @retval non-zero The read could not be completed.
513  */
514 static int
bcma_erom_skip32(struct bcma_erom * erom)515 bcma_erom_skip32(struct bcma_erom *erom)
516 {
517 	uint32_t	entry;
518 
519 	return bcma_erom_read32(erom, &entry);
520 }
521 
522 /**
523  * Read and discard a core descriptor from the EROM table.
524  *
525  * @param erom EROM read state.
526  * @retval 0 success
527  * @retval ENOENT The end of the EROM table was reached.
528  * @retval non-zero The read could not be completed.
529  */
530 static int
bcma_erom_skip_core(struct bcma_erom * erom)531 bcma_erom_skip_core(struct bcma_erom *erom)
532 {
533 	struct bcma_erom_core core;
534 	return (bcma_erom_parse_core(erom, &core));
535 }
536 
537 /**
538  * Read and discard a master port descriptor from the EROM table.
539  *
540  * @param erom EROM read state.
541  * @retval 0 success
542  * @retval ENOENT The end of the EROM table was reached.
543  * @retval non-zero The read could not be completed.
544  */
545 static int
bcma_erom_skip_mport(struct bcma_erom * erom)546 bcma_erom_skip_mport(struct bcma_erom *erom)
547 {
548 	struct bcma_erom_mport mp;
549 	return (bcma_erom_parse_mport(erom, &mp));
550 }
551 
552 /**
553  * Read and discard a port region descriptor from the EROM table.
554  *
555  * @param erom EROM read state.
556  * @retval 0 success
557  * @retval ENOENT The end of the EROM table was reached.
558  * @retval non-zero The read could not be completed.
559  */
560 static int
bcma_erom_skip_sport_region(struct bcma_erom * erom)561 bcma_erom_skip_sport_region(struct bcma_erom *erom)
562 {
563 	struct bcma_erom_sport_region r;
564 	return (bcma_erom_parse_sport_region(erom, &r));
565 }
566 
567 /**
568  * Seek to the next entry matching the given EROM entry type.
569  *
570  * @param erom EROM read state.
571  * @param etype  One of BCMA_EROM_ENTRY_TYPE_CORE,
572  * BCMA_EROM_ENTRY_TYPE_MPORT, or BCMA_EROM_ENTRY_TYPE_REGION.
573  * @retval 0 success
574  * @retval ENOENT The end of the EROM table was reached.
575  * @retval non-zero Reading or parsing the descriptor failed.
576  */
577 static int
bcma_erom_seek_next(struct bcma_erom * erom,uint8_t etype)578 bcma_erom_seek_next(struct bcma_erom *erom, uint8_t etype)
579 {
580 	uint32_t			entry;
581 	int				error;
582 
583 	/* Iterate until we hit an entry matching the requested type. */
584 	while (!(error = bcma_erom_peek32(erom, &entry))) {
585 		/* Handle EOF */
586 		if (entry == BCMA_EROM_TABLE_EOF)
587 			return (ENOENT);
588 
589 		/* Invalid entry */
590 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID))
591 			return (EINVAL);
592 
593 		/* Entry type matches? */
594 		if (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE) == etype)
595 			return (0);
596 
597 		/* Skip non-matching entry types. */
598 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
599 		case BCMA_EROM_ENTRY_TYPE_CORE:
600 			if ((error = bcma_erom_skip_core(erom)))
601 				return (error);
602 
603 			break;
604 
605 		case BCMA_EROM_ENTRY_TYPE_MPORT:
606 			if ((error = bcma_erom_skip_mport(erom)))
607 				return (error);
608 
609 			break;
610 
611 		case BCMA_EROM_ENTRY_TYPE_REGION:
612 			if ((error = bcma_erom_skip_sport_region(erom)))
613 				return (error);
614 			break;
615 
616 		default:
617 			/* Unknown entry type! */
618 			return (EINVAL);
619 		}
620 	}
621 
622 	return (error);
623 }
624 
/**
 * Return the read position to the start of the EROM table.
 *
 * @param erom EROM read state.
 */
static void
bcma_erom_reset(struct bcma_erom *erom)
{
	erom->offset = 0;
}
635 
/**
 * Seek to the first core entry matching @p desc.
 *
 * On success, the read position is left at the matched core's descriptor,
 * ready for bcma_erom_parse_core().
 *
 * @param sc EROM read state.
 * @param desc The core match descriptor.
 * @param[out] core On success, the matching core info. If the core info
 * is not desired, a NULL pointer may be provided.
 * @retval 0 success
 * @retval ENOENT The end of the EROM table was reached before a matching
 * core was found.
 * @retval non-zero Reading or parsing failed.
 */
static int
bcma_erom_seek_matching_core(struct bcma_erom *sc,
    const struct bhnd_core_match *desc, struct bhnd_core_info *core)
{
	struct bhnd_core_match	 imatch;
	bus_size_t		 core_offset, next_offset;
	int			 error;

	/* Seek to table start. */
	bcma_erom_reset(sc);

	/* We can't determine a core's unit number during the initial scan,
	 * so first match with the unit constrained to 0. */
	imatch = *desc;
	imatch.m.match.core_unit = 0;

	/* Locate the first matching core */
	for (u_int i = 0; i < UINT_MAX; i++) {
		struct bcma_erom_core	ec;
		struct bhnd_core_info	ci;

		/* Seek to the next core */
		error = bcma_erom_seek_next(sc, BCMA_EROM_ENTRY_TYPE_CORE);
		if (error)
			return (error);

		/* Save the core offset */
		core_offset = bcma_erom_tell(sc);

		/* Parse the core */
		if ((error = bcma_erom_parse_core(sc, &ec)))
			return (error);

		/* Provisionally assume unit 0 */
		bcma_erom_to_core_info(&ec, i, 0, &ci);

		/* Check for initial match */
		if (!bhnd_core_matches(&ci, &imatch))
			continue;

		/* Re-scan preceding cores to determine the unit number. */
		next_offset = bcma_erom_tell(sc);
		bcma_erom_reset(sc);
		for (u_int j = 0; j < i; j++) {
			/* Parse the core */
			error = bcma_erom_seek_next(sc,
			    BCMA_EROM_ENTRY_TYPE_CORE);
			if (error)
				return (error);

			if ((error = bcma_erom_parse_core(sc, &ec)))
				return (error);

			/* Bump the unit number? */
			if (ec.vendor == ci.vendor && ec.device == ci.device)
				ci.unit++;
		}

		/* Check for full match against now-valid unit number */
		if (!bhnd_core_matches(&ci, desc)) {
			/* Reposition to allow reading the next core */
			bcma_erom_seek(sc, next_offset);
			continue;
		}

		/* Found; seek to the core's initial offset and provide
		 * the core info to the caller */
		bcma_erom_seek(sc, core_offset);
		if (core != NULL)
			*core = ci;

		return (0);
	}

	/* Not found, or a parse error occurred */
	return (error);
}
723 
724 /**
725  * Read the next core descriptor from the EROM table.
726  *
727  * @param erom EROM read state.
728  * @param[out] core On success, will be populated with the parsed core
729  * descriptor data.
730  * @retval 0 success
731  * @retval ENOENT The end of the EROM table was reached.
732  * @retval non-zero Reading or parsing the core descriptor failed.
733  */
734 static int
bcma_erom_parse_core(struct bcma_erom * erom,struct bcma_erom_core * core)735 bcma_erom_parse_core(struct bcma_erom *erom, struct bcma_erom_core *core)
736 {
737 	uint32_t	entry;
738 	int		error;
739 
740 	/* Parse CoreDescA */
741 	if ((error = bcma_erom_read32(erom, &entry)))
742 		return (error);
743 
744 	/* Handle EOF */
745 	if (entry == BCMA_EROM_TABLE_EOF)
746 		return (ENOENT);
747 
748 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
749 		EROM_LOG(erom, "Unexpected EROM entry 0x%x (type=%s)\n",
750                    entry, bcma_erom_entry_type_name(entry));
751 
752 		return (EINVAL);
753 	}
754 
755 	core->vendor = BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER);
756 	core->device = BCMA_EROM_GET_ATTR(entry, COREA_ID);
757 
758 	/* Parse CoreDescB */
759 	if ((error = bcma_erom_read32(erom, &entry)))
760 		return (error);
761 
762 	if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
763 		return (EINVAL);
764 	}
765 
766 	core->rev = BCMA_EROM_GET_ATTR(entry, COREB_REV);
767 	core->num_mport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP);
768 	core->num_dport = BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP);
769 	core->num_mwrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP);
770 	core->num_swrap = BCMA_EROM_GET_ATTR(entry, COREB_NUM_WSP);
771 
772 	return (0);
773 }
774 
775 /**
776  * Read the next master port descriptor from the EROM table.
777  *
778  * @param erom EROM read state.
779  * @param[out] mport On success, will be populated with the parsed
780  * descriptor data.
781  * @retval 0 success
782  * @retval non-zero Reading or parsing the descriptor failed.
783  */
784 static int
bcma_erom_parse_mport(struct bcma_erom * erom,struct bcma_erom_mport * mport)785 bcma_erom_parse_mport(struct bcma_erom *erom, struct bcma_erom_mport *mport)
786 {
787 	uint32_t	entry;
788 	int		error;
789 
790 	/* Parse the master port descriptor */
791 	if ((error = bcma_erom_read32(erom, &entry)))
792 		return (error);
793 
794 	if (!BCMA_EROM_ENTRY_IS(entry, MPORT))
795 		return (EINVAL);
796 
797 	mport->port_vid = BCMA_EROM_GET_ATTR(entry, MPORT_ID);
798 	mport->port_num = BCMA_EROM_GET_ATTR(entry, MPORT_NUM);
799 
800 	return (0);
801 }
802 
/**
 * Read the next slave port region descriptor from the EROM table.
 *
 * @param erom EROM read state.
 * @param[out] region On success, will be populated with the parsed
 * descriptor data.
 * @retval 0 success
 * @retval ENOENT The end of the region descriptor table was reached.
 * @retval non-zero Reading or parsing the descriptor failed.
 */
static int
bcma_erom_parse_sport_region(struct bcma_erom *erom,
    struct bcma_erom_sport_region *region)
{
	uint32_t	entry;
	uint8_t		size_type;
	int		error;

	/* Peek at the region descriptor */
	if (bcma_erom_peek32(erom, &entry))
		return (EINVAL);

	/* A non-region entry signals the end of the region table */
	if (!BCMA_EROM_ENTRY_IS(entry, REGION)) {
		return (ENOENT);
	} else {
		/* Consume the entry we just peeked at */
		bcma_erom_skip32(erom);
	}

	region->base_addr = BCMA_EROM_GET_ATTR(entry, REGION_BASE);
	region->region_type = BCMA_EROM_GET_ATTR(entry, REGION_TYPE);
	region->region_port = BCMA_EROM_GET_ATTR(entry, REGION_PORT);
	size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);

	/* If region address is 64-bit, fetch the high bits. */
	if (BCMA_EROM_GET_ATTR(entry, REGION_64BIT)) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->base_addr |= ((bhnd_addr_t) entry << 32);
	}

	/* Parse the region size; it's either encoded as the binary logarithm
	 * of the number of 4K pages (i.e. log2 n), or its encoded as a
	 * 32-bit/64-bit literal value directly following the current entry. */
	if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
		if ((error = bcma_erom_read32(erom, &entry)))
			return (error);

		region->size = BCMA_EROM_GET_ATTR(entry, RSIZE_VAL);

		if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT)) {
			if ((error = bcma_erom_read32(erom, &entry)))
				return (error);
			region->size |= ((bhnd_size_t) entry << 32);
		}
	} else {
		region->size = BCMA_EROM_REGION_SIZE_BASE << size_type;
	}

	/* Verify that addr+size does not overflow. */
	if (region->size != 0 &&
	    BHND_ADDR_MAX - (region->size - 1) < region->base_addr)
	{
		EROM_LOG(erom, "%s%u: invalid address map %llx:%llx\n",
		    bcma_erom_entry_type_name(region->region_type),
		    region->region_port,
		    (unsigned long long) region->base_addr,
		    (unsigned long long) region->size);

		return (EINVAL);
	}

	return (0);
}
878 
/**
 * Convert a bcma_erom_core record to its bhnd_core_info representation.
 *
 * Only the fields listed below are written; any other members of @p info
 * are left untouched.
 *
 * @param core EROM core record to convert.
 * @param core_idx The core index of @p core.
 * @param core_unit The core unit of @p core.
 * @param[out] info The populated bhnd_core_info representation.
 */
static void
bcma_erom_to_core_info(const struct bcma_erom_core *core, u_int core_idx,
    int core_unit, struct bhnd_core_info *info)
{
	info->vendor = core->vendor;
	info->device = core->device;
	info->hwrev = core->rev;
	info->core_idx = core_idx;
	info->unit = core_unit;
}
897 
898 /**
899  * Map an EROM region type to its corresponding port type.
900  *
901  * @param region_type Region type value.
902  * @param[out] port_type On success, the corresponding port type.
903  */
904 static int
bcma_erom_region_to_port_type(struct bcma_erom * erom,uint8_t region_type,bhnd_port_type * port_type)905 bcma_erom_region_to_port_type(struct bcma_erom *erom, uint8_t region_type,
906     bhnd_port_type *port_type)
907 {
908 	switch (region_type) {
909 	case BCMA_EROM_REGION_TYPE_DEVICE:
910 		*port_type = BHND_PORT_DEVICE;
911 		return (0);
912 	case BCMA_EROM_REGION_TYPE_BRIDGE:
913 		*port_type = BHND_PORT_BRIDGE;
914 		return (0);
915 	case BCMA_EROM_REGION_TYPE_MWRAP:
916 	case BCMA_EROM_REGION_TYPE_SWRAP:
917 		*port_type = BHND_PORT_AGENT;
918 		return (0);
919 	default:
920 		EROM_LOG(erom, "unsupported region type %hhx\n",
921 			region_type);
922 		return (EINVAL);
923 	}
924 }
925 
/**
 * Register all MMIO region descriptors for the given slave port.
 *
 * On success, a new port descriptor (owning all parsed region maps) is
 * appended to @p corecfg's port list for the mapped port type; on failure,
 * the partially constructed descriptor is freed and nothing is appended.
 *
 * @param erom EROM read state.
 * @param corecfg Core info to be populated with the scanned port regions.
 * @param port_num Port index for which regions will be parsed.
 * @param region_type The region type to be parsed.
 * @retval 0 success
 * @retval non-zero Allocation or parsing failed.
 */
static int
bcma_erom_corecfg_fill_port_regions(struct bcma_erom *erom,
    struct bcma_corecfg *corecfg, bcma_pid_t port_num,
    uint8_t region_type)
{
	struct bcma_sport	*sport;
	struct bcma_sport_list	*sports;
	bus_size_t		 entry_offset;
	int			 error;
	bhnd_port_type		 port_type;

	error = 0;

	/* Determine the port type for this region type. */
	error = bcma_erom_region_to_port_type(erom, region_type, &port_type);
	if (error)
		return (error);

	/* Fetch the list to be populated */
	sports = bcma_corecfg_get_port_list(corecfg, port_type);

	/* Allocate a new port descriptor */
	sport = bcma_alloc_sport(port_num, port_type);
	if (sport == NULL)
		return (ENOMEM);

	/* Read all address regions defined for this port */
	for (bcma_rmid_t region_num = 0;; region_num++) {
		struct bcma_map			*map;
		struct bcma_erom_sport_region	 spr;

		/* No valid port definition should come anywhere near
		 * BCMA_RMID_MAX. */
		if (region_num == BCMA_RMID_MAX) {
			EROM_LOG(erom, "core%u %s%u: region count reached "
			    "upper limit of %u\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, BCMA_RMID_MAX);

			error = EINVAL;
			goto cleanup;
		}

		/* Parse the next region entry; remember its offset so a
		 * non-matching entry can be "un-read" below. */
		entry_offset = bcma_erom_tell(erom);
		error = bcma_erom_parse_sport_region(erom, &spr);
		if (error && error != ENOENT) {
			EROM_LOG(erom, "core%u %s%u.%u: invalid slave port "
			    "address region\n",
			    corecfg->core_info.core_idx,
			    bhnd_port_type_name(port_type),
			    port_num, region_num);
			goto cleanup;
		}

		/* ENOENT signals no further region entries */
		if (error == ENOENT) {
			/* No further entries */
			error = 0;
			break;
		}

		/* A region or type mismatch also signals no further region
		 * entries */
		if (spr.region_port != port_num ||
		    spr.region_type != region_type)
		{
			/* We don't want to consume this entry */
			bcma_erom_seek(erom, entry_offset);

			error = 0;
			goto cleanup;
		}

		/*
		 * Create the map entry.
		 */
		map = malloc(sizeof(struct bcma_map), M_BHND, M_NOWAIT);
		if (map == NULL) {
			error = ENOMEM;
			goto cleanup;
		}

		/* m_rid of -1 denotes a resource id not yet assigned */
		map->m_region_num = region_num;
		map->m_base = spr.base_addr;
		map->m_size = spr.size;
		map->m_rid = -1;

		/* Add the region map to the port; the port now owns it */
		STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
		sport->sp_num_maps++;
	}

cleanup:
	/* Append the new port descriptor on success, or deallocate the
	 * partially parsed descriptor on failure. */
	if (error == 0) {
		STAILQ_INSERT_TAIL(sports, sport, sp_link);
	} else if (sport != NULL) {
		bcma_free_sport(sport);
	}

	return error;
}
1041 
1042 /**
1043  * Parse the next core entry from the EROM table and produce a bcma_corecfg
1044  * to be owned by the caller.
1045  *
1046  * @param erom A bcma EROM instance.
1047  * @param[out] result On success, the core's device info. The caller inherits
1048  * ownership of this allocation.
1049  *
1050  * @return If successful, returns 0. If the end of the EROM table is hit,
1051  * ENOENT will be returned. On error, returns a non-zero error value.
1052  */
1053 int
bcma_erom_next_corecfg(struct bcma_erom * erom,struct bcma_corecfg ** result)1054 bcma_erom_next_corecfg(struct bcma_erom *erom, struct bcma_corecfg **result)
1055 {
1056 	struct bcma_corecfg	*cfg;
1057 	struct bcma_erom_core	 core;
1058 	uint8_t			 first_region_type;
1059 	bus_size_t		 initial_offset;
1060 	u_int			 core_index;
1061 	int			 core_unit;
1062 	int			 error;
1063 
1064 	cfg = NULL;
1065 	initial_offset = bcma_erom_tell(erom);
1066 
1067 	/* Parse the next core entry */
1068 	if ((error = bcma_erom_parse_core(erom, &core)))
1069 		return (error);
1070 
1071 	/* Determine the core's index and unit numbers */
1072 	bcma_erom_reset(erom);
1073 	core_unit = 0;
1074 	core_index = 0;
1075 	for (; bcma_erom_tell(erom) != initial_offset; core_index++) {
1076 		struct bcma_erom_core prev_core;
1077 
1078 		/* Parse next core */
1079 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1080 		if (error)
1081 			return (error);
1082 
1083 		if ((error = bcma_erom_parse_core(erom, &prev_core)))
1084 			return (error);
1085 
1086 		/* Is earlier unit? */
1087 		if (core.vendor == prev_core.vendor &&
1088 		    core.device == prev_core.device)
1089 		{
1090 			core_unit++;
1091 		}
1092 
1093 		/* Seek to next core */
1094 		error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE);
1095 		if (error)
1096 			return (error);
1097 	}
1098 
1099 	/* We already parsed the core descriptor */
1100 	if ((error = bcma_erom_skip_core(erom)))
1101 		return (error);
1102 
1103 	/* Allocate our corecfg */
1104 	cfg = bcma_alloc_corecfg(core_index, core_unit, core.vendor,
1105 	    core.device, core.rev);
1106 	if (cfg == NULL)
1107 		return (ENOMEM);
1108 
1109 	/* These are 5-bit values in the EROM table, and should never be able
1110 	 * to overflow BCMA_PID_MAX. */
1111 	KASSERT(core.num_mport <= BCMA_PID_MAX, ("unsupported mport count"));
1112 	KASSERT(core.num_dport <= BCMA_PID_MAX, ("unsupported dport count"));
1113 	KASSERT(core.num_mwrap + core.num_swrap <= BCMA_PID_MAX,
1114 	    ("unsupported wport count"));
1115 
1116 	if (bootverbose) {
1117 		EROM_LOG(erom,
1118 		    "core%u: %s %s (cid=%hx, rev=%hu, unit=%d)\n",
1119 		    core_index,
1120 		    bhnd_vendor_name(core.vendor),
1121 		    bhnd_find_core_name(core.vendor, core.device),
1122 		    core.device, core.rev, core_unit);
1123 	}
1124 
1125 	cfg->num_master_ports = core.num_mport;
1126 	cfg->num_dev_ports = 0;		/* determined below */
1127 	cfg->num_bridge_ports = 0;	/* determined blow */
1128 	cfg->num_wrapper_ports = core.num_mwrap + core.num_swrap;
1129 
1130 	/* Parse Master Port Descriptors */
1131 	for (uint8_t i = 0; i < core.num_mport; i++) {
1132 		struct bcma_mport	*mport;
1133 		struct bcma_erom_mport	 mpd;
1134 
1135 		/* Parse the master port descriptor */
1136 		error = bcma_erom_parse_mport(erom, &mpd);
1137 		if (error)
1138 			goto failed;
1139 
1140 		/* Initialize a new bus mport structure */
1141 		mport = malloc(sizeof(struct bcma_mport), M_BHND, M_NOWAIT);
1142 		if (mport == NULL) {
1143 			error = ENOMEM;
1144 			goto failed;
1145 		}
1146 
1147 		mport->mp_vid = mpd.port_vid;
1148 		mport->mp_num = mpd.port_num;
1149 
1150 		/* Update dinfo */
1151 		STAILQ_INSERT_TAIL(&cfg->master_ports, mport, mp_link);
1152 	}
1153 
1154 	/*
1155 	 * Determine whether this is a bridge device; if so, we can
1156 	 * expect the first sequence of address region descriptors to
1157 	 * be of EROM_REGION_TYPE_BRIDGE instead of
1158 	 * BCMA_EROM_REGION_TYPE_DEVICE.
1159 	 *
1160 	 * It's unclear whether this is the correct mechanism by which we
1161 	 * should detect/handle bridge devices, but this approach matches
1162 	 * that of (some of) Broadcom's published drivers.
1163 	 */
1164 	if (core.num_dport > 0) {
1165 		uint32_t entry;
1166 
1167 		if ((error = bcma_erom_peek32(erom, &entry)))
1168 			goto failed;
1169 
1170 		if (BCMA_EROM_ENTRY_IS(entry, REGION) &&
1171 		    BCMA_EROM_GET_ATTR(entry, REGION_TYPE) == BCMA_EROM_REGION_TYPE_BRIDGE)
1172 		{
1173 			first_region_type = BCMA_EROM_REGION_TYPE_BRIDGE;
1174 			cfg->num_dev_ports = 0;
1175 			cfg->num_bridge_ports = core.num_dport;
1176 		} else {
1177 			first_region_type = BCMA_EROM_REGION_TYPE_DEVICE;
1178 			cfg->num_dev_ports = core.num_dport;
1179 			cfg->num_bridge_ports = 0;
1180 		}
1181 	}
1182 
1183 	/* Device/bridge port descriptors */
1184 	for (uint8_t sp_num = 0; sp_num < core.num_dport; sp_num++) {
1185 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1186 		    first_region_type);
1187 
1188 		if (error)
1189 			goto failed;
1190 	}
1191 
1192 	/* Wrapper (aka device management) descriptors (for master ports). */
1193 	for (uint8_t sp_num = 0; sp_num < core.num_mwrap; sp_num++) {
1194 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1195 		    BCMA_EROM_REGION_TYPE_MWRAP);
1196 
1197 		if (error)
1198 			goto failed;
1199 	}
1200 
1201 	/* Wrapper (aka device management) descriptors (for slave ports). */
1202 	for (uint8_t i = 0; i < core.num_swrap; i++) {
1203 		/* Slave wrapper ports are not numbered distinctly from master
1204 		 * wrapper ports. */
1205 
1206 		/*
1207 		 * Broadcom DDR1/DDR2 Memory Controller
1208 		 * (cid=82e, rev=1, unit=0, d/mw/sw = 2/0/1 ) ->
1209 		 * bhnd0: erom[0xdc]: core6 agent0.0: mismatch got: 0x1 (0x2)
1210 		 *
1211 		 * ARM BP135 AMBA3 AXI to APB Bridge
1212 		 * (cid=135, rev=0, unit=0, d/mw/sw = 1/0/1 ) ->
1213 		 * bhnd0: erom[0x124]: core9 agent1.0: mismatch got: 0x0 (0x2)
1214 		 *
1215 		 * core.num_mwrap
1216 		 * ===>
1217 		 * (core.num_mwrap > 0) ?
1218 		 *           core.num_mwrap :
1219 		 *           ((core.vendor == BHND_MFGID_BCM) ? 1 : 0)
1220 		 */
1221 		uint8_t sp_num;
1222 		sp_num = (core.num_mwrap > 0) ?
1223 				core.num_mwrap :
1224 				((core.vendor == BHND_MFGID_BCM) ? 1 : 0) + i;
1225 		error = bcma_erom_corecfg_fill_port_regions(erom, cfg, sp_num,
1226 		    BCMA_EROM_REGION_TYPE_SWRAP);
1227 
1228 		if (error)
1229 			goto failed;
1230 	}
1231 
1232 	/*
1233 	 * Seek to the next core entry (if any), skipping any dangling/invalid
1234 	 * region entries.
1235 	 *
1236 	 * On the BCM4706, the EROM entry for the memory controller core
1237 	 * (0x4bf/0x52E) contains a dangling/unused slave wrapper port region
1238 	 * descriptor.
1239 	 */
1240 	if ((error = bcma_erom_seek_next(erom, BCMA_EROM_ENTRY_TYPE_CORE))) {
1241 		if (error != ENOENT)
1242 			goto failed;
1243 	}
1244 
1245 	*result = cfg;
1246 	return (0);
1247 
1248 failed:
1249 	if (cfg != NULL)
1250 		bcma_free_corecfg(cfg);
1251 
1252 	return error;
1253 }
1254 
1255 static int
bcma_erom_dump(bhnd_erom_t * erom)1256 bcma_erom_dump(bhnd_erom_t *erom)
1257 {
1258 	struct bcma_erom	*sc;
1259 	uint32_t		entry;
1260 	int			error;
1261 
1262 	sc = (struct bcma_erom *)erom;
1263 
1264 	bcma_erom_reset(sc);
1265 
1266 	while (!(error = bcma_erom_read32(sc, &entry))) {
1267 		/* Handle EOF */
1268 		if (entry == BCMA_EROM_TABLE_EOF) {
1269 			EROM_LOG(sc, "EOF\n");
1270 			return (0);
1271 		}
1272 
1273 		/* Invalid entry */
1274 		if (!BCMA_EROM_GET_ATTR(entry, ENTRY_ISVALID)) {
1275 			EROM_LOG(sc, "invalid EROM entry %#x\n", entry);
1276 			return (EINVAL);
1277 		}
1278 
1279 		switch (BCMA_EROM_GET_ATTR(entry, ENTRY_TYPE)) {
1280 		case BCMA_EROM_ENTRY_TYPE_CORE: {
1281 			/* CoreDescA */
1282 			EROM_LOG(sc, "coreA (0x%x)\n", entry);
1283 			EROM_LOG(sc, "\tdesigner:\t0x%x\n",
1284 			    BCMA_EROM_GET_ATTR(entry, COREA_DESIGNER));
1285 			EROM_LOG(sc, "\tid:\t\t0x%x\n",
1286 			    BCMA_EROM_GET_ATTR(entry, COREA_ID));
1287 			EROM_LOG(sc, "\tclass:\t\t0x%x\n",
1288 			    BCMA_EROM_GET_ATTR(entry, COREA_CLASS));
1289 
1290 			/* CoreDescB */
1291 			if ((error = bcma_erom_read32(sc, &entry))) {
1292 				EROM_LOG(sc, "error reading CoreDescB: %d\n",
1293 				    error);
1294 				return (error);
1295 			}
1296 
1297 			if (!BCMA_EROM_ENTRY_IS(entry, CORE)) {
1298 				EROM_LOG(sc, "invalid core descriptor; found "
1299 				    "unexpected entry %#x (type=%s)\n",
1300 				    entry, bcma_erom_entry_type_name(entry));
1301 				return (EINVAL);
1302 			}
1303 
1304 			EROM_LOG(sc, "coreB (0x%x)\n", entry);
1305 			EROM_LOG(sc, "\trev:\t0x%x\n",
1306 			    BCMA_EROM_GET_ATTR(entry, COREB_REV));
1307 			EROM_LOG(sc, "\tnummp:\t0x%x\n",
1308 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_MP));
1309 			EROM_LOG(sc, "\tnumdp:\t0x%x\n",
1310 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_DP));
1311 			EROM_LOG(sc, "\tnumwmp:\t0x%x\n",
1312 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1313 			EROM_LOG(sc, "\tnumwsp:\t0x%x\n",
1314 			    BCMA_EROM_GET_ATTR(entry, COREB_NUM_WMP));
1315 
1316 			break;
1317 		}
1318 		case BCMA_EROM_ENTRY_TYPE_MPORT:
1319 			EROM_LOG(sc, "\tmport 0x%x\n", entry);
1320 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1321 			    BCMA_EROM_GET_ATTR(entry, MPORT_NUM));
1322 			EROM_LOG(sc, "\t\tid:\t\t0x%x\n",
1323 			    BCMA_EROM_GET_ATTR(entry, MPORT_ID));
1324 			break;
1325 
1326 		case BCMA_EROM_ENTRY_TYPE_REGION: {
1327 			bool	addr64;
1328 			uint8_t	size_type;
1329 
1330 			addr64 = (BCMA_EROM_GET_ATTR(entry, REGION_64BIT) != 0);
1331 			size_type = BCMA_EROM_GET_ATTR(entry, REGION_SIZE);
1332 
1333 			EROM_LOG(sc, "\tregion 0x%x:\n", entry);
1334 			EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1335 			    addr64 ? "baselo" : "base",
1336 			    BCMA_EROM_GET_ATTR(entry, REGION_BASE));
1337 			EROM_LOG(sc, "\t\tport:\t0x%x\n",
1338 			    BCMA_EROM_GET_ATTR(entry, REGION_PORT));
1339 			EROM_LOG(sc, "\t\ttype:\t0x%x\n",
1340 			    BCMA_EROM_GET_ATTR(entry, REGION_TYPE));
1341 			EROM_LOG(sc, "\t\tsztype:\t0x%hhx\n", size_type);
1342 
1343 			/* Read the base address high bits */
1344 			if (addr64) {
1345 				if ((error = bcma_erom_read32(sc, &entry))) {
1346 					EROM_LOG(sc, "error reading region "
1347 					    "base address high bits %d\n",
1348 					    error);
1349 					return (error);
1350 				}
1351 
1352 				EROM_LOG(sc, "\t\tbasehi:\t0x%x\n", entry);
1353 			}
1354 
1355 			/* Read extended size descriptor */
1356 			if (size_type == BCMA_EROM_REGION_SIZE_OTHER) {
1357 				bool size64;
1358 
1359 				if ((error = bcma_erom_read32(sc, &entry))) {
1360 					EROM_LOG(sc, "error reading region "
1361 					    "size descriptor %d\n",
1362 					    error);
1363 					return (error);
1364 				}
1365 
1366 				if (BCMA_EROM_GET_ATTR(entry, RSIZE_64BIT))
1367 					size64 = true;
1368 				else
1369 					size64 = false;
1370 
1371 				EROM_LOG(sc, "\t\t%s:\t0x%x\n",
1372 				    size64 ? "sizelo" : "size",
1373 				    BCMA_EROM_GET_ATTR(entry, RSIZE_VAL));
1374 
1375 				if (size64) {
1376 					error = bcma_erom_read32(sc, &entry);
1377 					if (error) {
1378 						EROM_LOG(sc, "error reading "
1379 						    "region size high bits: "
1380 						    "%d\n", error);
1381 						return (error);
1382 					}
1383 
1384 					EROM_LOG(sc, "\t\tsizehi:\t0x%x\n",
1385 					    entry);
1386 				}
1387 			}
1388 			break;
1389 		}
1390 
1391 		default:
1392 			EROM_LOG(sc, "unknown EROM entry 0x%x (type=%s)\n",
1393 			    entry, bcma_erom_entry_type_name(entry));
1394 			return (EINVAL);
1395 		}
1396 	}
1397 
1398 	if (error == ENOENT)
1399 		EROM_LOG(sc, "BCMA EROM table missing terminating EOF\n");
1400 	else if (error)
1401 		EROM_LOG(sc, "EROM read failed: %d\n", error);
1402 
1403 	return (error);
1404 }
1405 
/* bhnd_erom interface dispatch table for the BCMA enumeration ROM parser. */
static kobj_method_t bcma_erom_methods[] = {
	KOBJMETHOD(bhnd_erom_probe,		bcma_erom_probe),
	KOBJMETHOD(bhnd_erom_init,		bcma_erom_init),
	KOBJMETHOD(bhnd_erom_fini,		bcma_erom_fini),
	KOBJMETHOD(bhnd_erom_get_core_table,	bcma_erom_get_core_table),
	KOBJMETHOD(bhnd_erom_free_core_table,	bcma_erom_free_core_table),
	KOBJMETHOD(bhnd_erom_lookup_core,	bcma_erom_lookup_core),
	KOBJMETHOD(bhnd_erom_lookup_core_addr,	bcma_erom_lookup_core_addr),
	KOBJMETHOD(bhnd_erom_dump,		bcma_erom_dump),

	KOBJMETHOD_END
};

/* Register the bcma_erom parser class with the bhnd EROM framework. */
BHND_EROM_DEFINE_CLASS(bcma_erom, bcma_erom_parser, bcma_erom_methods, sizeof(struct bcma_erom));
1420