xref: /freebsd/sys/dev/bhnd/bcma/bcma_subr.c (revision 0957b409a90fd597c1e9124cbaf3edd2b488f4ac)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/systm.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/bhnd/bhndvar.h>

#include "bcma_dmp.h"

#include "bcmavar.h"

/* Return the resource ID for a device's agent register allocation */
#define	BCMA_AGENT_RID(_dinfo)	\
    (BCMA_AGENT_RID_BASE + BCMA_DINFO_COREIDX(_dinfo))

/**
 * Allocate and initialize a new core config structure.
 *
 * @param core_index Core index on the bus.
 * @param core_unit Core unit number.
 * @param vendor Core designer.
 * @param device Core identifier (e.g. part number).
 * @param hwrev Core revision.
 */
struct bcma_corecfg *
bcma_alloc_corecfg(u_int core_index, int core_unit, uint16_t vendor,
    uint16_t device, uint8_t hwrev)
{
	struct bcma_corecfg *cfg;

	cfg = malloc(sizeof(*cfg), M_BHND, M_NOWAIT);
	if (cfg == NULL)
		return (NULL);

	cfg->core_info = (struct bhnd_core_info) {
		.vendor = vendor,
		.device = device,
		.hwrev = hwrev,
		.core_idx = core_index,
		.unit = core_unit
	};

	STAILQ_INIT(&cfg->master_ports);
	cfg->num_master_ports = 0;

	STAILQ_INIT(&cfg->dev_ports);
	cfg->num_dev_ports = 0;

	STAILQ_INIT(&cfg->bridge_ports);
	cfg->num_bridge_ports = 0;

	STAILQ_INIT(&cfg->wrapper_ports);
	cfg->num_wrapper_ports = 0;

	return (cfg);
}
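
/*
 * Usage sketch (illustrative only, not code from this file): allocating and
 * releasing a core config. The vendor, device, and revision values are
 * hypothetical placeholders; BHND_MFGID_BCM and BHND_COREID_CC are assumed
 * to come from the bhnd(4) ID headers.
 *
 *	struct bcma_corecfg *cfg;
 *
 *	cfg = bcma_alloc_corecfg(0, 0, BHND_MFGID_BCM, BHND_COREID_CC, 0x2a);
 *	if (cfg == NULL)
 *		return (ENOMEM);
 *
 *	(... parse and attach port/region maps ...)
 *
 *	bcma_free_corecfg(cfg);
 */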

/**
 * Deallocate the given core config and any associated resources.
 *
 * @param corecfg Core info to be deallocated.
 */
void
bcma_free_corecfg(struct bcma_corecfg *corecfg)
{
	struct bcma_mport *mport, *mnext;
	struct bcma_sport *sport, *snext;

	STAILQ_FOREACH_SAFE(mport, &corecfg->master_ports, mp_link, mnext) {
		free(mport, M_BHND);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->dev_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->bridge_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	STAILQ_FOREACH_SAFE(sport, &corecfg->wrapper_ports, sp_link, snext) {
		bcma_free_sport(sport);
	}

	free(corecfg, M_BHND);
}

/**
 * Return the @p cfg port list for @p type.
 *
 * @param cfg The core configuration.
 * @param type The requested port type.
 */
struct bcma_sport_list *
bcma_corecfg_get_port_list(struct bcma_corecfg *cfg, bhnd_port_type type)
{
	switch (type) {
	case BHND_PORT_DEVICE:
		return (&cfg->dev_ports);
	case BHND_PORT_BRIDGE:
		return (&cfg->bridge_ports);
	case BHND_PORT_AGENT:
		return (&cfg->wrapper_ports);
	default:
		return (NULL);
	}
}
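
/*
 * Usage sketch (illustrative only, not code from this file): walking the
 * device-port list returned by bcma_corecfg_get_port_list() and visiting
 * each port's region maps.
 *
 *	struct bcma_sport_list	*ports;
 *	struct bcma_sport	*port;
 *	struct bcma_map		*map;
 *
 *	ports = bcma_corecfg_get_port_list(cfg, BHND_PORT_DEVICE);
 *	STAILQ_FOREACH(port, ports, sp_link) {
 *		STAILQ_FOREACH(map, &port->sp_maps, m_link) {
 *			(use map->m_base and map->m_size here)
 *		}
 *	}
 */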

/**
 * Populate the resource list and bcma_map RIDs using the maps defined on
 * @p ports.
 *
 * @param bus The requesting bus device.
 * @param dinfo The device info instance to be initialized.
 * @param ports The set of ports to be enumerated.
 */
static void
bcma_dinfo_init_port_resource_info(device_t bus, struct bcma_devinfo *dinfo,
    struct bcma_sport_list *ports)
{
	struct bcma_map		*map;
	struct bcma_sport	*port;
	bhnd_addr_t		 end;

	STAILQ_FOREACH(port, ports, sp_link) {
		STAILQ_FOREACH(map, &port->sp_maps, m_link) {
			/*
			 * Create the corresponding device resource list entry.
			 *
			 * We necessarily skip registration if the region's
			 * device memory range is not representable via
			 * rman_res_t.
			 *
			 * When rman_res_t is migrated to uintmax_t, any
			 * range should be representable.
			 */
			end = map->m_base + map->m_size;
			if (map->m_base <= RM_MAX_END && end <= RM_MAX_END) {
				map->m_rid = resource_list_add_next(
				    &dinfo->resources, SYS_RES_MEMORY,
				    map->m_base, end, map->m_size);
			} else if (bootverbose) {
				device_printf(bus,
				    "core%u %s%u.%u: region %llx-%llx extends "
				    "beyond supported addressable range\n",
				    dinfo->corecfg->core_info.core_idx,
				    bhnd_port_type_name(port->sp_type),
				    port->sp_num, map->m_region_num,
				    (unsigned long long) map->m_base,
				    (unsigned long long) end);
			}
		}
	}
}
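
/*
 * Usage sketch (an assumption for illustration, not code from this file): a
 * bcma(4) child driver would typically allocate the memory region registered
 * above through the bhnd(4) resource API, where rid 0 corresponds to the
 * first device-port map (see bcma_init_dinfo() below).
 *
 *	struct bhnd_resource	*res;
 *	int			 rid;
 *
 *	rid = 0;
 *	res = bhnd_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
 *	if (res == NULL)
 *		return (ENXIO);
 */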

/**
 * Allocate the per-core agent register block for a device info structure.
 *
 * If an agent0.0 region is not defined on @p dinfo, the device info
 * agent resource is set to NULL and 0 is returned.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo The device info associated with @p child.
 *
 * @retval 0 success
 * @retval non-zero resource allocation failed.
 */
static int
bcma_dinfo_init_agent(device_t bus, device_t child, struct bcma_devinfo *dinfo)
{
	bhnd_addr_t	addr;
	bhnd_size_t	size;
	rman_res_t	r_start, r_count, r_end;
	int		error;

	KASSERT(dinfo->res_agent == NULL, ("double allocation of agent"));

	/* Verify that the agent register block exists and is
	 * mappable */
	if (bhnd_get_port_rid(child, BHND_PORT_AGENT, 0, 0) == -1)
		return (0);	/* nothing to do */

	/* Fetch the address of the agent register block */
	error = bhnd_get_region_addr(child, BHND_PORT_AGENT, 0, 0,
	    &addr, &size);
	if (error) {
		device_printf(bus, "failed fetching agent register block "
		    "address for core %u\n", BCMA_DINFO_COREIDX(dinfo));
		return (error);
	}

	/* Allocate the resource */
	r_start = addr;
	r_count = size;
	r_end = r_start + r_count - 1;

	dinfo->rid_agent = BCMA_AGENT_RID(dinfo);
	dinfo->res_agent = BHND_BUS_ALLOC_RESOURCE(bus, bus, SYS_RES_MEMORY,
	    &dinfo->rid_agent, r_start, r_end, r_count, RF_ACTIVE|RF_SHAREABLE);
	if (dinfo->res_agent == NULL) {
		device_printf(bus, "failed allocating agent register block for "
		    "core %u\n", BCMA_DINFO_COREIDX(dinfo));
		return (ENXIO);
	}

	return (0);
}

/**
 * Populate the list of interrupts for a device info structure
 * previously initialized via bcma_dinfo_init_agent().
 *
 * If an agent0.0 region is not mapped on @p dinfo, the OOB interrupt bank is
 * assumed to be unavailable and 0 is returned.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo The device info instance to be initialized.
 */
static int
bcma_dinfo_init_intrs(device_t bus, device_t child,
    struct bcma_devinfo *dinfo)
{
	uint32_t dmpcfg, oobw;

	/* Agent block must be mapped */
	if (dinfo->res_agent == NULL)
		return (0);

	/* Agent must support OOB */
	dmpcfg = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_CONFIG);
	if (!BCMA_DMP_GET_FLAG(dmpcfg, BCMA_DMP_CFG_OOB))
		return (0);

	/* Fetch width of the OOB interrupt bank */
	oobw = bhnd_bus_read_4(dinfo->res_agent,
	    BCMA_DMP_OOB_OUTWIDTH(BCMA_OOB_BANK_INTR));
	if (oobw >= BCMA_OOB_NUM_SEL) {
		device_printf(bus, "ignoring invalid OOBOUTWIDTH for core %u: "
		    "%#x\n", BCMA_DINFO_COREIDX(dinfo), oobw);
		return (0);
	}

	/* Fetch OOBSEL busline values and populate list of interrupt
	 * descriptors */
	for (uint32_t sel = 0; sel < oobw; sel++) {
		struct bcma_intr	*intr;
		uint32_t		 selout;
		uint8_t			 line;

		if (dinfo->num_intrs == UINT_MAX)
			return (ENOMEM);

		selout = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_OOBSELOUT(
		    BCMA_OOB_BANK_INTR, sel));

		line = (selout >> BCMA_DMP_OOBSEL_SHIFT(sel)) &
		    BCMA_DMP_OOBSEL_BUSLINE_MASK;

		intr = bcma_alloc_intr(BCMA_OOB_BANK_INTR, sel, line);
		if (intr == NULL) {
			device_printf(bus, "failed allocating interrupt "
			    "descriptor %#x for core %u\n", sel,
			    BCMA_DINFO_COREIDX(dinfo));
			return (ENOMEM);
		}

		STAILQ_INSERT_HEAD(&dinfo->intrs, intr, i_link);
		dinfo->num_intrs++;
	}

	return (0);
}

/**
 * Allocate and return a new empty device info structure.
 *
 * @param bus The requesting bus device.
 *
 * @retval NULL if allocation failed.
 */
struct bcma_devinfo *
bcma_alloc_dinfo(device_t bus)
{
	struct bcma_devinfo *dinfo;

	dinfo = malloc(sizeof(struct bcma_devinfo), M_BHND, M_NOWAIT|M_ZERO);
	if (dinfo == NULL)
		return (NULL);

	dinfo->corecfg = NULL;
	dinfo->res_agent = NULL;
	dinfo->rid_agent = -1;

	STAILQ_INIT(&dinfo->intrs);
	dinfo->num_intrs = 0;

	resource_list_init(&dinfo->resources);

	return (dinfo);
}

/**
 * Initialize a device info structure previously allocated via
 * bcma_alloc_dinfo, assuming ownership of the provided core
 * configuration.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo The device info associated with @p child.
 * @param corecfg Device core configuration; ownership of this value
 * will be assumed by @p dinfo.
 *
 * @retval 0 success
 * @retval non-zero initialization failed.
 */
int
bcma_init_dinfo(device_t bus, device_t child, struct bcma_devinfo *dinfo,
    struct bcma_corecfg *corecfg)
{
	struct bcma_intr	*intr;
	int			 error;

	KASSERT(dinfo->corecfg == NULL, ("dinfo previously initialized"));

	/* Save core configuration value */
	dinfo->corecfg = corecfg;

	/* The device ports must always be initialized first to ensure that
	 * rid 0 maps to the first device port */
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->dev_ports);
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->bridge_ports);
	bcma_dinfo_init_port_resource_info(bus, dinfo, &corecfg->wrapper_ports);

	/* Now that we've defined the port resources, we can map the device's
	 * agent registers (if any) */
	if ((error = bcma_dinfo_init_agent(bus, child, dinfo)))
		goto failed;

	/* With agent registers mapped, we can populate the device's interrupt
	 * descriptors */
	if ((error = bcma_dinfo_init_intrs(bus, child, dinfo)))
		goto failed;

	/* Finally, map the interrupt descriptors */
	STAILQ_FOREACH(intr, &dinfo->intrs, i_link) {
		/* Already mapped? */
		if (intr->i_mapped)
			continue;

		/* Map the interrupt */
		error = BHND_BUS_MAP_INTR(bus, child, intr->i_sel,
		    &intr->i_irq);
		if (error) {
			device_printf(bus, "failed mapping interrupt line %u "
			    "for core %u: %d\n", intr->i_sel,
			    BCMA_DINFO_COREIDX(dinfo), error);
			goto failed;
		}

		intr->i_mapped = true;

		/* Add to resource list */
		intr->i_rid = resource_list_add_next(&dinfo->resources,
		    SYS_RES_IRQ, intr->i_irq, intr->i_irq, 1);
	}

	return (0);

failed:
	/* Owned by the caller on failure */
	dinfo->corecfg = NULL;

	return (error);
}
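
/*
 * Caller sketch (illustrative assumption, not code from this file): on
 * failure, bcma_init_dinfo() relinquishes ownership of @p corecfg back to
 * the caller, so both the core config and the device info must be released
 * explicitly.
 *
 *	dinfo = bcma_alloc_dinfo(bus);
 *	if (dinfo == NULL) {
 *		bcma_free_corecfg(corecfg);
 *		return (ENOMEM);
 *	}
 *
 *	if ((error = bcma_init_dinfo(bus, child, dinfo, corecfg))) {
 *		bcma_free_corecfg(corecfg);
 *		bcma_free_dinfo(bus, child, dinfo);
 *		return (error);
 *	}
 */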

/**
 * Deallocate the given device info structure and any associated resources.
 *
 * @param bus The requesting bus device.
 * @param child The bcma child device.
 * @param dinfo Device info to be deallocated.
 */
void
bcma_free_dinfo(device_t bus, device_t child, struct bcma_devinfo *dinfo)
{
	struct bcma_intr *intr, *inext;

	resource_list_free(&dinfo->resources);

	if (dinfo->corecfg != NULL)
		bcma_free_corecfg(dinfo->corecfg);

	/* Release agent resource, if any */
	if (dinfo->res_agent != NULL) {
		bhnd_release_resource(bus, SYS_RES_MEMORY, dinfo->rid_agent,
		    dinfo->res_agent);
	}

	/* Clean up interrupt descriptors */
	STAILQ_FOREACH_SAFE(intr, &dinfo->intrs, i_link, inext) {
		STAILQ_REMOVE(&dinfo->intrs, intr, bcma_intr, i_link);

		/* Release our IRQ mapping */
		if (intr->i_mapped) {
			BHND_BUS_UNMAP_INTR(bus, child, intr->i_irq);
			intr->i_mapped = false;
		}

		bcma_free_intr(intr);
	}

	free(dinfo, M_BHND);
}

/**
 * Allocate and initialize a new interrupt descriptor.
 *
 * @param bank OOB bank.
 * @param sel OOB selector.
 * @param line OOB bus line.
 */
struct bcma_intr *
bcma_alloc_intr(uint8_t bank, uint8_t sel, uint8_t line)
{
	struct bcma_intr *intr;

	if (bank >= BCMA_OOB_NUM_BANKS)
		return (NULL);

	if (sel >= BCMA_OOB_NUM_SEL)
		return (NULL);

	if (line >= BCMA_OOB_NUM_BUSLINES)
		return (NULL);

	intr = malloc(sizeof(*intr), M_BHND, M_NOWAIT);
	if (intr == NULL)
		return (NULL);

	intr->i_bank = bank;
	intr->i_sel = sel;
	intr->i_busline = line;
	intr->i_mapped = false;
	intr->i_irq = 0;

	return (intr);
}

/**
 * Deallocate all resources associated with the given interrupt descriptor.
 *
 * @param intr Interrupt descriptor to be deallocated.
 */
void
bcma_free_intr(struct bcma_intr *intr)
{
	KASSERT(!intr->i_mapped, ("interrupt %u still mapped", intr->i_sel));

	free(intr, M_BHND);
}

/**
 * Allocate and initialize a new slave port descriptor.
 *
 * @param port_num Per-core port number.
 * @param port_type Port type.
 */
struct bcma_sport *
bcma_alloc_sport(bcma_pid_t port_num, bhnd_port_type port_type)
{
	struct bcma_sport *sport;

	sport = malloc(sizeof(struct bcma_sport), M_BHND, M_NOWAIT);
	if (sport == NULL)
		return (NULL);

	sport->sp_num = port_num;
	sport->sp_type = port_type;
	sport->sp_num_maps = 0;
	STAILQ_INIT(&sport->sp_maps);

	return (sport);
}
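
/*
 * Usage sketch (illustrative assumption, not code from this file): populating
 * a freshly allocated slave port with a single region map; addr and size are
 * hypothetical values obtained elsewhere (e.g. parsed from the device EROM).
 *
 *	struct bcma_sport	*sport;
 *	struct bcma_map		*map;
 *
 *	sport = bcma_alloc_sport(0, BHND_PORT_DEVICE);
 *	if (sport == NULL)
 *		return (ENOMEM);
 *
 *	map = malloc(sizeof(*map), M_BHND, M_NOWAIT);
 *	if (map == NULL) {
 *		bcma_free_sport(sport);
 *		return (ENOMEM);
 *	}
 *
 *	map->m_region_num = 0;
 *	map->m_base = addr;
 *	map->m_size = size;
 *	map->m_rid = -1;
 *
 *	STAILQ_INSERT_TAIL(&sport->sp_maps, map, m_link);
 *	sport->sp_num_maps++;
 */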

/**
 * Deallocate all resources associated with the given port descriptor.
 *
 * @param sport Port descriptor to be deallocated.
 */
void
bcma_free_sport(struct bcma_sport *sport)
{
	struct bcma_map *map, *mapnext;

	STAILQ_FOREACH_SAFE(map, &sport->sp_maps, m_link, mapnext) {
		free(map, M_BHND);
	}

	free(sport, M_BHND);
}

/**
 * Given a bcma(4) child's device info, spin waiting for the device's DMP
 * resetstatus register to clear.
 *
 * @param child The bcma(4) child device.
 * @param dinfo The @p child device info.
 *
 * @retval 0 success
 * @retval ENODEV if @p dinfo does not map an agent register resource.
 * @retval ETIMEDOUT if timeout occurs
 */
int
bcma_dmp_wait_reset(device_t child, struct bcma_devinfo *dinfo)
{
	uint32_t rst;

	if (dinfo->res_agent == NULL)
		return (ENODEV);

	/* 300us should be long enough, but there are references to this
	 * requiring up to 10ms when performing reset of an 802.11 core
	 * after a MAC PSM microcode watchdog event. */
	for (int i = 0; i < 10000; i += 10) {
		rst = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETSTATUS);
		if (rst == 0)
			return (0);

		DELAY(10);
	}

	device_printf(child, "BCMA_DMP_RESETSTATUS timeout\n");
	return (ETIMEDOUT);
}

/**
 * Set the bcma(4) child's DMP resetctrl register value, and then wait
 * for all backplane operations to complete.
 *
 * @param child The bcma(4) child device.
 * @param dinfo The @p child device info.
 * @param value The new resetctrl value to set.
 *
 * @retval 0 success
 * @retval ENODEV if @p dinfo does not map an agent register resource.
 * @retval ETIMEDOUT if timeout occurs waiting for reset completion
 */
int
bcma_dmp_write_reset(device_t child, struct bcma_devinfo *dinfo, uint32_t value)
{
	uint32_t rst;

	if (dinfo->res_agent == NULL)
		return (ENODEV);

	/* Already in requested reset state? */
	rst = bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETCTRL);
	if (rst == value)
		return (0);

	bhnd_bus_write_4(dinfo->res_agent, BCMA_DMP_RESETCTRL, value);
	bhnd_bus_read_4(dinfo->res_agent, BCMA_DMP_RESETCTRL); /* read-back */
	DELAY(10);

	return (bcma_dmp_wait_reset(child, dinfo));
}
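
/*
 * Usage sketch (illustrative assumption; BCMA_DMP_RC_RESET is assumed to be
 * the reset-assert bit defined in bcma_dmp.h, and is not referenced in this
 * file): asserting and then releasing a core's backplane reset via the DMP
 * agent registers.
 *
 *	(assert reset)
 *	if ((error = bcma_dmp_write_reset(child, dinfo, BCMA_DMP_RC_RESET)))
 *		return (error);
 *
 *	(... reconfigure the core's ioctrl flags as required ...)
 *
 *	(release reset)
 *	if ((error = bcma_dmp_write_reset(child, dinfo, 0x0)))
 *		return (error);
 */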