xref: /illumos-gate/usr/src/uts/common/io/1394/s1394_dev_disc.c (revision 7a6d80f1660abd4755c68cbd094d4a914681d26e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2019 Joyent, Inc.
29  */
30 
31 /*
32  * s1394_dev_disc.c
33  *    1394 Services Layer Device Discovery Routines
34  *    This file contains the bus reset thread code, bus manager routines and
35  *    various routines that are used to implement remote Config ROM reading.
36  *
37  *    FUTURE:
38  *    Rescan the bus if invalid nodes are seen.
39  *    Investigate using a taskq for phase2 config rom reads.
40  *    If we are reading the entire bus info blk, we should attempt
41  *    a block read and fall back to quad reads if this fails.
42  */
43 
44 #include <sys/conf.h>
45 #include <sys/sysmacros.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/cmn_err.h>
49 #include <sys/sunndi.h>
50 #include <sys/modctl.h>
51 #include <sys/ddi_impldefs.h>
52 #include <sys/types.h>
53 #include <sys/kmem.h>
54 #include <sys/kstat.h>
55 #include <sys/varargs.h>
56 
57 #include <sys/1394/t1394.h>
58 #include <sys/1394/s1394.h>
59 #include <sys/1394/h1394.h>
60 #include <sys/1394/ieee1394.h>
61 #include <sys/1394/ieee1212.h>
62 
63 /* hcmd_ret_t - return codes from the br_thread command completion handlers */
64 typedef enum {
65 	S1394_HCMD_INVALID,
66 	S1394_HCMD_NODE_DONE,
67 	S1394_HCMD_NODE_EXPECT_MORE,
68 	S1394_HCMD_LOCK_FAILED
69 } hcmd_ret_t;
70 
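/*
 * QUAD_TO_CFGROM_ADDR() builds the 64-bit 1394 address of config ROM
 * quadlet (q) on node (n) of bus (b): the bus and node numbers are
 * shifted into the node ID fields and the quadlet's byte offset is
 * added to the standard config ROM base address.
 */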
71 #define	QUAD_TO_CFGROM_ADDR(b, n, q, addr) {			\
72 	uint64_t bl = (b);					\
73 	uint64_t nl = (n);					\
74 	addr = ((bl) << IEEE1394_ADDR_BUS_ID_SHIFT) |		\
75 		((nl) << IEEE1394_ADDR_PHY_ID_SHIFT);		\
76 	addr += IEEE1394_CONFIG_ROM_ADDR + ((q) << 2);		\
77 }
78 
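/*
 * Config ROM read pacing helpers: CFGROM_READ_PAUSE() delays for (d)
 * milliseconds (a no-op when s1394_cfgrom_read_delay_ms is 0), while
 * BUMP_CFGROM_READ_DELAY() and CFGROM_GET_READ_DELAY() increment and
 * fetch a node's current read delay.
 */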
79 #define	CFGROM_READ_PAUSE(d)						\
80 	((s1394_cfgrom_read_delay_ms == 0) ? (void) 0 :			\
81 	delay(drv_usectohz((d) * 1000)))
82 
83 #define	BUMP_CFGROM_READ_DELAY(n)					\
84 	(n)->cfgrom_read_delay += s1394_cfgrom_read_delay_incr
85 
86 #define	CFGROM_GET_READ_DELAY(n, d)					\
87 	((d) = (n)->cfgrom_read_delay)
88 
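/*
 * SETUP_QUAD_READ() primes a node for its next config ROM read; if
 * reset_fails is non-zero, the node's failure count and read delay
 * are reset before the quadlet to read and read count are recorded.
 */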
89 #define	SETUP_QUAD_READ(n, reset_fails, quadlet, cnt)			\
90 {									\
91 	int i = (reset_fails);						\
92 	if (i != 0) {							\
93 		(n)->cfgrom_read_fails = 0;				\
94 		(n)->cfgrom_read_delay = (uchar_t)s1394_cfgrom_read_delay_ms; \
95 	}								\
96 	(n)->cfgrom_quad_to_read = (quadlet);				\
97 	(n)->cfgrom_quad_read_cnt = (cnt);				\
98 }
99 
100 static void s1394_wait_for_events(s1394_hal_t *hal, int firsttime);
101 
102 static int s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
103     hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd));
104 
105 static void s1394_flush_cmplq(s1394_hal_t *hal);
106 
107 static void s1394_br_thread_exit(s1394_hal_t *hal);
108 
109 static void s1394_target_bus_reset_notifies(s1394_hal_t *hal,
110     t1394_localinfo_t *localinfo);
111 
112 static int s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
113     s1394_status_t *status);
114 
115 static int s1394_cfgrom_scan_phase1(s1394_hal_t *hal);
116 
117 static hcmd_ret_t s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal,
118     cmd1394_cmd_t *cmd);
119 
120 static int s1394_cfgrom_scan_phase2(s1394_hal_t *hal);
121 
122 static hcmd_ret_t s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal,
123     cmd1394_cmd_t *cmd);
124 
125 static int s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
126     s1394_status_t *status);
127 
128 static void s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd);
129 
130 static void s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num,
131     uint32_t *quadlet, uint32_t *data);
132 
133 static int s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode);
134 
135 static int s1394_match_all_GUIDs(s1394_hal_t *hal);
136 
137 static void s1394_become_bus_mgr(void *arg);
138 
139 static void s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd);
140 
141 static int s1394_bus_mgr_processing(s1394_hal_t *hal);
142 
143 static int s1394_do_bus_mgr_processing(s1394_hal_t *hal);
144 
145 static void s1394_bus_mgr_timers_stop(s1394_hal_t *hal,
146     timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
147 
148 static void s1394_bus_mgr_timers_start(s1394_hal_t *hal,
149     timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
150 
151 static int s1394_cycle_master_capable(s1394_hal_t *hal);
152 
153 static int s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root,
154     int new_gap_cnt, uint32_t IRM_flags);
155 
156 static void s1394_phy_config_callback(cmd1394_cmd_t *cmd);
157 
158 static int s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node,
159     uint32_t quadlet, uint32_t *nextquadp);
160 
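/*
 * Tunables controlling config ROM read retries, pacing delays and
 * related behavior (CRC validation, Rio pass1 workarounds, etc.).
 */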
161 static int s1394_cfgrom_read_retry_cnt = 3;	/* 1 + 3 retries */
162 static int s1394_cfgrom_read_delay_ms = 20;	/* start with 20ms */
163 static int s1394_cfgrom_read_delay_incr = 10;	/* 10ms increments */
164 static int s1394_enable_crc_validation = 0;
165 static int s1394_turn_off_dir_stack = 0;
166 static int s1394_crcsz_is_cfgsz = 0;
167 static int s1394_enable_rio_pass1_workarounds = 0;
168 
169 /*
170  * s1394_br_thread()
171  *    is the bus reset thread. Its sole purpose is to read/reread config roms
172  *    as appropriate and do bus reset time things (bus manager processing,
173  *    isoch resource reallocation etc.).
174  */
175 void
176 s1394_br_thread(s1394_hal_t *hal)
177 {
178 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
179 
180 	/* Initialize the Bus Mgr timers */
181 	hal->bus_mgr_timeout_id = 0;
182 	hal->bus_mgr_query_timeout_id = 0;
183 
184 	/* Initialize the completion Q */
185 	mutex_enter(&hal->br_cmplq_mutex);
186 	hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
187 	mutex_exit(&hal->br_cmplq_mutex);
188 
189 	s1394_wait_for_events(hal, 1);
190 
191 	for (;;) {
192 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
193 
194 		s1394_wait_for_events(hal, 0);
195 
196 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
197 
198 		/* stop bus manager timeouts, if needed */
199 		s1394_bus_mgr_timers_stop(hal, &hal->bus_mgr_query_timeout_id,
200 		    &hal->bus_mgr_timeout_id);
201 
202 		s1394_flush_cmplq(hal);
203 
204 		/* start timers for checking bus manager, if needed */
205 		s1394_bus_mgr_timers_start(hal, &hal->bus_mgr_query_timeout_id,
206 		    &hal->bus_mgr_timeout_id);
207 
208 		/* Try to reallocate all isoch resources */
209 		s1394_isoch_rsrc_realloc(hal);
210 
211 		if (s1394_cfgrom_scan_phase1(hal) != DDI_SUCCESS) {
212 			continue;
213 		}
214 
215 		if (s1394_bus_mgr_processing(hal) != DDI_SUCCESS) {
216 			continue;
217 		}
218 
219 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
220 
221 		if (s1394_cfgrom_scan_phase2(hal) != DDI_SUCCESS) {
222 			continue;
223 		}
224 
225 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
226 	}
227 }
228 
229 /*
230  * s1394_wait_for_events()
231  *    blocks waiting for a cv_signal on the bus reset condition variable.
232  *    Used by the bus reset thread for synchronizing with the bus reset/
233  *    self id interrupt callback from the hal. Does CPR initialization the
234  *    first time it is called. If the services layer sees a valid self id
235  *    buffer, it builds the topology tree and signals the bus reset thread
236  *    to read the config roms as appropriate (indicated by BR_THR_CFGROM_SCAN).
237  *    If the services layer wishes to kill the bus reset thread, it does so
238  *    by signaling a BR_THR_GO_AWAY event.
239  */
240 static void
241 s1394_wait_for_events(s1394_hal_t *hal, int firsttime)
242 {
243 	uint_t event;
244 
245 	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
246 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
247 
248 	if (firsttime)
249 		CALLB_CPR_INIT(&hal->hal_cprinfo, &hal->br_thread_mutex,
250 		    callb_generic_cpr, "s1394_br_thread");
251 
252 	/* Check and wait for a BUS RESET */
253 	mutex_enter(&hal->br_thread_mutex);
254 	while ((event = hal->br_thread_ev_type) == 0) {
255 		CALLB_CPR_SAFE_BEGIN(&hal->hal_cprinfo);
256 		cv_wait(&hal->br_thread_cv, &hal->br_thread_mutex);
257 		CALLB_CPR_SAFE_END(&hal->hal_cprinfo, &hal->br_thread_mutex);
258 	}
259 
260 	if (event & BR_THR_GO_AWAY) {
261 		s1394_br_thread_exit(hal);
262 		/*NOTREACHED*/
263 		return;
264 	}
265 
266 	if (firsttime) {
267 		mutex_exit(&hal->br_thread_mutex);
268 		return;
269 	}
270 
271 	mutex_enter(&hal->topology_tree_mutex);
272 	hal->br_cfgrom_read_gen = hal->generation_count;
273 
274 	hal->br_thread_ev_type &= ~BR_THR_CFGROM_SCAN;
275 	mutex_exit(&hal->topology_tree_mutex);
276 	mutex_exit(&hal->br_thread_mutex);
277 }
278 
279 /*
280  * s1394_wait_for_cfgrom_callbacks()
281  *    Waits for completed config rom reads. Takes each completion off the
282  *    completion queue and passes it to the "completion handler" function
283  *    that was passed in as an argument. Further processing of the completion
284  *    queue depends on the return status of the completion handler. If there
285  *    is a bus reset while waiting for completions or if the services layer
286  *    signals BR_THR_GO_AWAY, quits waiting for completions and returns
287  *    non-zero. Also returns non-zero if completion handler returns
288  *    S1394_HCMD_LOCK_FAILED.  Returns 0 if config roms for all nodes have
289  *    been dealt with.
290  */
291 static int
292 s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
293     hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd))
294 {
295 	cmd1394_cmd_t *cmd;
296 	s1394_cmd_priv_t *s_priv;
297 	int ret, done = 0;
298 	hcmd_ret_t cmdret;
299 
300 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
301 
302 	ret = DDI_SUCCESS;
303 
304 	while (!done) {
305 		mutex_enter(&hal->br_cmplq_mutex);
306 		mutex_enter(&hal->topology_tree_mutex);
307 		while (wait_gen == hal->generation_count &&
308 		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) == 0 &&
309 		    hal->br_cmplq_head == NULL) {
310 			mutex_exit(&hal->topology_tree_mutex);
311 			cv_wait(&hal->br_cmplq_cv, &hal->br_cmplq_mutex);
312 			mutex_enter(&hal->topology_tree_mutex);
313 		}
314 		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
315 		if (wait_gen != hal->generation_count ||
316 		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
317 			mutex_exit(&hal->topology_tree_mutex);
318 			mutex_exit(&hal->br_cmplq_mutex);
319 			s1394_flush_cmplq(hal);
320 			return (DDI_FAILURE);
321 		}
322 		mutex_exit(&hal->topology_tree_mutex);
323 
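		/* Pull the next completed command off the completion queue */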
324 		if ((cmd = hal->br_cmplq_head) != NULL) {
325 			s_priv = S1394_GET_CMD_PRIV(cmd);
326 
327 			hal->br_cmplq_head = s_priv->cmd_priv_next;
328 		}
329 		if (cmd == hal->br_cmplq_tail)
330 			hal->br_cmplq_tail = NULL;
331 		mutex_exit(&hal->br_cmplq_mutex);
332 
333 		if (cmd != NULL) {
334 			if (cmd->bus_generation != wait_gen) {
335 				(void) s1394_free_cmd(hal, &cmd);
336 				continue;
337 			}
338 			cmdret = (*handle_cmd_fn)(hal, cmd);
339 			ASSERT(cmdret != S1394_HCMD_INVALID);
340 			if (cmdret == S1394_HCMD_LOCK_FAILED) {
341 				/* flush completion queue */
342 				ret = DDI_FAILURE;
343 				s1394_flush_cmplq(hal);
344 				break;
345 			} else if (cmdret == S1394_HCMD_NODE_DONE) {
346 				if (--hal->cfgroms_being_read == 0) {
347 					/* All done */
348 					break;
349 				}
350 			} else {
351 				ASSERT(cmdret == S1394_HCMD_NODE_EXPECT_MORE);
352 				done = 0;
353 			}
354 		}
355 	}
356 
357 	return (ret);
358 }
359 
360 /*
361  * s1394_flush_cmplq()
362  *    Frees all cmds on the completion queue.
363  */
364 static void
365 s1394_flush_cmplq(s1394_hal_t *hal)
366 {
367 	s1394_cmd_priv_t *s_priv;
368 	cmd1394_cmd_t *cmd, *tcmd;
369 
370 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
371 
372 	cmd = NULL;
373 
374 	do {
375 		mutex_enter(&hal->br_cmplq_mutex);
376 		cmd = hal->br_cmplq_head;
377 		hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
378 		mutex_exit(&hal->br_cmplq_mutex);
379 
380 		while (cmd != NULL) {
381 			s_priv = S1394_GET_CMD_PRIV(cmd);
382 
383 			tcmd = s_priv->cmd_priv_next;
384 			(void) s1394_free_cmd(hal, &cmd);
385 			cmd = tcmd;
386 		}
387 
388 		mutex_enter(&hal->br_cmplq_mutex);
389 		cmd = hal->br_cmplq_head;
390 		mutex_exit(&hal->br_cmplq_mutex);
391 
392 	} while (cmd != NULL);
393 }
394 
395 /*
396  * s1394_br_thread_exit()
397  *    Flushes the completion queue and calls thread_exit() (which effectively
398  *    kills the bus reset thread).
399  */
400 static void
401 s1394_br_thread_exit(s1394_hal_t *hal)
402 {
403 	ASSERT(MUTEX_HELD(&hal->br_thread_mutex));
404 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
405 	s1394_flush_cmplq(hal);
406 #ifndef	__lock_lint
407 	CALLB_CPR_EXIT(&hal->hal_cprinfo);
408 #endif
409 	hal->br_thread_ev_type &= ~BR_THR_GO_AWAY;
410 	thread_exit();
411 	/*NOTREACHED*/
412 }
413 
414 /*
415  * s1394_target_bus_reset_notifies()
416  *    tells the ndi event framework to invoke any callbacks registered for
417  *    "bus reset event".
418  */
419 static void
420 s1394_target_bus_reset_notifies(s1394_hal_t *hal, t1394_localinfo_t *localinfo)
421 {
422 	ddi_eventcookie_t cookie;
423 
424 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
425 
426 	if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
427 	    DDI_DEVI_BUS_RESET_EVENT, &cookie, NDI_EVENT_NOPASS) ==
428 	    NDI_SUCCESS) {
429 		(void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
430 		    cookie, localinfo);
431 	}
432 }
433 
434 /*
435  * s1394_alloc_cfgrom()
436  *    Allocates config rom for the node. Sets CFGROM_NEW_ALLOC bit in the
437  *    node cfgrom state. Drops topology_tree_mutex around the call to
438  *    kmem_zalloc(). If re-locking fails, returns DDI_FAILURE, else returns
439  *    DDI_SUCCESS.
440  */
441 static int
442 s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node, s1394_status_t *status)
443 {
444 	uint32_t *cfgrom;
445 
446 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
447 
448 	*status = S1394_NOSTATUS;
449 
450 	/*
451 	 * if cfgrom is non-NULL, this has to be the generation changed
452 	 * case (where we allocate cfgrom again to reread the cfgrom)
453 	 */
454 	ASSERT(node->cfgrom == NULL || (node->cfgrom != NULL &&
455 	    CFGROM_GEN_CHANGED(node) == B_TRUE));
456 
457 	/*
458 	 * if node matched, either cfgrom has to be NULL or link should be
459 	 * off in the last matched node or config rom generations changed.
460 	 */
461 	ASSERT(NODE_MATCHED(node) == B_FALSE || (NODE_MATCHED(node) == B_TRUE &&
462 	    (node->cfgrom == NULL || LINK_ACTIVE(node->old_node) == B_FALSE) ||
463 	    CFGROM_GEN_CHANGED(node) == B_TRUE));
464 
465 	s1394_unlock_tree(hal);
466 	cfgrom = (uint32_t *)kmem_zalloc(IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
467 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
468 		kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
469 		*status |= S1394_LOCK_FAILED;
470 		return (DDI_FAILURE);
471 	}
472 	node->cfgrom = cfgrom;
473 	node->cfgrom_size = IEEE1394_CONFIG_ROM_QUAD_SZ;
474 	SET_CFGROM_NEW_ALLOC(node);
475 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
476 	return (DDI_SUCCESS);
477 }
478 
479 /*
480  * s1394_free_cfgrom()
481  *    Marks the config rom invalid and frees the config rom based on options.
482  */
483 void
484 s1394_free_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
485     s1394_free_cfgrom_t options)
486 {
487 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
488 	ASSERT(node->cfgrom != NULL);
489 
490 	if (options == S1394_FREE_CFGROM_BOTH) {
491 		/*
492 		 * free in both old and new trees; will be called with
493 		 * new node.
494 		 */
495 		s1394_node_t *onode = node->old_node;
496 
497 		if (NODE_MATCHED(node) == B_TRUE && onode->cfgrom != NULL)
498 			ASSERT(onode->cfgrom == node->cfgrom);
499 
500 		if (onode != NULL && onode->cfgrom != NULL && onode->cfgrom !=
501 		    node->cfgrom)
502 			kmem_free(onode->cfgrom, IEEE1394_CONFIG_ROM_SZ);
503 
504 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
505 		onode->cfgrom = NULL;
506 		node->cfgrom = NULL;
507 
508 		CLEAR_CFGROM_STATE(onode);
509 		CLEAR_CFGROM_STATE(node);
510 
511 	} else if (options == S1394_FREE_CFGROM_NEW) {
512 
513 		ASSERT(CFGROM_NEW_ALLOC(node) == B_TRUE);
514 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
515 		CLEAR_CFGROM_NEW_ALLOC(node);
516 		node->cfgrom = NULL;
517 		CLEAR_CFGROM_STATE(node);
518 
519 	} else if (options == S1394_FREE_CFGROM_OLD) {
520 
521 		/* freeing in old tree */
522 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
523 		node->cfgrom = NULL;
524 		CLEAR_CFGROM_STATE(node);
525 	}
526 }
527 
528 /*
529  * s1394_copy_cfgrom()
530  *    Copies config rom info from "from" node to "to" node. Clears
531  *    CFGROM_NEW_ALLOC bit in cfgrom state in both nodes. (CFGROM_NEW_ALLOC
532  *    acts as a reference count. If set, only the node in the current tree
533  *    has a pointer to it; if clear, both the node in the current tree as
534  *    well as the corresponding node in the old tree point to the same memory).
535  */
536 void
537 s1394_copy_cfgrom(s1394_node_t *to, s1394_node_t *from)
538 {
539 	ASSERT(to->cfgrom == NULL);
540 
541 	to->cfgrom = from->cfgrom;
542 	to->cfgrom_state = from->cfgrom_state;
543 	to->cfgrom_valid_size = from->cfgrom_valid_size;
544 	to->cfgrom_size = from->cfgrom_size;
545 	to->node_state = from->node_state;
546 
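	/*
	 * Copy the directory stack and related parse state in one shot:
	 * all fields from dir_stack up to (but not including)
	 * cfgrom_quad_to_read.
	 */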
547 	bcopy(from->dir_stack, to->dir_stack,
548 	    offsetof(s1394_node_t, cfgrom_quad_to_read) -
549 	    offsetof(s1394_node_t, dir_stack));
550 
551 	to->cfgrom_quad_to_read = from->cfgrom_quad_to_read;
552 
553 	CLEAR_CFGROM_NEW_ALLOC(to);
554 	CLEAR_CFGROM_NEW_ALLOC(from);
555 
556 	/*
557 	 * old link off, new link on => handled in s1394_cfgrom_scan_phase1
558 	 * old link on, new link off => handled in s1394_process_old_tree
559 	 */
560 	if (LINK_ACTIVE(from) == B_FALSE) {
561 		/*
562 		 * if last time around, link was off, there wouldn't
563 		 * have been config rom allocated.
564 		 */
565 		ASSERT(from->cfgrom == NULL);
566 		return;
567 	} else {
568 		s1394_selfid_pkt_t *selfid_pkt = to->selfid_packet;
569 
570 		if (IEEE1394_SELFID_ISLINKON(selfid_pkt))
571 			SET_LINK_ACTIVE(to);
572 	}
573 }
574 
575 /*
576  * s1394_read_bus_info_blk()
577  *    Attempts to kick off reading IEEE1212_NODE_CAP_QUAD quad or quad 0.
578  *    Increments cfgroms_being_read by 1 and returns DDI_SUCCESS if command was
579  *    issued, else sets status to the failure reason and returns DDI_FAILURE.
580  */
581 static int
582 s1394_read_bus_info_blk(s1394_hal_t *hal, s1394_node_t *node,
583     s1394_status_t *status)
584 {
585 	uint32_t quadlet;
586 	cmd1394_cmd_t *cmd;
587 	uchar_t node_num;
588 
589 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
590 	ASSERT(LINK_ACTIVE(node) == B_TRUE);
591 
592 	node_num = node->node_num;
593 
594 	/*
595 	 * drop the topology lock around command allocation. Return failure
596 	 * if either command allocation fails or cannot reacquire the lock
597 	 */
598 	s1394_unlock_tree(hal);
599 	*status = S1394_NOSTATUS;
600 
601 	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
602 		*status |= S1394_CMD_ALLOC_FAILED;
603 	}
604 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
605 		*status |= S1394_LOCK_FAILED;
606 		/* free the cmd if it was allocated above */
607 		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
608 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
609 	}
610 	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
611 		return (DDI_FAILURE);
612 	}
613 
614 	/* allocate cfgrom if needed */
615 	if (node->cfgrom == NULL && s1394_alloc_cfgrom(hal, node, status) !=
616 	    DDI_SUCCESS) {
617 		ASSERT(((*status) & S1394_LOCK_FAILED) != 0);
618 		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
619 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
620 		return (DDI_FAILURE);
621 	}
622 
623 	/*
624 	 * if this is a matched node, read quad 2 (node capabilities) to
625 	 * see if the generation count changed.
626 	 */
627 	quadlet = CFGROM_BIB_READ(node) ? IEEE1212_NODE_CAP_QUAD : 0;
628 
629 	/*
630 	 * read bus info block at 100Mbit. This will help us with the cases
631 	 * where LINK is slower than PHY; s1394 uses PHY speed till speed map
632 	 * is updated.
633 	 */
634 	cmd->completion_callback = s1394_cfgrom_read_callback;
635 	cmd->bus_generation = hal->generation_count;
636 	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
637 	    CMD1394_OVERRIDE_ADDR | CMD1394_OVERRIDE_SPEED);
638 	cmd->cmd_speed = IEEE1394_S100;
639 	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
640 
641 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
642 	    quadlet, cmd->cmd_addr);
643 
644 	SETUP_QUAD_READ(node, 1, quadlet, 1);
645 	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
646 		/* free the command if it wasn't handed over to the HAL */
647 		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
648 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
649 		}
650 		if (((*status) & S1394_LOCK_FAILED) != 0) {
651 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
652 		}
653 		return (DDI_FAILURE);
654 	}
655 
656 	hal->cfgroms_being_read++;
657 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
658 
659 	return (DDI_SUCCESS);
660 }
661 
662 /*
663  * s1394_read_rest_of_cfgrom()
664  *    Attempts to start reading node->cfgrom_quad_to_read quadlet. Increments
665  *    cfgroms_being_read by 1 and returns DDI_SUCCESS if command was issued,
666  *    else sets status to the failure reason and returns DDI_FAILURE.
667  */
668 int
669 s1394_read_rest_of_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
670     s1394_status_t *status)
671 {
672 	cmd1394_cmd_t *cmd;
673 	uchar_t node_num = node->node_num;
674 
675 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
676 	ASSERT(LINK_ACTIVE(node) == B_TRUE);
677 
678 	/*
679 	 * drop the topology lock around command allocation. Return failure
680 	 * if either command allocation fails or cannot reacquire the lock
681 	 */
682 	s1394_unlock_tree(hal);
683 	*status = S1394_NOSTATUS;
684 
685 	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
686 		*status |= S1394_CMD_ALLOC_FAILED;
687 	}
688 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
689 		*status |= S1394_LOCK_FAILED;
690 		/* free if we allocated a cmd above */
691 		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
692 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
693 	}
694 	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
695 		return (DDI_FAILURE);
696 	}
697 
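	/* Set up an asynch quadlet read of the next config ROM quadlet */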
698 	cmd->completion_callback = s1394_cfgrom_read_callback;
699 	cmd->bus_generation = hal->generation_count;
700 	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
701 	    CMD1394_OVERRIDE_ADDR);
702 	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
703 
704 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
705 	    node->cfgrom_quad_to_read, cmd->cmd_addr);
706 	SETUP_QUAD_READ(node, 1, node->cfgrom_quad_to_read, 1);
707 	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
708 		/* free the command if it wasn't handed over to the HAL */
709 		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
710 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
711 		}
712 		if (((*status) & S1394_LOCK_FAILED) != 0) {
713 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
714 		}
715 		return (DDI_FAILURE);
716 	}
717 
718 	hal->cfgroms_being_read++;
719 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
720 
721 	return (DDI_SUCCESS);
722 }
723 
724 /*
725  * s1394_cfgrom_scan_phase1()
726  *    Attempts to read bus info blocks for nodes as needed. Returns DDI_FAILURE
727  *    if bus reset generations changed (as indicated by s1394_lock_tree()
728  *    return status) or if any of the callees return failure, else returns
729  *    DDI_SUCCESS.
730  */
731 static int
732 s1394_cfgrom_scan_phase1(s1394_hal_t *hal)
733 {
734 	uint32_t number_of_nodes;
735 	int ret;
736 	int node;
737 	int wait_in_gen;
738 	int wait_for_cbs;
739 	uint_t hal_node_num;
740 	uint_t hal_node_num_old;
741 	s1394_node_t *nnode, *onode;
742 	s1394_selfid_pkt_t *selfid_pkt;
743 	s1394_status_t status;
744 
745 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
746 
747 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
748 		return (DDI_FAILURE);
749 	}
750 	wait_for_cbs = 0;
751 	number_of_nodes = hal->number_of_nodes;
752 	hal->cfgroms_being_read = 0;
753 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
754 	hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
755 	s1394_unlock_tree(hal);
756 
757 	ret = DDI_SUCCESS;
758 
759 	/* Send requests for all new node config ROM 0 */
760 	for (node = 0; node < number_of_nodes; node++) {
761 
762 		status = S1394_UNKNOWN;
763 
764 		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
765 			status = S1394_LOCK_FAILED;
766 			break;
767 		}
768 
769 		nnode = &hal->topology_tree[node];
770 		onode = nnode->old_node;
771 		/* if node matched, onode should be non NULL */
772 		ASSERT(NODE_MATCHED(nnode) == B_FALSE || (NODE_MATCHED(nnode) ==
773 		    B_TRUE && onode != NULL));
774 
775 		/*
776 		 * Read bus info block if it is a brand new node (MATCHED is 0)
777 	 * or if matched but link was off in previous generations
778 		 * or if matched but had invalid cfgrom in last generation
779 		 * or if matched but config rom generation > 1 (this is to
780 		 * check if config rom generation changed between bus resets).
781 		 */
782 		if ((node != hal_node_num) &&
783 		    ((NODE_MATCHED(nnode) == B_FALSE) ||
784 		    (NODE_MATCHED(nnode) == B_TRUE && LINK_ACTIVE(onode) ==
785 		    B_FALSE) || (NODE_MATCHED(nnode) == B_TRUE &&
786 		    (onode->cfgrom == NULL || CFGROM_VALID(onode) ==
787 		    B_FALSE)) || (NODE_MATCHED(nnode) == B_TRUE &&
788 		    nnode->cfgrom != NULL && CONFIG_ROM_GEN(nnode->cfgrom) >
789 		    1))) {
790 
791 			SET_NODE_VISITED(nnode);
792 			selfid_pkt = nnode->selfid_packet;
793 			if (IEEE1394_SELFID_ISLINKON(selfid_pkt)) {
794 
795 				SET_LINK_ACTIVE(nnode);
796 
797 				status = S1394_UNKNOWN;
798 
799 				if (s1394_read_bus_info_blk(hal, nnode,
800 				    &status) != DDI_SUCCESS) {
801 					if ((status & S1394_LOCK_FAILED) != 0)
802 						break;
803 				} else {
804 					wait_for_cbs++;
805 					wait_in_gen = hal->br_cfgrom_read_gen;
806 				}
807 			} else {
808 				/*
809 				 * Special case: if link was active last
810 				 * time around, this should be treated as
811 				 * node going away.
812 				 */
813 				CLEAR_LINK_ACTIVE(nnode);
814 				if (NODE_MATCHED(nnode) == B_TRUE &&
815 				    LINK_ACTIVE(onode) == B_TRUE) {
816 					CLEAR_CFGROM_STATE(nnode);
817 				}
818 			}
819 		} else {
820 			if (node == hal_node_num) {
821 				onode = &hal->old_tree[hal_node_num_old];
822 				/* Set up the local matched nodes */
823 				if (onode) {
824 					nnode->old_node = onode;
825 					SET_NODE_MATCHED(nnode);
826 					SET_NODE_MATCHED(onode);
827 					s1394_copy_cfgrom(nnode, onode);
828 				}
829 			}
830 		}
831 		s1394_unlock_tree(hal);
832 	}
833 
834 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
835 
836 	if ((status & S1394_LOCK_FAILED) != 0) {
837 		return (DDI_FAILURE);
838 	}
839 
840 	/*
841 	 * If we started any reads, wait for completion callbacks
842 	 */
843 	if (wait_for_cbs != 0) {
844 		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_in_gen,
845 		    s1394_br_thread_handle_cmd_phase1);
846 	}
847 
848 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
849 
850 	return (ret);
851 }
852 
853 /*
854  * s1394_br_thread_handle_cmd_phase1()
855  *    Process the cmd completion for phase 1 config rom reads. If we
856  *    successfully read IEEE1212_NODE_CAP_QUAD quadlet and config rom gen
857  *    did not change, move targets hanging off the old node to the current
858  *    node. If config rom generations change, alloc new config rom and start
859  *    re-reading the new config rom. If all of bus info block is read (as
860  *    required), mark the node as CFGROM_BIB_READ. If a config rom read
861  *    fails, retry if not too many failures. Topology tree mutex is dropped and
862  *    reacquired in this routine. If reacquiring fails, returns
863  *    S1394_HCMD_LOCK_FAILED. If the entire bus info block is read, returns
864  *    S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
865  *    indicate not done with the node yet).
866  *
867  *    If we cannot read any of the quadlets in the bus info block, cfgrom
868  *    is marked invalid in this generation (a side effect of calling
869  *    s1394_free_cfgrom()). We free cfgrom in this routine only if the failure
870  *    is not due to bus generations changing.
871  */
872 static hcmd_ret_t
873 s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
874 {
875 	s1394_target_t *t;
876 	s1394_node_t *node, *onode;
877 	uint32_t node_num, quadlet, data;
878 	int freecmd, done, locked;
879 	hcmd_ret_t cmdret;
880 	uchar_t readdelay;
881 	s1394_status_t status;
882 
883 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
884 	ASSERT(quadlet == 0 || quadlet < IEEE1394_BIB_QUAD_SZ);
885 
886 	cmdret = S1394_HCMD_NODE_EXPECT_MORE;
887 
888 	locked = 1;
889 	freecmd = 1;
890 
891 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
892 		locked = 0;
893 		goto bail;
894 	}
895 
896 	node = &hal->topology_tree[node_num];
897 
898 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
899 
900 		int reread = 0;
901 
902 		done = 0;
903 
904 		if (quadlet == IEEE1212_NODE_CAP_QUAD &&
905 		    CFGROM_BIB_READ(node)) {
906 
907 			int cur_gen = ((data & IEEE1394_BIB_GEN_MASK) >>
908 			    IEEE1394_BIB_GEN_SHIFT);
909 
910 			/*
911 			 * node->old_node can be NULL if this is a new node &
912 			 * we are doing a rescan
913 			 */
914 			onode = node->old_node;
915 			if (CONFIG_ROM_GEN(node->cfgrom) == cur_gen) {
916 
917 				if (CFGROM_PARSED(node) == B_TRUE) {
918 					rw_enter(&hal->target_list_rwlock,
919 					    RW_WRITER);
920 					/* Update the target list, if any */
921 					if (onode != NULL &&
922 					    (t = onode->target_list) != NULL) {
923 						node->target_list = t;
924 						while (t != NULL) {
925 							t->on_node = node;
926 							t = t->target_sibling;
927 						}
928 					}
929 					rw_exit(&hal->target_list_rwlock);
930 				}
931 				SET_NODE_MATCHED(node);
932 				if (onode)
933 					SET_NODE_MATCHED(onode);
934 				node->cfgrom_quad_to_read =
935 				    IEEE1394_BIB_QUAD_SZ;
936 				done++;
937 			} else {
938 
939 				SET_CFGROM_GEN_CHANGED(node);
940 				if (onode != NULL)
941 					SET_CFGROM_GEN_CHANGED(onode);
942 				/*
943 				 * Reset BIB_READ flag and start reading entire
944 				 * config rom.
945 				 */
946 				CLEAR_CFGROM_BIB_READ(node);
947 				reread = 1;
948 
949 				/*
950 				 * if generations changed, allocate cfgrom for
951 				 * the new generation. s1394_match_GUID() will
952 				 * free up the cfgrom from the old generation.
953 				 */
954 				if (s1394_alloc_cfgrom(hal, node, &status) !=
955 				    DDI_SUCCESS) {
956 					ASSERT((status & S1394_LOCK_FAILED) !=
957 					    0);
958 					ASSERT(MUTEX_NOT_HELD(&hal->
959 					    topology_tree_mutex));
960 					locked = 0;
961 					/* we failed to relock the tree */
962 					goto bail;
963 				}
964 			}
965 		}
966 
967 		/*
968 		 * we end up here if we don't have bus_info_blk for this
969 		 * node or if config rom generation changed.
970 		 */
971 
972 		/*
973 		 * Pass1 Rio bug workaround. Due to this bug, if we read
974 		 * past quadlet 5 of the config rom, the PCI bus gets wedged.
975 		 * Avoid the hang by not reading past quadlet 5.
976 		 * We identify a remote Rio by the node vendor id part of
977 		 * quad 3 (which is == SUNW == S1394_SUNW_OUI (0x80020)).
978 		 */
979 		if (s1394_enable_rio_pass1_workarounds != 0) {
980 			if ((quadlet == 3) && ((data >> 8) == S1394_SUNW_OUI)) {
981 				node->cfgrom_size = IEEE1394_BIB_QUAD_SZ;
982 				node->cfgrom_valid_size = IEEE1394_BIB_QUAD_SZ;
983 			}
984 		}
985 
986 		if (!done) {
987 
988 			if (reread)
989 				quadlet = 0;
990 			else
991 				node->cfgrom[quadlet++] = data;
992 
993 			/* if we don't have the entire bus_info_blk... */
994 			if (quadlet < IEEE1394_BIB_QUAD_SZ) {
995 
996 				CFGROM_GET_READ_DELAY(node, readdelay);
997 				SETUP_QUAD_READ(node, 1, quadlet, 1);
998 				s1394_unlock_tree(hal);
999 				CFGROM_READ_PAUSE(readdelay);
1000 				/* get next quadlet */
1001 				if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1002 					locked = 0;
1003 				} else if (s1394_read_config_quadlet(hal, cmd,
1004 				    &status) != DDI_SUCCESS) {
1005 					/*
1006 					 * Failed to get going. If command was
1007 					 * successfully handed over to the HAL,
1008 					 * don't free it (it will get freed
1009 					 * later in the callback).
1010 					 */
1011 					if ((status & S1394_CMD_INFLIGHT) !=
1012 					    0) {
1013 						freecmd = 0;
1014 					}
1015 					if ((status & S1394_LOCK_FAILED) != 0) {
1016 						locked = 0;
1017 					} else {
1018 						if (CFGROM_NEW_ALLOC(node) ==
1019 						    B_TRUE) {
1020 							s1394_free_cfgrom(hal,
1021 							    node,
1022 							S1394_FREE_CFGROM_NEW);
1023 						} else {
1024 							CLEAR_CFGROM_STATE(
1025 							    node);
1026 						}
1027 					}
1028 					done++;
1029 				} else {
1030 					freecmd = 0;
1031 				}
1032 			} else {
1033 				/* got all of bus_info_blk */
1034 				SET_CFGROM_BIB_READ(node);
1035 				if (node->cfgrom_size == IEEE1394_BIB_QUAD_SZ)
1036 					SET_CFGROM_ALL_READ(node);
1037 				node->cfgrom_quad_to_read = quadlet;
1038 				done++;
1039 			}
1040 		}
1041 	} else {
1042 		done = 1;
1043 		node->cfgrom_read_fails++;
1044 		BUMP_CFGROM_READ_DELAY(node);
1045 
1046 		/* retry if not too many failures */
1047 		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1048 			CFGROM_GET_READ_DELAY(node, readdelay);
1049 			SETUP_QUAD_READ(node, 0, quadlet, 1);
1050 			s1394_unlock_tree(hal);
1051 			CFGROM_READ_PAUSE(readdelay);
1052 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1053 				locked = 0;
1054 			} else if (s1394_read_config_quadlet(hal, cmd,
1055 			    &status) != DDI_SUCCESS) {
1056 				/*
1057 				 * Failed to get going. If command was
1058 				 * successfully handed over to the HAL,
1059 				 * don't free it (it will get freed
1060 				 * later in the callback).
1061 				 */
1062 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1063 					freecmd = 0;
1064 				}
1065 				if ((status & S1394_LOCK_FAILED) != 0) {
1066 					locked = 0;
1067 				} else {
1068 					if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1069 						s1394_free_cfgrom(hal, node,
1070 						    S1394_FREE_CFGROM_NEW);
1071 					} else {
1072 						CLEAR_CFGROM_STATE(node);
1073 					}
1074 				}
1075 			} else {
1076 				done = 0;
1077 				freecmd = 0;
1078 			}
1079 		} else {
1080 			if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1081 				s1394_free_cfgrom(hal, node,
1082 				    S1394_FREE_CFGROM_NEW);
1083 			} else {
1084 				CLEAR_CFGROM_STATE(node);
1085 			}
1086 		}
1087 	}
1088 bail:
1089 	if (freecmd) {
1090 		(void) s1394_free_cmd(hal, &cmd);
1091 	}
1092 
1093 	if (done) {
1094 		cmdret = S1394_HCMD_NODE_DONE;
1095 	}
1096 
1097 	/* if we are bailing out because locking failed, locked == 0 */
1098 	if (locked == 0)
1099 		cmdret = S1394_HCMD_LOCK_FAILED;
1100 	else
1101 		s1394_unlock_tree(hal);
1102 
1103 	return (cmdret);
1104 }
1105 
1106 /*
1107  * s1394_cfgrom_scan_phase2()
1108  *    Handles phase 2 of bus reset processing. Matches GUIDs between old
1109  *    and new topology trees to identify which node moved where. Processes
1110  *    the old topology tree (involves offlining any nodes that got unplugged
1111  *    between the last generation and the current generation). Updates speed
1112  *    map, sets up physical AR request filters and does isoch resource
1113  *    realloc failure notification and bus reset notifications. Then resends
1114  *    any commands that were issued by targets while the reset was being
1115  *    processed. Finally, the current topology tree is processed. This involves
1116  *    reading config rom past the bus info block for new nodes and parsing
1117  *    the config rom, creating a devinfo for each unit directory found in the
1118  *    config rom.
1119  *    Returns DDI_FAILURE if there was a bus reset during any of the function
1120  *    calls (as indicated by lock failures) or if any of the routine's callees
1121  *    return failure, else returns DDI_SUCCESS.
1122  */
1123 static int
1124 s1394_cfgrom_scan_phase2(s1394_hal_t *hal)
1125 {
1126 	int ret;
1127 	uint_t wait_gen;
1128 	int wait_for_cbs = 0;
1129 	t1394_localinfo_t localinfo;
1130 
1131 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1132 
1133 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1134 		return (DDI_FAILURE);
1135 	}
1136 
1137 	if (s1394_match_all_GUIDs(hal) == DDI_SUCCESS) {
1138 		s1394_unlock_tree(hal);
1139 	}
1140 
1141 	if (s1394_process_old_tree(hal) != DDI_SUCCESS) {
1142 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1143 		return (DDI_FAILURE);
1144 	}
1145 
1146 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1147 		return (DDI_FAILURE);
1148 	}
1149 
1150 	s1394_update_speed_map_link_speeds(hal);
1151 	s1394_unlock_tree(hal);
1152 
1153 	/* Setup physical AR request filters */
1154 	s1394_physical_arreq_setup_all(hal);
1155 
1156 	/* Notify targets of isoch resource realloc failures */
1157 	s1394_isoch_rsrc_realloc_notify(hal);
1158 
1159 	/* Notify targets of the end of bus reset processing */
1160 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1161 		return (DDI_FAILURE);
1162 	}
1163 
1164 	localinfo.bus_generation = hal->generation_count;
1165 	localinfo.local_nodeID = hal->node_id;
1166 
1167 	s1394_unlock_tree(hal);
1168 	s1394_target_bus_reset_notifies(hal, &localinfo);
1169 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1170 		return (DDI_FAILURE);
1171 	}
1172 
1173 	/* Set HAL state to normal */
1174 	if (hal->disable_requests_bit == 0)
1175 		hal->hal_state = S1394_HAL_NORMAL;
1176 	else
1177 		hal->hal_state = S1394_HAL_DREQ;
1178 
1179 	s1394_unlock_tree(hal);
1180 
1181 	/* Flush the pending Q */
1182 	s1394_resend_pending_cmds(hal);
1183 
1184 	if (s1394_process_topology_tree(hal, &wait_for_cbs, &wait_gen)) {
1185 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1186 		return (DDI_FAILURE);
1187 	}
1188 
1189 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1190 		return (DDI_FAILURE);
1191 	}
1192 
1193 	s1394_print_node_info(hal);
1194 
1195 	s1394_unlock_tree(hal);
1196 
1197 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1198 
1199 	ret = DDI_SUCCESS;
1200 
1201 	/*
1202 	 * If we started any reads, wait for completion callbacks
1203 	 */
1204 	if (wait_for_cbs != 0) {
1205 		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_gen,
1206 		    s1394_br_thread_handle_cmd_phase2);
1207 	}
1208 
1209 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1210 
1211 	return (ret);
1212 }
1213 
1214 /*
1215  * s1394_br_thread_handle_cmd_phase2()
1216  *    Process the cmd completion for phase 2 config rom reads. If all the
1217  *    needed quads are read, validates the config rom; if config rom is
1218  *    invalid (crc failures), frees the config rom, else marks the config rom
1219  *    valid and calls s1394_update_devinfo_tree() to parse the config rom.
1220  *    If need to get more quadlets, attempts to kick off the read and returns
1221  *    S1394_HCMD_NODE_EXPECT_MORE if successfully started the read. If a bus
1222  *    reset is seen while in this routine, returns S1394_HCMD_LOCK_FAILED. If
1223  *    done with the node (with or without crc errors), returns
1224  *    S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
1225  *    indicate not done with the node yet).
1226  */
1227 static hcmd_ret_t
1228 s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
1229 {
1230 	s1394_node_t *node;
1231 	uint32_t node_num, quadlet, data;
1232 	int update_devinfo, locked, freecmd, done;
1233 	hcmd_ret_t cmdret;
1234 	uchar_t readdelay;
1235 	s1394_status_t status;
1236 
1237 	/*
1238 	 * we end up here if this is a brand new node or if it is a known node
1239 	 * but the config ROM changed (and triggered a re-read).
1240 	 */
1241 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1242 	ASSERT(quadlet == IEEE1394_BIB_QUAD_SZ || quadlet <
1243 	    IEEE1394_CONFIG_ROM_QUAD_SZ);
1244 
1245 	locked = freecmd = done = 1;
1246 	cmdret = S1394_HCMD_NODE_EXPECT_MORE;
1247 
1248 	update_devinfo = 0;
1249 
1250 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1251 		locked = 0;
1252 		goto bail;
1253 	}
1254 
1255 	node = &hal->topology_tree[node_num];
1256 
1257 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
1258 
1259 		ASSERT(CFGROM_BIB_READ(node) == B_TRUE);
1260 
1261 		node->cfgrom[quadlet] = data;
1262 
1263 		if (s1394_calc_next_quad(hal, node, quadlet, &quadlet) != 0) {
1264 			/*
1265 			 * Done with this node. Mark config rom valid and
1266 			 * update the devinfo tree for this node.
1267 			 */
1268 			node->cfgrom_valid_size = quadlet + 1;
1269 			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1270 				SET_CFGROM_ALL_READ(node);
1271 				update_devinfo++;
1272 			} else {
1273 				s1394_free_cfgrom(hal, node,
1274 				    S1394_FREE_CFGROM_BOTH);
1275 			}
1276 		} else {
1277 			CFGROM_GET_READ_DELAY(node, readdelay);
1278 			SETUP_QUAD_READ(node, 1, quadlet, 1);
1279 			s1394_unlock_tree(hal);
1280 			CFGROM_READ_PAUSE(readdelay);
1281 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1282 				locked = 0;
1283 			} else if (s1394_read_config_quadlet(hal, cmd,
1284 			    &status) != DDI_SUCCESS) {
1285 				/* give up on this guy */
1286 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1287 					freecmd = 0;
1288 				}
1289 				if ((status & S1394_LOCK_FAILED) != 0) {
1290 					locked = 0;
1291 				} else {
1292 					node->cfgrom_valid_size = quadlet;
1293 					if (s1394_valid_cfgrom(hal, node) ==
1294 					    B_TRUE) {
1295 						SET_CFGROM_ALL_READ(node);
1296 						update_devinfo++;
1297 					} else {
1298 						s1394_free_cfgrom(hal, node,
1299 						    S1394_FREE_CFGROM_BOTH);
1300 					}
1301 				}
1302 			} else {
1303 				/* successfully started next read */
1304 				done = 0;
1305 				freecmd = 0;
1306 			}
1307 		}
1308 	} else {
1309 		node->cfgrom_read_fails++;
1310 		BUMP_CFGROM_READ_DELAY(node);
1311 
1312 		/* retry if not too many failures */
1313 		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1314 			CFGROM_GET_READ_DELAY(node, readdelay);
1315 			s1394_unlock_tree(hal);
1316 			SETUP_QUAD_READ(node, 0, quadlet, 1);
1317 			CFGROM_READ_PAUSE(readdelay);
1318 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1319 				locked = 0;
1320 			} else if (s1394_read_config_quadlet(hal, cmd,
1321 			    &status) != DDI_SUCCESS) {
1322 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1323 					freecmd = 0;
1324 				}
1325 				if ((status & S1394_LOCK_FAILED) != 0) {
1326 					locked = 0;
1327 				} else {
1328 					/* stop further reads */
1329 					node->cfgrom_valid_size = quadlet + 1;
1330 					if (s1394_valid_cfgrom(hal, node) ==
1331 					    B_TRUE) {
1332 						SET_CFGROM_ALL_READ(node);
1333 						update_devinfo++;
1334 					} else {
1335 						s1394_free_cfgrom(hal, node,
1336 						    S1394_FREE_CFGROM_BOTH);
1337 					}
1338 				}
1339 			} else {
1340 				/* successfully started next read */
1341 				done = 0;
1342 				freecmd = 0;
1343 			}
1344 		} else {
1345 			node->cfgrom_valid_size = quadlet + 1;
1346 			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1347 				SET_CFGROM_ALL_READ(node);
1348 				update_devinfo++;
1349 			} else {
1350 				s1394_free_cfgrom(hal, node,
1351 				    S1394_FREE_CFGROM_BOTH);
1352 			}
1353 		}
1354 	}
1355 bail:
1356 	if (freecmd) {
1357 		(void) s1394_free_cmd(hal, &cmd);
1358 	}
1359 
1360 	if (done) {
1361 		cmdret = S1394_HCMD_NODE_DONE;
1362 	}
1363 
1364 	if (update_devinfo) {
1365 		ASSERT(locked);
1366 		/*
1367 		 * s1394_update_devinfo_tree() drops and reacquires the
1368 		 * topology_tree_mutex. If tree lock fails, it returns
1369 		 * a DDI_FAILURE. Set locked to 0 in this case so that
1370 		 * we will return S1394_HCMD_LOCK_FAILED below
1371 		 */
1372 		if (s1394_update_devinfo_tree(hal, node) != DDI_SUCCESS) {
1373 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1374 			locked = 0;
1375 		}
1376 	}
1377 
1378 	/* if we are bailing out because locking failed, locked == 0 */
1379 	if (locked == 0)
1380 		cmdret = S1394_HCMD_LOCK_FAILED;
1381 	else
1382 		s1394_unlock_tree(hal);
1383 
1384 	return (cmdret);
1385 }
1386 
1387 /*
1388  * s1394_read_config_quadlet()
1389  *    Starts the read of a config quadlet (at the deduced cmd_addr).  Returns
1390  *    DDI_SUCCESS if the read was started with no errors, else DDI_FAILURE
1391  *    is returned, with status indicating the reason for the failure(s).
1392  */
1393 static int
1394 s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
1395     s1394_status_t *status)
1396 {
1397 	s1394_node_t *node;
1398 	int ret, err, node_num, quadlet;
1399 
1400 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1401 	node_num = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
1402 	node = &hal->topology_tree[node_num];
1403 	quadlet = node->cfgrom_quad_to_read;
1404 
1405 	/* Calculate the 64-bit address */
1406 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num, quadlet,
1407 	    cmd->cmd_addr);
1408 
1409 	*status = S1394_NOSTATUS;
1410 
1411 	ret = s1394_setup_asynch_command(hal, NULL, cmd, S1394_CMD_READ, &err);
1412 
1413 	if (ret != DDI_SUCCESS) {
1414 		*status |= S1394_UNKNOWN;
1415 		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1416 		return (DDI_FAILURE);
1417 	}
1418 
1419 	s1394_unlock_tree(hal);
1420 	ret = DDI_SUCCESS;
1421 	/* Send the command out */
1422 	if (s1394_xfer_asynch_command(hal, cmd, &err) == DDI_SUCCESS) {
1423 		/* Callers can expect a callback now */
1424 		*status |= S1394_CMD_INFLIGHT;
1425 	} else {
1426 
1427 		s1394_cmd_priv_t *s_priv;
1428 
1429 		/* Remove from queue */
1430 		s1394_remove_q_asynch_cmd(hal, cmd);
1431 		s_priv = S1394_GET_CMD_PRIV(cmd);
1432 
1433 		s_priv->cmd_in_use = B_FALSE;
1434 
1435 		*status |= S1394_XFER_FAILED;
1436 		ret = DDI_FAILURE;
1437 	}
1438 
1439 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1440 		*status |= S1394_LOCK_FAILED;
1441 		ret = DDI_FAILURE;
1442 	}
1443 
1444 	return (ret);
1445 }
1446 
1447 /*
1448  * s1394_cfgrom_read_callback()
1449  *    callback routine for config rom reads. Frees the command if it failed
1450  *    due to a bus reset, else appends the command to the completion queue
1451  *    and signals the completion queue cv.
1452  */
1453 static void
1454 s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd)
1455 {
1456 	cmd1394_cmd_t *tcmd;
1457 	s1394_cmd_priv_t *s_priv;
1458 	s1394_hal_t *hal;
1459 
1460 #if defined(DEBUG)
1461 	uint32_t node_num, quadlet, data;
1462 #endif
1463 
1464 	/* Get the Services Layer private area */
1465 	s_priv = S1394_GET_CMD_PRIV(cmd);
1466 
1467 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
1468 
1469 #if defined(DEBUG)
1470 
1471 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1472 
1473 #endif
1474 
1475 	if (cmd->cmd_result == CMD1394_EBUSRESET) {
1476 		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1477 	} else {
1478 		mutex_enter(&hal->br_cmplq_mutex);
1479 
1480 		/* Put the command on completion queue */
1481 		s_priv->cmd_priv_next = NULL;
1482 		if ((tcmd = hal->br_cmplq_tail) != NULL) {
1483 			s_priv = S1394_GET_CMD_PRIV(tcmd);
1484 
1485 			s_priv->cmd_priv_next = cmd;
1486 		}
1487 
1488 		hal->br_cmplq_tail = cmd;
1489 
1490 		if (hal->br_cmplq_head == NULL)
1491 			hal->br_cmplq_head = cmd;
1492 
1493 		cv_signal(&hal->br_cmplq_cv);
1494 		mutex_exit(&hal->br_cmplq_mutex);
1495 	}
1496 }
1497 
1498 /*
1499  * s1394_cfgrom_parse_unit_dir()
1500  *    Parses the unit directory passed in and returns reg[2...5] of reg
1501  *    property (see 1275 binding for reg property definition). Currently,
1502  *    returns 0 for all the values since none of the existing devices implement
1503  *    this and future devices, per P1212r, need a binding change.
1504  */
1505 /* ARGSUSED */
1506 void
1507 s1394_cfgrom_parse_unit_dir(uint32_t *unit_dir, uint32_t *addr_hi,
1508     uint32_t *addr_lo, uint32_t *size_hi, uint32_t *size_lo)
1509 {
1510 	*addr_hi = *addr_lo = *size_hi = *size_lo = 0;
1511 }
1512 
1513 /*
1514  * s1394_get_quad_info()
1515  *    Helper routine that picks apart the various fields of a 1394 address
1516  */
1517 static void
1518 s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num, uint32_t *quadlet,
1519     uint32_t *data)
1520 {
1521 	uint64_t addr;
1522 
1523 	addr = cmd->cmd_addr;
1524 	*node_num = IEEE1394_ADDR_PHY_ID(addr);
1525 	*quadlet = ((addr & IEEE1394_ADDR_OFFSET_MASK) -
1526 	    IEEE1394_CONFIG_ROM_ADDR);
1527 	*quadlet = (*quadlet >> 2);
1528 	*data = T1394_DATA32(cmd->cmd_u.q.quadlet_data);
1529 }
1530 
1531 /*
1532  * s1394_match_GUID()
1533  *    attempts to match nnode (which is in the current topology tree) with
1534  *    a node in the old topology tree by comparing GUIDs. If a match is found
1535  *    the old_node field of the current node and cur_node field of the old
1536  *    node are set to point to each other. Also, this routine makes both nodes
1537  *    point at the same config rom.  If unable to relock the tree, returns
1538  *    DDI_FAILURE, else returns DDI_SUCCESS.
1539  */
1540 static int
1541 s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode)
1542 {
1543 	int old_node;
1544 	int gen_changed;
1545 	uint32_t old_a, old_b;
1546 	uint32_t new_a, new_b;
1547 	s1394_node_t *onode;
1548 	s1394_target_t *t;
1549 	int	ret = DDI_SUCCESS;
1550 
1551 	ASSERT(nnode->cfgrom != NULL);
1552 	ASSERT(CFGROM_BIB_READ(nnode));
1553 
1554 	new_a = nnode->node_guid_hi;
1555 	new_b = nnode->node_guid_lo;
1556 
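	/* Walk the old tree looking for a node with a matching GUID */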
1557 	for (old_node = 0; old_node < hal->old_number_of_nodes; old_node++) {
1558 
1559 		onode = &hal->old_tree[old_node];
1560 		if (onode->cfgrom == NULL || CFGROM_BIB_READ(onode) == B_FALSE)
1561 			continue;
1562 
1563 		old_a = onode->node_guid_hi;
1564 		old_b = onode->node_guid_lo;
1565 
1566 		if ((old_a == new_a) && (old_b == new_b)) {
1567 
1568 			if (NODE_MATCHED(onode) == B_TRUE) {
1569 				cmn_err(CE_NOTE, "!Duplicate GUIDs: %08x%08x",
1570 				    old_a, old_b);
1571 				/* offline the new node that last matched */
1572 				ret = s1394_offline_node(hal, onode->cur_node);
1573 				/* and make the current new node invalid */
1574 				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1575 				s1394_free_cfgrom(hal, nnode,
1576 				    S1394_FREE_CFGROM_NEW);
1577 				break;
1578 			}
1579 
1580 			/*
1581 			 * If there is indeed a cfgrom gen change,
1582 			 * CFGROM_GEN_CHANGED() will be set iff we are matching
1583 			 * tree nodes. Otherwise, CONFIG_ROM_GEN(old) !=
1584 			 * CONFIG_ROM_GEN(new).
1585 			 */
1586 			if (CFGROM_GEN_CHANGED(nnode) == B_TRUE ||
1587 			    (CONFIG_ROM_GEN(onode->cfgrom) !=
1588 			    CONFIG_ROM_GEN(nnode->cfgrom))) {
1589 				gen_changed = 1;
1590 			} else {
1591 				gen_changed = 0;
1592 			}
1593 
1594 			onode->cur_node = nnode;
1595 			nnode->old_node = onode;
1596 			nnode->node_state = onode->node_state;
1597 			SET_NODE_VISITED(onode);
1598 			SET_NODE_MATCHED(onode);
1599 			SET_NODE_MATCHED(nnode);
1600 			/*
1601 			 * If generations changed, need to offline any targets
1602 			 * hanging off the old node, prior to freeing up old
1603 			 * cfgrom. If the generations didn't change, we can
1604 			 * free up the new config rom and copy all info from
1605 			 * the old node (this helps in picking up further
1606 			 * reads from where the last generation left off).
1607 			 */
1608 			if (gen_changed == 1) {
1609 				if (s1394_offline_node(hal, onode)) {
1610 					ret = DDI_FAILURE;
1611 					break;
1612 				}
1613 				s1394_free_cfgrom(hal, onode,
1614 				    S1394_FREE_CFGROM_OLD);
1615 				CLEAR_CFGROM_PARSED(nnode);
1616 				CLEAR_CFGROM_NEW_ALLOC(nnode);
1617 				CLEAR_CFGROM_NEW_ALLOC(onode);
1618 				onode->cfgrom = nnode->cfgrom;
1619 				/* done */
1620 				break;
1621 			}
1622 
1623 			/*
1624 			 * Free up cfgrom memory in the new_node and
1625 			 * point it at the same config rom as the old one.
1626 			 */
1627 			if (onode->cfgrom != nnode->cfgrom) {
1628 				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1629 				s1394_free_cfgrom(hal, nnode,
1630 				    S1394_FREE_CFGROM_NEW);
1631 			}
1632 			nnode->cfgrom = onode->cfgrom;
1633 			nnode->cfgrom_state = onode->cfgrom_state;
1634 			nnode->cfgrom_valid_size = onode->cfgrom_valid_size;
1635 			nnode->cfgrom_size = onode->cfgrom_size;
1636 			nnode->cfgrom_quad_to_read = onode->cfgrom_quad_to_read;
1637 			bcopy(onode->dir_stack, nnode->dir_stack,
1638 			    offsetof(s1394_node_t, cfgrom_quad_to_read) -
1639 			    offsetof(s1394_node_t, dir_stack));
1640 			CLEAR_CFGROM_NEW_ALLOC(nnode);
1641 			CLEAR_CFGROM_NEW_ALLOC(onode);
1642 
1643 			if (CFGROM_PARSED(nnode) == B_TRUE) {
1644 				rw_enter(&hal->target_list_rwlock, RW_WRITER);
1645 				/* Update the target list */
1646 				if ((t = onode->target_list) != NULL) {
1647 					nnode->target_list = t;
1648 					while (t != NULL) {
1649 						t->on_node = nnode;
1650 						t = t->target_sibling;
1651 					}
1652 				}
1653 				rw_exit(&hal->target_list_rwlock);
1654 			}
1655 			break;
1656 		}
1657 	}
1658 
1659 	return (ret);
1660 }
1661 
1662 /*
1663  * s1394_match_all_GUIDs()
1664  *    attempts to match each node in the current topology tree with a
1665  *    node in the old topology tree.  If unable to relock the tree, returns
1666  *    DDI_FAILURE, else returns DDI_SUCCESS.
1667  */
1668 static int
1669 s1394_match_all_GUIDs(s1394_hal_t *hal)
1670 {
1671 	int node;
1672 	int ret = DDI_SUCCESS;
1673 	s1394_node_t *nnode;
1674 
1675 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1676 
1677 	for (node = 0; node < hal->number_of_nodes; node++) {
1678 		nnode = &hal->topology_tree[node];
1679 		if (LINK_ACTIVE(nnode) == B_FALSE || CFGROM_BIB_READ(nnode) ==
1680 		    B_FALSE)
1681 			continue;
1682 		if (NODE_MATCHED(nnode)) {
1683 			/*
1684 			 * Skip if node matched. If config rom generations
1685 			 * changed, we want to call s1394_match_GUID() even
1686 			 * if the nodes matched.
1687 			 */
1688 			int gen_changed;
1689 			s1394_node_t *onode = nnode->old_node;
1690 
1691 			gen_changed = (onode && onode->cfgrom &&
1692 			    CONFIG_ROM_GEN(onode->cfgrom) != CONFIG_ROM_GEN(
1693 			    nnode->cfgrom)) ? 1 : 0;
1694 
1695 			if (CFGROM_GEN_CHANGED(nnode) == 0 && gen_changed == 0)
1696 				continue;
1697 		}
1698 
1699 		if (s1394_match_GUID(hal, nnode) == DDI_FAILURE) {
1700 			ret = DDI_FAILURE;
1701 		}
1702 	}
1703 
1704 	return (ret);
1705 }
1706 
1707 /*
1708  * s1394_valid_cfgrom()
1709  *    Performs crc check on the config rom. Returns B_TRUE if config rom has
1710  *    a good CRC, else returns B_FALSE.
1711  */
1712 /* ARGSUSED */
1713 boolean_t
1714 s1394_valid_cfgrom(s1394_hal_t *hal, s1394_node_t *node)
1715 {
1716 	uint32_t crc_len, crc_value, CRC, CRC_old, quad0;
1717 
1718 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1719 	ASSERT(node->cfgrom);
1720 
1721 	if (s1394_enable_crc_validation == 0) {
1722 		return (B_TRUE);
1723 	}
1724 
1725 	quad0 = node->cfgrom[0];
1726 	crc_len = (quad0 >> IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
1727 	    IEEE1394_CFG_ROM_CRC_LEN_MASK;
1728 	crc_value = quad0 & IEEE1394_CFG_ROM_CRC_VALUE_MASK;
1729 
1730 	if (node->cfgrom_valid_size < crc_len + 1) {
1731 		return (B_FALSE);
1732 	}
1733 
1734 	CRC = s1394_CRC16(&node->cfgrom[1], crc_len);
1735 
1736 	if (CRC != crc_value) {
1737 		CRC_old = s1394_CRC16_old(&node->cfgrom[1], crc_len);
1738 		if (CRC_old == crc_value) {
1739 			return (B_TRUE);
1740 		}
1741 
1742 		cmn_err(CE_NOTE,
1743 		    "!Bad CRC in config rom (node's GUID %08x%08x)",
1744 		    node->node_guid_hi, node->node_guid_lo);
1745 
1746 		return (B_FALSE);
1747 	}
1748 
1749 	return (B_TRUE);
1750 }
1751 
1752 /*
1753  * s1394_valid_dir()
1754  *    Performs crc check on a directory.  Returns B_TRUE if dir has good CRC,
1755  *    else returns B_FALSE.
1756  */
1757 /*ARGSUSED*/
1758 boolean_t
1759 s1394_valid_dir(s1394_hal_t *hal, s1394_node_t *node,
1760     uint32_t key, uint32_t *dir)
1761 {
1762 	uint32_t dir_len, crc_value, CRC, CRC_old, quad0;
1763 
1764 	/*
1765 	 * Ideally, we would like to do crc validations for the entire cfgrom
1766 	 * as well as the individual directories. However, we have seen devices
1767 	 * that have valid directories but busted cfgrom crc and devices that
1768 	 * have bad crcs in directories as well as for the entire cfgrom. This
1769 	 * is sad, but unfortunately, real world!
1770 	 */
1771 	if (s1394_enable_crc_validation == 0) {
1772 		return (B_TRUE);
1773 	}
1774 
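	/*
	 * A directory header quadlet carries the directory length and a
	 * CRC-16 covering the next dir_len quadlets; validate it the same
	 * way the config rom header is validated above.
	 */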
1775 	quad0 = dir[0];
1776 
1777 	dir_len = IEEE1212_DIR_LEN(quad0);
1778 	crc_value = IEEE1212_DIR_CRC(quad0);
1779 
1780 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1781 
1782 	CRC = s1394_CRC16(&dir[1], dir_len);
1783 
1784 	if (CRC != crc_value) {
1785 		CRC_old = s1394_CRC16_old(&dir[1], dir_len);
1786 		if (CRC_old == crc_value) {
1787 			return (B_TRUE);
1788 		}
1789 
1790 		return (B_FALSE);
1791 	}
1792 
1793 	return (B_TRUE);
1794 }
1795 
1796 /*
1797  * s1394_become_bus_mgr()
1798  *    is a callback from a timeout() set up by the main br_thread.  After
1799  *    a bus reset, depending on the Bus Manager's incumbency and the state
1800  *    of its abdicate bit, a timer of a certain length is set.  After this
1801  *    time expires, the local host may attempt to become the Bus Manager.
1802  *    This is done by sending a request to the current IRM on the bus.  The
1803  *    IRM holds the BUS_MANAGER_ID register.  Depending on whether or not
1804  *    the local host is already the IRM, we will send a request onto the
1805  *    1394 bus or call into the HAL.
1806  */
1807 static void
1808 s1394_become_bus_mgr(void *arg)
1809 {
1810 	s1394_hal_t	 *hal;
1811 	s1394_cmd_priv_t *s_priv;
1812 	cmd1394_cmd_t	 *cmd;
1813 	uint64_t	 Bus_Mgr_ID_addr;
1814 	uint32_t	 hal_node_num;
1815 	uint32_t	 old_value;
1816 	uint32_t	 generation;
1817 	uint_t		 curr_bus_mgr;
1818 	uint_t		 bm_node;
1819 	uint_t		 IRM_node;
1820 	int		 err;
1821 	int		 ret;
1822 
1823 	hal = (s1394_hal_t *)arg;
1824 
1825 	/* Lock the topology tree */
1826 	mutex_enter(&hal->topology_tree_mutex);
1827 
1828 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1829 	generation   = hal->generation_count;
1830 	IRM_node = hal->IRM_node;
1831 
1832 	mutex_enter(&hal->bus_mgr_node_mutex);
1833 	bm_node = hal->bus_mgr_node;
1834 	mutex_exit(&hal->bus_mgr_node_mutex);
1835 
1836 	/* Unlock the topology tree */
1837 	mutex_exit(&hal->topology_tree_mutex);
1838 
1839 	/* If a Bus Manager has already been chosen, there's nothing to do */
1840 	if (bm_node != -1) {
1841 		return;
1842 	}
1843 
1844 	/* Send compare-swap to BUS_MANAGER_ID */
1845 	/* register on the Isoch Rsrc Mgr */
1846 	if (IRM_node == hal_node_num) {
1847 		/* Local */
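		/*
		 * Compare-swap the BUS_MANAGER_ID register in our own CSR
		 * space: if it still reads "invalid" (no Bus Manager chosen
		 * yet), atomically replace it with our node number.  The
		 * old_value returned tells us who, if anyone, got there
		 * first.
		 */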
1848 		ret = HAL_CALL(hal).csr_cswap32(hal->halinfo.hal_private,
1849 		    generation, (IEEE1394_SCSR_BUSMGR_ID &
1850 		    IEEE1394_CSR_OFFSET_MASK), S1394_INVALID_NODE_NUM,
1851 		    hal_node_num, &old_value);
1852 		if (ret != DDI_SUCCESS) {
1853 			return;
1854 		}
1855 		curr_bus_mgr = IEEE1394_NODE_NUM(old_value);
1856 
1857 		mutex_enter(&hal->bus_mgr_node_mutex);
1858 		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
1859 		    (curr_bus_mgr == hal_node_num)) {
1860 			hal->bus_mgr_node = hal_node_num;
1861 			hal->incumbent_bus_mgr = B_TRUE;
1862 		} else {
1863 			hal->bus_mgr_node = curr_bus_mgr;
1864 			hal->incumbent_bus_mgr = B_FALSE;
1865 		}
1866 		cv_signal(&hal->bus_mgr_node_cv);
1867 		mutex_exit(&hal->bus_mgr_node_mutex);
1868 
1869 	} else {
1870 		/* Remote */
1871 		if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
1872 		    DDI_SUCCESS) {
1873 			return;
1874 		}
1875 
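		/*
		 * Build a quadlet compare-swap (lock) request against the
		 * BUS_MANAGER_ID register on the remote IRM: compare against
		 * "no manager" and attempt to swap in our own node number.
		 * The outcome is examined in s1394_become_bus_mgr_callback().
		 */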
1876 		cmd->cmd_options	   = (CMD1394_CANCEL_ON_BUS_RESET |
1877 		    CMD1394_OVERRIDE_ADDR);
1878 		cmd->cmd_type		   = CMD1394_ASYNCH_LOCK_32;
1879 		cmd->completion_callback   = s1394_become_bus_mgr_callback;
1880 		Bus_Mgr_ID_addr		   = (IEEE1394_ADDR_BUS_ID_MASK |
1881 		    IEEE1394_SCSR_BUSMGR_ID) |
1882 		    (((uint64_t)hal->IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
1883 		cmd->cmd_addr		   = Bus_Mgr_ID_addr;
1884 		cmd->bus_generation	   = generation;
1885 		cmd->cmd_u.l32.arg_value   = T1394_DATA32(
1886 		    S1394_INVALID_NODE_NUM);
1887 		cmd->cmd_u.l32.data_value  = T1394_DATA32(hal_node_num);
1888 		cmd->cmd_u.l32.num_retries = 0;
1889 		cmd->cmd_u.l32.lock_type   = CMD1394_LOCK_COMPARE_SWAP;
1890 
1891 		/* Get the Services Layer private area */
1892 		s_priv = S1394_GET_CMD_PRIV(cmd);
1893 
1894 		/* Lock the topology tree */
1895 		mutex_enter(&hal->topology_tree_mutex);
1896 
1897 		ret = s1394_setup_asynch_command(hal, NULL, cmd,
1898 		    S1394_CMD_LOCK, &err);
1899 
1900 		/* Unlock the topology tree */
1901 		mutex_exit(&hal->topology_tree_mutex);
1902 
1903 		/* Command has now been put onto the queue! */
1904 		if (ret != DDI_SUCCESS) {
1905 			/* Need to free the command */
1906 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1907 			return;
1908 		}
1909 
1910 		/* Send the command out */
1911 		ret = s1394_xfer_asynch_command(hal, cmd, &err);
1912 
1913 		if (ret != DDI_SUCCESS) {
1914 			/* Remove cmd outstanding request Q */
1915 			s1394_remove_q_asynch_cmd(hal, cmd);
1916 
1917 			s_priv->cmd_in_use = B_FALSE;
1918 
1919 			mutex_enter(&hal->bus_mgr_node_mutex);
1920 
1921 			/* Don't know who the bus_mgr is */
1922 			hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
1923 			hal->incumbent_bus_mgr = B_FALSE;
1924 
1925 			cv_signal(&hal->bus_mgr_node_cv);
1926 			mutex_exit(&hal->bus_mgr_node_mutex);
1927 
1928 			/* Need to free the command */
1929 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1930 		}
1931 	}
1932 }
1933 
1934 /*
1935  * s1394_become_bus_mgr_callback()
1936  *    is the callback used by s1394_become_bus_mgr() when it is necessary
1937  *    to send the Bus Manager request to a remote IRM.  After the completion
1938  *    of the compare-swap request, this routine looks at the "old_value"
1939  *    in the request to determine whether or not it has become the Bus
1940  *    Manager for the current generation.  It sets the bus_mgr_node and
1941  *    incumbent_bus_mgr fields to their appropriate values.
1942  */
1943 static void
1944 s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd)
1945 {
1946 	s1394_cmd_priv_t *s_priv;
1947 	s1394_hal_t *hal;
1948 	uint32_t hal_node_num;
1949 	uint32_t temp;
1950 	uint_t curr_bus_mgr;
1951 
1952 	/* Get the Services Layer private area */
1953 	s_priv = S1394_GET_CMD_PRIV(cmd);
1954 
1955 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
1956 
1957 	/* Lock the topology tree */
1958 	mutex_enter(&hal->topology_tree_mutex);
1959 
1960 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1961 
1962 	/* Was the command successful? */
1963 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
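		/*
		 * old_value holds the BUS_MANAGER_ID contents prior to our
		 * swap.  If it was still "invalid" (no Bus Manager yet) or
		 * already our own node number, the local node is now the
		 * Bus Manager.
		 */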
1964 		temp = T1394_DATA32(cmd->cmd_u.l32.old_value);
1965 		curr_bus_mgr = IEEE1394_NODE_NUM(temp);
1966 		mutex_enter(&hal->bus_mgr_node_mutex);
1967 		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
1968 		    (curr_bus_mgr == hal_node_num)) {
1969 
1970 			hal->bus_mgr_node = hal_node_num;
1971 			hal->incumbent_bus_mgr = B_TRUE;
1972 
1973 		} else {
1974 			hal->bus_mgr_node = curr_bus_mgr;
1975 			hal->incumbent_bus_mgr = B_FALSE;
1976 		}
1977 		cv_signal(&hal->bus_mgr_node_cv);
1978 		mutex_exit(&hal->bus_mgr_node_mutex);
1979 
1980 	} else {
1981 		mutex_enter(&hal->bus_mgr_node_mutex);
1982 
1983 		/* Don't know who the bus_mgr is */
1984 		hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
1985 		hal->incumbent_bus_mgr = B_FALSE;
1986 
1987 		cv_signal(&hal->bus_mgr_node_cv);
1988 		mutex_exit(&hal->bus_mgr_node_mutex);
1989 	}
1990 
1991 	/* Need to free the command */
1992 	(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1993 
1994 	/* Unlock the topology tree */
1995 	mutex_exit(&hal->topology_tree_mutex);
1996 }
1997 
1998 /*
1999  * s1394_bus_mgr_processing()
2000  *    is called following "phase1" completion of reading Bus_Info_Blocks.
2001  *    Its purpose is to determine whether the local node is capable of
2002  *    becoming the Bus Manager (has the IRMC bit set) and if so to call
2003  *    the s1394_do_bus_mgr_processing() routine.
2004  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2005  *    the start of bus reset processing.
2006  */
2007 static int
2008 s1394_bus_mgr_processing(s1394_hal_t *hal)
2009 {
2010 	int ret;
2011 	int IRM_node_num;
2012 
2013 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2014 
2015 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2016 		return (DDI_FAILURE);
2017 	}
2018 	IRM_node_num = hal->IRM_node;
2019 	s1394_unlock_tree(hal);
2020 
2021 	ret = DDI_SUCCESS;
2022 
2023 	/* If we are IRM capable, then do bus_mgr stuff... */
2024 	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2025 		/* If there is an IRM, then do bus_mgr stuff */
2026 		if (IRM_node_num != -1) {
2027 			if (s1394_do_bus_mgr_processing(hal))
2028 				ret = DDI_FAILURE;
2029 		}
2030 	}
2031 
2032 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2033 
2034 	return (ret);
2035 }
2036 
2037 /*
2038  * s1394_do_bus_mgr_processing()
2039  *    is used to perform those operations expected of the Bus Manager.
2040  *    After being called, s1394_do_bus_mgr_processing() looks at the value
2041  *    in bus_mgr_node and waits if it is -1 (Bus Manager has not been
2042  *    chosen yet).  Then, if there is more than one node on the 1394 bus,
2043  *    and we are either the Bus Manager or (if there is no Bus Manager)
2044  *    the IRM, it optimizes the gap_count and/or sets the cycle master's
2045  *    root holdoff bit (to ensure that the cycle master is/stays root).
2046  *
2047  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2048  *    the start of bus reset processing.
2049  */
2050 static int
2051 s1394_do_bus_mgr_processing(s1394_hal_t *hal)
2052 {
2053 	int	ret;
2054 	int	IRM_flags, hal_bus_mgr_node;
2055 	int	IRM_node_num;
2056 	uint_t	hal_node_num, number_of_nodes;
2057 	int	new_root, new_gap_cnt;
2058 
2059 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2060 
2061 	/* Wait for Bus Manager to be determined */
2062 	/* or a Bus Reset to happen */
2063 	mutex_enter(&hal->bus_mgr_node_mutex);
2064 	if (hal->bus_mgr_node == -1)
2065 		cv_wait(&hal->bus_mgr_node_cv, &hal->bus_mgr_node_mutex);
2066 
2067 	/* Check if a BUS RESET has come while we've been waiting */
2068 	mutex_enter(&hal->br_thread_mutex);
2069 	if (hal->br_thread_ev_type & (BR_THR_CFGROM_SCAN | BR_THR_GO_AWAY)) {
2070 
2071 		mutex_exit(&hal->br_thread_mutex);
2072 		mutex_exit(&hal->bus_mgr_node_mutex);
2073 
2074 		return (1);
2075 	}
2076 	mutex_exit(&hal->br_thread_mutex);
2077 
2078 	hal_bus_mgr_node = hal->bus_mgr_node;
2079 	mutex_exit(&hal->bus_mgr_node_mutex);
2080 
2081 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2082 		return (1);
2083 	}
2084 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2085 	IRM_node_num = hal->IRM_node;
2086 	number_of_nodes = hal->number_of_nodes;
2087 
2088 	ret = 0;
2089 
2090 	/* If we are the bus_mgr, or there is no bus_mgr and we are */
2091 	/* the IRM, and there is more than 1 node on the bus */
2092 	if ((number_of_nodes > 1) &&
2093 	    ((hal_bus_mgr_node == (int)hal_node_num) ||
2094 		((hal_bus_mgr_node == S1394_INVALID_NODE_NUM) &&
2095 		    (IRM_node_num == (int)hal_node_num)))) {
2096 
2097 		IRM_flags = 0;
2098 
2099 		/* Make sure the root node is cycle master capable */
2100 		if (!s1394_cycle_master_capable(hal)) {
2101 			/* Make the local node root */
2102 			new_root = hal_node_num;
2103 			IRM_flags = IRM_flags | ROOT_HOLDOFF;
2104 
2105 			/* If setting root, then optimize gap_count */
2106 			new_gap_cnt = hal->optimum_gap_count;
2107 			IRM_flags = IRM_flags | GAP_COUNT;
2108 
2109 		} else {
2110 			/* Make sure root's ROOT_HOLDOFF bit is set */
2111 			new_root = (number_of_nodes - 1);
2112 			IRM_flags = IRM_flags | ROOT_HOLDOFF;
2113 		}
2114 		if (hal->gap_count > hal->optimum_gap_count) {
2115 			/* Set the gap_count to optimum */
2116 			new_gap_cnt = hal->optimum_gap_count;
2117 			IRM_flags = IRM_flags | GAP_COUNT;
2118 
2119 		}
2120 
2121 		s1394_unlock_tree(hal);
2122 
2123 		if (IRM_flags) {
2124 			ret = s1394_do_phy_config_pkt(hal, new_root,
2125 			    new_gap_cnt, IRM_flags);
2126 		}
2127 		return (ret);
2128 	}
2129 
2130 	s1394_unlock_tree(hal);
2131 
2132 	return (ret);
2133 }
2134 
2135 /*
2136  * s1394_bus_mgr_timers_stop()
2137  *    Cancels bus manager timeouts
2138  */
2139 /*ARGSUSED*/
2140 static void
2141 s1394_bus_mgr_timers_stop(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2142     timeout_id_t *bus_mgr_tid)
2143 {
2144 	/* Cancel the Bus Mgr timeouts (if necessary) */
2145 	if (*bus_mgr_tid != 0) {
2146 		(void) untimeout(*bus_mgr_tid);
2147 		*bus_mgr_tid = 0;
2148 	}
2149 	if (*bus_mgr_query_tid != 0) {
2150 		(void) untimeout(*bus_mgr_query_tid);
2151 		*bus_mgr_query_tid = 0;
2152 	}
2153 }
2154 
2155 /*
2156  * s1394_bus_mgr_timers_start()
2157  *    Starts bus manager timeouts if the hal is IRM capable.
2158  */
2159 static void
2160 s1394_bus_mgr_timers_start(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2161     timeout_id_t *bus_mgr_tid)
2162 {
2163 	boolean_t incumbent;
2164 	uint_t	  hal_node_num;
2165 	int	  IRM_node_num;
2166 
2167 	mutex_enter(&hal->topology_tree_mutex);
2168 
2169 	IRM_node_num = hal->IRM_node;
2170 	hal_node_num = hal->node_id;
2171 
2172 	mutex_enter(&hal->bus_mgr_node_mutex);
2173 	incumbent = hal->incumbent_bus_mgr;
2174 	mutex_exit(&hal->bus_mgr_node_mutex);
2175 
2176 	/* If we are IRM capable, then do bus_mgr stuff... */
2177 	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2178 		/*
2179 		 * If we are the IRM, then wait 625ms
2180 		 * before checking BUS_MANAGER_ID register
2181 		 */
2182 		if (IRM_node_num == IEEE1394_NODE_NUM(hal_node_num)) {
2183 
2184 			mutex_exit(&hal->topology_tree_mutex);
2185 
2186 			/* Wait 625ms, then check bus manager */
2187 			*bus_mgr_query_tid = timeout(s1394_become_bus_mgr,
2188 			    hal, drv_usectohz(IEEE1394_BM_IRM_TIMEOUT));
2189 
2190 			mutex_enter(&hal->topology_tree_mutex);
2191 		}
2192 
2193 		/* If there is an IRM on the bus */
2194 		if (IRM_node_num != -1) {
2195 			if ((incumbent == B_TRUE) &&
2196 			    (hal->abdicate_bus_mgr_bit == 0)) {
2197 				mutex_exit(&hal->topology_tree_mutex);
2198 
2199 				/* Try to become bus manager */
2200 				s1394_become_bus_mgr(hal);
2201 
2202 				mutex_enter(&hal->topology_tree_mutex);
2203 			} else {
2204 				hal->abdicate_bus_mgr_bit = 0;
2205 
2206 				mutex_exit(&hal->topology_tree_mutex);
2207 
2208 				/* Wait 125ms, then try to become bus manager */
2209 				*bus_mgr_tid = timeout(s1394_become_bus_mgr,
2210 				    hal, drv_usectohz(
2211 					IEEE1394_BM_INCUMBENT_TIMEOUT));
2212 
2213 				mutex_enter(&hal->topology_tree_mutex);
2214 			}
2215 		} else {
2216 			mutex_enter(&hal->bus_mgr_node_mutex);
2217 			hal->incumbent_bus_mgr = B_FALSE;
2218 			mutex_exit(&hal->bus_mgr_node_mutex);
2219 		}
2220 	}
2221 
2222 	mutex_exit(&hal->topology_tree_mutex);
2223 }
2224 
2225 /*
2226  * s1394_get_maxpayload()
2227  *    is used to determine a device's maximum payload size.  That is to
2228  *    say, the largest packet that can be transmitted or received by
2229  *    the target device given the current topological (speed) constraints
2230  *    and the constraints specified in the local host's and remote device's
2231  *    Config ROM (max_rec).  Caller must hold the topology_tree_mutex and
2232  *    the target_list_rwlock as an RW_READER (at least).
2233  */
2234 /*ARGSUSED*/
2235 void
2236 s1394_get_maxpayload(s1394_target_t *target, uint_t *dev_max_payload,
2237     uint_t *current_max_payload)
2238 {
2239 	s1394_hal_t *hal;
2240 	uint32_t bus_capabilities;
2241 	uint32_t from_node;
2242 	uint32_t to_node;
2243 	uint_t local_max_rec;
2244 	uint_t local_max_blk;
2245 	uint_t max_rec;
2246 	uint_t max_blk;
2247 	uint_t curr_speed;
2248 	uint_t speed_max_blk;
2249 	uint_t temp;
2250 
2251 	/* Find the HAL this target resides on */
2252 	hal = target->on_hal;
2253 
2254 	/* Make sure we're holding the topology_tree_mutex */
2255 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2256 
2257 	/* Set dev_max_payload to local (HAL's) size */
2258 	bus_capabilities = target->on_hal->halinfo.bus_capabilities;
2259 	local_max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2260 	    IEEE1394_BIB_MAXREC_SHIFT;
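	/*
	 * max_rec encodes the largest asynchronous payload as
	 * 2^(max_rec + 1) bytes (e.g. max_rec = 8 gives 512 bytes);
	 * values outside the 1..13 range are treated as unspecified or
	 * reserved, so fall back to a single quadlet (4 bytes).
	 */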
2261 	if ((local_max_rec > 0) && (local_max_rec < 14)) {
2262 		local_max_blk = 1 << (local_max_rec + 1);
2263 	} else {
2264 		/* These are either unspecified or reserved */
2265 		local_max_blk = 4;
2266 	}
2267 
2268 	/* Is this target on a node? */
2269 	if ((target->target_state & S1394_TARG_GONE) == 0 &&
2270 	    (target->on_node != NULL)) {
2271 		ASSERT(target->on_node->cfgrom != NULL);
2272 
2273 		bus_capabilities =
2274 		    target->on_node->cfgrom[IEEE1212_NODE_CAP_QUAD];
2275 		max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2276 		    IEEE1394_BIB_MAXREC_SHIFT;
2277 
2278 		if ((max_rec > 0) && (max_rec < 14)) {
2279 			max_blk = 1 << (max_rec + 1);
2280 		} else {
2281 			/* These are either unspecified or reserved */
2282 			max_blk = 4;
2283 		}
2284 		(*dev_max_payload) = max_blk;
2285 
2286 		from_node = IEEE1394_NODE_NUM(target->on_hal->node_id);
2287 		to_node = (target->on_node->node_num);
2288 
2289 		/* Speed is to be filled in from speed map */
2290 		curr_speed = (uint_t)s1394_speed_map_get(target->on_hal,
2291 		    from_node, to_node);
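		/*
		 * The link speed between the two nodes caps the payload as
		 * well: 512 bytes at S100, doubling with each higher speed
		 * (1024 at S200, 2048 at S400, ...).  The effective maximum
		 * is the smallest of the two max_rec limits and this cap.
		 */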
2292 		speed_max_blk = 512 << curr_speed;
2293 		temp = (local_max_blk < max_blk) ? local_max_blk : max_blk;
2294 		(*current_max_payload) = (temp < speed_max_blk) ? temp :
2295 		    speed_max_blk;
2296 	} else {
2297 		/* Set dev_max_payload to local (HAL's) size */
2298 		(*dev_max_payload) = local_max_blk;
2299 		(*current_max_payload) = local_max_blk;
2300 	}
2301 }
2302 
2303 /*
2304  * s1394_cycle_master_capable()
2305  *    is used to determine whether or not the current root node on the
2306  *    1394 bus has its CMC-bit set in its Config ROM.  If not, then it
2307  *    is not capable of being cycle master and a new root node must be
2308  *    selected.
2309  */
2310 static int
2311 s1394_cycle_master_capable(s1394_hal_t *hal)
2312 {
2313 	s1394_node_t	*root;
2314 	int		cycle_master_capable;
2315 	uint_t		hal_node_num;
2316 
2317 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2318 
2319 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2320 
2321 	/* Get a pointer to the root node */
2322 	root = s1394_topology_tree_get_root_node(hal);
2323 
2324 	/* Ignore, if we are already root */
2325 	if (root == &hal->topology_tree[hal_node_num]) {
2326 		return (1);
2327 	}
2328 
2329 	/*
2330 	 * We want to pick a new root if the link is off or we don't
2331 	 * have a valid config rom
2332 	 */
2333 	if (LINK_ACTIVE(root) == B_FALSE || root->cfgrom == NULL ||
2334 	    CFGROM_BIB_READ(root) == 0) {
2335 
2336 		return (0);
2337 	}
2338 
2339 	/* Check the Cycle Master bit in the Bus Info Block */
2340 	cycle_master_capable = root->cfgrom[IEEE1212_NODE_CAP_QUAD] &
2341 	    IEEE1394_BIB_CMC_MASK;
2342 
2343 	if (cycle_master_capable) {
2344 		return (1);
2345 	} else {
2346 		return (0);
2347 	}
2348 }
2349 
2350 /*
2351  * s1394_do_phy_config_pkt()
2352  *    is called by s1394_do_bus_mgr_processing() to set up and send out
2353  *    a PHY configuration packet onto the 1394 bus.  Depending on the
2354  *    values in IRM_flags, the gap_count and root_holdoff bits on the
2355  *    bus will be affected by this packet.
2356  *
2357  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2358  *    the start of bus reset processing.
2359  */
2360 static int
2361 s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root, int new_gap_cnt,
2362     uint32_t IRM_flags)
2363 {
2364 	cmd1394_cmd_t	 *cmd;
2365 	s1394_cmd_priv_t *s_priv;
2366 	h1394_cmd_priv_t *h_priv;
2367 	uint32_t	 pkt_data = 0;
2368 	uint32_t	 gap_cnt = 0;
2369 	uint32_t	 root = 0;
2370 	int		 ret, result;
2371 	uint_t		 flags = 0;
2372 
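	/*
	 * A PHY configuration packet can carry two directives: the R bit
	 * plus a root ID to assert the root holdoff bit on that node, and
	 * the T bit plus a gap count to program a new gap count on all
	 * nodes.  Build the packet quadlet here and mirror the settings
	 * in the local PHY through the HAL.
	 */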
2373 	/* Gap count needs to be optimized */
2374 	if (IRM_flags & GAP_COUNT) {
2375 
2376 		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_T_BIT_MASK;
2377 		gap_cnt = ((uint32_t)new_gap_cnt) <<
2378 		    IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT;
2379 		gap_cnt = gap_cnt & IEEE1394_PHY_CONFIG_GAP_CNT_MASK;
2380 		pkt_data = pkt_data | gap_cnt;
2381 
2382 		(void) HAL_CALL(hal).set_gap_count(hal->halinfo.hal_private,
2383 		    (uint_t)new_gap_cnt);
2384 	}
2385 
2386 	/* Root node needs to be changed */
2387 	if (IRM_flags & ROOT_HOLDOFF) {
2388 
2389 		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_R_BIT_MASK;
2390 		root = ((uint32_t)new_root) <<
2391 		    IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT;
2392 		root = root & IEEE1394_PHY_CONFIG_ROOT_HOLD_MASK;
2393 		pkt_data = pkt_data | root;
2394 
2395 		(void) HAL_CALL(hal).set_root_holdoff_bit(
2396 		    hal->halinfo.hal_private);
2397 	}
2398 
2399 
2400 	if (IRM_flags) {
2401 		if (s1394_alloc_cmd(hal, flags, &cmd) != DDI_SUCCESS) {
2402 			return (0);
2403 		}
2404 
2405 		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2406 			/* lock tree failure indicates a bus gen change */
2407 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2408 			return (1);
2409 		}
2410 
2411 		/* Setup the callback routine */
2412 		cmd->completion_callback  = s1394_phy_config_callback;
2413 		cmd->cmd_callback_arg	  = (void *)(uintptr_t)IRM_flags;
2414 		cmd->bus_generation	  = hal->generation_count;
2415 		cmd->cmd_options	  = CMD1394_OVERRIDE_ADDR;
2416 		cmd->cmd_type		  = CMD1394_ASYNCH_WR_QUAD;
2417 		cmd->cmd_u.q.quadlet_data = pkt_data;
2418 
2419 		/* Get the Services Layer private area */
2420 		s_priv = S1394_GET_CMD_PRIV(cmd);
2421 
2422 		/* Get a pointer to the HAL private struct */
2423 		h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
2424 
2425 		s_priv->sent_by_target	= (s1394_target_t *)NULL;
2426 		s_priv->sent_on_hal	= (s1394_hal_t *)hal;
2427 
2428 		h_priv->bus_generation	= cmd->bus_generation;
2429 
2430 		/* Speed must be IEEE1394_S100 on PHY config packets */
2431 		s_priv->hal_cmd_private.speed = IEEE1394_S100;
2432 
2433 		/* Mark command as being used */
2434 		s_priv->cmd_in_use = B_TRUE;
2435 
2436 		s1394_unlock_tree(hal);
2437 
2438 		/* Put command on the HAL's outstanding request Q */
2439 		s1394_insert_q_asynch_cmd(hal, cmd);
2440 
2441 		ret = HAL_CALL(hal).send_phy_configuration_packet(
2442 		    hal->halinfo.hal_private, (cmd1394_cmd_t *)cmd,
2443 		    (h1394_cmd_priv_t *)&s_priv->hal_cmd_private, &result);
2444 
2445 		if (ret != DDI_SUCCESS) {
2446 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2447 
2448 			return (0);
2449 
2450 		} else {
2451 			/*
2452 			 * There will be a bus reset only if GAP_COUNT changed
2453 			 */
2454 			if (IRM_flags & GAP_COUNT) {
2455 				return (1);
2456 			}
2457 		}
2458 	}
2459 
2460 	return (0);
2461 }
2462 
2463 /*
2464  * s1394_phy_config_callback()
2465  *    is the callback called after the PHY configuration packet has been
2466  *    sent out onto the 1394 bus.  Depending on the values in IRM_flags,
2467  *    (specifically if the gap_count has been changed) this routine may
2468  *    initiate a bus reset.
2469  */
2470 static void
2471 s1394_phy_config_callback(cmd1394_cmd_t *cmd)
2472 {
2473 	s1394_cmd_priv_t *s_priv;
2474 	s1394_hal_t *hal;
2475 	uint32_t IRM_flags;
2476 
2477 	/* Get the Services Layer private area */
2478 	s_priv = S1394_GET_CMD_PRIV(cmd);
2479 
2480 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
2481 
2482 	IRM_flags = (uint32_t)(uintptr_t)cmd->cmd_callback_arg;
2483 
2484 	if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
2485 		(void) s1394_free_cmd(hal, &cmd);
2486 	} else {
2487 		(void) s1394_free_cmd(hal, &cmd);
2488 
2489 		/* Only need a bus reset if we changed GAP_COUNT */
2490 		if (IRM_flags & GAP_COUNT) {
2491 			s1394_initiate_hal_reset(hal, NON_CRITICAL);
2492 		}
2493 	}
2494 }
2495 
2496 /*
2497  * s1394_lock_tree()
2498  *    Attempts to lock the topology tree. Returns DDI_FAILURE if generations
2499  *    changed or if the services layer signals the bus reset thread to go
2500  *    away. Otherwise, returns DDI_SUCCESS.
2501  */
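/*
 * Illustrative locking pattern used throughout this file (sketch only):
 *
 *	if (s1394_lock_tree(hal) != DDI_SUCCESS)
 *		return (DDI_FAILURE);	(generation changed, or the bus
 *					 reset thread was told to go away)
 *	... examine or update the topology tree ...
 *	s1394_unlock_tree(hal);
 */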
2502 int
2503 s1394_lock_tree(s1394_hal_t *hal)
2504 {
2505 	int	circular;
2506 
2507 	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
2508 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2509 
2510 	mutex_enter(&hal->br_thread_mutex);
2511 	ndi_devi_enter(hal->halinfo.dip, &circular);
2512 	mutex_enter(&hal->topology_tree_mutex);
2513 
2514 	if ((hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
2515 		mutex_exit(&hal->br_thread_mutex);
2516 		mutex_exit(&hal->topology_tree_mutex);
2517 		ndi_devi_exit(hal->halinfo.dip, circular);
2518 		return (DDI_FAILURE);
2519 	} else if (hal->br_cfgrom_read_gen != hal->generation_count) {
2520 		mutex_exit(&hal->br_thread_mutex);
2521 		mutex_exit(&hal->topology_tree_mutex);
2522 		ndi_devi_exit(hal->halinfo.dip, circular);
2523 		return (DDI_FAILURE);
2524 	}
2525 
2526 	mutex_exit(&hal->br_thread_mutex);
2527 
2528 	return (DDI_SUCCESS);
2529 }
2530 
2531 /*
2532  * s1394_unlock_tree()
2533  *    Unlocks the topology tree
2534  */
2535 void
2536 s1394_unlock_tree(s1394_hal_t *hal)
2537 {
2538 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2539 	mutex_exit(&hal->topology_tree_mutex);
2540 	ndi_devi_exit(hal->halinfo.dip, 0);
2541 }
2542 
2543 /*
2544  * s1394_calc_next_quad()
2545  *    figures out the next quadlet to read. This maintains a stack of
2546  *    directories in the node. When the first quad of a directory (the
2547  *    first directory would be the root directory) is read, it is pushed on
2548  *    this stack. When the directory is all read, it scans the directory
2549  *    looking for indirect entries. If any indirect directory entry is found,
2550  *    it is pushed on the stack and that directory is read. If we are done
2551  *    dealing with all entries in the current dir, the directory is popped
2552  *    off the stack. If the stack is empty, we are back at the root directory
2553  *    level and have essentially read the entire directory hierarchy.
2554  *    Returns 0 if there are more quads to read, else returns non-zero.
2555  */
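/*
 * Illustrative walk (sketch only): reading quad 0 of the root directory
 * records cur_dir_start and cur_dir_size.  Once the last quad of that
 * directory has been read, the directory is CRC-checked and scanned; a
 * leaf or directory entry at quad i whose value is an offset pushes the
 * current directory onto dir_stack and continues reading at quad
 * (i + value).  When that subordinate directory or leaf is exhausted,
 * the saved directory is popped and its scan resumes at dir_next_quad.
 */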
2556 static int
2557 s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node, uint32_t quadlet,
2558     uint32_t *nextquadp)
2559 {
2560 	uint32_t data, type, key, value, *ptr;
2561 
2562 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2563 
2564 	if (((quadlet + 1) >= node->cfgrom_size) ||
2565 	    (CFGROM_SIZE_IS_CRCSIZE(node) == B_TRUE && (quadlet + 1) >=
2566 		node->cfgrom_valid_size)) {
2567 		return (1);
2568 	}
2569 
2570 	if (s1394_turn_off_dir_stack != 0 || CFGROM_DIR_STACK_OFF(node) ==
2571 	    B_TRUE) {
2572 		quadlet++;
2573 		*nextquadp = quadlet;
2574 		return (0);
2575 	}
2576 
2577 	data = node->cfgrom[quadlet];
2578 
2579 	if (quadlet == IEEE1212_ROOT_DIR_QUAD) {
2580 		node->dir_stack_top = -1;
2581 		node->expected_dir_quad = quadlet;
2582 		node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2583 	}
2584 
2585 	CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2586 
2587 	/*
2588 	 * check to make sure we are looking at a dir. If the config rom
2589 	 * is broken, then revert to normal scanning of the config rom
2590 	 */
2591 	if (node->expected_dir_quad == quadlet) {
2592 		if (type != 0 || key != 0) {
2593 			SET_CFGROM_DIR_STACK_OFF(node);
2594 			quadlet = IEEE1212_ROOT_DIR_QUAD;
2595 		} else {
2596 			node->cur_dir_start = quadlet;
2597 			node->cur_dir_size = IEEE1212_DIR_LEN(data);
2598 			node->expected_dir_quad = 0;
2599 			/* get the next quad */
2600 			quadlet++;
2601 		}
2602 	} else {
2603 		/*
2604 		 * If we read all quads in cur dir and the cur dir is not
2605 		 * a leaf, scan for offsets (if the directory's CRC checks
2606 		 * out OK). If we have a directory or a leaf, we save the
2607 		 * current location on the stack and start reading that
2608 		 * directory. So, we will end up with a depth first read of
2609 		 * the entire config rom. If we are done with the current
2610 		 * directory, pop it off the stack and continue the scanning
2611 		 * as appropriate.
2612 		 */
2613 		if (quadlet == node->cur_dir_start + node->cur_dir_size) {
2614 
2615 			int i, top;
2616 			boolean_t done_with_cur_dir = B_FALSE;
2617 
2618 			if (node->expected_type == IEEE1212_LEAF_TYPE) {
2619 				node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2620 				done_with_cur_dir = B_TRUE;
2621 				goto donewithcurdir;
2622 			}
2623 
2624 			ptr = &node->cfgrom[node->cur_dir_start];
2625 			CFGROM_TYPE_KEY_VALUE(*ptr, type, key, value);
2626 
2627 			/*
2628 			 * If CRC for this directory is invalid, turn off
2629 			 * dir stack and start re-reading from root dir.
2630 			 * This wastes the work done thus far, but CRC
2631 			 * errors in directories should be rather rare.
2632 			 * if s1394_crcsz_is_cfgsz is set, then set
2633  *			 cfgrom_valid_size to the len specified as crc len
2634 			 * in quadlet 0.
2635 			 */
2636 			if (s1394_valid_dir(hal, node, key, ptr) == B_FALSE) {
2637 				SET_CFGROM_DIR_STACK_OFF(node);
2638 				if (s1394_crcsz_is_cfgsz != 0) {
2639 					SET_CFGROM_SIZE_IS_CRCSIZE(node);
2640 					node->cfgrom_valid_size =
2641 					    ((node->cfgrom[0] >>
2642 					    IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
2643 					    IEEE1394_CFG_ROM_CRC_LEN_MASK);
2644 				}
2645 				*nextquadp = IEEE1212_ROOT_DIR_QUAD;
2646 				return (0);
2647 			}
2648 			i = node->cur_dir_start + 1;
2649 		rescan:
2650 			for (done_with_cur_dir = B_FALSE; i <=
2651 			    node->cur_dir_start + node->cur_dir_size; i++) {
2652 				data = node->cfgrom[i];
2653 				CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2654 				/* read leaf type and directory types only */
2655 				if (type == IEEE1212_LEAF_TYPE || type ==
2656 				    IEEE1212_DIRECTORY_TYPE) {
2657 
2658 					/*
2659 					 * push current dir on stack; if the
2660 					 * stack is overflowing, i.e., too many
2661 					 * directory level nestings, turn off
2662 					 * dir stack and fall back to serial
2663 					 * scanning, starting at root dir. This
2664 					 * wastes all the work we have done
2665 					 * thus far, but more than 16 levels
2666 					 * of directories is rather odd...
2667 					 */
2668 					top = ++node->dir_stack_top;
2669 					if (top == S1394_DIR_STACK_SIZE) {
2670 						SET_CFGROM_DIR_STACK_OFF(node);
2671 						*nextquadp =
2672 						    IEEE1212_ROOT_DIR_QUAD;
2673 						return (0);
2674 					}
2675 
2676 					node->dir_stack[top].dir_start =
2677 					    node->cur_dir_start;
2678 					node->dir_stack[top].dir_size =
2679 					    node->cur_dir_size;
2680 					node->dir_stack[top].dir_next_quad =
2681 					    i + 1;
2682 					/* and set the next quadlet to read */
2683 					quadlet = i + value;
2684 					node->expected_dir_quad = quadlet;
2685 					node->expected_type = type;
2686 					break;
2687 				}
2688 			}
2689 
2690 		donewithcurdir:
2691 
2692 			if ((i > node->cur_dir_start + node->cur_dir_size) ||
2693 				done_with_cur_dir == B_TRUE) {
2694 
2695 				/*
2696 				 * all done with cur dir; pop it off the stack
2697 				 */
2698 				if (node->dir_stack_top >= 0) {
2699 					top = node->dir_stack_top--;
2700 					node->cur_dir_start =
2701 					    node->dir_stack[top].dir_start;
2702 					node->cur_dir_size =
2703 					    node->dir_stack[top].dir_size;
2704 					i = node->dir_stack[top].dir_next_quad;
2705 					goto rescan;
2706 				} else {
2707 					/*
2708 					 * if empty stack, we are at the top
2709 					 * level; declare done.
2710 					 */
2711 					return (1);
2712 				}
2713 			}
2714 		} else {
2715 			/* get the next quadlet */
2716 			quadlet++;
2717 		}
2718 	}
2719 	*nextquadp = quadlet;
2720 
2721 	return (0);
2722 }
2723