xref: /illumos-gate/usr/src/uts/common/io/1394/h1394.c (revision 012e6ce759c490003aed29439cc47d3d73a99ad3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * h1394.c
29  *    1394 Services Layer HAL Interface
30  *    Contains all of the routines that define the HAL to Services Layer
31  *    interface
32  */
33 
34 #include <sys/conf.h>
35 #include <sys/ddi.h>
36 #include <sys/sunddi.h>
37 #include <sys/modctl.h>
38 #include <sys/sunndi.h>
39 #include <sys/cmn_err.h>
40 #include <sys/types.h>
41 #include <sys/kmem.h>
42 #include <sys/thread.h>
43 #include <sys/proc.h>
44 #include <sys/disp.h>
45 #include <sys/time.h>
46 #include <sys/devctl.h>
47 #include <sys/1394/t1394.h>
48 #include <sys/1394/s1394.h>
49 #include <sys/1394/h1394.h>
50 #include <sys/1394/ieee1394.h>
51 
52 
53 extern struct bus_ops nx1394_busops;
54 extern int nx1394_define_events(s1394_hal_t *hal);
55 extern void nx1394_undefine_events(s1394_hal_t *hal);
56 extern int s1394_ignore_invalid_gap_cnt;
57 
58 /*
59  * Function:    h1394_init()
60  * Input(s):    modlp			The structure containing all of the
61  *					    HAL's relevant information
62  *
63  * Output(s):
64  *
65  * Description:	h1394_init() is called by the HAL's _init function and is
66  *		used to set up the nexus bus ops.
67  */
68 int
69 h1394_init(struct modlinkage *modlp)
70 {
71 	struct dev_ops	*devops;
72 
73 	devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
74 	devops->devo_bus_ops = &nx1394_busops;
75 
76 	return (0);
77 }
78 
79 /*
80  * Function:    h1394_fini()
81  * Input(s):    modlp			The structure containing all of the
82  *					    HAL's relevant information
83  *
84  * Output(s):
85  *
86  * Description:	h1394_fini() is called by the HAL's _fini function and is
87  *		used to NULL out the nexus bus ops.
88  */
89 void
90 h1394_fini(struct modlinkage *modlp)
91 {
92 	struct dev_ops	*devops;
93 
94 	devops = ((struct modldrv *)(modlp->ml_linkage[0]))->drv_dev_ops;
95 	devops->devo_bus_ops = NULL;
96 }
97 
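/*
 * Illustrative sketch (assumed HAL-side code, not part of this file):
 * a HAL driver's _init(9E) typically registers its bus ops through
 * h1394_init() before calling mod_install(), and undoes this with
 * h1394_fini() from _fini(9E):
 *
 *	int
 *	_init(void)
 *	{
 *		int status;
 *
 *		status = h1394_init(&modlinkage);
 *		if (status != 0)
 *			return (status);
 *		return (mod_install(&modlinkage));
 *	}
 */
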
98 /*
99  * Function:    h1394_attach()
100  * Input(s):    halinfo			The structure containing all of the
101  *					    HAL's relevant information
102  *		cmd			The ddi_attach_cmd_t that tells us
103  *					    if this is a RESUME or a regular
104  *					    attach() call
105  *
106  * Output(s):	sl_private		The HAL "handle" to be used for
107  *					    all subsequent calls into the
108  *					    1394 Software Framework
109  *
110  * Description:	h1394_attach() registers the HAL with the 1394 Software
111  *		Framework.  It returns a HAL "handle" to be used for
112  *		all subsequent calls into the 1394 Software Framework.
113  */
114 int
115 h1394_attach(h1394_halinfo_t *halinfo, ddi_attach_cmd_t cmd, void **sl_private)
116 {
117 	s1394_hal_t	*hal;
118 	int		ret;
119 	char		buf[32];
120 	uint_t		cmd_size;
121 
122 	ASSERT(sl_private != NULL);
123 
124 	/* If this is a DDI_RESUME, return success */
125 	if (cmd == DDI_RESUME) {
126 		hal = (s1394_hal_t *)(*sl_private);
127 		/* If we have a 1394A PHY, then re-enable the "contender bit" */
128 		if (hal->halinfo.phy == H1394_PHY_1394A)
129 			(void) HAL_CALL(hal).set_contender_bit(
130 			    hal->halinfo.hal_private);
131 		return (DDI_SUCCESS);
132 	} else if (cmd != DDI_ATTACH) {
133 		return (DDI_FAILURE);
134 	}
135 
136 	/* Allocate space for s1394_hal_t */
137 	hal = kmem_zalloc(sizeof (s1394_hal_t), KM_SLEEP);
138 
139 	/* Setup HAL state */
140 	hal->hal_state = S1394_HAL_INIT;
141 
142 	/* Copy in the halinfo struct */
143 	hal->halinfo = *halinfo;
144 
145 	/* Create the topology tree mutex */
146 	mutex_init(&hal->topology_tree_mutex, NULL, MUTEX_DRIVER,
147 	    hal->halinfo.hw_interrupt);
148 
149 	/* Create the Cycle Master timer mutex */
150 	mutex_init(&hal->cm_timer_mutex, NULL, MUTEX_DRIVER,
151 	    hal->halinfo.hw_interrupt);
152 
153 	/* Initialize the Isoch CEC list */
154 	hal->isoch_cec_list_head = NULL;
155 	hal->isoch_cec_list_tail = NULL;
156 	mutex_init(&hal->isoch_cec_list_mutex, NULL, MUTEX_DRIVER,
157 	    hal->halinfo.hw_interrupt);
158 
159 	/* Initialize the Bus Manager node ID mutex and cv */
160 	mutex_init(&hal->bus_mgr_node_mutex, NULL, MUTEX_DRIVER,
161 	    hal->halinfo.hw_interrupt);
162 	cv_init(&hal->bus_mgr_node_cv, NULL, CV_DRIVER,
163 	    hal->halinfo.hw_interrupt);
164 
165 	/* Initialize the Bus Manager node ID - "-1" means undetermined */
166 	hal->bus_mgr_node	= -1;
167 	hal->incumbent_bus_mgr	= B_FALSE;
168 
169 	/* Initialize the Target list */
170 	hal->target_head = NULL;
171 	hal->target_tail = NULL;
172 	rw_init(&hal->target_list_rwlock, NULL, RW_DRIVER,
173 	    hal->halinfo.hw_interrupt);
174 
175 	/* Setup Request Q's */
176 	hal->outstanding_q_head	= NULL;
177 	hal->outstanding_q_tail	= NULL;
178 	mutex_init(&hal->outstanding_q_mutex, NULL, MUTEX_DRIVER,
179 	    hal->halinfo.hw_interrupt);
180 	hal->pending_q_head	= NULL;
181 	hal->pending_q_tail	= NULL;
182 	mutex_init(&hal->pending_q_mutex, NULL, MUTEX_DRIVER,
183 	    hal->halinfo.hw_interrupt);
184 
185 	/* Create the kmem_cache for command allocations */
186 	(void) sprintf(buf, "hal%d_cache", ddi_get_instance(hal->halinfo.dip));
187 	cmd_size = sizeof (cmd1394_cmd_t) + sizeof (s1394_cmd_priv_t) +
188 	    hal->halinfo.hal_overhead;
189 
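	/*
	 * Each cache object is sized to hold the public cmd1394_cmd_t,
	 * the Services Layer private area (s1394_cmd_priv_t), and
	 * hal_overhead bytes reserved for the HAL, aligned on an 8-byte
	 * boundary.
	 */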
190 	hal->hal_kmem_cachep = kmem_cache_create(buf, cmd_size, 8, NULL, NULL,
191 	    NULL, NULL, NULL, 0);
192 
193 	/* Setup the event stuff */
194 	ret = nx1394_define_events(hal);
195 	if (ret != DDI_SUCCESS) {
196 		/* Clean up before leaving */
197 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL0);
198 
199 		return (DDI_FAILURE);
200 	}
201 
202 	/* Initialize the mutexes and cv's used by the bus reset thread */
203 	mutex_init(&hal->br_thread_mutex, NULL, MUTEX_DRIVER,
204 	    hal->halinfo.hw_interrupt);
205 	cv_init(&hal->br_thread_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);
206 	mutex_init(&hal->br_cmplq_mutex, NULL, MUTEX_DRIVER,
207 	    hal->halinfo.hw_interrupt);
208 	cv_init(&hal->br_cmplq_cv, NULL, CV_DRIVER, hal->halinfo.hw_interrupt);
209 
210 	/*
211 	 * Create a bus reset thread to handle the device discovery.
212 	 *    It should take the default stack sizes, it should run
213 	 *    the s1394_br_thread() routine at the start, passing the
214 	 *    HAL pointer as its argument.  The thread should belong to
215 	 *    process p0 (the system process), its state should be set
216 	 *    to runnable but not yet on a processor, and its scheduling
217 	 *    priority should be the minimum level of any system class.
218 	 */
219 	hal->br_thread = thread_create((caddr_t)NULL, 0, s1394_br_thread,
220 	    hal, 0, &p0, TS_RUN, minclsyspri);
221 
222 	/* Until we see a bus reset this HAL has no nodes */
223 	hal->number_of_nodes = 0;
224 	hal->num_bus_reset_till_fail = NUM_BR_FAIL;
225 
226 	/* Initialize the SelfID Info */
227 	hal->current_buffer = 0;
228 	hal->selfid_buf0 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);
229 	hal->selfid_buf1 = kmem_zalloc(S1394_SELFID_BUF_SIZE, KM_SLEEP);
230 
231 	/* Initialize kstat structures */
232 	ret = s1394_kstat_init(hal);
233 	if (ret != DDI_SUCCESS) {
234 		/* Clean up before leaving */
235 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL3);
236 
237 		return (DDI_FAILURE);
238 	}
239 	hal->hal_kstats->guid = hal->halinfo.guid;
240 
241 	/* Setup the node tree pointers */
242 	hal->old_tree	   = &hal->last_valid_tree[0];
243 	hal->topology_tree = &hal->current_tree[0];
244 
245 	/* Initialize the local Config ROM entry */
246 	ret = s1394_init_local_config_rom(hal);
247 	if (ret != DDI_SUCCESS) {
248 		/* Clean up before leaving */
249 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL4);
250 
251 		return (DDI_FAILURE);
252 	}
253 
254 	/* Initialize 1394 Address Space */
255 	ret = s1394_init_addr_space(hal);
256 	if (ret != DDI_SUCCESS) {
257 		/* Clean up before leaving */
258 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL5);
259 
260 		return (DDI_FAILURE);
261 	}
262 
263 	/* Initialize FCP subsystem */
264 	ret = s1394_fcp_hal_init(hal);
265 	if (ret != DDI_SUCCESS) {
266 		/* Clean up before leaving */
267 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL6);
268 
269 		return (DDI_FAILURE);
270 	}
271 
272 	/* Initialize the IRM node ID - "-1" means invalid, undetermined */
273 	hal->IRM_node = -1;
274 
275 	/* If we have a 1394A PHY, then set the "contender bit" */
276 	if (hal->halinfo.phy == H1394_PHY_1394A)
277 		(void) HAL_CALL(hal).set_contender_bit(
278 		    hal->halinfo.hal_private);
279 
280 	/* Add into linked list */
281 	mutex_enter(&s1394_statep->hal_list_mutex);
282 	if ((s1394_statep->hal_head == NULL) &&
283 	    (s1394_statep->hal_tail == NULL)) {
284 		s1394_statep->hal_head = hal;
285 		s1394_statep->hal_tail = hal;
286 	} else {
287 		s1394_statep->hal_tail->hal_next = hal;
288 		hal->hal_prev = s1394_statep->hal_tail;
289 		s1394_statep->hal_tail = hal;
290 	}
291 	mutex_exit(&s1394_statep->hal_list_mutex);
292 
293 	/* Fill in services layer private info */
294 	*sl_private = (void *)hal;
295 
296 	return (DDI_SUCCESS);
297 }
298 
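/*
 * Illustrative sketch (assumed HAL-side code): a HAL's attach(9E) fills
 * in an h1394_halinfo_t (dip, hal_private, hal_overhead, phy, guid, ...)
 * and registers with the framework, keeping the returned handle for all
 * later h1394_*() calls:
 *
 *	h1394_halinfo_t halinfo;
 *	void *sl_handle;
 *
 *	(initialize halinfo here)
 *	if (h1394_attach(&halinfo, cmd, &sl_handle) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 */
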
299 /*
300  * Function:    h1394_detach()
301  * Input(s):    sl_private		The HAL "handle" returned by
302  *					    h1394_attach()
303  *		cmd			The ddi_detach_cmd_t that tells us
304  *					    if this is a SUSPEND or a regular
305  *					    detach() call
306  *
307  * Output(s):	DDI_SUCCESS		HAL successfully detached
308  *		DDI_FAILURE		HAL failed to detach
309  *
310  * Description:	h1394_detach() unregisters the HAL from the 1394 Software
311  *		Framework.  It can be called during a SUSPEND operation or
312  *		for a real detach() event.
313  */
314 int
315 h1394_detach(void **sl_private, ddi_detach_cmd_t cmd)
316 {
317 	s1394_hal_t	*hal;
318 
319 	hal = (s1394_hal_t *)(*sl_private);
320 
321 	switch (cmd) {
322 	case DDI_DETACH:
323 		/* Clean up before leaving */
324 		s1394_cleanup_for_detach(hal, H1394_CLEANUP_LEVEL7);
325 		/* NULL out the HAL "handle" */
326 		*sl_private = NULL;
327 		break;
328 
329 	case DDI_SUSPEND:
330 		/* Turn off any timers that might be set */
331 		s1394_destroy_timers(hal);
332 		/* Set the hal_was_suspended bit */
333 		hal->hal_was_suspended = B_TRUE;
334 		break;
335 
336 	default:
337 		return (DDI_FAILURE);
338 	}
339 
340 	return (DDI_SUCCESS);
341 }
342 
343 /*
344  * Function:    h1394_alloc_cmd()
345  * Input(s):    sl_private		The HAL "handle" returned by
346  *					    h1394_attach()
347  *		flags			The flags parameter is described below
348  *
349  * Output(s):	cmdp			Pointer to the newly allocated command
350  *		hal_priv_ptr		Offset into the command, points to
351  *					    the HAL's private area
352  *
353  * Description:	h1394_alloc_cmd() allocates a command for use with the
354  *		h1394_read_request(), h1394_write_request(), or
355  *		h1394_lock_request() interfaces of the 1394 Software Framework.
356  *		By default, h1394_alloc_cmd() may sleep while allocating
357  *		memory for the command structure.  If this is undesirable,
358  *		the HAL may set the H1394_ALLOC_CMD_NOSLEEP bit in the flags
359  *		parameter.
360  */
361 int
362 h1394_alloc_cmd(void *sl_private, uint_t flags, cmd1394_cmd_t **cmdp,
363     h1394_cmd_priv_t **hal_priv_ptr)
364 {
365 	s1394_hal_t	 *hal;
366 	s1394_cmd_priv_t *s_priv;
367 
368 	hal = (s1394_hal_t *)sl_private;
369 
370 	if (s1394_alloc_cmd(hal, flags, cmdp) != DDI_SUCCESS) {
371 		return (DDI_FAILURE);
372 	}
373 
374 	/* Get the Services Layer private area */
375 	s_priv = S1394_GET_CMD_PRIV(*cmdp);
376 
377 	*hal_priv_ptr = &s_priv->hal_cmd_private;
378 
379 	return (DDI_SUCCESS);
380 }
381 
382 /*
383  * Function:    h1394_free_cmd()
384  * Input(s):    sl_private		The HAL "handle" returned by
385  *					    h1394_attach()
386  *		cmdp			Pointer to the command to be freed
387  *
388  * Output(s):	DDI_SUCCESS		HAL successfully freed command
389  *		DDI_FAILURE		HAL failed to free command
390  *
391  * Description:	h1394_free_cmd() attempts to free a command that has previously
392  *		been allocated by the HAL.  It is possible for h1394_free_cmd()
393  *		to fail because the command is currently in-use by the 1394
394  *		Software Framework.
395  */
396 int
397 h1394_free_cmd(void *sl_private, cmd1394_cmd_t **cmdp)
398 {
399 	s1394_hal_t	 *hal;
400 	s1394_cmd_priv_t *s_priv;
401 
402 	hal = (s1394_hal_t *)sl_private;
403 
404 	/* Get the Services Layer private area */
405 	s_priv = S1394_GET_CMD_PRIV(*cmdp);
406 
407 	/* Check that command isn't in use */
408 	if (s_priv->cmd_in_use == B_TRUE) {
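		/* This assertion fails on DEBUG kernels to flag the misuse */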
409 		ASSERT(s_priv->cmd_in_use == B_FALSE);
410 		return (DDI_FAILURE);
411 	}
412 
413 	kmem_cache_free(hal->hal_kmem_cachep, *cmdp);
414 
415 	/* Command pointer is set to NULL before returning */
416 	*cmdp = NULL;
417 
418 	/* kstats - number of cmds freed */
419 	hal->hal_kstats->cmd_free++;
420 
421 	return (DDI_SUCCESS);
422 }
423 
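/*
 * Illustrative sketch (assumed HAL-side code): allocating a command
 * without sleeping and freeing it once it is no longer in use:
 *
 *	cmd1394_cmd_t *cmd;
 *	h1394_cmd_priv_t *hal_priv;
 *
 *	if (h1394_alloc_cmd(sl_handle, H1394_ALLOC_CMD_NOSLEEP, &cmd,
 *	    &hal_priv) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...
 *	(void) h1394_free_cmd(sl_handle, &cmd);
 */
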
424 /*
425  * Function:    h1394_cmd_is_complete()
426  * Input(s):    sl_private		The HAL "handle" returned by
427  *					    h1394_attach()
428  *		command_id		Pointer to the command that has
429  *					    just completed
430  *		cmd_type		AT_RESP => AT response or AT_REQ =>
431  *					    AT request
432  *		status			Command's completion status
433  *
434  * Output(s):	None
435  *
436  * Description:	h1394_cmd_is_complete() is called by the HAL whenever an
437  *		outstanding command has completed (successfully or otherwise).
438  *		After determining whether it was an AT request or and AT
439  *		After determining whether it was an AT request or an AT
440  *		the appropriate handler in the 1394 Software Framework.
441  */
442 void
443 h1394_cmd_is_complete(void *sl_private, cmd1394_cmd_t *command_id,
444     uint32_t cmd_type, int status)
445 {
446 	s1394_hal_t	*hal;
447 	dev_info_t	*dip;
448 
449 	hal = (s1394_hal_t *)sl_private;
450 
451 	/* Is it AT_RESP or AT_REQ? */
452 	switch (cmd_type) {
453 	case H1394_AT_REQ:
454 		s1394_atreq_cmd_complete(hal, command_id, status);
455 		break;
456 
457 	case H1394_AT_RESP:
458 		s1394_atresp_cmd_complete(hal, command_id, status);
459 		break;
460 
461 	default:
462 		dip = hal->halinfo.dip;
463 
464 		/* An unexpected error in the HAL */
465 		cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
466 		    ddi_node_name(dip), ddi_get_instance(dip));
467 
468 		/* Disable the HAL */
469 		s1394_hal_shutdown(hal, B_TRUE);
470 
471 		break;
472 	}
473 }
474 
475 /*
476  * Function:    h1394_bus_reset()
477  * Input(s):    sl_private		The HAL "handle" returned by
478  *					    h1394_attach()
479  *
480  * Output(s):	selfid_buf_addr		The pointer to a buffer into which
481  *					    any Self ID packets should be put
482  *
483  * Description:	h1394_bus_reset() is called whenever a 1394 bus reset event
484  *		is detected by the HAL.  This routine simply prepares for
485  *		the subsequent Self ID packets.
486  */
487 void
488 h1394_bus_reset(void *sl_private, void **selfid_buf_addr)
489 {
490 	s1394_hal_t	*hal;
491 
492 	hal = (s1394_hal_t *)sl_private;
493 
494 	mutex_enter(&hal->topology_tree_mutex);
495 
496 	/* Update the HAL's state */
497 	if (hal->hal_state != S1394_HAL_SHUTDOWN) {
498 		hal->hal_state = S1394_HAL_RESET;
499 	} else {
500 		mutex_exit(&hal->topology_tree_mutex);
501 		return;
502 	}
503 
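	/*
	 * Track consecutive bus resets that this HAL initiated itself;
	 * the countdown is restored whenever a reset arrives that we did
	 * not initiate.
	 */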
504 	if (hal->initiated_bus_reset == B_TRUE) {
505 		hal->initiated_bus_reset = B_FALSE;
506 		if (hal->num_bus_reset_till_fail > 0) {
507 			hal->num_bus_reset_till_fail--;
508 		}
509 	} else {
510 		hal->num_bus_reset_till_fail = NUM_BR_FAIL;
511 	}
512 
513 	/* Reset the IRM node ID */
514 	hal->IRM_node = -1;
515 
516 	/* Slowest node defaults to IEEE1394_S400 */
517 	hal->slowest_node_speed = IEEE1394_S400;
518 
519 	/* Pick a SelfID buffer to give */
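	/*
	 * Alternating between the two SelfID buffers keeps the previous
	 * generation's data intact while it may still be in use.
	 */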
520 	if (hal->current_buffer == 0) {
521 		*selfid_buf_addr = (void *)hal->selfid_buf1;
522 		hal->current_buffer = 1;
523 	} else {
524 		*selfid_buf_addr = (void *)hal->selfid_buf0;
525 		hal->current_buffer = 0;
526 	}
527 
528 	/* Disable the CSR topology_map (temporarily) */
529 	s1394_CSR_topology_map_disable(hal);
530 
531 	mutex_exit(&hal->topology_tree_mutex);
532 
533 	/* Reset the Bus Manager node ID */
534 	mutex_enter(&hal->bus_mgr_node_mutex);
535 	hal->bus_mgr_node = -1;
536 	mutex_exit(&hal->bus_mgr_node_mutex);
537 }
538 
539 /*
540  * Function:    h1394_self_ids()
541  * Input(s):    sl_private		The HAL "handle" returned by
542  *					    h1394_attach()
543  *		selfid_buf_addr		Pointer to the Self ID buffer
544  *		selfid_size		The size of the filled part of the
545  *					    Self ID buffer
546  *		node_id			The local (host) node ID for the
547  *					    current generation
548  *		generation_count	The current generation number
549  *
550  * Output(s):	None
551  *
552  * Description:	h1394_self_ids() does a lot of the work at bus reset.  It
553  *		takes the Self ID packets and parses them, builds a topology
554  *		tree representation of them, calculates gap count, IRM, speed
555  *		map, does any node matching that's possible, and then wakes
556  *		up the br_thread.
557  */
558 void
559 h1394_self_ids(void *sl_private, void *selfid_buf_addr, uint32_t selfid_size,
560     uint32_t node_id, uint32_t generation_count)
561 {
562 	s1394_hal_t	*hal;
563 	int		diameter;
564 	uint_t		gen_diff, gen_rollover;
565 	boolean_t	tree_copied = B_FALSE;
566 	ushort_t	saved_number_of_nodes;
567 
568 	/*
569 	 * NOTE: current topology tree is referred to as topology_tree
570 	 * and the old topology tree is referred to as old_tree.
571 	 * tree_valid indicates selfID buffer checked out OK and we were
572 	 * able to build the topology tree.
573 	 * tree_processed indicates we read the config ROMs as needed.
574 	 */
575 	hal = (s1394_hal_t *)sl_private;
576 
577 	/* Lock the topology tree */
578 	mutex_enter(&hal->topology_tree_mutex);
579 	if (hal->hal_state == S1394_HAL_SHUTDOWN) {
580 		mutex_exit(&hal->topology_tree_mutex);
581 		return;
582 	}
583 
584 	/* kstats - number of selfid completes */
585 	hal->hal_kstats->selfid_complete++;
586 
587 	if (generation_count > hal->generation_count) {
588 		gen_diff = generation_count - hal->generation_count;
589 		hal->hal_kstats->bus_reset += gen_diff;
590 	} else {
591 		gen_diff = hal->generation_count - generation_count;
592 		/* Generation count wrapped; use max_generation to count resets */
593 		hal->hal_kstats->bus_reset +=
594 		    (hal->halinfo.max_generation - gen_diff);
595 	}
596 
597 	/*
598 	 * If the current tree has a valid topology tree (selfids
599 	 * checked out OK etc) and config roms read as needed,
600 	 * then make it the old tree before building a new one.
601 	 */
602 	if ((hal->topology_tree_valid == B_TRUE) &&
603 	    (hal->topology_tree_processed == B_TRUE)) {
604 		/* Trees are switched after the copy completes */
605 		s1394_copy_old_tree(hal);
606 		tree_copied = B_TRUE;
607 	}
608 
609 	/* Set the new generation and node id */
610 	hal->node_id = node_id;
611 	hal->generation_count = generation_count;
612 
613 	/* Invalidate the current topology tree */
614 	hal->topology_tree_valid = B_FALSE;
615 	hal->topology_tree_processed = B_FALSE;
616 	hal->cfgroms_being_read = 0;
617 
618 	/*
619 	 * Save the number of nodes prior to parsing the self id buffer.
620 	 * We need this saved value while initializing the topology tree
621 	 * (for non-copy case).
622 	 */
623 	saved_number_of_nodes = hal->number_of_nodes;
624 
625 	/* Parse the SelfID buffer */
626 	if (s1394_parse_selfid_buffer(hal, selfid_buf_addr, selfid_size) !=
627 	    DDI_SUCCESS) {
628 		/* Unlock the topology tree */
629 		mutex_exit(&hal->topology_tree_mutex);
630 
631 		/* kstats - SelfID buffer error */
632 		hal->hal_kstats->selfid_buffer_error++;
633 		return;		/* Error parsing SelfIDs */
634 	}
635 
636 	/* Sort the SelfID packets by node number (if it's a 1995 PHY) */
637 	if (hal->halinfo.phy == H1394_PHY_1995) {
638 		s1394_sort_selfids(hal);
639 	}
640 
641 	/*
642 	 * Update the cycle master timer - if the timer is set and
643 	 * we were the root but we are not anymore, then disable it.
644 	 */
645 	mutex_enter(&hal->cm_timer_mutex);
646 	if ((hal->cm_timer_set == B_TRUE) &&
647 	    ((hal->old_number_of_nodes - 1) ==
648 		IEEE1394_NODE_NUM(hal->old_node_id)) &&
649 	    ((hal->number_of_nodes - 1) !=
650 		IEEE1394_NODE_NUM(hal->node_id))) {
651 		mutex_exit(&hal->cm_timer_mutex);
652 		(void) untimeout(hal->cm_timer);
653 	} else {
654 		mutex_exit(&hal->cm_timer_mutex);
655 	}
656 
657 	s1394_init_topology_tree(hal, tree_copied, saved_number_of_nodes);
658 
659 	/* Determine the 1394 bus gap count */
660 	hal->gap_count = s1394_get_current_gap_count(hal);
661 	/* If gap counts are inconsistent, reset */
662 	if (hal->gap_count == -1) {
663 		/* Unlock the topology tree */
664 		mutex_exit(&hal->topology_tree_mutex);
665 
666 		/* kstats - SelfID buffer error (invalid gap counts) */
667 		hal->hal_kstats->selfid_buffer_error++;
668 
669 		if (s1394_ignore_invalid_gap_cnt == 1) {
670 			/* Lock the topology tree again */
671 			mutex_enter(&hal->topology_tree_mutex);
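			/* Fall back to the maximum gap count (0x3F) */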
672 			hal->gap_count = 0x3F;
673 		} else {
674 			return;	/* Invalid gap counts in SelfID buffer */
675 		}
676 	}
677 
678 	/* Determine the Isoch Resource Manager */
679 	hal->IRM_node = s1394_get_isoch_rsrc_mgr(hal);
680 
681 	/* Build the topology tree */
682 	if (s1394_topology_tree_build(hal) != DDI_SUCCESS) {
683 		/* Unlock the topology tree */
684 		mutex_exit(&hal->topology_tree_mutex);
685 
686 		/* kstats - SelfID buffer error (Invalid topology tree) */
687 		hal->hal_kstats->selfid_buffer_error++;
688 		return;		/* Error building topology tree from SelfIDs */
689 	}
690 
691 	/* Update the CSR topology_map */
692 	s1394_CSR_topology_map_update(hal);
693 
694 	/* Calculate the diameter */
695 	diameter = s1394_topology_tree_calculate_diameter(hal);
696 
697 	/* Determine the optimum gap count */
698 	hal->optimum_gap_count = s1394_gap_count_optimize(diameter);
699 
700 	/* Fill in the speed map */
701 	s1394_speed_map_fill(hal);
702 
703 	/* Initialize the two trees (for tree walking) */
704 	s1394_topology_tree_mark_all_unvisited(hal);
705 	s1394_old_tree_mark_all_unvisited(hal);
706 	s1394_old_tree_mark_all_unmatched(hal);
707 
708 	/* Are both trees (old and new) valid? */
709 	if ((hal->old_tree_valid == B_TRUE) &&
710 	    (hal->topology_tree_valid == B_TRUE)) {
711 		/* If HAL was in a suspended state, then do no matching */
712 		if (hal->hal_was_suspended == B_TRUE) {
713 			hal->hal_was_suspended = B_FALSE;
714 		} else {
715 			gen_rollover = hal->halinfo.max_generation + 1;
716 			/* If only one bus reset occurred, match the trees */
717 			if (((hal->old_generation_count + 1) % gen_rollover) ==
718 			    generation_count) {
719 				s1394_match_tree_nodes(hal);
720 			}
721 		}
722 	}
723 
724 	/* Unlock the topology tree */
725 	mutex_exit(&hal->topology_tree_mutex);
726 
727 	/* Wake up the bus reset processing thread */
728 	s1394_tickle_bus_reset_thread(hal);
729 }
730 
731 /*
732  * Function:    h1394_read_request()
733  * Input(s):    sl_private		The HAL "handle" returned by
734  *					    h1394_attach()
735  *		req			The incoming AR request
736  *
737  * Output(s):	None
738  *
739  * Description:	h1394_read_request() receives incoming AR requests.  These
740  *		asynchronous read requests are dispatched to the appropriate
741  *		target (if one has registered) or are handled by the 1394
742  *		Software Framework, which will send out an appropriate
743  *		response.
744  */
745 void
746 h1394_read_request(void *sl_private, cmd1394_cmd_t *req)
747 {
748 	s1394_hal_t		*hal;
749 	s1394_cmd_priv_t	*s_priv;
750 	s1394_addr_space_blk_t  *addr_blk;
751 	dev_info_t		*dip;
752 	uint64_t		end_of_request;
753 	uint32_t		offset;
754 	size_t			cmd_length;
755 	uchar_t			*bufp_addr;
756 	uchar_t			*begin_ptr;
757 	uchar_t			*end_ptr;
758 	uchar_t			*tmp_ptr;
759 	void (*recv_read_req)(cmd1394_cmd_t *);
760 
761 	hal = (s1394_hal_t *)sl_private;
762 
763 	/* Get the Services Layer private area */
764 	s_priv = S1394_GET_CMD_PRIV(req);
765 
766 	s_priv->cmd_priv_xfer_type = S1394_CMD_READ;
767 
768 	switch (req->cmd_type) {
769 	case CMD1394_ASYNCH_RD_QUAD:
770 		cmd_length = IEEE1394_QUADLET;
771 		hal->hal_kstats->arreq_quad_rd++;
772 		break;
773 
774 	case CMD1394_ASYNCH_RD_BLOCK:
775 		cmd_length = req->cmd_u.b.blk_length;
776 		hal->hal_kstats->arreq_blk_rd++;
777 		break;
778 
779 	default:
780 		dip = hal->halinfo.dip;
781 
782 		/* An unexpected error in the HAL */
783 		cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
784 		    ddi_node_name(dip), ddi_get_instance(dip));
785 
786 		/* Disable the HAL */
787 		s1394_hal_shutdown(hal, B_TRUE);
788 
789 		return;
790 	}
791 
792 	/* Lock the "used" tree */
793 	mutex_enter(&hal->addr_space_used_mutex);
794 
795 	/* Has the 1394 address been allocated? */
796 	addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
797 
798 	/* If it wasn't found, it isn't owned... */
799 	if (addr_blk == NULL) {
800 		/* Unlock the "used" tree */
801 		mutex_exit(&hal->addr_space_used_mutex);
802 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
803 		(void) s1394_send_response(hal, req);
804 		return;
805 	}
806 
807 	/* Does the WHOLE request fit in the allocated block? */
808 	end_of_request = (req->cmd_addr + cmd_length) - 1;
809 	if (end_of_request > addr_blk->addr_hi) {
810 		/* Unlock the "used" tree */
811 		mutex_exit(&hal->addr_space_used_mutex);
812 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
813 		(void) s1394_send_response(hal, req);
814 		return;
815 	}
816 
817 	/* Is a read request valid for this address space? */
818 	if (!(addr_blk->addr_enable & T1394_ADDR_RDENBL)) {
819 		/* Unlock the "used" tree */
820 		mutex_exit(&hal->addr_space_used_mutex);
821 		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
822 		(void) s1394_send_response(hal, req);
823 		return;
824 	}
825 
826 	/* Make sure quadlet requests are quadlet-aligned */
827 	offset = req->cmd_addr - addr_blk->addr_lo;
828 	if ((req->cmd_type == CMD1394_ASYNCH_RD_QUAD) &&
829 	    ((offset & 0x3) != 0)) {
830 		/* Unlock the "used" tree */
831 		mutex_exit(&hal->addr_space_used_mutex);
832 		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
833 		(void) s1394_send_response(hal, req);
834 		return;
835 	}
836 
837 	/* Fill in the request from the backing store, if one exists */
838 	if (addr_blk->kmem_bufp != NULL) {
839 		offset = req->cmd_addr - addr_blk->addr_lo;
840 		bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
841 
842 		switch (req->cmd_type) {
843 		case CMD1394_ASYNCH_RD_QUAD:
844 			bcopy((void *)bufp_addr,
845 			    (void *)&(req->cmd_u.q.quadlet_data), cmd_length);
846 			break;
847 
848 		case CMD1394_ASYNCH_RD_BLOCK:
849 			begin_ptr = req->cmd_u.b.data_block->b_wptr;
850 			end_ptr	  = begin_ptr + cmd_length;
851 			tmp_ptr	  = req->cmd_u.b.data_block->b_datap->db_lim;
852 			if (end_ptr <= tmp_ptr) {
853 				bcopy((void *)bufp_addr, (void *)begin_ptr,
854 				    cmd_length);
855 				/* Update b_wptr to reflect the new data */
856 				req->cmd_u.b.data_block->b_wptr = end_ptr;
857 			} else {
858 				dip = hal->halinfo.dip;
859 
860 				/* An unexpected error in the HAL */
861 				cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
862 				    ddi_node_name(dip), ddi_get_instance(dip));
863 
864 				/* Unlock the "used" tree */
865 				mutex_exit(&hal->addr_space_used_mutex);
866 
867 				/* Disable the HAL */
868 				s1394_hal_shutdown(hal, B_TRUE);
869 
870 				return;
871 			}
872 			break;
873 
874 		default:
875 			dip = hal->halinfo.dip;
876 
877 			/* An unexpected error in the HAL */
878 			cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
879 			    ddi_node_name(dip), ddi_get_instance(dip));
880 
881 			/* Unlock the "used" tree */
882 			mutex_exit(&hal->addr_space_used_mutex);
883 
884 			/* Disable the HAL */
885 			s1394_hal_shutdown(hal, B_TRUE);
886 
887 			return;
888 		}
889 	}
890 
891 	/* Fill in the rest of the info in the request */
892 	s_priv->arreq_valid_addr = B_TRUE;
893 	req->cmd_callback_arg	 = addr_blk->addr_arg;
894 	recv_read_req		 = addr_blk->addr_events.recv_read_request;
895 
896 	/* Unlock the "used" tree */
897 	mutex_exit(&hal->addr_space_used_mutex);
898 
899 	/*
900 	 * Add no code that modifies the command after the target
901 	 * callback is called or after the response is sent to the
902 	 * HAL.
903 	 */
904 	if (recv_read_req != NULL) {
905 		recv_read_req(req);
906 	} else {
907 		req->cmd_result = IEEE1394_RESP_COMPLETE;
908 		(void) s1394_send_response(hal, req);
909 		return;
910 	}
911 }
912 
913 /*
914  * Function:    h1394_write_request()
915  * Input(s):    sl_private		The HAL "handle" returned by
916  *					    h1394_attach()
917  *		req			The incoming AR request
918  *
919  * Output(s):	None
920  *
921  * Description:	h1394_write_request() receives incoming AR requests.  These
922  *		asynchronous write requests are dispatched to the appropriate
923  *		target (if one has registered) or are handled by the 1394
924  *		Software Framework, which will send out an appropriate
925  *		response.
926  */
927 void
928 h1394_write_request(void *sl_private, cmd1394_cmd_t *req)
929 {
930 	s1394_hal_t		*hal;
931 	s1394_cmd_priv_t	*s_priv;
932 	h1394_cmd_priv_t	*h_priv;
933 	s1394_addr_space_blk_t	*addr_blk;
934 	dev_info_t		*dip;
935 	uint32_t		offset;
936 	size_t			cmd_length;
937 	uchar_t			*bufp_addr;
938 	uchar_t			*begin_ptr;
939 	uchar_t			*end_ptr;
940 	uchar_t			*tmp_ptr;
941 	uint64_t		end_of_request;
942 	boolean_t		posted_write = B_FALSE;
943 	boolean_t		write_error = B_FALSE;
944 	void (*recv_write_req)(cmd1394_cmd_t *);
945 
946 	hal = (s1394_hal_t *)sl_private;
947 
948 	/* Get the Services Layer private area */
949 	s_priv = S1394_GET_CMD_PRIV(req);
950 
951 	s_priv->cmd_priv_xfer_type = S1394_CMD_WRITE;
952 
953 	switch (req->cmd_type) {
954 	case CMD1394_ASYNCH_WR_QUAD:
955 		cmd_length = IEEE1394_QUADLET;
956 		hal->hal_kstats->arreq_quad_wr++;
957 		break;
958 
959 	case CMD1394_ASYNCH_WR_BLOCK:
960 		cmd_length = req->cmd_u.b.blk_length;
961 		hal->hal_kstats->arreq_blk_wr++;
962 		hal->hal_kstats->arreq_blk_wr_size += cmd_length;
963 		break;
964 
965 	default:
966 		dip = hal->halinfo.dip;
967 
968 		/* An unexpected error in the HAL */
969 		cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
970 		    ddi_node_name(dip), ddi_get_instance(dip));
971 
972 		/* Disable the HAL */
973 		s1394_hal_shutdown(hal, B_TRUE);
974 
975 		return;
976 	}
977 
978 	/* Lock the "used" tree */
979 	mutex_enter(&hal->addr_space_used_mutex);
980 
981 	/* Has the 1394 address been allocated? */
982 	addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
983 
984 	/* Is this a posted write request? */
985 	posted_write = s1394_is_posted_write(hal, req->cmd_addr);
986 
987 	/* If it wasn't found, it isn't owned... */
988 	if (addr_blk == NULL) {
989 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
990 		write_error	= B_TRUE;
991 		goto write_error_check;
992 	}
993 
994 	/* Does the WHOLE request fit in the allocated block? */
995 	end_of_request = (req->cmd_addr + cmd_length) - 1;
996 	if (end_of_request > addr_blk->addr_hi) {
997 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
998 		write_error	= B_TRUE;
999 		goto write_error_check;
1000 	}
1001 
1002 	/* Is a write request valid for this address space? */
1003 	if (!(addr_blk->addr_enable & T1394_ADDR_WRENBL)) {
1004 		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
1005 		write_error	= B_TRUE;
1006 		goto write_error_check;
1007 	}
1008 
1009 	/* Make sure quadlet request is quadlet aligned */
1010 	offset = req->cmd_addr - addr_blk->addr_lo;
1011 	if ((req->cmd_type == CMD1394_ASYNCH_WR_QUAD) &&
1012 	    ((offset & 0x3) != 0)) {
1013 		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
1014 		write_error	= B_TRUE;
1015 		goto write_error_check;
1016 	}
1017 
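	/*
	 * The no-error path also falls through to this label with
	 * write_error still set to B_FALSE.
	 */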
1018 write_error_check:
1019 	/* Check if posted-write when sending error responses */
1020 	if (write_error == B_TRUE) {
1021 		/* Unlock the "used" tree */
1022 		mutex_exit(&hal->addr_space_used_mutex);
1023 
1024 		if (posted_write == B_TRUE) {
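			/*
			 * A posted write gets no response packet on the bus;
			 * just count the error and return the command to
			 * the HAL.
			 */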
1025 			/* Get a pointer to the HAL private struct */
1026 			h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
1027 			hal->hal_kstats->arreq_posted_write_error++;
1028 			/* Free the command - Pass it back to the HAL */
1029 			HAL_CALL(hal).response_complete(
1030 			    hal->halinfo.hal_private, req, h_priv);
1031 			return;
1032 		} else {
1033 			(void) s1394_send_response(hal, req);
1034 			return;
1035 		}
1036 	}
1037 
1038 	/* Fill in the backing store if necessary */
1039 	if (addr_blk->kmem_bufp != NULL) {
1040 		offset = req->cmd_addr - addr_blk->addr_lo;
1041 		bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
1042 		switch (req->cmd_type) {
1043 		case CMD1394_ASYNCH_WR_QUAD:
1044 			bcopy((void *)&(req->cmd_u.q.quadlet_data),
1045 			    (void *)bufp_addr, cmd_length);
1046 			break;
1047 
1048 		case CMD1394_ASYNCH_WR_BLOCK:
1049 			begin_ptr = req->cmd_u.b.data_block->b_rptr;
1050 			end_ptr = begin_ptr + cmd_length;
1051 			tmp_ptr = req->cmd_u.b.data_block->b_wptr;
1052 			if (end_ptr <= tmp_ptr) {
1053 				bcopy((void *)begin_ptr, (void *)bufp_addr,
1054 				    cmd_length);
1055 			} else {
1056 				dip = hal->halinfo.dip;
1057 
1058 				/* An unexpected error in the HAL */
1059 				cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
1060 				    ddi_node_name(dip), ddi_get_instance(dip));
1061 
1062 				/* Unlock the "used" tree */
1063 				mutex_exit(&hal->addr_space_used_mutex);
1064 
1065 				/* Disable the HAL */
1066 				s1394_hal_shutdown(hal, B_TRUE);
1067 
1068 				return;
1069 			}
1070 			break;
1071 
1072 		default:
1073 			dip = hal->halinfo.dip;
1074 
1075 			/* An unexpected error in the HAL */
1076 			cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
1077 			    ddi_node_name(dip), ddi_get_instance(dip));
1078 
1079 			/* Unlock the "used" tree */
1080 			mutex_exit(&hal->addr_space_used_mutex);
1081 
1082 			/* Disable the HAL */
1083 			s1394_hal_shutdown(hal, B_TRUE);
1084 
1085 			return;
1086 		}
1087 	}
1088 
1089 	/* Fill in the rest of the info in the request */
1090 	if (addr_blk->addr_type == T1394_ADDR_POSTED_WRITE)
1091 		s_priv->posted_write = B_TRUE;
1092 
1093 	s_priv->arreq_valid_addr = B_TRUE;
1094 	req->cmd_callback_arg	 = addr_blk->addr_arg;
1095 	recv_write_req		 = addr_blk->addr_events.recv_write_request;
1096 
1097 	/* Unlock the "used" tree */
1098 	mutex_exit(&hal->addr_space_used_mutex);
1099 
1100 	/*
1101 	 * Add no code that modifies the command after the target
1102 	 * callback is called or after the response is sent to the
1103 	 * HAL.
1104 	 */
1105 	if (recv_write_req != NULL) {
1106 		recv_write_req(req);
1107 	} else {
1108 		req->cmd_result = IEEE1394_RESP_COMPLETE;
1109 		(void) s1394_send_response(hal, req);
1110 		return;
1111 	}
1112 }
1113 
1114 /*
1115  * Function:    h1394_lock_request()
1116  * Input(s):    sl_private		The HAL "handle" returned by
1117  *					    h1394_attach()
1118  *		req			The incoming AR request
1119  *
1120  * Output(s):	None
1121  *
1122  * Description:	h1394_lock_request() receives incoming AR requests.  These
1123  *		asynchronous lock requests are dispatched to the appropriate
1124  *		target (if one has registered) or are handled by the 1394
1125  *		Software Framework, which will send out an appropriate
1126  *		response.
1127  */
1128 void
1129 h1394_lock_request(void *sl_private, cmd1394_cmd_t *req)
1130 {
1131 	s1394_hal_t		*hal;
1132 	s1394_cmd_priv_t	*s_priv;
1133 	s1394_addr_space_blk_t	*addr_blk;
1134 	dev_info_t		*dip;
1135 	uint64_t		end_of_request;
1136 	uint32_t		offset;
1137 	uchar_t			*bufp_addr;
1138 	cmd1394_lock_type_t	lock_type;
1139 	void (*recv_lock_req)(cmd1394_cmd_t *);
1140 
1141 	hal = (s1394_hal_t *)sl_private;
1142 
1143 	/* Get the Services Layer private area */
1144 	s_priv = S1394_GET_CMD_PRIV(req);
1145 
1146 	s_priv->cmd_priv_xfer_type = S1394_CMD_LOCK;
1147 
1148 	/* Lock the "used" tree */
1149 	mutex_enter(&hal->addr_space_used_mutex);
1150 
1151 	/* Has the 1394 address been allocated? */
1152 	addr_blk = s1394_used_tree_search(hal, req->cmd_addr);
1153 
1154 	/* If it wasn't found, it isn't owned... */
1155 	if (addr_blk == NULL) {
1156 		/* Unlock the "used" tree */
1157 		mutex_exit(&hal->addr_space_used_mutex);
1158 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
1159 		(void) s1394_send_response(hal, req);
1160 		return;
1161 	}
1162 
1163 	/* Does the WHOLE request fit in the allocated block? */
1164 	switch (req->cmd_type) {
1165 	case CMD1394_ASYNCH_LOCK_32:
1166 		end_of_request = (req->cmd_addr + IEEE1394_QUADLET) - 1;
1167 		/* kstats - 32-bit lock request */
1168 		hal->hal_kstats->arreq_lock32++;
1169 		break;
1170 
1171 	case CMD1394_ASYNCH_LOCK_64:
1172 		end_of_request = (req->cmd_addr + IEEE1394_OCTLET) - 1;
1173 		/* kstats - 64-bit lock request */
1174 		hal->hal_kstats->arreq_lock64++;
1175 		break;
1176 
1177 	default:
1178 		/* Unlock the "used" tree */
1179 		mutex_exit(&hal->addr_space_used_mutex);
1180 
1181 		dip = hal->halinfo.dip;
1182 
1183 		/* An unexpected error in the HAL */
1184 		cmn_err(CE_WARN, HALT_ERROR_MESSAGE,
1185 		    ddi_node_name(dip), ddi_get_instance(dip));
1186 
1187 		/* Disable the HAL */
1188 		s1394_hal_shutdown(hal, B_TRUE);
1189 
1190 		return;
1191 	}
1192 
1193 	if (end_of_request > addr_blk->addr_hi) {
1194 		/* Unlock the "used" tree */
1195 		mutex_exit(&hal->addr_space_used_mutex);
1196 		req->cmd_result = IEEE1394_RESP_ADDRESS_ERROR;
1197 		(void) s1394_send_response(hal, req);
1198 		return;
1199 	}
1200 
1201 	/* Is a lock request valid for this address space? */
1202 	if (!(addr_blk->addr_enable & T1394_ADDR_LKENBL)) {
1203 		/* Unlock the "used" tree */
1204 		mutex_exit(&hal->addr_space_used_mutex);
1205 		req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
1206 		(void) s1394_send_response(hal, req);
1207 		return;
1208 	}
1209 
1210 	/* Perform the lock operation on the backing store, if one exists */
1211 	if (addr_blk->kmem_bufp != NULL) {
1212 		offset = req->cmd_addr - addr_blk->addr_lo;
1213 		bufp_addr = (uchar_t *)addr_blk->kmem_bufp + offset;
1214 
1215 		if (req->cmd_type == CMD1394_ASYNCH_LOCK_32) {
1216 			uint32_t	old_value;
1217 			uint32_t	arg_value;
1218 			uint32_t	data_value;
1219 			uint32_t	new_value;
1220 
1221 			arg_value	= req->cmd_u.l32.arg_value;
1222 			data_value	= req->cmd_u.l32.data_value;
1223 			lock_type	= req->cmd_u.l32.lock_type;
1224 			bcopy((void *)bufp_addr, (void *)&old_value,
1225 			    IEEE1394_QUADLET);
1226 
1227 			switch (lock_type) {
1228 			case CMD1394_LOCK_MASK_SWAP:
1229 				/* Mask-Swap (see P1394A - Table 1.7) */
1230 				new_value = (data_value & arg_value) |
1231 				    (old_value & ~arg_value);
1232 				/* Copy new_value into backing store */
1233 				bcopy((void *)&new_value, (void *)bufp_addr,
1234 				    IEEE1394_QUADLET);
1235 				req->cmd_u.l32.old_value = old_value;
1236 				break;
1237 
1238 			case CMD1394_LOCK_COMPARE_SWAP:
1239 				/* Compare-Swap */
1240 				if (old_value == arg_value) {
1241 					new_value = data_value;
1242 					/* Copy new_value into backing store */
1243 					bcopy((void *)&new_value,
1244 					    (void *)bufp_addr,
1245 					    IEEE1394_QUADLET);
1246 				}
1247 				req->cmd_u.l32.old_value = old_value;
1248 				break;
1249 
1250 			case CMD1394_LOCK_FETCH_ADD:
1251 				/* Fetch-Add (see P1394A - Table 1.7) */
1252 				old_value = T1394_DATA32(old_value);
1253 				new_value = old_value + data_value;
1254 				new_value = T1394_DATA32(new_value);
1255 				/* Copy new_value into backing store */
1256 				bcopy((void *)&new_value, (void *)bufp_addr,
1257 				    IEEE1394_QUADLET);
1258 				req->cmd_u.l32.old_value = old_value;
1259 				break;
1260 
1261 			case CMD1394_LOCK_LITTLE_ADD:
1262 				/* Little-Add (see P1394A - Table 1.7) */
1263 				old_value = T1394_DATA32(old_value);
1264 				new_value = old_value + data_value;
1265 				new_value = T1394_DATA32(new_value);
1266 				/* Copy new_value into backing store */
1267 				bcopy((void *)&new_value, (void *)bufp_addr,
1268 				    IEEE1394_QUADLET);
1269 				req->cmd_u.l32.old_value = old_value;
1270 				break;
1271 
1272 			case CMD1394_LOCK_BOUNDED_ADD:
1273 				/* Bounded-Add (see P1394A - Table 1.7) */
1274 				old_value = T1394_DATA32(old_value);
1275 				if (old_value != arg_value) {
1276 					new_value = old_value + data_value;
1277 					new_value = T1394_DATA32(new_value);
1278 					/* Copy new_value into backing store */
1279 					bcopy((void *)&new_value,
1280 					    (void *)bufp_addr,
1281 					    IEEE1394_QUADLET);
1282 				}
1283 				req->cmd_u.l32.old_value = old_value;
1284 				break;
1285 
1286 			case CMD1394_LOCK_WRAP_ADD:
1287 				/* Wrap-Add (see P1394A - Table 1.7) */
1288 				old_value = T1394_DATA32(old_value);
1289 				if (old_value != arg_value) {
1290 					new_value = old_value + data_value;
1291 				} else {
1292 					new_value = data_value;
1293 				}
1294 				new_value = T1394_DATA32(new_value);
1295 				/* Copy new_value into backing store */
1296 				bcopy((void *)&new_value, (void *)bufp_addr,
1297 				    IEEE1394_QUADLET);
1298 				req->cmd_u.l32.old_value = old_value;
1299 				break;
1300 
1301 			default:
1302 				/* Unlock the "used" tree */
1303 				mutex_exit(&hal->addr_space_used_mutex);
1304 				req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
1305 				(void) s1394_send_response(hal, req);
1306 				return;
1307 			}
1308 		} else {
1309 			/* Handling for the 8-byte (64-bit) lock requests */
1310 			uint64_t	old_value;
1311 			uint64_t	arg_value;
1312 			uint64_t	data_value;
1313 			uint64_t	new_value;
1314 
1315 			arg_value	= req->cmd_u.l64.arg_value;
1316 			data_value	= req->cmd_u.l64.data_value;
1317 			lock_type	= req->cmd_u.l64.lock_type;
1318 			bcopy((void *)bufp_addr, (void *)&old_value,
1319 			    IEEE1394_OCTLET);
1320 
1321 			switch (lock_type) {
1322 			case CMD1394_LOCK_MASK_SWAP:
1323 				/* Mask-Swap (see P1394A - Table 1.7) */
1324 				new_value = (data_value & arg_value) |
1325 				    (old_value & ~arg_value);
1326 				/* Copy new_value into backing store */
1327 				bcopy((void *)&new_value, (void *)bufp_addr,
1328 				    IEEE1394_OCTLET);
1329 				req->cmd_u.l64.old_value = old_value;
1330 				break;
1331 
1332 			case CMD1394_LOCK_COMPARE_SWAP:
1333 				/* Compare-Swap */
1334 				if (old_value == arg_value) {
1335 					new_value = data_value;
1336 					/* Copy new_value into backing store */
1337 					bcopy((void *)&new_value,
1338 					    (void *)bufp_addr,
1339 					    IEEE1394_OCTLET);
1340 				}
1341 				req->cmd_u.l64.old_value = old_value;
1342 				break;
1343 
1344 			case CMD1394_LOCK_FETCH_ADD:
1345 				/* Fetch-Add (see P1394A - Table 1.7) */
1346 				old_value = T1394_DATA64(old_value);
1347 				new_value = old_value + data_value;
1348 				new_value = T1394_DATA64(new_value);
1349 				/* Copy new_value into backing store */
1350 				bcopy((void *)&new_value, (void *)bufp_addr,
1351 				    IEEE1394_OCTLET);
1352 				req->cmd_u.l64.old_value = old_value;
1353 				break;
1354 
1355 			case CMD1394_LOCK_LITTLE_ADD:
1356 				/* Little-Add (see P1394A - Table 1.7) */
1357 				old_value = T1394_DATA64(old_value);
1358 				new_value = old_value + data_value;
1359 				new_value = T1394_DATA64(new_value);
1360 				/* Copy new_value into backing store */
1361 				bcopy((void *)&new_value, (void *)bufp_addr,
1362 				    IEEE1394_OCTLET);
1363 				req->cmd_u.l64.old_value = old_value;
1364 				break;
1365 
1366 			case CMD1394_LOCK_BOUNDED_ADD:
1367 				/* Bounded-Add (see P1394A - Table 1.7) */
1368 				old_value = T1394_DATA64(old_value);
1369 				if (old_value != arg_value) {
1370 					new_value = old_value + data_value;
1371 					new_value = T1394_DATA64(new_value);
1372 					/* Copy new_value into backing store */
1373 					bcopy((void *)&new_value,
1374 					    (void *)bufp_addr,
1375 					    IEEE1394_OCTLET);
1376 				}
1377 				req->cmd_u.l64.old_value = old_value;
1378 				break;
1379 
1380 			case CMD1394_LOCK_WRAP_ADD:
1381 				/* Wrap-Add (see P1394A - Table 1.7) */
1382 				old_value = T1394_DATA64(old_value);
1383 				if (old_value != arg_value) {
1384 					new_value = old_value + data_value;
1385 				} else {
1386 					new_value = data_value;
1387 				}
1388 				new_value = T1394_DATA64(new_value);
1389 				/* Copy new_value into backing store */
1390 				bcopy((void *)&new_value, (void *)bufp_addr,
1391 				    IEEE1394_OCTLET);
1392 				req->cmd_u.l64.old_value = old_value;
1393 				break;
1394 
1395 			default:
1396 				/* Unlock the "used" tree */
1397 				mutex_exit(&hal->addr_space_used_mutex);
1398 				req->cmd_result = IEEE1394_RESP_TYPE_ERROR;
1399 				(void) s1394_send_response(hal, req);
1400 				return;
1401 			}
1402 		}
1403 	}
1404 
1405 	/* Fill in the rest of the info in the request */
1406 	s_priv->arreq_valid_addr = B_TRUE;
1407 	req->cmd_callback_arg	 = addr_blk->addr_arg;
1408 	recv_lock_req		 = addr_blk->addr_events.recv_lock_request;
1409 
1410 	/* Unlock the "used" tree */
1411 	mutex_exit(&hal->addr_space_used_mutex);
1412 
1413 	/*
1414 	 * Add no code that modifies the command after the target
1415 	 * callback is called or after the response is sent to the
1416 	 * HAL.
1417 	 */
1418 	if (recv_lock_req != NULL) {
1419 		recv_lock_req(req);
1420 	} else {
1421 		req->cmd_result = IEEE1394_RESP_COMPLETE;
1422 		(void) s1394_send_response(hal, req);
1423 		return;
1424 	}
1425 }
1426 
1427 /*
1428  * Function:    h1394_ioctl()
1429  * Input(s):    sl_private		The HAL "handle" returned by
1430  *					    h1394_attach()
1431  *		cmd			ioctl cmd
1432  *		arg			argument for the ioctl cmd
1433  *		mode			mode bits (see ioctl(9e))
1434  *		cred_p			cred structure pointer
1435  *		rval_p			pointer to return value (see ioctl(9e))
1436  *
1437  * Output(s):	EINVAL if not a DEVCTL ioctl, else return value from s1394_ioctl
1438  *
1439  * Description:	h1394_ioctl() implements non-HAL specific ioctls. Currently,
1440  *		DEVCTL ioctls are the only generic ioctls supported.
1441  */
1442 int
1443 h1394_ioctl(void *sl_private, int cmd, intptr_t arg, int mode, cred_t *cred_p,
1444     int *rval_p)
1445 {
1446 	int	status;
1447 
1448 	if ((cmd & DEVCTL_IOC) != DEVCTL_IOC)
1449 		return (EINVAL);
1450 
1451 	status = s1394_ioctl((s1394_hal_t *)sl_private, cmd, arg, mode,
1452 	    cred_p, rval_p);
1453 
1454 	return (status);
1455 }
1456 
1457 /*
1458  * Function:    h1394_phy_packet()
1459  * Input(s):    sl_private		The HAL "handle" returned by
1460  *					    h1394_attach()
1461  *		packet_data		Pointer to a buffer of packet data
1462  *		quadlet_count		Length of the buffer
1463  *		timestamp		Timestamp indicating time of arrival
1464  *
1465  * Output(s):	None
1466  *
1467  * Description:	h1394_phy_packet() is not implemented currently, but would
1468  * Description:	h1394_phy_packet() is not currently implemented, but would
1469  *		be used to process the responses to PHY ping packets in P1394A.
1470  *		of departure. Comparing that old timestamp with this new
1471  *		timestamp, we can determine the time of flight and can use
1472  *		those times to optimize the gap count.
1473  */
1474 /* ARGSUSED */
1475 void
1476 h1394_phy_packet(void *sl_private, uint32_t *packet_data, uint_t quadlet_count,
1477     uint_t timestamp)
1478 {
1479 	/* This interface is not yet implemented */
1480 }
1481 
1482 /*
1483  * Function:    h1394_error_detected()
1484  * Input(s):    sl_private		The HAL "handle" returned by
1485  *					    h1394_attach()
1486  *		type			The type of error the HAL detected
1487  *		arg			Pointer to any extra information
1488  *
1489  * Output(s):	None
1490  *
1491  * Description:	h1394_error_detected() is used by the HAL to report errors
1492  *		to the 1394 Software Framework.
1493  */
1494 void
1495 h1394_error_detected(void *sl_private, h1394_error_t type, void *arg)
1496 {
1497 	s1394_hal_t	*hal;
1498 	uint_t		hal_node_num;
1499 	uint_t		IRM_node_num;
1500 
1501 	hal = (s1394_hal_t *)sl_private;
1502 
1503 	switch (type) {
1504 	case H1394_LOCK_RESP_ERR:
1505 		/* If we are the IRM, then initiate a bus reset */
1506 		mutex_enter(&hal->topology_tree_mutex);
1507 		hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1508 		IRM_node_num = hal->IRM_node;
1509 		mutex_exit(&hal->topology_tree_mutex);
1510 		if (IRM_node_num == hal_node_num)
1511 			s1394_initiate_hal_reset(hal, NON_CRITICAL);
1512 		break;
1513 
1514 	case H1394_POSTED_WR_ERR:
1515 		break;
1516 
1517 	case H1394_SELF_INITIATED_SHUTDOWN:
1518 		s1394_hal_shutdown(hal, B_FALSE);
1519 		break;
1520 
1521 	case H1394_CYCLE_TOO_LONG:
1522 		/* Set a timer to become cycle master after 1 second */
1523 		mutex_enter(&hal->cm_timer_mutex);
1524 		hal->cm_timer_set = B_TRUE;
1525 		mutex_exit(&hal->cm_timer_mutex);
1526 		hal->cm_timer = timeout(s1394_cycle_too_long_callback, hal,
1527 		    drv_usectohz(CYCLE_MASTER_TIMER * 1000));
1528 
1529 		break;
1530 
1531 	default:
1532 		break;
1533 	}
1534 }
1535