1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright 2019 Joyent, Inc.
29 * Copyright 2023 Oxide Computer Company
30 */
31
32 /*
33 * s1394_dev_disc.c
34 * 1394 Services Layer Device Discovery Routines
35 * This file contains the bus reset thread code, bus manager routines and
36 * various routines that are used to implement remote Config ROM reading.
37 *
38 * FUTURE:
39 * Rescan the bus if invalid nodes are seen.
40 * Investigate using a taskq for phase 2 config rom reads.
41 * If we are reading the entire bus info blk, we should attempt
42 * a block read and fall back to quad reads if this fails.
43 */
44
45 #include <sys/conf.h>
46 #include <sys/sysmacros.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/cmn_err.h>
50 #include <sys/sunndi.h>
51 #include <sys/modctl.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/types.h>
54 #include <sys/kmem.h>
55 #include <sys/kstat.h>
56 #include <sys/varargs.h>
57
58 #include <sys/1394/t1394.h>
59 #include <sys/1394/s1394.h>
60 #include <sys/1394/h1394.h>
61 #include <sys/1394/ieee1394.h>
62 #include <sys/1394/ieee1212.h>
63
64 /* hcmd_ret_t */
65 typedef enum {
66 S1394_HCMD_INVALID,
67 S1394_HCMD_NODE_DONE,
68 S1394_HCMD_NODE_EXPECT_MORE,
69 S1394_HCMD_LOCK_FAILED
70 } hcmd_ret_t;
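/*
 * These values are returned by the phase 1/phase 2 completion handlers and
 * consumed by s1394_wait_for_cfgrom_callbacks(): S1394_HCMD_NODE_DONE means
 * no further config rom reads are outstanding for that node,
 * S1394_HCMD_NODE_EXPECT_MORE means another quadlet read was started and more
 * completions are expected, and S1394_HCMD_LOCK_FAILED means the topology
 * tree could not be relocked (typically because another bus reset arrived).
 * S1394_HCMD_INVALID is never returned; it exists only to catch bugs.
 */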
71
72 #define QUAD_TO_CFGROM_ADDR(b, n, q, addr) { \
73 uint64_t bl = (b); \
74 uint64_t nl = (n); \
75 addr = ((bl) << IEEE1394_ADDR_BUS_ID_SHIFT) | \
76 ((nl) << IEEE1394_ADDR_PHY_ID_SHIFT); \
77 addr += IEEE1394_CONFIG_ROM_ADDR + ((q) << 2); \
78 }
79
80 #define CFGROM_READ_PAUSE(d) \
81 ((s1394_cfgrom_read_delay_ms == 0) ? (void) 0 : \
82 delay(drv_usectohz((d) * 1000)))
83
84 #define BUMP_CFGROM_READ_DELAY(n) \
85 (n)->cfgrom_read_delay += s1394_cfgrom_read_delay_incr
86
87 #define CFGROM_GET_READ_DELAY(n, d) \
88 ((d) = (n)->cfgrom_read_delay)
89
90 #define SETUP_QUAD_READ(n, reset_fails, quadlet, cnt) \
91 { \
92 int i = (reset_fails); \
93 if (i != 0) { \
94 (n)->cfgrom_read_fails = 0; \
95 (n)->cfgrom_read_delay = (uchar_t)s1394_cfgrom_read_delay_ms; \
96 } \
97 (n)->cfgrom_quad_to_read = (quadlet); \
98 (n)->cfgrom_quad_read_cnt = (cnt); \
99 }
100
101 static void s1394_wait_for_events(s1394_hal_t *hal, int firsttime);
102
103 static int s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
104 hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd));
105
106 static void s1394_flush_cmplq(s1394_hal_t *hal);
107
108 static void s1394_br_thread_exit(s1394_hal_t *hal);
109
110 static void s1394_target_bus_reset_notifies(s1394_hal_t *hal,
111 t1394_localinfo_t *localinfo);
112
113 static int s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
114 s1394_status_t *status);
115
116 static int s1394_cfgrom_scan_phase1(s1394_hal_t *hal);
117
118 static hcmd_ret_t s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal,
119 cmd1394_cmd_t *cmd);
120
121 static int s1394_cfgrom_scan_phase2(s1394_hal_t *hal);
122
123 static hcmd_ret_t s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal,
124 cmd1394_cmd_t *cmd);
125
126 static int s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
127 s1394_status_t *status);
128
129 static void s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd);
130
131 static void s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num,
132 uint32_t *quadlet, uint32_t *data);
133
134 static int s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode);
135
136 static int s1394_match_all_GUIDs(s1394_hal_t *hal);
137
138 static void s1394_become_bus_mgr(void *arg);
139
140 static void s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd);
141
142 static int s1394_bus_mgr_processing(s1394_hal_t *hal);
143
144 static int s1394_do_bus_mgr_processing(s1394_hal_t *hal);
145
146 static void s1394_bus_mgr_timers_stop(s1394_hal_t *hal,
147 timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
148
149 static void s1394_bus_mgr_timers_start(s1394_hal_t *hal,
150 timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
151
152 static int s1394_cycle_master_capable(s1394_hal_t *hal);
153
154 static int s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root,
155 int new_gap_cnt, uint32_t IRM_flags);
156
157 static void s1394_phy_config_callback(cmd1394_cmd_t *cmd);
158
159 static int s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node,
160 uint32_t quadlet, uint32_t *nextquadp);
161
162 static int s1394_cfgrom_read_retry_cnt = 3; /* 1 + 3 retries */
163 static int s1394_cfgrom_read_delay_ms = 20; /* start with 20ms */
164 static int s1394_cfgrom_read_delay_incr = 10; /* 10ms increments */
165 static int s1394_enable_crc_validation = 0;
166 static int s1394_turn_off_dir_stack = 0;
167 static int s1394_crcsz_is_cfgsz = 0;
168 static int s1394_enable_rio_pass1_workarounds = 0;
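/*
 * The variables above are tunable debug/workaround knobs rather than
 * constants. As a sketch (assuming the standard /etc/system tunable mechanism
 * applies to these symbols), CRC validation of config roms could be enabled
 * at boot with:
 *
 *	set s1394:s1394_enable_crc_validation = 1
 */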
169
170 /*
171 * s1394_br_thread()
172 * is the bus reset thread. Its sole purpose is to read/reread config roms
173 * as appropriate and do bus reset time things (bus manager processing,
174 * isoch resource reallocation etc.).
175 */
176 void
177 s1394_br_thread(s1394_hal_t *hal)
178 {
179 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
180
181 /* Initialize the Bus Mgr timers */
182 hal->bus_mgr_timeout_id = 0;
183 hal->bus_mgr_query_timeout_id = 0;
184
185 /* Initialize the completion Q */
186 mutex_enter(&hal->br_cmplq_mutex);
187 hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
188 mutex_exit(&hal->br_cmplq_mutex);
189
190 s1394_wait_for_events(hal, 1);
191
192 for (;;) {
193 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
194
195 s1394_wait_for_events(hal, 0);
196
197 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
198
199 /* stop bus manager timeouts, if needed */
200 s1394_bus_mgr_timers_stop(hal, &hal->bus_mgr_query_timeout_id,
201 &hal->bus_mgr_timeout_id);
202
203 s1394_flush_cmplq(hal);
204
205 /* start timers for checking bus manager, if needed */
206 s1394_bus_mgr_timers_start(hal, &hal->bus_mgr_query_timeout_id,
207 &hal->bus_mgr_timeout_id);
208
209 /* Try to reallocate all isoch resources */
210 s1394_isoch_rsrc_realloc(hal);
211
212 if (s1394_cfgrom_scan_phase1(hal) != DDI_SUCCESS) {
213 continue;
214 }
215
216 if (s1394_bus_mgr_processing(hal) != DDI_SUCCESS) {
217 continue;
218 }
219
220 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
221
222 if (s1394_cfgrom_scan_phase2(hal) != DDI_SUCCESS) {
223 continue;
224 }
225
226 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
227 }
228 }
229
230 /*
231 * s1394_wait_for_events()
232 * blocks waiting for a cv_signal on the bus reset condition variable.
233 * Used by the bus reset thread for synchronizing with the bus reset/
234 * self id interrupt callback from the hal. Does CPR initialization
235 * the first time it is called. If the services layer sees a valid self id
236 * buffer, it builds the topology tree and signals the bus reset thread
237 * to read the config roms as appropriate (indicated by BR_THR_CFGROM_SCAN).
238 * If the services layer wishes to kill the bus reset thread, it does so
239 * by signaling a BR_THR_GO_AWAY event.
240 */
241 static void
242 s1394_wait_for_events(s1394_hal_t *hal, int firsttime)
243 {
244 uint_t event;
245
246 ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
247 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
248
249 if (firsttime)
250 CALLB_CPR_INIT(&hal->hal_cprinfo, &hal->br_thread_mutex,
251 callb_generic_cpr, "s1394_br_thread");
252
253 /* Check and wait for a BUS RESET */
254 mutex_enter(&hal->br_thread_mutex);
255 while ((event = hal->br_thread_ev_type) == 0) {
256 CALLB_CPR_SAFE_BEGIN(&hal->hal_cprinfo);
257 cv_wait(&hal->br_thread_cv, &hal->br_thread_mutex);
258 CALLB_CPR_SAFE_END(&hal->hal_cprinfo, &hal->br_thread_mutex);
259 }
260
261 if (event & BR_THR_GO_AWAY) {
262 s1394_br_thread_exit(hal);
263 /*NOTREACHED*/
264 return;
265 }
266
267 if (firsttime) {
268 mutex_exit(&hal->br_thread_mutex);
269 return;
270 }
271
272 mutex_enter(&hal->topology_tree_mutex);
273 hal->br_cfgrom_read_gen = hal->generation_count;
274
275 hal->br_thread_ev_type &= ~BR_THR_CFGROM_SCAN;
276 mutex_exit(&hal->topology_tree_mutex);
277 mutex_exit(&hal->br_thread_mutex);
278 }
279
280 /*
281 * s1394_wait_for_cfgrom_callbacks()
282 * Waits for completed config rom reads. Takes each completion off the
283 * completion queue and passes it to the "completion handler" function
284 * that was passed in as an argument. Further processing of the completion
285 * queue depends on the return status of the completion handler. If there
286 * is a bus reset while waiting for completions or if the services layer
287 * signals BR_THR_GO_AWAY, quits waiting for completions and returns
288 * non-zero. Also returns non-zero if completion handler returns
289 * S1394_HCMD_LOCK_FAILED. Returns 0 if config roms for all nodes have
290 * been dealt with.
291 */
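/*
 * Producer/consumer note: s1394_cfgrom_read_callback() is the producer; it
 * appends completed commands to hal->br_cmplq_head/br_cmplq_tail under
 * br_cmplq_mutex and signals br_cmplq_cv. This routine is the consumer; it
 * dequeues one command at a time under the same mutex and hands it to the
 * phase 1 or phase 2 handler passed in via handle_cmd_fn.
 */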
292 static int
293 s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
294 hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd))
295 {
296 cmd1394_cmd_t *cmd;
297 s1394_cmd_priv_t *s_priv;
298 int ret, done = 0;
299 hcmd_ret_t cmdret;
300
301 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
302
303 ret = DDI_SUCCESS;
304
305 while (!done) {
306 mutex_enter(&hal->br_cmplq_mutex);
307 mutex_enter(&hal->topology_tree_mutex);
308 while (wait_gen == hal->generation_count &&
309 (hal->br_thread_ev_type & BR_THR_GO_AWAY) == 0 &&
310 hal->br_cmplq_head == NULL) {
311 mutex_exit(&hal->topology_tree_mutex);
312 cv_wait(&hal->br_cmplq_cv, &hal->br_cmplq_mutex);
313 mutex_enter(&hal->topology_tree_mutex);
314 }
315 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
316 if (wait_gen != hal->generation_count ||
317 (hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
318 mutex_exit(&hal->topology_tree_mutex);
319 mutex_exit(&hal->br_cmplq_mutex);
320 s1394_flush_cmplq(hal);
321 return (DDI_FAILURE);
322 }
323 mutex_exit(&hal->topology_tree_mutex);
324
325 if ((cmd = hal->br_cmplq_head) != NULL) {
326 s_priv = S1394_GET_CMD_PRIV(cmd);
327
328 hal->br_cmplq_head = s_priv->cmd_priv_next;
329 }
330 if (cmd == hal->br_cmplq_tail)
331 hal->br_cmplq_tail = NULL;
332 mutex_exit(&hal->br_cmplq_mutex);
333
334 if (cmd != NULL) {
335 if (cmd->bus_generation != wait_gen) {
336 (void) s1394_free_cmd(hal, &cmd);
337 continue;
338 }
339 cmdret = (*handle_cmd_fn)(hal, cmd);
340 ASSERT(cmdret != S1394_HCMD_INVALID);
341 if (cmdret == S1394_HCMD_LOCK_FAILED) {
342 /* flush completion queue */
343 ret = DDI_FAILURE;
344 s1394_flush_cmplq(hal);
345 break;
346 } else if (cmdret == S1394_HCMD_NODE_DONE) {
347 if (--hal->cfgroms_being_read == 0) {
348 /* All done */
349 break;
350 }
351 } else {
352 ASSERT(cmdret == S1394_HCMD_NODE_EXPECT_MORE);
353 done = 0;
354 }
355 }
356 }
357
358 return (ret);
359 }
360
361 /*
362 * s1394_flush_cmplq()
363 * Frees all cmds on the completion queue.
364 */
365 static void
366 s1394_flush_cmplq(s1394_hal_t *hal)
367 {
368 s1394_cmd_priv_t *s_priv;
369 cmd1394_cmd_t *cmd, *tcmd;
370
371 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
372
373 cmd = NULL;
374
375 do {
376 mutex_enter(&hal->br_cmplq_mutex);
377 cmd = hal->br_cmplq_head;
378 hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
379 mutex_exit(&hal->br_cmplq_mutex);
380
381 while (cmd != NULL) {
382 s_priv = S1394_GET_CMD_PRIV(cmd);
383
384 tcmd = s_priv->cmd_priv_next;
385 (void) s1394_free_cmd(hal, &cmd);
386 cmd = tcmd;
387 }
388
389 mutex_enter(&hal->br_cmplq_mutex);
390 cmd = hal->br_cmplq_head;
391 mutex_exit(&hal->br_cmplq_mutex);
392
393 } while (cmd != NULL);
394 }
395
396 /*
397 * s1394_br_thread_exit()
398 * Flushes the completion queue and calls thread_exit() (which effectively
399 * kills the bus reset thread).
400 */
401 static void
402 s1394_br_thread_exit(s1394_hal_t *hal)
403 {
404 ASSERT(MUTEX_HELD(&hal->br_thread_mutex));
405 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
406 s1394_flush_cmplq(hal);
407 #ifndef __lock_lint
408 CALLB_CPR_EXIT(&hal->hal_cprinfo);
409 #endif
410 hal->br_thread_ev_type &= ~BR_THR_GO_AWAY;
411 thread_exit();
412 /*NOTREACHED*/
413 }
414
415 /*
416 * s1394_target_bus_reset_notifies()
417 * tells the ndi event framework to invoke any callbacks registered for
418 * "bus reset event".
419 */
420 static void
421 s1394_target_bus_reset_notifies(s1394_hal_t *hal, t1394_localinfo_t *localinfo)
422 {
423 ddi_eventcookie_t cookie;
424
425 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
426
427 if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
428 DDI_DEVI_BUS_RESET_EVENT, &cookie, NDI_EVENT_NOPASS) ==
429 NDI_SUCCESS) {
430 (void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
431 cookie, localinfo);
432 }
433 }
434
435 /*
436 * s1394_alloc_cfgrom()
437 * Allocates config rom for the node. Sets CFGROM_NEW_ALLOC bit in the
438 * node cfgrom state. Drops topology_tree_mutex around the call to
439 * kmem_zalloc(). If re-locking fails, returns DDI_FAILURE, else returns
440 * DDI_SUCCESS.
441 */
442 static int
443 s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node, s1394_status_t *status)
444 {
445 uint32_t *cfgrom;
446
447 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
448
449 *status = S1394_NOSTATUS;
450
451 /*
452 * if cfgrom is non-NULL, this has to be the generation-changed
453 * case (where we allocate cfgrom again to reread it)
454 */
455 ASSERT(node->cfgrom == NULL || (node->cfgrom != NULL &&
456 CFGROM_GEN_CHANGED(node) == B_TRUE));
457
458 /*
459 * if node matched, either cfgrom has to be NULL or link should be
460 * off in the last matched node or config rom generations changed.
461 */
462 ASSERT(NODE_MATCHED(node) == B_FALSE || (NODE_MATCHED(node) == B_TRUE &&
463 (node->cfgrom == NULL || LINK_ACTIVE(node->old_node) == B_FALSE) ||
464 CFGROM_GEN_CHANGED(node) == B_TRUE));
465
466 s1394_unlock_tree(hal);
467 cfgrom = (uint32_t *)kmem_zalloc(IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
468 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
469 kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
470 *status |= S1394_LOCK_FAILED;
471 return (DDI_FAILURE);
472 }
473 node->cfgrom = cfgrom;
474 node->cfgrom_size = IEEE1394_CONFIG_ROM_QUAD_SZ;
475 SET_CFGROM_NEW_ALLOC(node);
476 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
477 return (DDI_SUCCESS);
478 }
479
480 /*
481 * s1394_free_cfgrom()
482 * Marks the config rom invalid and frees up the config rom based on options.
483 */
484 void
485 s1394_free_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
486 s1394_free_cfgrom_t options)
487 {
488 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
489 ASSERT(node->cfgrom != NULL);
490
491 if (options == S1394_FREE_CFGROM_BOTH) {
492 /*
493 * free in both old and new trees; will be called with
494 * new node.
495 */
496 s1394_node_t *onode = node->old_node;
497
498 if (NODE_MATCHED(node) == B_TRUE && onode->cfgrom != NULL)
499 ASSERT(onode->cfgrom == node->cfgrom);
500
501 if (onode != NULL && onode->cfgrom != NULL && onode->cfgrom !=
502 node->cfgrom)
503 kmem_free(onode->cfgrom, IEEE1394_CONFIG_ROM_SZ);
504
505 kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
506 onode->cfgrom = NULL;
507 node->cfgrom = NULL;
508
509 CLEAR_CFGROM_STATE(onode);
510 CLEAR_CFGROM_STATE(node);
511
512 } else if (options == S1394_FREE_CFGROM_NEW) {
513
514 ASSERT(CFGROM_NEW_ALLOC(node) == B_TRUE);
515 kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
516 CLEAR_CFGROM_NEW_ALLOC(node);
517 node->cfgrom = NULL;
518 CLEAR_CFGROM_STATE(node);
519
520 } else if (options == S1394_FREE_CFGROM_OLD) {
521
522 /* freeing in old tree */
523 kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
524 node->cfgrom = NULL;
525 CLEAR_CFGROM_STATE(node);
526 }
527 }
528
529 /*
530 * s1394_copy_cfgrom()
531 * Copies config rom info from "from" node to "to" node. Clears
532 * CFGROM_NEW_ALLOC bit in cfgrom state in both nodes. (CFGROM_NEW_ALLOC
533 * acts as a reference count. If set, only the node in the current tree
534 * has a pointer to it; if clear, both the node in the current tree as
535 * well as the corresponding node in the old tree point to the same memory).
536 */
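/*
 * Note on the bcopy() below: the parse state fields from dir_stack up to
 * (but not including) cfgrom_quad_to_read are contiguous members of
 * s1394_node_t, so they are copied in one shot using offsetof() arithmetic;
 * cfgrom_quad_to_read itself is then copied explicitly.
 */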
537 void
538 s1394_copy_cfgrom(s1394_node_t *to, s1394_node_t *from)
539 {
540 ASSERT(to->cfgrom == NULL);
541
542 to->cfgrom = from->cfgrom;
543 to->cfgrom_state = from->cfgrom_state;
544 to->cfgrom_valid_size = from->cfgrom_valid_size;
545 to->cfgrom_size = from->cfgrom_size;
546 to->node_state = from->node_state;
547
548 bcopy(from->dir_stack, to->dir_stack,
549 offsetof(s1394_node_t, cfgrom_quad_to_read) -
550 offsetof(s1394_node_t, dir_stack));
551
552 to->cfgrom_quad_to_read = from->cfgrom_quad_to_read;
553
554 CLEAR_CFGROM_NEW_ALLOC(to);
555 CLEAR_CFGROM_NEW_ALLOC(from);
556
557 /*
558 * old link off, new link on => handled in s1394_cfgrom_scan_phase1
559 * old link on, new link off => handled in s1394_process_old_tree
560 */
561 if (LINK_ACTIVE(from) == B_FALSE) {
562 /*
563 * if last time around, link was off, there wouldn't
564 * have been config rom allocated.
565 */
566 ASSERT(from->cfgrom == NULL);
567 return;
568 } else {
569 s1394_selfid_pkt_t *selfid_pkt = to->selfid_packet;
570
571 if (IEEE1394_SELFID_ISLINKON(selfid_pkt))
572 SET_LINK_ACTIVE(to);
573 }
574 }
575
576 /*
577 * s1394_read_bus_info_blk()
578 * Attempts to kick off reading IEEE1212_NODE_CAP_QUAD quad or quad 0.
579 * Increments cfgroms_being_read by 1. Returns DDI_SUCCESS if the command was
580 * issued, else sets status to the failure reason and returns DDI_FAILURE.
581 */
582 static int
583 s1394_read_bus_info_blk(s1394_hal_t *hal, s1394_node_t *node,
584 s1394_status_t *status)
585 {
586 uint32_t quadlet;
587 cmd1394_cmd_t *cmd;
588 uchar_t node_num;
589
590 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
591 ASSERT(LINK_ACTIVE(node) == B_TRUE);
592
593 node_num = node->node_num;
594
595 /*
596 * drop the topology lock around command allocation. Return failure
597 * if either command allocation fails or cannot reacquire the lock
598 */
599 s1394_unlock_tree(hal);
600 *status = S1394_NOSTATUS;
601
602 if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
603 *status |= S1394_CMD_ALLOC_FAILED;
604 }
605 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
606 *status |= S1394_LOCK_FAILED;
607 /* free the cmd if it was allocated above */
608 if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
609 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
610 }
611 if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
612 return (DDI_FAILURE);
613 }
614
615 /* allocate cfgrom if needed */
616 if (node->cfgrom == NULL && s1394_alloc_cfgrom(hal, node, status) !=
617 DDI_SUCCESS) {
618 ASSERT(((*status) & S1394_LOCK_FAILED) != 0);
619 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
620 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
621 return (DDI_FAILURE);
622 }
623
624 /*
625 * if this is a matched node, read quad 2 (node capabilities) to
626 * see if the generation count changed.
627 */
628 quadlet = CFGROM_BIB_READ(node) ? IEEE1212_NODE_CAP_QUAD : 0;
629
630 /*
631 * read bus info block at 100Mbit. This will help us with the cases
632 * where LINK is slower than PHY; s1394 uses PHY speed till speed map
633 * is updated.
634 */
635 cmd->completion_callback = s1394_cfgrom_read_callback;
636 cmd->bus_generation = hal->generation_count;
637 cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
638 CMD1394_OVERRIDE_ADDR | CMD1394_OVERRIDE_SPEED);
639 cmd->cmd_speed = IEEE1394_S100;
640 cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
641
642 QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
643 quadlet, cmd->cmd_addr);
644
645 SETUP_QUAD_READ(node, 1, quadlet, 1);
646 if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
647 /* free the command if it wasn't handed over to the HAL */
648 if (((*status) & S1394_CMD_INFLIGHT) == 0) {
649 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
650 }
651 if (((*status) & S1394_LOCK_FAILED) != 0) {
652 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
653 }
654 return (DDI_FAILURE);
655 }
656
657 hal->cfgroms_being_read++;
658 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
659
660 return (DDI_SUCCESS);
661 }
662
663 /*
664 * s1394_read_rest_of_cfgrom()
665 * Attempts to start reading node->cfgrom_quad_to_read quadlet. Increments
666 * cfgroms_being_read by 1 and returns DDI_SUCCESS if command was issued,
667 * else sets status to the failure reason and returns DDI_FAILURE.
668 */
669 int
670 s1394_read_rest_of_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
671 s1394_status_t *status)
672 {
673 cmd1394_cmd_t *cmd;
674 uchar_t node_num = node->node_num;
675
676 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
677 ASSERT(LINK_ACTIVE(node) == B_TRUE);
678
679 /*
680 * drop the topology lock around command allocation. Return failure
681 * if either command allocation fails or cannot reacquire the lock
682 */
683 s1394_unlock_tree(hal);
684 *status = S1394_NOSTATUS;
685
686 if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
687 *status |= S1394_CMD_ALLOC_FAILED;
688 }
689 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
690 *status |= S1394_LOCK_FAILED;
691 /* free if we allocated a cmd above */
692 if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
693 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
694 }
695 if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
696 return (DDI_FAILURE);
697 }
698
699 cmd->completion_callback = s1394_cfgrom_read_callback;
700 cmd->bus_generation = hal->generation_count;
701 cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
702 CMD1394_OVERRIDE_ADDR);
703 cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
704
705 QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
706 node->cfgrom_quad_to_read, cmd->cmd_addr);
707 SETUP_QUAD_READ(node, 1, node->cfgrom_quad_to_read, 1);
708 if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
709 /* free the command if it wasn't handed over to the HAL */
710 if (((*status) & S1394_CMD_INFLIGHT) == 0) {
711 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
712 }
713 if (((*status) & S1394_LOCK_FAILED) != 0) {
714 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
715 }
716 return (DDI_FAILURE);
717 }
718
719 hal->cfgroms_being_read++;
720 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
721
722 return (DDI_SUCCESS);
723 }
724
725 /*
726 * s1394_cfgrom_scan_phase1()
727 * Attempts to read bus info blocks for nodes as needed. Returns DDI_FAILURE
728 * if bus reset generations changed (as indicated by s1394_lock_tree()
729 * return status) or if any of the callees return failure, else returns
730 * DDI_SUCCESS.
731 */
732 static int
733 s1394_cfgrom_scan_phase1(s1394_hal_t *hal)
734 {
735 uint32_t number_of_nodes;
736 int ret;
737 int node;
738 int wait_in_gen;
739 int wait_for_cbs;
740 uint_t hal_node_num;
741 uint_t hal_node_num_old;
742 s1394_node_t *nnode, *onode;
743 s1394_selfid_pkt_t *selfid_pkt;
744 s1394_status_t status;
745
746 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
747
748 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
749 return (DDI_FAILURE);
750 }
751 wait_for_cbs = 0;
752 number_of_nodes = hal->number_of_nodes;
753 hal->cfgroms_being_read = 0;
754 hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
755 hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
756 s1394_unlock_tree(hal);
757
758 ret = DDI_SUCCESS;
759
760 /* Send requests for all new node config ROM 0 */
761 for (node = 0; node < number_of_nodes; node++) {
762
763 status = S1394_UNKNOWN;
764
765 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
766 status = S1394_LOCK_FAILED;
767 break;
768 }
769
770 nnode = &hal->topology_tree[node];
771 onode = nnode->old_node;
772 /* if node matched, onode should be non NULL */
773 ASSERT(NODE_MATCHED(nnode) == B_FALSE || (NODE_MATCHED(nnode) ==
774 B_TRUE && onode != NULL));
775
776 /*
777 * Read bus info block if it is a brand new node (MATCHED is 0)
778 * or if matched but the link was off in the previous generation
779 * or if matched but it had an invalid cfgrom in the last generation
780 * or if matched but config rom generation > 1 (this is to
781 * check if config rom generation changed between bus resets).
782 */
783 if ((node != hal_node_num) &&
784 ((NODE_MATCHED(nnode) == B_FALSE) ||
785 (NODE_MATCHED(nnode) == B_TRUE && LINK_ACTIVE(onode) ==
786 B_FALSE) || (NODE_MATCHED(nnode) == B_TRUE &&
787 (onode->cfgrom == NULL || CFGROM_VALID(onode) ==
788 B_FALSE)) || (NODE_MATCHED(nnode) == B_TRUE &&
789 nnode->cfgrom != NULL && CONFIG_ROM_GEN(nnode->cfgrom) >
790 1))) {
791
792 SET_NODE_VISITED(nnode);
793 selfid_pkt = nnode->selfid_packet;
794 if (IEEE1394_SELFID_ISLINKON(selfid_pkt)) {
795
796 SET_LINK_ACTIVE(nnode);
797
798 status = S1394_UNKNOWN;
799
800 if (s1394_read_bus_info_blk(hal, nnode,
801 &status) != DDI_SUCCESS) {
802 if ((status & S1394_LOCK_FAILED) != 0)
803 break;
804 } else {
805 wait_for_cbs++;
806 wait_in_gen = hal->br_cfgrom_read_gen;
807 }
808 } else {
809 /*
810 * Special case: if link was active last
811 * time around, this should be treated as
812 * node going away.
813 */
814 CLEAR_LINK_ACTIVE(nnode);
815 if (NODE_MATCHED(nnode) == B_TRUE &&
816 LINK_ACTIVE(onode) == B_TRUE) {
817 CLEAR_CFGROM_STATE(nnode);
818 }
819 }
820 } else {
821 if (node == hal_node_num) {
822 onode = &hal->old_tree[hal_node_num_old];
823 /* Set up the local matched nodes */
824 if (onode) {
825 nnode->old_node = onode;
826 SET_NODE_MATCHED(nnode);
827 SET_NODE_MATCHED(onode);
828 s1394_copy_cfgrom(nnode, onode);
829 }
830 }
831 }
832 s1394_unlock_tree(hal);
833 }
834
835 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
836
837 if ((status & S1394_LOCK_FAILED) != 0) {
838 return (DDI_FAILURE);
839 }
840
841 /*
842 * If we started any reads, wait for completion callbacks
843 */
844 if (wait_for_cbs != 0) {
845 ret = s1394_wait_for_cfgrom_callbacks(hal, wait_in_gen,
846 s1394_br_thread_handle_cmd_phase1);
847 }
848
849 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
850
851 return (ret);
852 }
853
854 /*
855 * s1394_br_thread_handle_cmd_phase1()
856 * Process the cmd completion for phase 1 config rom reads. If we
857 * successfully read IEEE1212_NODE_CAP_QUAD quadlet and config rom gen
858 * did not change, move targets hanging off the old node to the current
859 * node. If config rom generations change, alloc new config rom and start
860 * re-reading the new config rom. If all of bus info block is read (as
861 * required), mark the node as CFGROM_BIB_READ. If a config rom read fails,
862 * retry if not too many failures. Topology tree mutex is dropped and
863 * reacquired in this routine. If reacquiring fails, returns
864 * S1394_HCMD_LOCK_FAILED. If the entire bus info block is read, returns
865 * S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
866 * indicate not done with the node yet).
867 *
868 * If we cannot read any of the quadlets in the bus info block, cfgrom
869 * is marked invalid in this generation (a side effect of calling
870 * s1394_free_cfgrom()). We free cfgrom in this routine only if the failure
871 * is not due to bus generations changing.
872 */
873 static hcmd_ret_t
874 s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
875 {
876 s1394_target_t *t;
877 s1394_node_t *node, *onode;
878 uint32_t node_num, quadlet, data;
879 int freecmd, done, locked;
880 hcmd_ret_t cmdret;
881 uchar_t readdelay;
882 s1394_status_t status;
883
884 s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
885 ASSERT(quadlet == 0 || quadlet < IEEE1394_BIB_QUAD_SZ);
886
887 cmdret = S1394_HCMD_NODE_EXPECT_MORE;
888
889 locked = 1;
890 freecmd = 1;
891
892 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
893 locked = 0;
894 goto bail;
895 }
896
897 node = &hal->topology_tree[node_num];
898
899 if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
900
901 int reread = 0;
902
903 done = 0;
904
905 if (quadlet == IEEE1212_NODE_CAP_QUAD &&
906 CFGROM_BIB_READ(node)) {
907
908 int cur_gen = ((data & IEEE1394_BIB_GEN_MASK) >>
909 IEEE1394_BIB_GEN_SHIFT);
910
911 /*
912 * node->old_node can be NULL if this is a new node &
913 * we are doing a rescan
914 */
915 onode = node->old_node;
916 if (CONFIG_ROM_GEN(node->cfgrom) == cur_gen) {
917
918 if (CFGROM_PARSED(node) == B_TRUE) {
919 rw_enter(&hal->target_list_rwlock,
920 RW_WRITER);
921 /* Update the target list, if any */
922 if (onode != NULL &&
923 (t = onode->target_list) != NULL) {
924 node->target_list = t;
925 while (t != NULL) {
926 t->on_node = node;
927 t = t->target_sibling;
928 }
929 }
930 rw_exit(&hal->target_list_rwlock);
931 }
932 SET_NODE_MATCHED(node);
933 if (onode)
934 SET_NODE_MATCHED(onode);
935 node->cfgrom_quad_to_read =
936 IEEE1394_BIB_QUAD_SZ;
937 done++;
938 } else {
939
940 SET_CFGROM_GEN_CHANGED(node);
941 if (onode != NULL)
942 SET_CFGROM_GEN_CHANGED(onode);
943 /*
944 * Reset BIB_READ flag and start reading entire
945 * config rom.
946 */
947 CLEAR_CFGROM_BIB_READ(node);
948 reread = 1;
949
950 /*
951 * if generations changed, allocate cfgrom for
952 * the new generation. s1394_match_GUID() will
953 * free up the cfgrom from the old generation.
954 */
955 if (s1394_alloc_cfgrom(hal, node, &status) !=
956 DDI_SUCCESS) {
957 ASSERT((status & S1394_LOCK_FAILED) !=
958 0);
959 ASSERT(MUTEX_NOT_HELD(&hal->
960 topology_tree_mutex));
961 locked = 0;
962 /* we failed to relock the tree */
963 goto bail;
964 }
965 }
966 }
967
968 /*
969 * we end up here if we don't have bus_info_blk for this
970 * node or if config rom generation changed.
971 */
972
973 /*
974 * Pass1 Rio bug workaround. Due to this bug, if we read
975 * past quadlet 5 of the config rom, the PCI bus gets wedged.
976 * Avoid the hang by not reading past quadlet 5.
977 * We identify a remote Rio by the node vendor id part of
978 * quad 3 (which is == SUNW == S1394_SUNW_OUI (0x80020)).
979 */
980 if (s1394_enable_rio_pass1_workarounds != 0) {
981 if ((quadlet == 3) && ((data >> 8) == S1394_SUNW_OUI)) {
982 node->cfgrom_size = IEEE1394_BIB_QUAD_SZ;
983 node->cfgrom_valid_size = IEEE1394_BIB_QUAD_SZ;
984 }
985 }
986
987 if (!done) {
988
989 if (reread)
990 quadlet = 0;
991 else
992 node->cfgrom[quadlet++] = data;
993
994 /* if we don't have the entire bus_info_blk... */
995 if (quadlet < IEEE1394_BIB_QUAD_SZ) {
996
997 CFGROM_GET_READ_DELAY(node, readdelay);
998 SETUP_QUAD_READ(node, 1, quadlet, 1);
999 s1394_unlock_tree(hal);
1000 CFGROM_READ_PAUSE(readdelay);
1001 /* get next quadlet */
1002 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1003 locked = 0;
1004 } else if (s1394_read_config_quadlet(hal, cmd,
1005 &status) != DDI_SUCCESS) {
1006 /*
1007 * Failed to get going. If command was
1008 * successfully handed over to the HAL,
1009 * don't free it (it will get freed
1010 * later in the callback).
1011 */
1012 if ((status & S1394_CMD_INFLIGHT) !=
1013 0) {
1014 freecmd = 0;
1015 }
1016 if ((status & S1394_LOCK_FAILED) != 0) {
1017 locked = 0;
1018 } else {
1019 const s1394_free_cfgrom_t S =
1020 S1394_FREE_CFGROM_NEW;
1021 if (CFGROM_NEW_ALLOC(node) ==
1022 B_TRUE) {
1023 s1394_free_cfgrom(hal,
1024 node, S);
1025 } else {
1026 CLEAR_CFGROM_STATE(
1027 node);
1028 }
1029 }
1030 done++;
1031 } else {
1032 freecmd = 0;
1033 }
1034 } else {
1035 /* got all of bus_info_blk */
1036 SET_CFGROM_BIB_READ(node);
1037 if (node->cfgrom_size == IEEE1394_BIB_QUAD_SZ)
1038 SET_CFGROM_ALL_READ(node);
1039 node->cfgrom_quad_to_read = quadlet;
1040 done++;
1041 }
1042 }
1043 } else {
1044 done = 1;
1045 node->cfgrom_read_fails++;
1046 BUMP_CFGROM_READ_DELAY(node);
1047
1048 /* retry if not too many failures */
1049 if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1050 CFGROM_GET_READ_DELAY(node, readdelay);
1051 SETUP_QUAD_READ(node, 0, quadlet, 1);
1052 s1394_unlock_tree(hal);
1053 CFGROM_READ_PAUSE(readdelay);
1054 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1055 locked = 0;
1056 } else if (s1394_read_config_quadlet(hal, cmd,
1057 &status) != DDI_SUCCESS) {
1058 /*
1059 * Failed to get going. If command was
1060 * successfully handed over to the HAL,
1061 * don't free it (it will get freed
1062 * later in the callback).
1063 */
1064 if ((status & S1394_CMD_INFLIGHT) != 0) {
1065 freecmd = 0;
1066 }
1067 if ((status & S1394_LOCK_FAILED) != 0) {
1068 locked = 0;
1069 } else {
1070 if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1071 s1394_free_cfgrom(hal, node,
1072 S1394_FREE_CFGROM_NEW);
1073 } else {
1074 CLEAR_CFGROM_STATE(node);
1075 }
1076 }
1077 } else {
1078 done = 0;
1079 freecmd = 0;
1080 }
1081 } else {
1082 if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1083 s1394_free_cfgrom(hal, node,
1084 S1394_FREE_CFGROM_NEW);
1085 } else {
1086 CLEAR_CFGROM_STATE(node);
1087 }
1088 }
1089 }
1090 bail:
1091 if (freecmd) {
1092 (void) s1394_free_cmd(hal, &cmd);
1093 }
1094
1095 if (done) {
1096 cmdret = S1394_HCMD_NODE_DONE;
1097 }
1098
1099 /* if we are bailing out because locking failed, locked == 0 */
1100 if (locked == 0)
1101 cmdret = S1394_HCMD_LOCK_FAILED;
1102 else
1103 s1394_unlock_tree(hal);
1104
1105 return (cmdret);
1106 }
1107
1108 /*
1109 * s1394_cfgrom_scan_phase2()
1110 * Handles phase 2 of bus reset processing. Matches GUIDs between old
1111 * and new topology trees to identify which node moved where. Processes
1112 * the old topology tree (involves offlining any nodes that got unplugged
1113 * between the last generation and the current generation). Updates speed
1114 * map, sets up physical AR request filters and does isoch resource
1115 * realloc failure notification and bus reset notifications. Then resends
1116 * any commands that were issued by targets while the reset was being
1117 * processed. Finally, the current topology tree is processed. This involves
1118 * reading config rom past the bus info block for new nodes and parsing
1119 * the config rom, creating a devinfo for each unit directory found in the
1120 * config rom.
1121 * Returns DDI_FAILURE if there was bus reset during any of the function
1122 * calls (as indicated by lock failures) or if any of the routine's callees
1123 * return failure, else returns DDI_SUCCESS.
1124 */
1125 static int
1126 s1394_cfgrom_scan_phase2(s1394_hal_t *hal)
1127 {
1128 int ret;
1129 uint_t wait_gen;
1130 int wait_for_cbs = 0;
1131 t1394_localinfo_t localinfo;
1132
1133 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1134
1135 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1136 return (DDI_FAILURE);
1137 }
1138
1139 if (s1394_match_all_GUIDs(hal) == DDI_SUCCESS) {
1140 s1394_unlock_tree(hal);
1141 }
1142
1143 if (s1394_process_old_tree(hal) != DDI_SUCCESS) {
1144 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1145 return (DDI_FAILURE);
1146 }
1147
1148 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1149 return (DDI_FAILURE);
1150 }
1151
1152 s1394_update_speed_map_link_speeds(hal);
1153 s1394_unlock_tree(hal);
1154
1155 /* Setup physical AR request filters */
1156 s1394_physical_arreq_setup_all(hal);
1157
1158 /* Notify targets of isoch resource realloc failures */
1159 s1394_isoch_rsrc_realloc_notify(hal);
1160
1161 /* Notify targets of the end of bus reset processing */
1162 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1163 return (DDI_FAILURE);
1164 }
1165
1166 localinfo.bus_generation = hal->generation_count;
1167 localinfo.local_nodeID = hal->node_id;
1168
1169 s1394_unlock_tree(hal);
1170 s1394_target_bus_reset_notifies(hal, &localinfo);
1171 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1172 return (DDI_FAILURE);
1173 }
1174
1175 /* Set HAL state to normal */
1176 if (hal->disable_requests_bit == 0)
1177 hal->hal_state = S1394_HAL_NORMAL;
1178 else
1179 hal->hal_state = S1394_HAL_DREQ;
1180
1181 s1394_unlock_tree(hal);
1182
1183 /* Flush the pending Q */
1184 s1394_resend_pending_cmds(hal);
1185
1186 if (s1394_process_topology_tree(hal, &wait_for_cbs, &wait_gen)) {
1187 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1188 return (DDI_FAILURE);
1189 }
1190
1191 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1192 return (DDI_FAILURE);
1193 }
1194
1195 s1394_print_node_info(hal);
1196
1197 s1394_unlock_tree(hal);
1198
1199 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1200
1201 ret = DDI_SUCCESS;
1202
1203 /*
1204 * If we started any reads, wait for completion callbacks
1205 */
1206 if (wait_for_cbs != 0) {
1207 ret = s1394_wait_for_cfgrom_callbacks(hal, wait_gen,
1208 s1394_br_thread_handle_cmd_phase2);
1209 }
1210
1211 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1212
1213 return (ret);
1214 }
1215
1216 /*
1217 * s1394_br_thread_handle_cmd_phase2()
1218 * Process the cmd completion for phase 2 config rom reads. If all the
1219 * needed quads are read, validates the config rom; if config rom is
1220 * invalid (crc failures), frees the config rom, else marks the config rom
1221 * valid and calls s1394_update_devinfo_tree() to parse the config rom.
1222 * If need to get more quadlets, attempts to kick off the read and returns
1223 * S1394_HCMD_NODE_EXPECT_MORE if successfully started the read. If a bus
1224 * reset is seen while in this routine, returns S1394_HCMD_LOCK_FAILED. If
1225 * done with the node (with or without crc errors), returns
1226 * S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
1227 * indicate not done with the node yet).
1228 */
1229 static hcmd_ret_t
1230 s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
1231 {
1232 s1394_node_t *node;
1233 uint32_t node_num, quadlet, data;
1234 int update_devinfo, locked, freecmd, done;
1235 hcmd_ret_t cmdret;
1236 uchar_t readdelay;
1237 s1394_status_t status;
1238
1239 /*
1240 * we end up here if this is a brand new node or if it is a known node
1241 * but the config ROM changed (and triggered a re-read).
1242 */
1243 s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1244 ASSERT(quadlet == IEEE1394_BIB_QUAD_SZ || quadlet <
1245 IEEE1394_CONFIG_ROM_QUAD_SZ);
1246
1247 locked = freecmd = done = 1;
1248 cmdret = S1394_HCMD_NODE_EXPECT_MORE;
1249
1250 update_devinfo = 0;
1251
1252 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1253 locked = 0;
1254 goto bail;
1255 }
1256
1257 node = &hal->topology_tree[node_num];
1258
1259 if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
1260
1261 ASSERT(CFGROM_BIB_READ(node) == B_TRUE);
1262
1263 node->cfgrom[quadlet] = data;
1264
1265 if (s1394_calc_next_quad(hal, node, quadlet, &quadlet) != 0) {
1266 /*
1267 * Done with this node. Mark config rom valid and
1268 * update the devinfo tree for this node.
1269 */
1270 node->cfgrom_valid_size = quadlet + 1;
1271 if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1272 SET_CFGROM_ALL_READ(node);
1273 update_devinfo++;
1274 } else {
1275 s1394_free_cfgrom(hal, node,
1276 S1394_FREE_CFGROM_BOTH);
1277 }
1278 } else {
1279 CFGROM_GET_READ_DELAY(node, readdelay);
1280 SETUP_QUAD_READ(node, 1, quadlet, 1);
1281 s1394_unlock_tree(hal);
1282 CFGROM_READ_PAUSE(readdelay);
1283 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1284 locked = 0;
1285 } else if (s1394_read_config_quadlet(hal, cmd,
1286 &status) != DDI_SUCCESS) {
1287 /* give up on this guy */
1288 if ((status & S1394_CMD_INFLIGHT) != 0) {
1289 freecmd = 0;
1290 }
1291 if ((status & S1394_LOCK_FAILED) != 0) {
1292 locked = 0;
1293 } else {
1294 node->cfgrom_valid_size = quadlet;
1295 if (s1394_valid_cfgrom(hal, node) ==
1296 B_TRUE) {
1297 SET_CFGROM_ALL_READ(node);
1298 update_devinfo++;
1299 } else {
1300 s1394_free_cfgrom(hal, node,
1301 S1394_FREE_CFGROM_BOTH);
1302 }
1303 }
1304 } else {
1305 /* successfully started next read */
1306 done = 0;
1307 freecmd = 0;
1308 }
1309 }
1310 } else {
1311 node->cfgrom_read_fails++;
1312 BUMP_CFGROM_READ_DELAY(node);
1313
1314 /* retry if not too many failures */
1315 if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1316 CFGROM_GET_READ_DELAY(node, readdelay);
1317 s1394_unlock_tree(hal);
1318 SETUP_QUAD_READ(node, 0, quadlet, 1);
1319 CFGROM_READ_PAUSE(readdelay);
1320 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1321 locked = 0;
1322 } else if (s1394_read_config_quadlet(hal, cmd,
1323 &status) != DDI_SUCCESS) {
1324 if ((status & S1394_CMD_INFLIGHT) != 0) {
1325 freecmd = 0;
1326 }
1327 if ((status & S1394_LOCK_FAILED) != 0) {
1328 locked = 0;
1329 } else {
1330 /* stop further reads */
1331 node->cfgrom_valid_size = quadlet + 1;
1332 if (s1394_valid_cfgrom(hal, node) ==
1333 B_TRUE) {
1334 SET_CFGROM_ALL_READ(node);
1335 update_devinfo++;
1336 } else {
1337 s1394_free_cfgrom(hal, node,
1338 S1394_FREE_CFGROM_BOTH);
1339 }
1340 }
1341 } else {
1342 /* successfully started next read */
1343 done = 0;
1344 freecmd = 0;
1345 }
1346 } else {
1347 node->cfgrom_valid_size = quadlet + 1;
1348 if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1349 SET_CFGROM_ALL_READ(node);
1350 update_devinfo++;
1351 } else {
1352 s1394_free_cfgrom(hal, node,
1353 S1394_FREE_CFGROM_BOTH);
1354 }
1355 }
1356 }
1357 bail:
1358 if (freecmd) {
1359 (void) s1394_free_cmd(hal, &cmd);
1360 }
1361
1362 if (done) {
1363 cmdret = S1394_HCMD_NODE_DONE;
1364 }
1365
1366 if (update_devinfo) {
1367 ASSERT(locked);
1368 /*
1369 * s1394_update_devinfo_tree() drops and reacquires the
1370 * topology_tree_mutex. If tree lock fails, it returns
1371 * a DDI_FAILURE. Set locked to 0 in this case so that
1372 * we will return S1394_HCMD_LOCK_FAILED below
1373 */
1374 if (s1394_update_devinfo_tree(hal, node) != DDI_SUCCESS) {
1375 ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1376 locked = 0;
1377 }
1378 }
1379
1380 /* if we are bailing out because locking failed, locked == 0 */
1381 if (locked == 0)
1382 cmdret = S1394_HCMD_LOCK_FAILED;
1383 else
1384 s1394_unlock_tree(hal);
1385
1386 return (cmdret);
1387 }
1388
1389 /*
1390 * s1394_read_config_quadlet()
1391 * Starts the read of a config rom quadlet (at the deduced cmd_addr). Returns
1392 * DDI_SUCCESS if the read was started with no errors, else DDI_FAILURE
1393 * is returned, with status indicating the reason for the failure(s).
1394 */
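/*
 * On failure, *status carries one or more of S1394_UNKNOWN (command setup
 * failed), S1394_XFER_FAILED (the HAL refused the transfer and the command
 * was pulled back off the outstanding queue) or S1394_LOCK_FAILED (the
 * topology tree could not be relocked). S1394_CMD_INFLIGHT means the HAL
 * accepted the command and it will be freed later via the completion path,
 * so callers must not free it themselves.
 */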
1395 static int
1396 s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
1397 s1394_status_t *status)
1398 {
1399 s1394_node_t *node;
1400 int ret, err, node_num, quadlet;
1401
1402 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1403 node_num = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
1404 node = &hal->topology_tree[node_num];
1405 quadlet = node->cfgrom_quad_to_read;
1406
1407 /* Calculate the 64-bit address */
1408 QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num, quadlet,
1409 cmd->cmd_addr);
1410
1411 *status = S1394_NOSTATUS;
1412
1413 ret = s1394_setup_asynch_command(hal, NULL, cmd, S1394_CMD_READ, &err);
1414
1415 if (ret != DDI_SUCCESS) {
1416 *status |= S1394_UNKNOWN;
1417 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1418 return (DDI_FAILURE);
1419 }
1420
1421 s1394_unlock_tree(hal);
1422 ret = DDI_SUCCESS;
1423 /* Send the command out */
1424 if (s1394_xfer_asynch_command(hal, cmd, &err) == DDI_SUCCESS) {
1425 /* Callers can expect a callback now */
1426 *status |= S1394_CMD_INFLIGHT;
1427 } else {
1428
1429 s1394_cmd_priv_t *s_priv;
1430
1431 /* Remove from queue */
1432 s1394_remove_q_asynch_cmd(hal, cmd);
1433 s_priv = S1394_GET_CMD_PRIV(cmd);
1434
1435 s_priv->cmd_in_use = B_FALSE;
1436
1437 *status |= S1394_XFER_FAILED;
1438 ret = DDI_FAILURE;
1439 }
1440
1441 if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1442 *status |= S1394_LOCK_FAILED;
1443 ret = DDI_FAILURE;
1444 }
1445
1446 return (ret);
1447 }
1448
1449 /*
1450 * s1394_cfgrom_read_callback()
1451 * callback routine for config rom reads. Frees the command if it failed
1452 * due to bus reset else appends the command to the completion queue
1453 * and signals the completion queue cv.
1454 */
1455 static void
1456 s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd)
1457 {
1458 cmd1394_cmd_t *tcmd;
1459 s1394_cmd_priv_t *s_priv;
1460 s1394_hal_t *hal;
1461
1462 #if defined(DEBUG)
1463 uint32_t node_num, quadlet, data;
1464 #endif
1465
1466 /* Get the Services Layer private area */
1467 s_priv = S1394_GET_CMD_PRIV(cmd);
1468
1469 hal = (s1394_hal_t *)s_priv->sent_on_hal;
1470
1471 #if defined(DEBUG)
1472
1473 s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1474
1475 #endif
1476
1477 if (cmd->cmd_result == CMD1394_EBUSRESET) {
1478 (void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1479 } else {
1480 mutex_enter(&hal->br_cmplq_mutex);
1481
1482 /* Put the command on completion queue */
1483 s_priv->cmd_priv_next = NULL;
1484 if ((tcmd = hal->br_cmplq_tail) != NULL) {
1485 s_priv = S1394_GET_CMD_PRIV(tcmd);
1486
1487 s_priv->cmd_priv_next = cmd;
1488 }
1489
1490 hal->br_cmplq_tail = cmd;
1491
1492 if (hal->br_cmplq_head == NULL)
1493 hal->br_cmplq_head = cmd;
1494
1495 cv_signal(&hal->br_cmplq_cv);
1496 mutex_exit(&hal->br_cmplq_mutex);
1497 }
1498 }
1499
1500 /*
1501 * s1394_cfgrom_parse_unit_dir()
1502 * Parses the unit directory passed in and returns reg[2...5] of reg
1503 * property (see 1275 binding for reg property definition). Currently,
1504 * returns 0 for all the values since none of the existing devices implement
1505 * this, and future devices, per P1212r, will need a binding change.
1506 */
1507 /* ARGSUSED */
1508 void
1509 s1394_cfgrom_parse_unit_dir(uint32_t *unit_dir, uint32_t *addr_hi,
1510 uint32_t *addr_lo, uint32_t *size_hi, uint32_t *size_lo)
1511 {
1512 *addr_hi = *addr_lo = *size_hi = *size_lo = 0;
1513 }
1514
1515 /*
1516 * s1394_get_quad_info()
1517 * Helper routine that picks apart the various fields of a 1394 address
1518 */
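/*
 * This is effectively the inverse of QUAD_TO_CFGROM_ADDR(): given a command
 * whose cmd_addr was built for (node, quadlet), it recovers the node number
 * from the phy id bits, derives the quadlet index from the offset relative
 * to IEEE1394_CONFIG_ROM_ADDR (divided by 4), and returns the 32-bit data
 * payload of the quadlet read (converted via T1394_DATA32() as needed).
 */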
1519 static void
1520 s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num, uint32_t *quadlet,
1521 uint32_t *data)
1522 {
1523 uint64_t addr;
1524
1525 addr = cmd->cmd_addr;
1526 *node_num = IEEE1394_ADDR_PHY_ID(addr);
1527 *quadlet = ((addr & IEEE1394_ADDR_OFFSET_MASK) -
1528 IEEE1394_CONFIG_ROM_ADDR);
1529 *quadlet = (*quadlet >> 2);
1530 *data = T1394_DATA32(cmd->cmd_u.q.quadlet_data);
1531 }
1532
1533 /*
1534 * s1394_match_GUID()
1535 * attempts to match nnode (which is in the current topology tree) with
1536 * a node in the old topology tree by comparing GUIDs. If a match is found
1537 * the old_node field of the current node and cur_node field of the old
1538 * are set to point to each other. Also, this routine makes both the nodes
1539 * point at the same config rom. If unable to relock the tree, returns
1540 * DDI_FAILURE, else returns DDI_SUCCESS.
1541 */
1542 static int
1543 s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode)
1544 {
1545 int old_node;
1546 int gen_changed;
1547 uint32_t old_a, old_b;
1548 uint32_t new_a, new_b;
1549 s1394_node_t *onode;
1550 s1394_target_t *t;
1551 int ret = DDI_SUCCESS;
1552
1553 ASSERT(nnode->cfgrom != NULL);
1554 ASSERT(CFGROM_BIB_READ(nnode));
1555
1556 new_a = nnode->node_guid_hi;
1557 new_b = nnode->node_guid_lo;
1558
1559 for (old_node = 0; old_node < hal->old_number_of_nodes; old_node++) {
1560
1561 onode = &hal->old_tree[old_node];
1562 if (onode->cfgrom == NULL || CFGROM_BIB_READ(onode) == B_FALSE)
1563 continue;
1564
1565 old_a = onode->node_guid_hi;
1566 old_b = onode->node_guid_lo;
1567
1568 if ((old_a == new_a) && (old_b == new_b)) {
1569
1570 if (NODE_MATCHED(onode) == B_TRUE) {
1571 cmn_err(CE_NOTE, "!Duplicate GUIDs: %08x%08x",
1572 old_a, old_b);
1573 /* offline the new node that last matched */
1574 ret = s1394_offline_node(hal, onode->cur_node);
1575 /* and make the current new node invalid */
1576 ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1577 s1394_free_cfgrom(hal, nnode,
1578 S1394_FREE_CFGROM_NEW);
1579 break;
1580 }
1581
1582 /*
1583 * If there is indeed a cfgrom gen change,
1584 * CFGROM_GEN_CHANGED() will be set iff we are matching
1585 * tree nodes. Otherwise, CONFIG_ROM_GEN(old) !=
1586 * CONFIG_ROM_GEN(new).
1587 */
1588 if (CFGROM_GEN_CHANGED(nnode) == B_TRUE ||
1589 (CONFIG_ROM_GEN(onode->cfgrom) !=
1590 CONFIG_ROM_GEN(nnode->cfgrom))) {
1591 gen_changed = 1;
1592 } else {
1593 gen_changed = 0;
1594 }
1595
1596 onode->cur_node = nnode;
1597 nnode->old_node = onode;
1598 nnode->node_state = onode->node_state;
1599 SET_NODE_VISITED(onode);
1600 SET_NODE_MATCHED(onode);
1601 SET_NODE_MATCHED(nnode);
1602 /*
1603 * If generations changed, need to offline any targets
1604 * hanging off the old node, prior to freeing up old
1605 * cfgrom. If the generations didn't change, we can
1606 * free up the new config rom and copy all info from
1607 * the old node (this helps in picking up further
1608 * reads from where the last generation left off).
1609 */
1610 if (gen_changed == 1) {
1611 if (s1394_offline_node(hal, onode)) {
1612 ret = DDI_FAILURE;
1613 break;
1614 }
1615 s1394_free_cfgrom(hal, onode,
1616 S1394_FREE_CFGROM_OLD);
1617 CLEAR_CFGROM_PARSED(nnode);
1618 CLEAR_CFGROM_NEW_ALLOC(nnode);
1619 CLEAR_CFGROM_NEW_ALLOC(onode);
1620 onode->cfgrom = nnode->cfgrom;
1621 /* done */
1622 break;
1623 }
1624
1625 /*
1626 * Free up cfgrom memory in the new_node and
1627 * point it at the same config rom as the old one.
1628 */
1629 if (onode->cfgrom != nnode->cfgrom) {
1630 ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1631 s1394_free_cfgrom(hal, nnode,
1632 S1394_FREE_CFGROM_NEW);
1633 }
1634 nnode->cfgrom = onode->cfgrom;
1635 nnode->cfgrom_state = onode->cfgrom_state;
1636 nnode->cfgrom_valid_size = onode->cfgrom_valid_size;
1637 nnode->cfgrom_size = onode->cfgrom_size;
1638 nnode->cfgrom_quad_to_read = onode->cfgrom_quad_to_read;
1639 bcopy(onode->dir_stack, nnode->dir_stack,
1640 offsetof(s1394_node_t, cfgrom_quad_to_read) -
1641 offsetof(s1394_node_t, dir_stack));
1642 CLEAR_CFGROM_NEW_ALLOC(nnode);
1643 CLEAR_CFGROM_NEW_ALLOC(onode);
1644
1645 if (CFGROM_PARSED(nnode) == B_TRUE) {
1646 rw_enter(&hal->target_list_rwlock, RW_WRITER);
1647 /* Update the target list */
1648 if ((t = onode->target_list) != NULL) {
1649 nnode->target_list = t;
1650 while (t != NULL) {
1651 t->on_node = nnode;
1652 t = t->target_sibling;
1653 }
1654 }
1655 rw_exit(&hal->target_list_rwlock);
1656 }
1657 break;
1658 }
1659 }
1660
1661 return (ret);
1662 }
1663
1664 /*
1665 * s1394_match_all_GUIDs()
1666 * attempts to match each node in the current topology tree with a
1667 * node in the old topology tree. If unable to relock the tree, returns
1668 * DDI_FAILURE, else returns DDI_SUCCESS.
1669 */
1670 static int
1671 s1394_match_all_GUIDs(s1394_hal_t *hal)
1672 {
1673 int node;
1674 int ret = DDI_SUCCESS;
1675 s1394_node_t *nnode;
1676
1677 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1678
1679 for (node = 0; node < hal->number_of_nodes; node++) {
1680 nnode = &hal->topology_tree[node];
1681 if (LINK_ACTIVE(nnode) == B_FALSE || CFGROM_BIB_READ(nnode) ==
1682 B_FALSE)
1683 continue;
1684 if (NODE_MATCHED(nnode)) {
1685 /*
1686 * Skip if node matched. If config rom generations
1687 * changed, we want to call s1394_match_GUID() even
1688 * if the nodes matched.
1689 */
1690 int gen_changed;
1691 s1394_node_t *onode = nnode->old_node;
1692
1693 gen_changed = (onode && onode->cfgrom &&
1694 CONFIG_ROM_GEN(onode->cfgrom) != CONFIG_ROM_GEN(
1695 nnode->cfgrom)) ? 1 : 0;
1696
1697 if (CFGROM_GEN_CHANGED(nnode) == 0 && gen_changed == 0)
1698 continue;
1699 }
1700
1701 if (s1394_match_GUID(hal, nnode) == DDI_FAILURE) {
1702 ret = DDI_FAILURE;
1703 }
1704 }
1705
1706 return (ret);
1707 }
1708
1709 /*
1710 * s1394_valid_cfgrom()
1711 * Performs crc check on the config rom. Returns B_TRUE if config rom has
1712 * good CRC else returns B_FALSE.
1713 */
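/*
 * Per IEEE 1212, quadlet 0 of the config rom encodes a crc_length field
 * (how many quadlets, starting at quadlet 1, the CRC covers) and the
 * expected CRC-16 value. The check below also accepts the value produced by
 * s1394_CRC16_old(), presumably to tolerate devices that compute the CRC
 * that way.
 */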
1714 /* ARGSUSED */
1715 boolean_t
1716 s1394_valid_cfgrom(s1394_hal_t *hal, s1394_node_t *node)
1717 {
1718 uint32_t crc_len, crc_value, CRC, CRC_old, quad0;
1719
1720 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1721 ASSERT(node->cfgrom);
1722
1723 if (s1394_enable_crc_validation == 0) {
1724 return (B_TRUE);
1725 }
1726
1727 quad0 = node->cfgrom[0];
1728 crc_len = (quad0 >> IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
1729 IEEE1394_CFG_ROM_CRC_LEN_MASK;
1730 crc_value = quad0 & IEEE1394_CFG_ROM_CRC_VALUE_MASK;
1731
1732 if (node->cfgrom_valid_size < crc_len + 1) {
1733 return (B_FALSE);
1734 }
1735
1736 CRC = s1394_CRC16(&node->cfgrom[1], crc_len);
1737
1738 if (CRC != crc_value) {
1739 CRC_old = s1394_CRC16_old(&node->cfgrom[1], crc_len);
1740 if (CRC_old == crc_value) {
1741 return (B_TRUE);
1742 }
1743
1744 cmn_err(CE_NOTE,
1745 "!Bad CRC in config rom (node's GUID %08x%08x)",
1746 node->node_guid_hi, node->node_guid_lo);
1747
1748 return (B_FALSE);
1749 }
1750
1751 return (B_TRUE);
1752 }
1753
1754 /*
1755 * s1394_valid_dir()
1756 * Performs crc check on a directory. Returns B_TRUE if dir has good CRC
1757 * else returns B_FALSE.
1758 */
1759 /*ARGSUSED*/
1760 boolean_t
1761 s1394_valid_dir(s1394_hal_t *hal, s1394_node_t *node,
1762 uint32_t key, uint32_t *dir)
1763 {
1764 uint32_t dir_len, crc_value, CRC, CRC_old, quad0;
1765
1766 /*
1767 * Ideally, we would like to do crc validations for the entire cfgrom
1768 * as well as the individual directories. However, we have seen devices
1769 * that have valid directories but busted cfgrom crc and devices that
1770 * have bad crcs in directories as well as for the entire cfgrom. This
1771 * is sad, but unfortunately, real world!
1772 */
1773 if (s1394_enable_crc_validation == 0) {
1774 return (B_TRUE);
1775 }
1776
1777 quad0 = dir[0];
1778
1779 dir_len = IEEE1212_DIR_LEN(quad0);
1780 crc_value = IEEE1212_DIR_CRC(quad0);
1781
1782 ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1783
1784 CRC = s1394_CRC16(&dir[1], dir_len);
1785
1786 if (CRC != crc_value) {
1787 CRC_old = s1394_CRC16_old(&dir[1], dir_len);
1788 if (CRC_old == crc_value) {
1789 return (B_TRUE);
1790 }
1791
1792 return (B_FALSE);
1793 }
1794
1795 return (B_TRUE);
1796 }
1797
1798 /*
1799 * s1394_become_bus_mgr()
1800 * is a callback from a timeout() set up by the main br_thread. After
1801 * a bus reset, depending on the Bus Manager's incumbency and the state
1802 * of its abdicate bit, a timer of a certain length is set. After this
1803 * time expires, the local host may attempt to become the Bus Manager.
1804 * This is done by sending a request to the current IRM on the bus. The
1805 * IRM holds the BUS_MANAGER_ID register. Depending on whether or not
1806 * the local host is already the IRM, we will send a request onto the
1807 * 1394 bus or call into the HAL.
1808 */
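/*
 * In either case the operation is a compare-swap on the IRM's BUS_MANAGER_ID
 * register: the expected old value is S1394_INVALID_NODE_NUM ("no bus manager
 * yet") and the new value is the local node number. If the swap returns some
 * other node number, that node already won the race and is recorded as the
 * current bus manager.
 */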
1809 static void
1810 s1394_become_bus_mgr(void *arg)
1811 {
1812 s1394_hal_t *hal;
1813 s1394_cmd_priv_t *s_priv;
1814 cmd1394_cmd_t *cmd;
1815 uint64_t Bus_Mgr_ID_addr;
1816 uint32_t hal_node_num;
1817 uint32_t old_value;
1818 uint32_t generation;
1819 uint_t curr_bus_mgr;
1820 uint_t bm_node;
1821 uint_t IRM_node;
1822 int err;
1823 int ret;
1824
1825 hal = (s1394_hal_t *)arg;
1826
1827 /* Lock the topology tree */
1828 mutex_enter(&hal->topology_tree_mutex);
1829
1830 hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1831 generation = hal->generation_count;
1832 IRM_node = hal->IRM_node;
1833
1834 mutex_enter(&hal->bus_mgr_node_mutex);
1835 bm_node = hal->bus_mgr_node;
1836 mutex_exit(&hal->bus_mgr_node_mutex);
1837
1838 /* Unlock the topology tree */
1839 mutex_exit(&hal->topology_tree_mutex);
1840
	/* Bail out if a Bus Manager has already been determined */
	if (bm_node != -1) {
		return;
	}

	/* Send compare-swap to BUS_MANAGER_ID */
	/* register on the Isoch Rsrc Mgr */
	if (IRM_node == hal_node_num) {
		/* Local */
		ret = HAL_CALL(hal).csr_cswap32(hal->halinfo.hal_private,
		    generation, (IEEE1394_SCSR_BUSMGR_ID &
		    IEEE1394_CSR_OFFSET_MASK), S1394_INVALID_NODE_NUM,
		    hal_node_num, &old_value);
		if (ret != DDI_SUCCESS) {
			return;
		}
		curr_bus_mgr = IEEE1394_NODE_NUM(old_value);

		mutex_enter(&hal->bus_mgr_node_mutex);
		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
		    (curr_bus_mgr == hal_node_num)) {
			hal->bus_mgr_node = hal_node_num;
			hal->incumbent_bus_mgr = B_TRUE;
		} else {
			hal->bus_mgr_node = curr_bus_mgr;
			hal->incumbent_bus_mgr = B_FALSE;
		}
		cv_signal(&hal->bus_mgr_node_cv);
		mutex_exit(&hal->bus_mgr_node_mutex);

	} else {
		/* Remote */
		if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
		    DDI_SUCCESS) {
			return;
		}

		cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
		    CMD1394_OVERRIDE_ADDR);
		cmd->cmd_type = CMD1394_ASYNCH_LOCK_32;
		cmd->completion_callback = s1394_become_bus_mgr_callback;
		Bus_Mgr_ID_addr = (IEEE1394_ADDR_BUS_ID_MASK |
		    IEEE1394_SCSR_BUSMGR_ID) |
		    (((uint64_t)hal->IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
		cmd->cmd_addr = Bus_Mgr_ID_addr;
		cmd->bus_generation = generation;
		cmd->cmd_u.l32.arg_value = T1394_DATA32(
		    S1394_INVALID_NODE_NUM);
		cmd->cmd_u.l32.data_value = T1394_DATA32(hal_node_num);
		cmd->cmd_u.l32.num_retries = 0;
		cmd->cmd_u.l32.lock_type = CMD1394_LOCK_COMPARE_SWAP;

		/* Get the Services Layer private area */
		s_priv = S1394_GET_CMD_PRIV(cmd);

		/* Lock the topology tree */
		mutex_enter(&hal->topology_tree_mutex);

		ret = s1394_setup_asynch_command(hal, NULL, cmd,
		    S1394_CMD_LOCK, &err);

		/* Unlock the topology tree */
		mutex_exit(&hal->topology_tree_mutex);

		/* Command has now been put onto the queue! */
		if (ret != DDI_SUCCESS) {
			/* Need to free the command */
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
			return;
		}

		/* Send the command out */
		ret = s1394_xfer_asynch_command(hal, cmd, &err);

		if (ret != DDI_SUCCESS) {
			/* Remove cmd from the outstanding request Q */
			s1394_remove_q_asynch_cmd(hal, cmd);

			s_priv->cmd_in_use = B_FALSE;

			mutex_enter(&hal->bus_mgr_node_mutex);

			/* Don't know who the bus_mgr is */
			hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
			hal->incumbent_bus_mgr = B_FALSE;

			cv_signal(&hal->bus_mgr_node_cv);
			mutex_exit(&hal->bus_mgr_node_mutex);

			/* Need to free the command */
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
		}
	}
}

/*
 * s1394_become_bus_mgr_callback()
 *    is the callback used by s1394_become_bus_mgr() when it is necessary
 *    to send the Bus Manager request to a remote IRM. After the completion
 *    of the compare-swap request, this routine looks at the "old_value"
 *    in the request to determine whether or not it has become the Bus
 *    Manager for the current generation. It sets the bus_mgr_node and
 *    incumbent_bus_mgr fields to their appropriate values.
 */
static void
s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd)
{
	s1394_cmd_priv_t *s_priv;
	s1394_hal_t *hal;
	uint32_t hal_node_num;
	uint32_t temp;
	uint_t curr_bus_mgr;

	/* Get the Services Layer private area */
	s_priv = S1394_GET_CMD_PRIV(cmd);

	hal = (s1394_hal_t *)s_priv->sent_on_hal;

	/* Lock the topology tree */
	mutex_enter(&hal->topology_tree_mutex);

	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);

	/* Was the command successful? */
	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
		temp = T1394_DATA32(cmd->cmd_u.l32.old_value);
		curr_bus_mgr = IEEE1394_NODE_NUM(temp);
		mutex_enter(&hal->bus_mgr_node_mutex);
		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
		    (curr_bus_mgr == hal_node_num)) {

			hal->bus_mgr_node = hal_node_num;
			hal->incumbent_bus_mgr = B_TRUE;

		} else {
			hal->bus_mgr_node = curr_bus_mgr;
			hal->incumbent_bus_mgr = B_FALSE;
		}
		cv_signal(&hal->bus_mgr_node_cv);
		mutex_exit(&hal->bus_mgr_node_mutex);

	} else {
		mutex_enter(&hal->bus_mgr_node_mutex);

		/* Don't know who the bus_mgr is */
		hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
		hal->incumbent_bus_mgr = B_FALSE;

		cv_signal(&hal->bus_mgr_node_cv);
		mutex_exit(&hal->bus_mgr_node_mutex);
	}

	/* Need to free the command */
	(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);

	/* Unlock the topology tree */
	mutex_exit(&hal->topology_tree_mutex);
}

/*
 * s1394_bus_mgr_processing()
 *    is called following "phase1" completion of reading Bus_Info_Blocks.
 *    Its purpose is to determine whether the local node is capable of
 *    becoming the Bus Manager (has the IRMC bit set) and if so to call
 *    the s1394_do_bus_mgr_processing() routine.
 *    NOTE: we overload DDI_FAILURE return value to mean jump back to
 *    the start of bus reset processing.
 */
static int
s1394_bus_mgr_processing(s1394_hal_t *hal)
{
	int ret;
	int IRM_node_num;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}
	IRM_node_num = hal->IRM_node;
	s1394_unlock_tree(hal);

	ret = DDI_SUCCESS;

	/* If we are IRM capable, then do bus_mgr stuff... */
	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
		/* If there is an IRM, then do bus_mgr stuff */
		if (IRM_node_num != -1) {
			if (s1394_do_bus_mgr_processing(hal))
				ret = DDI_FAILURE;
		}
	}

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	return (ret);
}

/*
 * s1394_do_bus_mgr_processing()
 *    is used to perform those operations expected of the Bus Manager.
 *    After being called, s1394_do_bus_mgr_processing() looks at the value
 *    in bus_mgr_node and waits if it is -1 (Bus Manager has not been
 *    chosen yet). Then, if there is more than one node on the 1394 bus,
 *    and we are either the Bus Manager or (if there is no Bus Manager)
 *    the IRM, it optimizes the gap_count and/or sets the cycle master's
 *    root holdoff bit (to ensure that the cycle master is/stays root).
 *
 *    NOTE: we overload DDI_FAILURE return value to mean jump back to
 *    the start of bus reset processing.
 */
static int
s1394_do_bus_mgr_processing(s1394_hal_t *hal)
{
	int ret;
	int IRM_flags, hal_bus_mgr_node;
	int IRM_node_num;
	uint_t hal_node_num, number_of_nodes;
	int new_root, new_gap_cnt;

	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	/* Wait for Bus Manager to be determined */
	/* or a Bus Reset to happen */
	mutex_enter(&hal->bus_mgr_node_mutex);
	if (hal->bus_mgr_node == -1)
		cv_wait(&hal->bus_mgr_node_cv, &hal->bus_mgr_node_mutex);

	/* Check if a BUS RESET has come while we've been waiting */
	mutex_enter(&hal->br_thread_mutex);
	if (hal->br_thread_ev_type & (BR_THR_CFGROM_SCAN | BR_THR_GO_AWAY)) {

		mutex_exit(&hal->br_thread_mutex);
		mutex_exit(&hal->bus_mgr_node_mutex);

		return (1);
	}
	mutex_exit(&hal->br_thread_mutex);

	hal_bus_mgr_node = hal->bus_mgr_node;
	mutex_exit(&hal->bus_mgr_node_mutex);

	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
		return (1);
	}
	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
	IRM_node_num = hal->IRM_node;
	number_of_nodes = hal->number_of_nodes;

	ret = 0;

	/* If there is more than one node on the bus, and we are either */
	/* the bus_mgr or (when there is no bus_mgr) the IRM */
	if ((number_of_nodes > 1) &&
	    ((hal_bus_mgr_node == (int)hal_node_num) ||
	    ((hal_bus_mgr_node == S1394_INVALID_NODE_NUM) &&
	    (IRM_node_num == (int)hal_node_num)))) {

		IRM_flags = 0;

		/* Make sure the root node is cycle master capable */
		if (!s1394_cycle_master_capable(hal)) {
			/* Make the local node root */
			new_root = hal_node_num;
			IRM_flags = IRM_flags | ROOT_HOLDOFF;

			/* If setting root, then optimize gap_count */
			new_gap_cnt = hal->optimum_gap_count;
			IRM_flags = IRM_flags | GAP_COUNT;

		} else {
			/* Make sure root's ROOT_HOLDOFF bit is set */
			new_root = (number_of_nodes - 1);
			IRM_flags = IRM_flags | ROOT_HOLDOFF;
		}
		if (hal->gap_count > hal->optimum_gap_count) {
			/* Set the gap_count to optimum */
			new_gap_cnt = hal->optimum_gap_count;
			IRM_flags = IRM_flags | GAP_COUNT;

		}

		s1394_unlock_tree(hal);

		if (IRM_flags) {
			ret = s1394_do_phy_config_pkt(hal, new_root,
			    new_gap_cnt, IRM_flags);
		}
		return (ret);
	}

	s1394_unlock_tree(hal);

	return (ret);
}

/*
 * s1394_bus_mgr_timers_stop()
 *    Cancels bus manager timeouts
 */
/*ARGSUSED*/
static void
s1394_bus_mgr_timers_stop(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
    timeout_id_t *bus_mgr_tid)
{
	/* Cancel the Bus Mgr timeouts (if necessary) */
	if (*bus_mgr_tid != 0) {
		(void) untimeout(*bus_mgr_tid);
		*bus_mgr_tid = 0;
	}
	if (*bus_mgr_query_tid != 0) {
		(void) untimeout(*bus_mgr_query_tid);
		*bus_mgr_query_tid = 0;
	}
}

/*
 * s1394_bus_mgr_timers_start()
 *    Starts bus manager timeouts if the hal is IRM capable.
 */
static void
s1394_bus_mgr_timers_start(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
    timeout_id_t *bus_mgr_tid)
{
	boolean_t incumbent;
	uint_t hal_node_num;
	int IRM_node_num;

	mutex_enter(&hal->topology_tree_mutex);

	IRM_node_num = hal->IRM_node;
	hal_node_num = hal->node_id;

	mutex_enter(&hal->bus_mgr_node_mutex);
	incumbent = hal->incumbent_bus_mgr;
	mutex_exit(&hal->bus_mgr_node_mutex);

	/* If we are IRM capable, then do bus_mgr stuff... */
	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
		/*
		 * If we are the IRM, then wait 625ms
		 * before checking BUS_MANAGER_ID register
		 */
		if (IRM_node_num == IEEE1394_NODE_NUM(hal_node_num)) {

			mutex_exit(&hal->topology_tree_mutex);

			/* Wait 625ms, then check bus manager */
			*bus_mgr_query_tid = timeout(s1394_become_bus_mgr,
			    hal, drv_usectohz(IEEE1394_BM_IRM_TIMEOUT));

			mutex_enter(&hal->topology_tree_mutex);
		}

		/* If there is an IRM on the bus */
		if (IRM_node_num != -1) {
			if ((incumbent == B_TRUE) &&
			    (hal->abdicate_bus_mgr_bit == 0)) {
				mutex_exit(&hal->topology_tree_mutex);

				/* Try to become bus manager */
				s1394_become_bus_mgr(hal);

				mutex_enter(&hal->topology_tree_mutex);
			} else {
				hal->abdicate_bus_mgr_bit = 0;

				mutex_exit(&hal->topology_tree_mutex);

				/* Wait 125ms, then try to become bus manager */
				*bus_mgr_tid = timeout(s1394_become_bus_mgr,
				    hal, drv_usectohz(
				    IEEE1394_BM_INCUMBENT_TIMEOUT));

				mutex_enter(&hal->topology_tree_mutex);
			}
		} else {
			mutex_enter(&hal->bus_mgr_node_mutex);
			hal->incumbent_bus_mgr = B_FALSE;
			mutex_exit(&hal->bus_mgr_node_mutex);
		}
	}

	mutex_exit(&hal->topology_tree_mutex);
}

/*
 * s1394_get_maxpayload()
 *    is used to determine a device's maximum payload size. That is to
 *    say, the largest packet that can be transmitted or received by the
 *    target device given the current topological (speed) constraints
 *    and the constraints specified in the local host's and remote device's
 *    Config ROM (max_rec). Caller must hold the topology_tree_mutex and
 *    the target_list_rwlock as an RW_READER (at least).
 */
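
/*
 * Rough worked example (illustrative values): a remote max_rec of 8
 * allows block payloads of 2^(8+1) = 512 bytes, and at S200 the speed
 * limit is 512 << 1 = 1024 bytes. The current max payload is then the
 * minimum of the local limit, the remote limit and the speed limit.
 */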
/*ARGSUSED*/
void
s1394_get_maxpayload(s1394_target_t *target, uint_t *dev_max_payload,
    uint_t *current_max_payload)
{
	s1394_hal_t *hal;
	uint32_t bus_capabilities;
	uint32_t from_node;
	uint32_t to_node;
	uint_t local_max_rec;
	uint_t local_max_blk;
	uint_t max_rec;
	uint_t max_blk;
	uint_t curr_speed;
	uint_t speed_max_blk;
	uint_t temp;

	/* Find the HAL this target resides on */
	hal = target->on_hal;

	/* Make sure we're holding the topology_tree_mutex */
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	/* Set dev_max_payload to local (HAL's) size */
	bus_capabilities = target->on_hal->halinfo.bus_capabilities;
	local_max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
	    IEEE1394_BIB_MAXREC_SHIFT;
	if ((local_max_rec > 0) && (local_max_rec < 14)) {
		local_max_blk = 1 << (local_max_rec + 1);
	} else {
		/* These are either unspecified or reserved */
		local_max_blk = 4;
	}

	/* Is this target on a node? */
	if ((target->target_state & S1394_TARG_GONE) == 0 &&
	    (target->on_node != NULL)) {
		ASSERT(target->on_node->cfgrom != NULL);

		bus_capabilities =
		    target->on_node->cfgrom[IEEE1212_NODE_CAP_QUAD];
		max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
		    IEEE1394_BIB_MAXREC_SHIFT;

		if ((max_rec > 0) && (max_rec < 14)) {
			max_blk = 1 << (max_rec + 1);
		} else {
			/* These are either unspecified or reserved */
			max_blk = 4;
		}
		(*dev_max_payload) = max_blk;

		from_node = IEEE1394_NODE_NUM(target->on_hal->node_id);
		to_node = (target->on_node->node_num);

		/* Speed is to be filled in from speed map */
		curr_speed = (uint_t)s1394_speed_map_get(target->on_hal,
		    from_node, to_node);
		speed_max_blk = 512 << curr_speed;
		temp = (local_max_blk < max_blk) ? local_max_blk : max_blk;
		(*current_max_payload) = (temp < speed_max_blk) ? temp :
		    speed_max_blk;
	} else {
		/* Set dev_max_payload to local (HAL's) size */
		(*dev_max_payload) = local_max_blk;
		(*current_max_payload) = local_max_blk;
	}
}

/*
 * s1394_cycle_master_capable()
 *    is used to determine whether or not the current root node on the
 *    1394 bus has its CMC-bit set in its Config ROM. If not, then it
 *    is not capable of being cycle master and a new root node must be
 *    selected.
 */
static int
s1394_cycle_master_capable(s1394_hal_t *hal)
{
	s1394_node_t *root;
	int cycle_master_capable;
	uint_t hal_node_num;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);

	/* Get a pointer to the root node */
	root = s1394_topology_tree_get_root_node(hal);

	/* Ignore, if we are already root */
	if (root == &hal->topology_tree[hal_node_num]) {
		return (1);
	}

	/*
	 * We want to pick a new root if link is off or we don't have
	 * valid config rom
	 */
	if (LINK_ACTIVE(root) == B_FALSE || root->cfgrom == NULL ||
	    CFGROM_BIB_READ(root) == 0) {

		return (0);
	}

	/* Check the Cycle Master bit in the Bus Info Block */
	cycle_master_capable = root->cfgrom[IEEE1212_NODE_CAP_QUAD] &
	    IEEE1394_BIB_CMC_MASK;

	if (cycle_master_capable) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * s1394_do_phy_config_pkt()
 *    is called by s1394_do_bus_mgr_processing() to setup and send out
 *    a PHY configuration packet onto the 1394 bus. Depending on the
 *    values in IRM_flags, the gap_count and root_holdoff bits on the
 *    bus will be affected by this packet.
 *
 *    NOTE: we overload DDI_FAILURE return value to mean jump back to
 *    the start of bus reset processing.
 */
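
/*
 * For reference, the PHY configuration packet payload built below is a
 * single quadlet: the R bit plus its accompanying node number field
 * request a root holdoff on that node, and the T bit plus the gap_cnt
 * field broadcast a new gap count. Only the fields selected by
 * IRM_flags are filled in.
 */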
static int
s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root, int new_gap_cnt,
    uint32_t IRM_flags)
{
	cmd1394_cmd_t *cmd;
	s1394_cmd_priv_t *s_priv;
	h1394_cmd_priv_t *h_priv;
	uint32_t pkt_data = 0;
	uint32_t gap_cnt = 0;
	uint32_t root = 0;
	int ret, result;
	uint_t flags = 0;

	/* Gap count needs to be optimized */
	if (IRM_flags & GAP_COUNT) {

		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_T_BIT_MASK;
		gap_cnt = ((uint32_t)new_gap_cnt) <<
		    IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT;
		gap_cnt = gap_cnt & IEEE1394_PHY_CONFIG_GAP_CNT_MASK;
		pkt_data = pkt_data | gap_cnt;

		(void) HAL_CALL(hal).set_gap_count(hal->halinfo.hal_private,
		    (uint_t)new_gap_cnt);
	}

	/* Root node needs to be changed */
	if (IRM_flags & ROOT_HOLDOFF) {

		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_R_BIT_MASK;
		root = ((uint32_t)new_root) <<
		    IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT;
		root = root & IEEE1394_PHY_CONFIG_ROOT_HOLD_MASK;
		pkt_data = pkt_data | root;

		(void) HAL_CALL(hal).set_root_holdoff_bit(
		    hal->halinfo.hal_private);
	}

	if (IRM_flags) {
		if (s1394_alloc_cmd(hal, flags, &cmd) != DDI_SUCCESS) {
			return (0);
		}

		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
			/* lock tree failure indicates a bus gen change */
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
			return (1);
		}

		/* Setup the callback routine */
		cmd->completion_callback = s1394_phy_config_callback;
		cmd->cmd_callback_arg = (void *)(uintptr_t)IRM_flags;
		cmd->bus_generation = hal->generation_count;
		cmd->cmd_options = CMD1394_OVERRIDE_ADDR;
		cmd->cmd_type = CMD1394_ASYNCH_WR_QUAD;
		cmd->cmd_u.q.quadlet_data = pkt_data;

		/* Get the Services Layer private area */
		s_priv = S1394_GET_CMD_PRIV(cmd);

		/* Get a pointer to the HAL private struct */
		h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;

		s_priv->sent_by_target = (s1394_target_t *)NULL;
		s_priv->sent_on_hal = (s1394_hal_t *)hal;

		h_priv->bus_generation = cmd->bus_generation;

		/* Speed must be IEEE1394_S100 on PHY config packets */
		s_priv->hal_cmd_private.speed = IEEE1394_S100;

		/* Mark command as being used */
		s_priv->cmd_in_use = B_TRUE;

		s1394_unlock_tree(hal);

		/* Put command on the HAL's outstanding request Q */
		s1394_insert_q_asynch_cmd(hal, cmd);

		ret = HAL_CALL(hal).send_phy_configuration_packet(
		    hal->halinfo.hal_private, (cmd1394_cmd_t *)cmd,
		    (h1394_cmd_priv_t *)&s_priv->hal_cmd_private, &result);

		if (ret != DDI_SUCCESS) {
			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);

			return (0);

		} else {
			/*
			 * There will be a bus reset only if GAP_COUNT changed
			 */
			if (IRM_flags & GAP_COUNT) {
				return (1);
			}
		}
	}

	return (0);
}

/*
 * s1394_phy_config_callback()
 *    is the callback called after the PHY configuration packet has been
 *    sent out onto the 1394 bus. Depending on the values in IRM_flags,
 *    (specifically if the gap_count has been changed) this routine may
 *    initiate a bus reset.
 */
static void
s1394_phy_config_callback(cmd1394_cmd_t *cmd)
{
	s1394_cmd_priv_t *s_priv;
	s1394_hal_t *hal;
	uint32_t IRM_flags;

	/* Get the Services Layer private area */
	s_priv = S1394_GET_CMD_PRIV(cmd);

	hal = (s1394_hal_t *)s_priv->sent_on_hal;

	IRM_flags = (uint32_t)(uintptr_t)cmd->cmd_callback_arg;

	if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
		(void) s1394_free_cmd(hal, &cmd);
	} else {
		(void) s1394_free_cmd(hal, &cmd);

		/* Only need a bus reset if we changed GAP_COUNT */
		if (IRM_flags & GAP_COUNT) {
			s1394_initiate_hal_reset(hal, NON_CRITICAL);
		}
	}
}

/*
 * s1394_lock_tree()
 *    Attempts to lock the topology tree. Returns DDI_FAILURE if generations
 *    changed or if the services layer signals the bus reset thread to go
 *    away. Otherwise, returns DDI_SUCCESS.
 */
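
/*
 * Typical calling pattern (sketch):
 *
 *	if (s1394_lock_tree(hal) != DDI_SUCCESS)
 *		return (...);	(generation changed or thread told to exit)
 *	... examine or update the topology tree ...
 *	s1394_unlock_tree(hal);
 */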
int
s1394_lock_tree(s1394_hal_t *hal)
{
	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));

	mutex_enter(&hal->br_thread_mutex);
	ndi_devi_enter(hal->halinfo.dip);
	mutex_enter(&hal->topology_tree_mutex);

	if ((hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
		mutex_exit(&hal->br_thread_mutex);
		mutex_exit(&hal->topology_tree_mutex);
		ndi_devi_exit(hal->halinfo.dip);
		return (DDI_FAILURE);
	} else if (hal->br_cfgrom_read_gen != hal->generation_count) {
		mutex_exit(&hal->br_thread_mutex);
		mutex_exit(&hal->topology_tree_mutex);
		ndi_devi_exit(hal->halinfo.dip);
		return (DDI_FAILURE);
	}

	mutex_exit(&hal->br_thread_mutex);

	return (DDI_SUCCESS);
}

/*
 * s1394_unlock_tree()
 *    Unlocks the topology tree
 */
void
s1394_unlock_tree(s1394_hal_t *hal)
{
	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
	mutex_exit(&hal->topology_tree_mutex);
	ndi_devi_exit(hal->halinfo.dip);
}

/*
 * s1394_calc_next_quad()
 *    figures out the next quadlet to read. This maintains a stack of
 *    directories in the node. When the first quad of a directory (the
 *    first directory would be the root directory) is read, it is pushed
 *    onto this stack. When the directory has been fully read, it scans the
 *    directory looking for indirect entries. If any indirect directory
 *    entry is found, it is pushed onto the stack and that directory is
 *    read. If we are done dealing with all entries in the current dir,
 *    the directory is popped off the stack. If the stack is empty, we are
 *    back at the root directory level and have essentially read the
 *    entire directory hierarchy.
 *    Returns 0 if there are more quads to read, else returns non-zero.
 */
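
/*
 * Illustrative walk (hypothetical offsets): if the root directory starts
 * at quad 5 and its entry at quad 9 is a directory entry with offset 3,
 * the root directory (along with the resume point, quad 10) is pushed
 * onto the stack and reading continues at quad 9 + 3 = 12. Once that
 * directory has been consumed, the root directory is popped and scanning
 * resumes at quad 10.
 */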
static int
s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node, uint32_t quadlet,
    uint32_t *nextquadp)
{
	uint32_t data, type, key, value, *ptr;

	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));

	if (((quadlet + 1) >= node->cfgrom_size) ||
	    (CFGROM_SIZE_IS_CRCSIZE(node) == B_TRUE &&
	    (quadlet + 1) >= node->cfgrom_valid_size)) {
		return (1);
	}

	if (s1394_turn_off_dir_stack != 0 || CFGROM_DIR_STACK_OFF(node) ==
	    B_TRUE) {
		quadlet++;
		*nextquadp = quadlet;
		return (0);
	}

	data = node->cfgrom[quadlet];

	if (quadlet == IEEE1212_ROOT_DIR_QUAD) {
		node->dir_stack_top = -1;
		node->expected_dir_quad = quadlet;
		node->expected_type = IEEE1212_IMMEDIATE_TYPE;
	}

	CFGROM_TYPE_KEY_VALUE(data, type, key, value);

	/*
	 * check to make sure we are looking at a dir. If the config rom
	 * is broken, then revert to normal scanning of the config rom
	 */
	if (node->expected_dir_quad == quadlet) {
		if (type != 0 || key != 0) {
			SET_CFGROM_DIR_STACK_OFF(node);
			quadlet = IEEE1212_ROOT_DIR_QUAD;
		} else {
			node->cur_dir_start = quadlet;
			node->cur_dir_size = IEEE1212_DIR_LEN(data);
			node->expected_dir_quad = 0;
			/* get the next quad */
			quadlet++;
		}
	} else {
		/*
		 * If we read all quads in cur dir and the cur dir is not
		 * a leaf, scan for offsets (if the directory's CRC checks
		 * out OK). If we have a directory or a leaf, we save the
		 * current location on the stack and start reading that
		 * directory. So, we will end up with a depth first read of
		 * the entire config rom. If we are done with the current
		 * directory, pop it off the stack and continue the scanning
		 * as appropriate.
		 */
		if (quadlet == node->cur_dir_start + node->cur_dir_size) {

			int i, top;
			boolean_t done_with_cur_dir = B_FALSE;

			if (node->expected_type == IEEE1212_LEAF_TYPE) {
				node->expected_type = IEEE1212_IMMEDIATE_TYPE;
				done_with_cur_dir = B_TRUE;
				goto donewithcurdir;
			}

			ptr = &node->cfgrom[node->cur_dir_start];
			CFGROM_TYPE_KEY_VALUE(*ptr, type, key, value);

			/*
			 * If CRC for this directory is invalid, turn off
			 * dir stack and start re-reading from root dir.
			 * This wastes the work done thus far, but CRC
			 * errors in directories should be rather rare.
			 * If s1394_crcsz_is_cfgsz is set, then set
			 * cfgrom_valid_size to the length specified as the
			 * crc len in quadlet 0.
			 */
			if (s1394_valid_dir(hal, node, key, ptr) == B_FALSE) {
				SET_CFGROM_DIR_STACK_OFF(node);
				if (s1394_crcsz_is_cfgsz != 0) {
					SET_CFGROM_SIZE_IS_CRCSIZE(node);
					node->cfgrom_valid_size =
					    ((node->cfgrom[0] >>
					    IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
					    IEEE1394_CFG_ROM_CRC_LEN_MASK);
				}
				*nextquadp = IEEE1212_ROOT_DIR_QUAD;
				return (0);
			}
			i = node->cur_dir_start + 1;
rescan:
			for (done_with_cur_dir = B_FALSE; i <=
			    node->cur_dir_start + node->cur_dir_size; i++) {
				data = node->cfgrom[i];
				CFGROM_TYPE_KEY_VALUE(data, type, key, value);
				/* read leaf type and directory types only */
				if (type == IEEE1212_LEAF_TYPE || type ==
				    IEEE1212_DIRECTORY_TYPE) {

					/*
					 * push current dir on stack; if the
					 * stack is overflowing, ie, too many
					 * directory level nestings, turn off
					 * dir stack and fall back to serial
					 * scanning, starting at root dir. This
					 * wastes all the work we have done
					 * thus far, but more than 16 levels
					 * of directories is rather odd...
					 */
					top = ++node->dir_stack_top;
					if (top == S1394_DIR_STACK_SIZE) {
						SET_CFGROM_DIR_STACK_OFF(node);
						*nextquadp =
						    IEEE1212_ROOT_DIR_QUAD;
						return (0);
					}

					node->dir_stack[top].dir_start =
					    node->cur_dir_start;
					node->dir_stack[top].dir_size =
					    node->cur_dir_size;
					node->dir_stack[top].dir_next_quad =
					    i + 1;
					/* and set the next quadlet to read */
					quadlet = i + value;
					node->expected_dir_quad = quadlet;
					node->expected_type = type;
					break;
				}
			}

donewithcurdir:

			if ((i > node->cur_dir_start + node->cur_dir_size) ||
			    done_with_cur_dir == B_TRUE) {

				/*
				 * all done with cur dir; pop it off the stack
				 */
				if (node->dir_stack_top >= 0) {
					top = node->dir_stack_top--;
					node->cur_dir_start =
					    node->dir_stack[top].dir_start;
					node->cur_dir_size =
					    node->dir_stack[top].dir_size;
					i = node->dir_stack[top].dir_next_quad;
					goto rescan;
				} else {
					/*
					 * if empty stack, we are at the top
					 * level; declare done.
					 */
					return (1);
				}
			}
		} else {
			/* get the next quadlet */
			quadlet++;
		}
	}
	*nextquadp = quadlet;

	return (0);
}
