1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * PIM-DR layer of DR driver. Provides interface between user
29 * level applications and the PSM-DR layer.
30 */
31
32 #include <sys/note.h>
33 #include <sys/debug.h>
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/cred.h>
37 #include <sys/dditypes.h>
38 #include <sys/devops.h>
39 #include <sys/modctl.h>
40 #include <sys/poll.h>
41 #include <sys/conf.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/sunndi.h>
45 #include <sys/stat.h>
46 #include <sys/kmem.h>
47 #include <sys/processor.h>
48 #include <sys/cpuvar.h>
49 #include <sys/mem_config.h>
50
51 #include <sys/autoconf.h>
52 #include <sys/cmn_err.h>
53
54 #include <sys/ddi_impldefs.h>
55 #include <sys/promif.h>
56 #include <sys/machsystm.h>
57
58 #include <sys/dr.h>
59 #include <sys/drmach.h>
60 #include <sys/dr_util.h>
61
62 extern int nulldev();
63 extern int nodev();
64 extern struct memlist *phys_install;
65
66 #ifdef DEBUG
67 uint_t dr_debug = 0; /* dr.h for bit values */
68 #endif /* DEBUG */
69
70 /*
71 * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
72 * kernel. They are, however, referenced during both debug and non-debug
73 * compiles.
74 */
75
76 static char *state_str[] = {
77 "EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
78 "PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
79 "FATAL"
80 };
81
82 #define SBD_CMD_STR(c) \
83 (((c) == SBD_CMD_ASSIGN) ? "ASSIGN" : \
84 ((c) == SBD_CMD_UNASSIGN) ? "UNASSIGN" : \
85 ((c) == SBD_CMD_POWERON) ? "POWERON" : \
86 ((c) == SBD_CMD_POWEROFF) ? "POWEROFF" : \
87 ((c) == SBD_CMD_TEST) ? "TEST" : \
88 ((c) == SBD_CMD_CONNECT) ? "CONNECT" : \
89 ((c) == SBD_CMD_DISCONNECT) ? "DISCONNECT" : \
90 ((c) == SBD_CMD_CONFIGURE) ? "CONFIGURE" : \
91 ((c) == SBD_CMD_UNCONFIGURE) ? "UNCONFIGURE" : \
92 ((c) == SBD_CMD_GETNCM) ? "GETNCM" : \
93 ((c) == SBD_CMD_PASSTHRU) ? "PASSTHRU" : \
94 ((c) == SBD_CMD_STATUS) ? "STATUS" : "unknown")
95
96 #define DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[NIX(ut)][un]))
97
98 #define DR_MAKE_MINOR(i, b) (((i) << 16) | (b))
99 #define DR_MINOR2INST(m) (((m) >> 16) & 0xffff)
100 #define DR_MINOR2BNUM(m) ((m) & 0xffff)
101
102 /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
103 static char *dr_ie_fmt = "dr.c %d";
104
105 /* struct for drmach device name to sbd_comp_type_t mapping */
106 typedef struct {
107 char *s_devtype;
108 sbd_comp_type_t s_nodetype;
109 } dr_devname_t;
110
111 /* struct to map starfire device attributes - name:sbd_comp_type_t */
112 static dr_devname_t dr_devattr[] = {
113 { DRMACH_DEVTYPE_MEM, SBD_COMP_MEM },
114 { DRMACH_DEVTYPE_CPU, SBD_COMP_CPU },
115 { DRMACH_DEVTYPE_PCI, SBD_COMP_IO },
116 #if defined(DRMACH_DEVTYPE_SBUS)
117 { DRMACH_DEVTYPE_SBUS, SBD_COMP_IO },
118 #endif
119 #if defined(DRMACH_DEVTYPE_WCI)
120 { DRMACH_DEVTYPE_WCI, SBD_COMP_IO },
121 #endif
122 /* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
123 { NULL, SBD_COMP_UNKNOWN }
124 };
125
126 /*
127 * Per instance soft-state structure.
128 */
129 typedef struct dr_softstate {
130 dev_info_t *dip;
131 dr_board_t *boards;
132 kmutex_t i_lock;
133 int dr_initialized;
134 } dr_softstate_t;
135
136 /*
137 * dr Global data elements
138 */
139 struct dr_global {
140 dr_softstate_t *softsp; /* pointer to initialize soft state */
141 kmutex_t lock;
142 } dr_g;
143
144 dr_unsafe_devs_t dr_unsafe_devs;
145
146 /*
147 * Table of known passthru commands.
148 */
149
150 struct {
151 char *pt_name;
152 int (*pt_func)(dr_handle_t *);
153 } pt_arr[] = {
154 "quiesce", dr_pt_test_suspend,
155 };
156
157 int dr_modunload_okay = 0; /* set to non-zero to allow unload */
158
159 static int dr_dev_type_to_nt(char *);
160
161 /*
162 * State transition table. States valid transitions for "board" state.
163 * Recall that non-zero return value terminates operation, however
164 * the herrno value is what really indicates an error , if any.
165 */
166 static int
_cmd2index(int c)167 _cmd2index(int c)
168 {
169 /*
170 * Translate DR CMD to index into dr_state_transition.
171 */
172 switch (c) {
173 case SBD_CMD_CONNECT: return (0);
174 case SBD_CMD_DISCONNECT: return (1);
175 case SBD_CMD_CONFIGURE: return (2);
176 case SBD_CMD_UNCONFIGURE: return (3);
177 case SBD_CMD_ASSIGN: return (4);
178 case SBD_CMD_UNASSIGN: return (5);
179 case SBD_CMD_POWERON: return (6);
180 case SBD_CMD_POWEROFF: return (7);
181 case SBD_CMD_TEST: return (8);
182 default: return (-1);
183 }
184 }
185
186 #define CMD2INDEX(c) _cmd2index(c)
187
/*
 * Board state-transition table, one row per DR command (row order must
 * match the indices returned by _cmd2index()).  Each row holds one
 * { x_rv, x_err } pair per board state, in the same order as the
 * state_str[] names above (EMPTY .. FATAL).  dr_pre_op() consults this
 * table: a non-zero x_rv terminates the operation and x_err carries the
 * error code reported to the caller.
 */
static struct dr_state_trans {
	int	x_cmd;			/* DR command for this row */
	struct {
		int	x_rv;		/* return value of pre_op */
		int	x_err;		/* error, if any */
	} x_op[DR_STATE_MAX];		/* indexed by board state */
} dr_state_transition[] = {
	{ SBD_CMD_CONNECT,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_DISCONNECT,
		{
			{ -1, ESBD_STATE },		/* empty */
			{ 0, 0 },			/* occupied */
			{ 0, 0 },			/* connected */
			{ 0, 0 },			/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_CONFIGURE,
		{
			{ -1, ESBD_STATE },		/* empty */
			{ -1, ESBD_STATE },		/* occupied */
			{ 0, 0 },			/* connected */
			{ 0, 0 },			/* unconfigured */
			{ 0, 0 },			/* partial */
			{ 0, 0 },			/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_UNCONFIGURE,
		{
			{ -1, ESBD_STATE },		/* empty */
			{ -1, ESBD_STATE },		/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ 0, 0 },			/* partial */
			{ 0, 0 },			/* configured */
			{ 0, 0 },			/* release */
			{ 0, 0 },			/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_ASSIGN,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_UNASSIGN,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_POWERON,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_POWEROFF,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
	{ SBD_CMD_TEST,
		{
			{ 0, 0 },			/* empty */
			{ 0, 0 },			/* occupied */
			{ -1, ESBD_STATE },		/* connected */
			{ -1, ESBD_STATE },		/* unconfigured */
			{ -1, ESBD_STATE },		/* partial */
			{ -1, ESBD_STATE },		/* configured */
			{ -1, ESBD_STATE },		/* release */
			{ -1, ESBD_STATE },		/* unreferenced */
			{ -1, ESBD_FATAL_STATE },	/* fatal */
		}
	},
};
313
314 /*
315 * Global R/W lock to synchronize access across
316 * multiple boards. Users wanting multi-board access
317 * must grab WRITE lock, others must grab READ lock.
318 */
319 krwlock_t dr_grwlock;
320
321 /*
322 * Head of the boardlist used as a reference point for
323 * locating board structs.
324 * TODO: eliminate dr_boardlist
325 */
326 dr_board_t *dr_boardlist;
327
328 /*
329 * DR support functions.
330 */
331 static dr_devset_t dr_dev2devset(sbd_comp_id_t *cid);
332 static int dr_check_transition(dr_board_t *bp,
333 dr_devset_t *devsetp,
334 struct dr_state_trans *transp,
335 int cmd);
336 static int dr_check_unit_attached(dr_common_unit_t *dp);
337 static sbd_error_t *dr_init_devlists(dr_board_t *bp);
338 static void dr_board_discovery(dr_board_t *bp);
339 static int dr_board_init(dr_board_t *bp, dev_info_t *dip,
340 int bd);
341 static void dr_board_destroy(dr_board_t *bp);
342 static void dr_board_transition(dr_board_t *bp, dr_state_t st);
343
344 /*
345 * DR driver (DDI) entry points.
346 */
347 static int dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
348 void *arg, void **result);
349 static int dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
350 static int dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
351 static int dr_probe(dev_info_t *dip);
352 static int dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
353 cred_t *cred_p, int *rval_p);
354 static int dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
355 static int dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
356
357 /*
358 * DR command processing operations.
359 */
360
361 static int dr_copyin_iocmd(dr_handle_t *hp);
362 static int dr_copyout_iocmd(dr_handle_t *hp);
363 static int dr_copyout_errs(dr_handle_t *hp);
364 static int dr_pre_op(dr_handle_t *hp);
365 static int dr_post_op(dr_handle_t *hp);
366 static int dr_exec_op(dr_handle_t *hp);
367 static void dr_assign_board(dr_handle_t *hp);
368 static void dr_unassign_board(dr_handle_t *hp);
369 static void dr_connect(dr_handle_t *hp);
370 static int dr_disconnect(dr_handle_t *hp);
371 static void dr_dev_configure(dr_handle_t *hp);
372 static void dr_dev_release(dr_handle_t *hp);
373 static int dr_dev_unconfigure(dr_handle_t *hp);
374 static void dr_dev_cancel(dr_handle_t *hp);
375 static int dr_dev_status(dr_handle_t *hp);
376 static int dr_get_ncm(dr_handle_t *hp);
377 static int dr_pt_ioctl(dr_handle_t *hp);
378 static void dr_poweron_board(dr_handle_t *hp);
379 static void dr_poweroff_board(dr_handle_t *hp);
380 static void dr_test_board(dr_handle_t *hp);
381
382
383
384 /*
385 * Autoconfiguration data structures
386 */
387
388 struct cb_ops dr_cb_ops = {
389 dr_open, /* open */
390 dr_close, /* close */
391 nodev, /* strategy */
392 nodev, /* print */
393 nodev, /* dump */
394 nodev, /* read */
395 nodev, /* write */
396 dr_ioctl, /* ioctl */
397 nodev, /* devmap */
398 nodev, /* mmap */
399 nodev, /* segmap */
400 nochpoll, /* chpoll */
401 ddi_prop_op, /* cb_prop_op */
402 NULL, /* struct streamtab */
403 D_NEW | D_MP | D_MTSAFE, /* compatibility flags */
404 CB_REV, /* Rev */
405 nodev, /* cb_aread */
406 nodev /* cb_awrite */
407 };
408
409 struct dev_ops dr_dev_ops = {
410 DEVO_REV, /* build version */
411 0, /* dev ref count */
412 dr_getinfo, /* getinfo */
413 nulldev, /* identify */
414 dr_probe, /* probe */
415 dr_attach, /* attach */
416 dr_detach, /* detach */
417 nodev, /* reset */
418 &dr_cb_ops, /* cb_ops */
419 (struct bus_ops *)NULL, /* bus ops */
420 NULL, /* power */
421 ddi_quiesce_not_needed, /* quiesce */
422 };
423
424 extern struct mod_ops mod_driverops;
425
426 static struct modldrv modldrv = {
427 &mod_driverops,
428 "Dynamic Reconfiguration",
429 &dr_dev_ops
430 };
431
432 static struct modlinkage modlinkage = {
433 MODREV_1,
434 (void *)&modldrv,
435 NULL
436 };
437
438 /*
439 * Driver entry points.
440 */
441 int
_init(void)442 _init(void)
443 {
444 int err;
445
446 /*
447 * If you need to support multiple nodes (instances), then
448 * whatever the maximum number of supported nodes is would
449 * need to passed as the third parameter to ddi_soft_state_init().
450 * Alternative would be to dynamically fini and re-init the
451 * soft state structure each time a node is attached.
452 */
453 err = ddi_soft_state_init((void **)&dr_g.softsp,
454 sizeof (dr_softstate_t), 1);
455 if (err)
456 return (err);
457
458 mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
459 rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
460
461 return (mod_install(&modlinkage));
462 }
463
464 int
_fini(void)465 _fini(void)
466 {
467 int err;
468
469 if ((err = mod_remove(&modlinkage)) != 0)
470 return (err);
471
472 mutex_destroy(&dr_g.lock);
473 rw_destroy(&dr_grwlock);
474
475 ddi_soft_state_fini((void **)&dr_g.softsp);
476
477 return (0);
478 }
479
480 int
_info(struct modinfo * modinfop)481 _info(struct modinfo *modinfop)
482 {
483 return (mod_info(&modlinkage, modinfop));
484 }
485
/*ARGSUSED1*/
/*
 * Open entry point.  Lazily initializes the per-instance board array
 * under i_lock on first open, then validates that the board addressed
 * by the minor number actually exists before allowing the open.
 * Returns 0 on success, ENXIO if not attached or board init fails,
 * ENODEV if the board cannot be looked up.
 */
static int
dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
{
	int instance;
	dr_softstate_t *softsp;
	dr_board_t *bp;
	/*
	 * Don't open unless we've attached.
	 */
	instance = DR_MINOR2INST(getminor(*dev));
	softsp = ddi_get_soft_state(dr_g.softsp, instance);
	if (softsp == NULL)
		return (ENXIO);

	/* first open initializes the board array; serialized by i_lock */
	mutex_enter(&softsp->i_lock);
	if (!softsp->dr_initialized) {
		int bd;
		int rv = 0;

		bp = softsp->boards;

		/* initialize each array element */
		for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
			rv = dr_board_init(bp, softsp->dip, bd);
			if (rv)
				break;
		}

		if (rv == 0) {
			softsp->dr_initialized = 1;
		} else {
			/*
			 * destroy elements initialized thus far; bp points
			 * at the element that failed, so pre-decrement
			 * walks back over the successfully initialized ones.
			 */
			while (--bp >= softsp->boards)
				dr_board_destroy(bp);


			/* TODO: should this be another errno val ? */
			mutex_exit(&softsp->i_lock);
			return (ENXIO);
		}
	}
	mutex_exit(&softsp->i_lock);

	bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];

	/*
	 * prevent opening of a dyn-ap for a board
	 * that does not exist
	 */
	if (!bp->b_assigned) {
		if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
			return (ENODEV);
	}

	return (0);
}
543
/*ARGSUSED*/
/*
 * Close entry point.  No per-open state is kept, so there is nothing
 * to tear down.
 */
static int
dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
{
	return (0);
}
550
551 /*
552 * Enable/disable DR features.
553 */
554 int dr_enable = 1;
555
/*ARGSUSED3*/
/*
 * Main ioctl entry point for DR commands.  Copies in the sbd command,
 * acquires the appropriate locks for the command's scope, then runs
 * the pre-op / exec-op / post-op sequence.  Status and GETNCM run
 * lockless; all other commands take the board lock and the global
 * rwlock (held exclusive when memory is involved, shared otherwise).
 */
static int
dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
	cred_t *cred_p, int *rval_p)
{
	int rv = 0;
	int instance;
	int bd;
	dr_handle_t *hp;
	dr_softstate_t *softsp;
	static fn_t f = "dr_ioctl";

	PR_ALL("%s...\n", f);

	instance = DR_MINOR2INST(getminor(dev));
	softsp = ddi_get_soft_state(dr_g.softsp, instance);
	if (softsp == NULL) {
		cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
		return (ENXIO);
	}

	/* with DR disabled, only the read-only commands are allowed */
	if (!dr_enable) {
		switch (cmd) {
		case SBD_CMD_STATUS:
		case SBD_CMD_GETNCM:
		case SBD_CMD_PASSTHRU:
			break;
		default:
			return (ENOTSUP);
		}
	}

	bd = DR_MINOR2BNUM(getminor(dev));
	if (bd >= MAX_BOARDS)
		return (ENXIO);

	/* get and initialize storage for new handle */
	hp = GETSTRUCT(dr_handle_t, 1);
	hp->h_bd = &softsp->boards[bd];
	hp->h_err = NULL;
	hp->h_dev = getminor(dev);
	hp->h_cmd = cmd;
	hp->h_mode = mode;
	hp->h_iap = (sbd_ioctl_arg_t *)arg;

	/* copy sbd command into handle */
	rv = dr_copyin_iocmd(hp);
	if (rv) {
		FREESTRUCT(hp, dr_handle_t, 1);
		return (EINVAL);
	}

	/* translate canonical name to component type */
	if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
		hp->h_sbdcmd.cmd_cm.c_id.c_type =
		    dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);

		PR_ALL("%s: c_name = %s, c_type = %d\n",
		    f,
		    hp->h_sbdcmd.cmd_cm.c_id.c_name,
		    hp->h_sbdcmd.cmd_cm.c_id.c_type);
	} else {
		/*EMPTY*/
		PR_ALL("%s: c_name is NULL\n", f);
	}

	/* determine scope of operation */
	hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);

	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
		/* no locks needed for these commands */
		break;

	default:
		rw_enter(&dr_grwlock, RW_WRITER);
		mutex_enter(&hp->h_bd->b_lock);

		/*
		 * If we're dealing with memory at all, then we have
		 * to keep the "exclusive" global lock held. This is
		 * necessary since we will probably need to look at
		 * multiple board structs. Otherwise, we only have
		 * to deal with the board in question and so can drop
		 * the global lock to "shared".
		 */
		rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
		if (rv == 0)
			rw_downgrade(&dr_grwlock);
		break;
	}
	/* rv held devset membership above, not an error; reset it */
	rv = 0;

	if (rv == 0)
		rv = dr_pre_op(hp);
	if (rv == 0)
		rv = dr_exec_op(hp);
	if (rv == 0)
		rv = dr_post_op(hp);

	if (rv == -1)
		rv = EIO;

	/* an sbd error was recorded: copy it out; EIO if copyout worked */
	if (hp->h_err != NULL)
		if (!(rv = dr_copyout_errs(hp)))
			rv = EIO;

	/* undo locking, if any, done before dr_pre_op */
	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
		break;

	case SBD_CMD_ASSIGN:
	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_CONNECT:
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_DISCONNECT:
		/* Board changed state. Log a sysevent. */
		if (rv == 0)
			(void) drmach_log_sysevent(hp->h_bd->b_num, "",
			    SE_SLEEP, 1);
		/* Fall through */

	default:
		mutex_exit(&hp->h_bd->b_lock);
		rw_exit(&dr_grwlock);
	}

	/* release the options buffer allocated by dr_copyin_iocmd() */
	if (hp->h_opts.size != 0)
		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);

	FREESTRUCT(hp, dr_handle_t, 1);

	return (rv);
}
696
/*ARGSUSED*/
/*
 * Probe entry point.  Pseudo device: always reports success.
 */
static int
dr_probe(dev_info_t *dip)
{
	return (DDI_PROBE_SUCCESS);
}
703
704 static int
dr_attach(dev_info_t * dip,ddi_attach_cmd_t cmd)705 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
706 {
707 int rv, rv2;
708 int bd;
709 int instance;
710 sbd_error_t *err;
711 dr_softstate_t *softsp;
712
713 instance = ddi_get_instance(dip);
714
715 switch (cmd) {
716
717 case DDI_ATTACH:
718
719 rw_enter(&dr_grwlock, RW_WRITER);
720
721 rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
722 if (rv != DDI_SUCCESS) {
723 cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
724 instance);
725 return (DDI_FAILURE);
726 }
727
728 /* initialize softstate structure */
729 softsp = ddi_get_soft_state(dr_g.softsp, instance);
730 softsp->dip = dip;
731
732 mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
733
734 /* allocate board array (aka boardlist) */
735 softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
736
737 /* TODO: eliminate dr_boardlist */
738 dr_boardlist = softsp->boards;
739
740 /* initialize each array element */
741 rv = DDI_SUCCESS;
742 for (bd = 0; bd < MAX_BOARDS; bd++) {
743 dr_board_t *bp = &softsp->boards[bd];
744 char *p, *name;
745 int l, minor_num;
746
747 /*
748 * initialized board attachment point path
749 * (relative to pseudo) in a form immediately
750 * reusable as an cfgadm command argument.
751 * TODO: clean this up
752 */
753 p = bp->b_path;
754 l = sizeof (bp->b_path);
755 (void) snprintf(p, l, "dr@%d:", instance);
756 while (*p != '\0') {
757 l--;
758 p++;
759 }
760
761 name = p;
762 err = drmach_board_name(bd, p, l);
763 if (err) {
764 sbd_err_clear(&err);
765 rv = DDI_FAILURE;
766 break;
767 }
768
769 minor_num = DR_MAKE_MINOR(instance, bd);
770 rv = ddi_create_minor_node(dip, name, S_IFCHR,
771 minor_num, DDI_NT_SBD_ATTACHMENT_POINT, NULL);
772 if (rv != DDI_SUCCESS)
773 rv = DDI_FAILURE;
774 }
775
776 if (rv == DDI_SUCCESS) {
777 /*
778 * Announce the node's presence.
779 */
780 ddi_report_dev(dip);
781 } else {
782 ddi_remove_minor_node(dip, NULL);
783 }
784 /*
785 * Init registered unsafe devs.
786 */
787 dr_unsafe_devs.devnames = NULL;
788 rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
789 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
790 "unsupported-io-drivers", &dr_unsafe_devs.devnames,
791 &dr_unsafe_devs.ndevs);
792
793 if (rv2 != DDI_PROP_SUCCESS)
794 dr_unsafe_devs.ndevs = 0;
795
796 rw_exit(&dr_grwlock);
797 return (rv);
798
799 default:
800 return (DDI_FAILURE);
801 }
802
803 /*NOTREACHED*/
804 }
805
806 static int
dr_detach(dev_info_t * dip,ddi_detach_cmd_t cmd)807 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
808 {
809 int instance;
810 dr_softstate_t *softsp;
811
812 switch (cmd) {
813 case DDI_DETACH:
814 if (!dr_modunload_okay)
815 return (DDI_FAILURE);
816
817 rw_enter(&dr_grwlock, RW_WRITER);
818
819 instance = ddi_get_instance(dip);
820 softsp = ddi_get_soft_state(dr_g.softsp, instance);
821
822 /* TODO: eliminate dr_boardlist */
823 ASSERT(softsp->boards == dr_boardlist);
824
825 /* remove all minor nodes */
826 ddi_remove_minor_node(dip, NULL);
827
828 if (softsp->dr_initialized) {
829 int bd;
830
831 for (bd = 0; bd < MAX_BOARDS; bd++)
832 dr_board_destroy(&softsp->boards[bd]);
833 }
834
835 FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
836 mutex_destroy(&softsp->i_lock);
837 ddi_soft_state_free(dr_g.softsp, instance);
838
839 rw_exit(&dr_grwlock);
840 return (DDI_SUCCESS);
841
842 default:
843 return (DDI_FAILURE);
844 }
845 /*NOTREACHED*/
846 }
847
848 static int
dr_getinfo(dev_info_t * dip,ddi_info_cmd_t cmd,void * arg,void ** result)849 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
850 {
851 _NOTE(ARGUNUSED(dip))
852
853 dev_t dev = (dev_t)arg;
854 int instance, error;
855 dr_softstate_t *softsp;
856
857 *result = NULL;
858 error = DDI_SUCCESS;
859 instance = DR_MINOR2INST(getminor(dev));
860
861 switch (cmd) {
862 case DDI_INFO_DEVT2DEVINFO:
863 softsp = ddi_get_soft_state(dr_g.softsp, instance);
864 if (softsp == NULL)
865 return (DDI_FAILURE);
866 *result = (void *)softsp->dip;
867 break;
868
869 case DDI_INFO_DEVT2INSTANCE:
870 *result = (void *)(uintptr_t)instance;
871 break;
872
873 default:
874 error = DDI_FAILURE;
875 break;
876 }
877
878 return (error);
879 }
880
881 /*
882 * DR operations.
883 */
884
885 static int
dr_copyin_iocmd(dr_handle_t * hp)886 dr_copyin_iocmd(dr_handle_t *hp)
887 {
888 static fn_t f = "dr_copyin_iocmd";
889 sbd_cmd_t *scp = &hp->h_sbdcmd;
890
891 if (hp->h_iap == NULL)
892 return (EINVAL);
893
894 bzero((caddr_t)scp, sizeof (sbd_cmd_t));
895
896 #ifdef _MULTI_DATAMODEL
897 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
898 sbd_cmd32_t scmd32;
899
900 bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
901
902 if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
903 sizeof (sbd_cmd32_t), hp->h_mode)) {
904 cmn_err(CE_WARN,
905 "%s: (32bit) failed to copyin "
906 "sbdcmd-struct", f);
907 return (EFAULT);
908 }
909 scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
910 scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
911 bcopy(&scmd32.cmd_cm.c_id.c_name[0],
912 &scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
913 scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
914 scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
915 scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
916
917 switch (hp->h_cmd) {
918 case SBD_CMD_STATUS:
919 scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
920 scp->cmd_stat.s_statp =
921 (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
922 break;
923 default:
924 break;
925
926 }
927 } else
928 #endif /* _MULTI_DATAMODEL */
929 if (ddi_copyin((void *)hp->h_iap, (void *)scp,
930 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
931 cmn_err(CE_WARN,
932 "%s: failed to copyin sbdcmd-struct", f);
933 return (EFAULT);
934 }
935
936 if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
937 hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
938 ++hp->h_opts.size;
939 if (ddi_copyin((void *)scp->cmd_cm.c_opts,
940 (void *)hp->h_opts.copts,
941 scp->cmd_cm.c_len, hp->h_mode) != 0) {
942 cmn_err(CE_WARN, "%s: failed to copyin options", f);
943 return (EFAULT);
944 }
945 }
946 return (0);
947 }
948
949 static int
dr_copyout_iocmd(dr_handle_t * hp)950 dr_copyout_iocmd(dr_handle_t *hp)
951 {
952 static fn_t f = "dr_copyout_iocmd";
953 sbd_cmd_t *scp = &hp->h_sbdcmd;
954
955 if (hp->h_iap == NULL)
956 return (EINVAL);
957
958 #ifdef _MULTI_DATAMODEL
959 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
960 sbd_cmd32_t scmd32;
961
962 scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
963 scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
964 bcopy(&scp->cmd_cm.c_id.c_name[0],
965 &scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
966
967 scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
968 scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
969 scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
970
971 switch (hp->h_cmd) {
972 case SBD_CMD_GETNCM:
973 scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
974 break;
975 default:
976 break;
977 }
978
979 if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
980 sizeof (sbd_cmd32_t), hp->h_mode)) {
981 cmn_err(CE_WARN,
982 "%s: (32bit) failed to copyout "
983 "sbdcmd-struct", f);
984 return (EFAULT);
985 }
986 } else
987 #endif /* _MULTI_DATAMODEL */
988 if (ddi_copyout((void *)scp, (void *)hp->h_iap,
989 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
990 cmn_err(CE_WARN,
991 "%s: failed to copyout sbdcmd-struct", f);
992 return (EFAULT);
993 }
994
995 return (0);
996 }
997
998 static int
dr_copyout_errs(dr_handle_t * hp)999 dr_copyout_errs(dr_handle_t *hp)
1000 {
1001 static fn_t f = "dr_copyout_errs";
1002
1003 if (hp->h_err == NULL)
1004 return (0);
1005
1006 if (hp->h_err->e_code) {
1007 PR_ALL("%s: error %d %s",
1008 f, hp->h_err->e_code, hp->h_err->e_rsc);
1009 }
1010
1011 #ifdef _MULTI_DATAMODEL
1012 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1013 sbd_error32_t *serr32p;
1014
1015 serr32p = GETSTRUCT(sbd_error32_t, 1);
1016
1017 serr32p->e_code = hp->h_err->e_code;
1018 bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1019 MAXPATHLEN);
1020 if (ddi_copyout((void *)serr32p,
1021 (void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1022 sizeof (sbd_error32_t), hp->h_mode)) {
1023 cmn_err(CE_WARN,
1024 "%s: (32bit) failed to copyout", f);
1025 return (EFAULT);
1026 }
1027 FREESTRUCT(serr32p, sbd_error32_t, 1);
1028 } else
1029 #endif /* _MULTI_DATAMODEL */
1030 if (ddi_copyout((void *)hp->h_err,
1031 (void *)&hp->h_iap->i_err,
1032 sizeof (sbd_error_t), hp->h_mode)) {
1033 cmn_err(CE_WARN,
1034 "%s: failed to copyout", f);
1035 return (EFAULT);
1036 }
1037
1038 sbd_err_clear(&hp->h_err);
1039
1040 return (0);
1041
1042 }
1043
1044 /*
1045 * pre-op entry point must sbd_err_set_c(), if needed.
1046 * Return value of non-zero indicates failure.
1047 */
1048 static int
dr_pre_op(dr_handle_t * hp)1049 dr_pre_op(dr_handle_t *hp)
1050 {
1051 int rv = 0, t;
1052 int cmd, serr = 0;
1053 dr_devset_t devset;
1054 dr_board_t *bp = hp->h_bd;
1055 dr_handle_t *shp = hp;
1056 static fn_t f = "dr_pre_op";
1057
1058 cmd = hp->h_cmd;
1059 devset = shp->h_devset;
1060
1061 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1062
1063 hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts);
1064 if (hp->h_err != NULL) {
1065 PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1066 SBD_CMD_STR(cmd), cmd);
1067 return (-1);
1068 }
1069
1070 /*
1071 * Check for valid state transitions.
1072 */
1073 if ((t = CMD2INDEX(cmd)) != -1) {
1074 struct dr_state_trans *transp;
1075 int state_err;
1076
1077 transp = &dr_state_transition[t];
1078 ASSERT(transp->x_cmd == cmd);
1079
1080 state_err = dr_check_transition(bp, &devset, transp, cmd);
1081
1082 if (state_err < 0) {
1083 /*
1084 * Invalidate device.
1085 */
1086 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1087 serr = -1;
1088 PR_ALL("%s: invalid devset (0x%x)\n",
1089 f, (uint_t)devset);
1090 } else if (state_err != 0) {
1091 /*
1092 * State transition is not a valid one.
1093 */
1094 dr_op_err(CE_IGNORE, hp,
1095 transp->x_op[state_err].x_err, NULL);
1096
1097 serr = transp->x_op[state_err].x_rv;
1098
1099 PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1100 f, state_str[state_err], state_err,
1101 SBD_CMD_STR(cmd), cmd);
1102 } else {
1103 shp->h_devset = devset;
1104 }
1105 }
1106
1107 if (serr) {
1108 rv = -1;
1109 }
1110
1111 return (rv);
1112 }
1113
1114 static int
dr_post_op(dr_handle_t * hp)1115 dr_post_op(dr_handle_t *hp)
1116 {
1117 int rv = 0;
1118 int cmd;
1119 dr_board_t *bp = hp->h_bd;
1120 static fn_t f = "dr_post_op";
1121
1122 cmd = hp->h_cmd;
1123
1124 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1125
1126 /* errors should have been caught by now */
1127 ASSERT(hp->h_err == NULL);
1128
1129 hp->h_err = drmach_post_op(cmd, bp->b_id, &hp->h_opts);
1130 if (hp->h_err != NULL) {
1131 PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1132 SBD_CMD_STR(cmd), cmd);
1133 return (-1);
1134 }
1135
1136 switch (cmd) {
1137 case SBD_CMD_CONFIGURE:
1138 case SBD_CMD_UNCONFIGURE:
1139 case SBD_CMD_CONNECT:
1140 case SBD_CMD_DISCONNECT:
1141 case SBD_CMD_GETNCM:
1142 case SBD_CMD_STATUS:
1143 break;
1144
1145 default:
1146 break;
1147 }
1148
1149 return (rv);
1150 }
1151
/*
 * Dispatch the handle's command to its implementation.  Most board
 * operations record errors on hp->h_err rather than returning a
 * value; a non-NULL h_err on exit forces a -1 return.
 */
static int
dr_exec_op(dr_handle_t *hp)
{
	int rv = 0;
	static fn_t f = "dr_exec_op";

	/* errors should have been caught by now */
	ASSERT(hp->h_err == NULL);

	switch (hp->h_cmd) {
	case SBD_CMD_ASSIGN:
		dr_assign_board(hp);
		break;

	case SBD_CMD_UNASSIGN:
		dr_unassign_board(hp);
		break;

	case SBD_CMD_POWEROFF:
		dr_poweroff_board(hp);
		break;

	case SBD_CMD_POWERON:
		dr_poweron_board(hp);
		break;

	case SBD_CMD_TEST:
		dr_test_board(hp);
		break;

	case SBD_CMD_CONNECT:
		dr_connect(hp);
		break;

	case SBD_CMD_CONFIGURE:
		dr_dev_configure(hp);
		break;

	case SBD_CMD_UNCONFIGURE:
		/* release first; cancel the release if it failed */
		dr_dev_release(hp);
		if (hp->h_err == NULL)
			rv = dr_dev_unconfigure(hp);
		else
			dr_dev_cancel(hp);
		break;

	case SBD_CMD_DISCONNECT:
		rv = dr_disconnect(hp);
		break;

	case SBD_CMD_STATUS:
		rv = dr_dev_status(hp);
		break;

	case SBD_CMD_GETNCM:
		/* result is returned to the user via the iocmd struct */
		hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
		rv = dr_copyout_iocmd(hp);
		break;

	case SBD_CMD_PASSTHRU:
		rv = dr_pt_ioctl(hp);
		break;

	default:
		cmn_err(CE_WARN,
		    "%s: unknown command (%d)",
		    f, hp->h_cmd);
		break;
	}

	if (hp->h_err != NULL) {
		rv = -1;
	}

	return (rv);
}
1228
1229 static void
dr_assign_board(dr_handle_t * hp)1230 dr_assign_board(dr_handle_t *hp)
1231 {
1232 dr_board_t *bp = hp->h_bd;
1233
1234 hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1235 if (hp->h_err == NULL) {
1236 bp->b_assigned = 1;
1237 }
1238 }
1239
1240 static void
dr_unassign_board(dr_handle_t * hp)1241 dr_unassign_board(dr_handle_t *hp)
1242 {
1243 dr_board_t *bp = hp->h_bd;
1244
1245 /*
1246 * Block out status during unassign.
1247 * Not doing cv_wait_sig here as starfire SSP software
1248 * ignores unassign failure and removes board from
1249 * domain mask causing system panic.
1250 * TODO: Change cv_wait to cv_wait_sig when SSP software
1251 * handles unassign failure.
1252 */
1253 dr_lock_status(bp);
1254
1255 hp->h_err = drmach_board_unassign(bp->b_id);
1256 if (hp->h_err == NULL) {
1257 /*
1258 * clear drmachid_t handle; not valid after board unassign
1259 */
1260 bp->b_id = 0;
1261 bp->b_assigned = 0;
1262 }
1263
1264 dr_unlock_status(bp);
1265 }
1266
1267 static void
dr_poweron_board(dr_handle_t * hp)1268 dr_poweron_board(dr_handle_t *hp)
1269 {
1270 dr_board_t *bp = hp->h_bd;
1271
1272 hp->h_err = drmach_board_poweron(bp->b_id);
1273 }
1274
1275 static void
dr_poweroff_board(dr_handle_t * hp)1276 dr_poweroff_board(dr_handle_t *hp)
1277 {
1278 dr_board_t *bp = hp->h_bd;
1279
1280 hp->h_err = drmach_board_poweroff(bp->b_id);
1281 }
1282
1283 static void
dr_test_board(dr_handle_t * hp)1284 dr_test_board(dr_handle_t *hp)
1285 {
1286 dr_board_t *bp = hp->h_bd;
1287 hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1288 dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1289 }
1290
1291 /*
1292 * Create and populate the component nodes for a board. Assumes that the
1293 * devlists for the board have been initialized.
1294 */
1295 static void
dr_make_comp_nodes(dr_board_t * bp)1296 dr_make_comp_nodes(dr_board_t *bp) {
1297
1298 int i;
1299
1300 /*
1301 * Make nodes for the individual components on the board.
1302 * First we need to initialize memory unit data structures of board
1303 * structure.
1304 */
1305 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1306 dr_mem_unit_t *mp;
1307
1308 mp = dr_get_mem_unit(bp, i);
1309 dr_init_mem_unit(mp);
1310 }
1311
1312 /*
1313 * Initialize cpu unit data structures.
1314 */
1315 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1316 dr_cpu_unit_t *cp;
1317
1318 cp = dr_get_cpu_unit(bp, i);
1319 dr_init_cpu_unit(cp);
1320 }
1321
1322 /*
1323 * Initialize io unit data structures.
1324 */
1325 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1326 dr_io_unit_t *ip;
1327
1328 ip = dr_get_io_unit(bp, i);
1329 dr_init_io_unit(ip);
1330 }
1331
1332 dr_board_transition(bp, DR_STATE_CONNECTED);
1333
1334 bp->b_rstate = SBD_STAT_CONNECTED;
1335 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1336 bp->b_cond = SBD_COND_OK;
1337 (void) drv_getparm(TIME, (void *)&bp->b_time);
1338
1339 }
1340
1341 /*
1342 * Only do work if called to operate on an entire board
1343 * which doesn't already have components present.
1344 */
1345 static void
dr_connect(dr_handle_t * hp)1346 dr_connect(dr_handle_t *hp)
1347 {
1348 dr_board_t *bp = hp->h_bd;
1349 static fn_t f = "dr_connect";
1350
1351 PR_ALL("%s...\n", f);
1352
1353 if (DR_DEVS_PRESENT(bp)) {
1354 /*
1355 * Board already has devices present.
1356 */
1357 PR_ALL("%s: devices already present (0x%lx)\n",
1358 f, DR_DEVS_PRESENT(bp));
1359 return;
1360 }
1361
1362 hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1363 if (hp->h_err)
1364 return;
1365
1366 hp->h_err = dr_init_devlists(bp);
1367 if (hp->h_err)
1368 return;
1369 else if (bp->b_ndev == 0) {
1370 dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1371 return;
1372 } else {
1373 dr_make_comp_nodes(bp);
1374 return;
1375 }
1376 /*NOTREACHED*/
1377 }
1378
/*
 * Disconnect the present-but-unattached devices selected in the
 * handle's devset, then (if the board ends up with no devices present)
 * transition the board through OCCUPIED toward EMPTY via deprobe.
 * Returns 0 on completion (errors are reported via hp->h_err) or
 * EINTR if interrupted while waiting for the board status lock.
 */
static int
dr_disconnect(dr_handle_t *hp)
{
	int i;
	dr_devset_t devset;
	dr_board_t *bp = hp->h_bd;
	static fn_t f = "dr_disconnect";

	PR_ALL("%s...\n", f);

	/*
	 * Only devices which are present, but
	 * unattached can be disconnected.
	 */
	devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
	    DR_DEVS_UNATTACHED(bp);

	/* devices are present but none of the selected ones are eligible */
	if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
		dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
		return (0);
	}

	/*
	 * Block out status during disconnect.
	 */
	mutex_enter(&bp->b_slock);
	while (bp->b_sflags & DR_BSLOCK) {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
			mutex_exit(&bp->b_slock);
			return (EINTR);
		}
	}
	bp->b_sflags |= DR_BSLOCK;
	mutex_exit(&bp->b_slock);

	hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);

	DR_DEVS_DISCONNECT(bp, devset);

	ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);

	/*
	 * Update per-device state transitions.  Units that disconnect
	 * cleanly go EMPTY; the first unit error encountered is moved
	 * onto the handle via DRERR_SET_C.
	 */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		dr_cpu_unit_t *cp;

		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
			continue;

		cp = dr_get_cpu_unit(bp, i);
		if (dr_disconnect_cpu(cp) == 0)
			dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
		else if (cp->sbc_cm.sbdev_error != NULL)
			DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);

		/* NOTE(review): assumes DRERR_SET_C clears the unit error */
		ASSERT(cp->sbc_cm.sbdev_error == NULL);
	}

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		dr_mem_unit_t *mp;

		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
			continue;

		mp = dr_get_mem_unit(bp, i);
		if (dr_disconnect_mem(mp) == 0)
			dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
		else if (mp->sbm_cm.sbdev_error != NULL)
			DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);

		ASSERT(mp->sbm_cm.sbdev_error == NULL);
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		dr_io_unit_t *ip;

		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
			continue;

		ip = dr_get_io_unit(bp, i);
		if (dr_disconnect_io(ip) == 0)
			dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
		else if (ip->sbi_cm.sbdev_error != NULL)
			DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);

		ASSERT(ip->sbi_cm.sbdev_error == NULL);
	}
	if (hp->h_err) {
		/*
		 * For certain errors, drmach_board_disconnect will mark
		 * the board as unusable; in these cases the devtree must
		 * be purged so that status calls will succeed.
		 * XXX
		 * This implementation checks for discrete error codes -
		 * someday, the i/f to drmach_board_disconnect should be
		 * changed to avoid the e_code testing.
		 */
		if ((hp->h_err->e_code == ESTC_MBXRPLY) ||
		    (hp->h_err->e_code == ESTC_MBXRQST) ||
		    (hp->h_err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) ||
		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
		    (hp->h_err->e_code == ESTC_DEPROBE) ||
		    (hp->h_err->e_code == EOPL_DEPROBE)) {
			bp->b_ostate = SBD_STAT_UNCONFIGURED;
			bp->b_busy = 0;
			(void) drv_getparm(TIME, (void *)&bp->b_time);

			/* deprobe failure: skip straight to unlock/return */
			if (drmach_board_deprobe(bp->b_id))
				goto disconnect_done;
			else
				bp->b_ndev = 0;
		}

		/*
		 * If the disconnect failed in a recoverable way,
		 * more work is required.
		 * XXX
		 * This implementation checks for discrete error codes -
		 * someday, the i/f to drmach_board_disconnect should be
		 * changed to avoid the e_code testing.
		 */
		if ((hp->h_err->e_code == ESTC_MBXRQST) ||
		    (hp->h_err->e_code == ESTC_SMS_ERR_RECOVERABLE) ||
		    (hp->h_err->e_code == ESTC_DEPROBE) ||
		    (hp->h_err->e_code == EOPL_DEPROBE)) {
			/*
			 * With this failure, the board has been deprobed
			 * by IKP, and reprobed. We've already gotten rid
			 * of the old devtree, now we need to reconstruct it
			 * based on the new IKP probe
			 */
			if (dr_init_devlists(bp) || (bp->b_ndev == 0))
				goto disconnect_done;

			dr_make_comp_nodes(bp);
		}
	}
	/*
	 * Once all the components on a board have been disconnect
	 * the board's state can transition to disconnected and
	 * we can allow the deprobe to take place.
	 */
	if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
		dr_board_transition(bp, DR_STATE_OCCUPIED);
		bp->b_rstate = SBD_STAT_DISCONNECTED;
		bp->b_ostate = SBD_STAT_UNCONFIGURED;
		bp->b_busy = 0;
		(void) drv_getparm(TIME, (void *)&bp->b_time);

		hp->h_err = drmach_board_deprobe(bp->b_id);

		/* successful deprobe empties the board entirely */
		if (hp->h_err == NULL) {
			bp->b_ndev = 0;
			dr_board_transition(bp, DR_STATE_EMPTY);
			bp->b_rstate = SBD_STAT_EMPTY;
			(void) drv_getparm(TIME, (void *)&bp->b_time);
		}
	}

disconnect_done:
	/* releases the status lock taken above */
	dr_unlock_status(bp);

	return (0);
}
1544
1545 /*
1546 * Check if a particular device is a valid target of the current
1547 * operation. Return 1 if it is a valid target, and 0 otherwise.
1548 */
1549 static int
dr_dev_is_target(dr_dev_unit_t * dp,int present_only,uint_t uset)1550 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1551 {
1552 dr_common_unit_t *cp;
1553 int is_present;
1554 int is_attached;
1555
1556 cp = &dp->du_common;
1557
1558 /* check if the user requested this device */
1559 if ((uset & (1 << cp->sbdev_unum)) == 0) {
1560 return (0);
1561 }
1562
1563 is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1564 is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1565
1566 /*
1567 * If the present_only flag is set, a valid target
1568 * must be present but not attached. Otherwise, it
1569 * must be both present and attached.
1570 */
1571 if (is_present && (present_only ^ is_attached)) {
1572 /* sanity check */
1573 ASSERT(cp->sbdev_id != (drmachid_t)0);
1574
1575 return (1);
1576 }
1577
1578 return (0);
1579 }
1580
1581 static void
dr_dev_make_list(dr_handle_t * hp,sbd_comp_type_t type,int present_only,dr_common_unit_t *** devlist,int * devnum)1582 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1583 dr_common_unit_t ***devlist, int *devnum)
1584 {
1585 dr_board_t *bp = hp->h_bd;
1586 int unum;
1587 int nunits;
1588 uint_t uset;
1589 int len;
1590 dr_common_unit_t **list, **wp;
1591
1592 switch (type) {
1593 case SBD_COMP_CPU:
1594 nunits = MAX_CPU_UNITS_PER_BOARD;
1595 break;
1596 case SBD_COMP_MEM:
1597 nunits = MAX_MEM_UNITS_PER_BOARD;
1598 break;
1599 case SBD_COMP_IO:
1600 nunits = MAX_IO_UNITS_PER_BOARD;
1601 break;
1602 default:
1603 /* catch this in debug kernels */
1604 ASSERT(0);
1605 break;
1606 }
1607
1608 /* allocate list storage. */
1609 len = sizeof (dr_common_unit_t *) * (nunits + 1);
1610 list = kmem_zalloc(len, KM_SLEEP);
1611
1612 /* record length of storage in first element */
1613 *list++ = (dr_common_unit_t *)(uintptr_t)len;
1614
1615 /* get bit array signifying which units are to be involved */
1616 uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1617
1618 /*
1619 * Adjust the loop count for CPU devices since all cores
1620 * in a CMP will be examined in a single iteration.
1621 */
1622 if (type == SBD_COMP_CPU) {
1623 nunits = MAX_CMP_UNITS_PER_BOARD;
1624 }
1625
1626 /* populate list */
1627 for (wp = list, unum = 0; unum < nunits; unum++) {
1628
1629 dr_dev_unit_t *dp;
1630 int core;
1631 int cunum;
1632
1633 dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1634 if (dr_dev_is_target(dp, present_only, uset)) {
1635 *wp++ = &dp->du_common;
1636 }
1637
1638 /* further processing is only required for CPUs */
1639 if (type != SBD_COMP_CPU) {
1640 continue;
1641 }
1642
1643 /*
1644 * Add any additional cores from the current CPU
1645 * device. This is to ensure that all the cores
1646 * are grouped together in the device list, and
1647 * consequently sequenced together during the actual
1648 * operation.
1649 */
1650 for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1651
1652 cunum = DR_CMP_CORE_UNUM(unum, core);
1653 dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1654
1655 if (dr_dev_is_target(dp, present_only, uset)) {
1656 *wp++ = &dp->du_common;
1657 }
1658 }
1659 }
1660
1661 /* calculate number of units in list, return result and list pointer */
1662 *devnum = wp - list;
1663 *devlist = list;
1664 }
1665
/*
 * Release a device list built by dr_dev_make_list(): propagate the
 * first unit error to the handle, clear all remaining unit errors, and
 * free the list storage.  The allocation length was stored by
 * dr_dev_make_list() in a hidden element at list[-1].
 */
static void
dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
{
	int len;
	int n = 0;
	dr_common_unit_t *cp, **rp = list;

	/*
	 * move first encountered unit error to handle if handle
	 * does not yet have a recorded error.
	 */
	if (hp->h_err == NULL) {
		while (n++ < devnum) {
			cp = *rp++;
			if (cp->sbdev_error != NULL) {
				/* transfer ownership of the error */
				hp->h_err = cp->sbdev_error;
				cp->sbdev_error = NULL;
				break;
			}
		}
	}

	/*
	 * free remaining unit errors.  Note that n and rp carry over
	 * from the loop above so each unit is visited exactly once.
	 */
	while (n++ < devnum) {
		cp = *rp++;
		if (cp->sbdev_error != NULL) {
			sbd_err_clear(&cp->sbdev_error);
			cp->sbdev_error = NULL;
		}
	}

	/* free list, including the hidden length header at list[-1] */
	list -= 1;
	len = (int)(uintptr_t)list[0];
	kmem_free(list, len);
}
1702
1703 static int
dr_dev_walk(dr_handle_t * hp,sbd_comp_type_t type,int present_only,int (* pre_op)(dr_handle_t *,dr_common_unit_t **,int),void (* op)(dr_handle_t *,dr_common_unit_t *),int (* post_op)(dr_handle_t *,dr_common_unit_t **,int),void (* board_op)(dr_handle_t *,dr_common_unit_t **,int))1704 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1705 int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1706 void (*op)(dr_handle_t *, dr_common_unit_t *),
1707 int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1708 void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1709 {
1710 int devnum, rv;
1711 dr_common_unit_t **devlist;
1712
1713 dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1714
1715 rv = 0;
1716 if (devnum > 0) {
1717 rv = (*pre_op)(hp, devlist, devnum);
1718 if (rv == 0) {
1719 int n;
1720
1721 for (n = 0; n < devnum; n++)
1722 (*op)(hp, devlist[n]);
1723
1724 rv = (*post_op)(hp, devlist, devnum);
1725
1726 (*board_op)(hp, devlist, devnum);
1727 }
1728 }
1729
1730 dr_dev_clean_up(hp, devlist, devnum);
1731 return (rv);
1732 }
1733
1734 /*ARGSUSED*/
1735 static int
dr_dev_noop(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)1736 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1737 {
1738 return (0);
1739 }
1740
1741 static void
dr_attach_update_state(dr_handle_t * hp,dr_common_unit_t ** devlist,int devnum)1742 dr_attach_update_state(dr_handle_t *hp,
1743 dr_common_unit_t **devlist, int devnum)
1744 {
1745 dr_board_t *bp = hp->h_bd;
1746 int i;
1747 dr_devset_t devs_unattached, devs_present;
1748 static fn_t f = "dr_post_attach_devlist";
1749
1750 for (i = 0; i < devnum; i++) {
1751 dr_common_unit_t *cp = devlist[i];
1752
1753 if (dr_check_unit_attached(cp) == -1) {
1754 PR_ALL("%s: ERROR %s not attached\n",
1755 f, cp->sbdev_path);
1756 continue;
1757 }
1758
1759 DR_DEV_SET_ATTACHED(cp);
1760
1761 dr_device_transition(cp, DR_STATE_CONFIGURED);
1762 cp->sbdev_cond = SBD_COND_OK;
1763 }
1764
1765 devs_present = DR_DEVS_PRESENT(bp);
1766 devs_unattached = DR_DEVS_UNATTACHED(bp);
1767
1768 switch (bp->b_state) {
1769 case DR_STATE_CONNECTED:
1770 case DR_STATE_UNCONFIGURED:
1771 ASSERT(devs_present);
1772
1773 if (devs_unattached == 0) {
1774 /*
1775 * All devices finally attached.
1776 */
1777 dr_board_transition(bp, DR_STATE_CONFIGURED);
1778 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1779 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1780 hp->h_bd->b_cond = SBD_COND_OK;
1781 hp->h_bd->b_busy = 0;
1782 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1783 } else if (devs_present != devs_unattached) {
1784 /*
1785 * Only some devices are fully attached.
1786 */
1787 dr_board_transition(bp, DR_STATE_PARTIAL);
1788 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1789 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1790 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1791 }
1792 break;
1793
1794 case DR_STATE_PARTIAL:
1795 ASSERT(devs_present);
1796 /*
1797 * All devices finally attached.
1798 */
1799 if (devs_unattached == 0) {
1800 dr_board_transition(bp, DR_STATE_CONFIGURED);
1801 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1802 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1803 hp->h_bd->b_cond = SBD_COND_OK;
1804 hp->h_bd->b_busy = 0;
1805 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1806 }
1807 break;
1808
1809 default:
1810 break;
1811 }
1812 }
1813
1814 static void
dr_dev_configure(dr_handle_t * hp)1815 dr_dev_configure(dr_handle_t *hp)
1816 {
1817 int rv;
1818
1819 rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1820 dr_pre_attach_cpu,
1821 dr_attach_cpu,
1822 dr_post_attach_cpu,
1823 dr_attach_update_state);
1824
1825 if (rv >= 0) {
1826 rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1827 dr_pre_attach_mem,
1828 dr_attach_mem,
1829 dr_post_attach_mem,
1830 dr_attach_update_state);
1831 }
1832
1833 if (rv >= 0) {
1834 (void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1835 dr_pre_attach_io,
1836 dr_attach_io,
1837 dr_post_attach_io,
1838 dr_attach_update_state);
1839 }
1840 }
1841
/*
 * board_op callback run after a release pass: once every attached
 * component on the board has been released, move the board to the
 * RELEASE state and mark it busy.
 */
static void
dr_release_update_state(dr_handle_t *hp,
	dr_common_unit_t **devlist, int devnum)
{
	_NOTE(ARGUNUSED(devlist))
	_NOTE(ARGUNUSED(devnum))

	dr_board_t *bp = hp->h_bd;

	/*
	 * If the board is not yet in the RELEASE state and all
	 * attached components have now been released, transfer
	 * it to the RELEASE state.
	 */
	if ((bp->b_state != DR_STATE_RELEASE) &&
	    (DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
		dr_board_transition(bp, DR_STATE_RELEASE);
		hp->h_bd->b_busy = 1;
	}
}
1861
1862 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1863 int
dr_release_dev_done(dr_common_unit_t * cp)1864 dr_release_dev_done(dr_common_unit_t *cp)
1865 {
1866 if (cp->sbdev_state == DR_STATE_RELEASE) {
1867 ASSERT(DR_DEV_IS_RELEASED(cp));
1868
1869 DR_DEV_SET_UNREFERENCED(cp);
1870
1871 dr_device_transition(cp, DR_STATE_UNREFERENCED);
1872
1873 return (0);
1874 } else {
1875 return (-1);
1876 }
1877 }
1878
/*
 * Per-unit release completion: move the device toward UNREFERENCED
 * (memory via its own completion path), then update the board state
 * if every released device is now unreferenced.
 */
static void
dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
{
	_NOTE(ARGUNUSED(hp))

	dr_board_t *bp;
	static fn_t f = "dr_release_done";

	PR_ALL("%s...\n", f);

	/* get board pointer & sanity check */
	bp = cp->sbdev_bp;
	ASSERT(bp == hp->h_bd);

	/*
	 * Transfer the device which just completed its release
	 * to the UNREFERENCED state.
	 */
	switch (cp->sbdev_type) {
	case SBD_COMP_MEM:
		/* memory has its own multi-step release completion */
		dr_release_mem_done(cp);
		break;

	default:
		DR_DEV_SET_RELEASED(cp);

		dr_device_transition(cp, DR_STATE_RELEASE);

		(void) dr_release_dev_done(cp);
		break;
	}

	/*
	 * If the board is in the RELEASE state and all of its
	 * released devices are now unreferenced, transfer the
	 * board to the UNREFERENCED state.
	 */
	if ((bp->b_state == DR_STATE_RELEASE) &&
	    (DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
		dr_board_transition(bp, DR_STATE_UNREFERENCED);
		bp->b_busy = 1;
		(void) drv_getparm(TIME, (void *)&bp->b_time);
	}
}
1923
/*
 * op callback for the memory release pass: release the memory unit,
 * then run the common release-completion bookkeeping.
 */
static void
dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
{
	dr_release_mem(dv);
	dr_release_done(hp, dv);
}
1930
1931 static void
dr_dev_release(dr_handle_t * hp)1932 dr_dev_release(dr_handle_t *hp)
1933 {
1934 int rv;
1935
1936 hp->h_bd->b_busy = 1;
1937
1938 rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1939 dr_pre_release_cpu,
1940 dr_release_done,
1941 dr_dev_noop,
1942 dr_release_update_state);
1943
1944 if (rv >= 0) {
1945 rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1946 dr_pre_release_mem,
1947 dr_dev_release_mem,
1948 dr_dev_noop,
1949 dr_release_update_state);
1950 }
1951
1952 if (rv >= 0) {
1953 rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1954 dr_pre_release_io,
1955 dr_release_done,
1956 dr_dev_noop,
1957 dr_release_update_state);
1958
1959 }
1960
1961 if (rv < 0)
1962 hp->h_bd->b_busy = 0;
1963 /* else, b_busy will be cleared in dr_detach_update_state() */
1964 }
1965
/*
 * board_op callback run after a detach pass: clear per-device flags
 * and state for each fully detached unit, then recompute the
 * board-level state (UNCONFIGURED when everything is detached,
 * PARTIAL when some devices remain attached).
 */
static void
dr_detach_update_state(dr_handle_t *hp,
	dr_common_unit_t **devlist, int devnum)
{
	dr_board_t *bp = hp->h_bd;
	int i;
	dr_state_t bstate;
	static fn_t f = "dr_detach_update_state";

	for (i = 0; i < devnum; i++) {
		dr_common_unit_t *cp = devlist[i];

		if (dr_check_unit_attached(cp) >= 0) {
			/*
			 * Device is still attached probably due
			 * to an error. Need to keep track of it.
			 */
			PR_ALL("%s: ERROR %s not detached\n",
			    f, cp->sbdev_path);

			continue;
		}

		/* fully detached: reset all per-device DR flags */
		DR_DEV_CLR_ATTACHED(cp);
		DR_DEV_CLR_RELEASED(cp);
		DR_DEV_CLR_UNREFERENCED(cp);
		dr_device_transition(cp, DR_STATE_UNCONFIGURED);
	}

	bstate = bp->b_state;
	if (bstate != DR_STATE_UNCONFIGURED) {
		if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
			/*
			 * All devices are finally detached.
			 */
			dr_board_transition(bp, DR_STATE_UNCONFIGURED);
			hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
		} else if ((bp->b_state != DR_STATE_PARTIAL) &&
		    (DR_DEVS_ATTACHED(bp) !=
		    DR_DEVS_PRESENT(bp))) {
			/*
			 * Some devices remain attached.
			 */
			dr_board_transition(bp, DR_STATE_PARTIAL);
			(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
		}

		/* clear busy once every requested device is unattached */
		if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
			hp->h_bd->b_busy = 0;
	}
}
2018
/*
 * Detach (unconfigure) the selected devices: IO first while holding
 * the board status lock, then CPUs and memory after the lock is
 * dropped.  Returns EINTR if interrupted while waiting for the status
 * lock, 0 otherwise; per-device errors are recorded on the handle.
 */
static int
dr_dev_unconfigure(dr_handle_t *hp)
{
	dr_board_t *bp = hp->h_bd;

	/*
	 * Block out status during IO unconfig.
	 */
	mutex_enter(&bp->b_slock);
	while (bp->b_sflags & DR_BSLOCK) {
		/* cv_wait_sig() returns 0 when interrupted by a signal */
		if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
			mutex_exit(&bp->b_slock);
			return (EINTR);
		}
	}
	bp->b_sflags |= DR_BSLOCK;
	mutex_exit(&bp->b_slock);

	(void) dr_dev_walk(hp, SBD_COMP_IO, 0,
	    dr_pre_detach_io,
	    dr_detach_io,
	    dr_post_detach_io,
	    dr_detach_update_state);

	/* IO detach complete; release the status lock taken above */
	dr_unlock_status(bp);

	(void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
	    dr_pre_detach_cpu,
	    dr_detach_cpu,
	    dr_post_detach_cpu,
	    dr_detach_update_state);

	(void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
	    dr_pre_detach_mem,
	    dr_detach_mem,
	    dr_post_detach_mem,
	    dr_detach_update_state);

	return (0);
}
2059
/*
 * Cancel a partially completed release: return released devices to
 * their prior state (CONFIGURED on success, FATAL when a CPU or memory
 * cancel fails) and recompute the board state.
 */
static void
dr_dev_cancel(dr_handle_t *hp)
{
	int i;
	dr_devset_t devset;
	dr_board_t *bp = hp->h_bd;
	static fn_t f = "dr_dev_cancel";

	PR_ALL("%s...\n", f);

	/*
	 * Only devices which have been "released" are
	 * subject to cancellation.
	 */
	devset = hp->h_devset & DR_DEVS_RELEASED(bp);

	/*
	 * Nothing to do for CPUs or IO other than change back
	 * their state.
	 */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		dr_cpu_unit_t *cp;
		dr_state_t nstate;

		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
			continue;

		cp = dr_get_cpu_unit(bp, i);
		if (dr_cancel_cpu(cp) == 0)
			nstate = DR_STATE_CONFIGURED;
		else
			nstate = DR_STATE_FATAL;

		dr_device_transition(&cp->sbc_cm, nstate);
	}

	/* IO cancel cannot fail; units simply go back to CONFIGURED */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		dr_io_unit_t *ip;

		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
			continue;
		ip = dr_get_io_unit(bp, i);
		dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
	}

	/* memory cancel can fail; failed units are marked FATAL */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		dr_mem_unit_t *mp;
		dr_state_t nstate;

		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
			continue;

		mp = dr_get_mem_unit(bp, i);
		if (dr_cancel_mem(mp) == 0)
			nstate = DR_STATE_CONFIGURED;
		else
			nstate = DR_STATE_FATAL;

		dr_device_transition(&mp->sbm_cm, nstate);
	}

	/* NOTE(review): devset is truncated to uint_t for display only */
	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);

	DR_DEVS_CANCEL(bp, devset);

	if (DR_DEVS_RELEASED(bp) == 0) {
		dr_state_t new_state;
		/*
		 * If the board no longer has any released devices
		 * then transfer it back to the CONFIG/PARTIAL state.
		 */
		if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
			new_state = DR_STATE_CONFIGURED;
		else
			new_state = DR_STATE_PARTIAL;
		if (bp->b_state != new_state) {
			dr_board_transition(bp, new_state);
		}
		hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
		hp->h_bd->b_busy = 0;
		(void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
	}
}
2142
2143 static int
dr_dev_status(dr_handle_t * hp)2144 dr_dev_status(dr_handle_t *hp)
2145 {
2146 int nstat, mode, ncm, sz, pbsz, pnstat;
2147 dr_handle_t *shp;
2148 dr_devset_t devset = 0;
2149 sbd_stat_t *dstatp = NULL;
2150 sbd_dev_stat_t *devstatp;
2151 dr_board_t *bp;
2152 drmach_status_t pstat;
2153 int rv = 0;
2154
2155 #ifdef _MULTI_DATAMODEL
2156 int sz32 = 0;
2157 #endif /* _MULTI_DATAMODEL */
2158
2159 static fn_t f = "dr_status";
2160
2161 PR_ALL("%s...\n", f);
2162
2163 mode = hp->h_mode;
2164 shp = hp;
2165 devset = shp->h_devset;
2166 bp = hp->h_bd;
2167
2168 /*
2169 * Block out disconnect, unassign, IO unconfigure and
2170 * devinfo branch creation during status.
2171 */
2172 mutex_enter(&bp->b_slock);
2173 while (bp->b_sflags & DR_BSLOCK) {
2174 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2175 mutex_exit(&bp->b_slock);
2176 return (EINTR);
2177 }
2178 }
2179 bp->b_sflags |= DR_BSLOCK;
2180 mutex_exit(&bp->b_slock);
2181
2182 ncm = 1;
2183 if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2184 if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2185 /*
2186 * Calculate the maximum number of components possible
2187 * for a board. This number will be used to size the
2188 * status scratch buffer used by board and component
2189 * status functions.
2190 * This buffer may differ in size from what is provided
2191 * by the plugin, since the known component set on the
2192 * board may change between the plugin's GETNCM call, and
2193 * the status call. Sizing will be adjusted to the plugin's
2194 * receptacle buffer at copyout time.
2195 */
2196 ncm = MAX_CPU_UNITS_PER_BOARD +
2197 MAX_MEM_UNITS_PER_BOARD +
2198 MAX_IO_UNITS_PER_BOARD;
2199
2200 } else {
2201 /*
2202 * In the case of c_type == SBD_COMP_NONE, and
2203 * SBD_FLAG_ALLCMP not specified, only the board
2204 * info is to be returned, no components.
2205 */
2206 ncm = 0;
2207 devset = 0;
2208 }
2209 }
2210
2211 sz = sizeof (sbd_stat_t);
2212 if (ncm > 1)
2213 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2214
2215
2216 pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2217 pnstat = (pbsz - sizeof (sbd_stat_t))/sizeof (sbd_dev_stat_t);
2218
2219 /*
2220 * s_nbytes describes the size of the preallocated user
2221 * buffer into which the application is execting to
2222 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2223 */
2224
2225 #ifdef _MULTI_DATAMODEL
2226
2227 /*
2228 * More buffer space is required for the 64bit to 32bit
2229 * conversion of data structures.
2230 */
2231 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2232 sz32 = sizeof (sbd_stat32_t);
2233 if (ncm > 1)
2234 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2235 pnstat = (pbsz - sizeof (sbd_stat32_t))/
2236 sizeof (sbd_dev_stat32_t);
2237 }
2238
2239 sz += sz32;
2240 #endif
2241 /*
2242 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2243 * increment the plugin's nstat count.
2244 */
2245 ++pnstat;
2246
2247 if (bp->b_id == 0) {
2248 bzero(&pstat, sizeof (pstat));
2249 } else {
2250 sbd_error_t *err;
2251
2252 err = drmach_status(bp->b_id, &pstat);
2253 if (err) {
2254 DRERR_SET_C(&hp->h_err, &err);
2255 rv = EIO;
2256 goto status_done;
2257 }
2258 }
2259
2260 dstatp = (sbd_stat_t *)GETSTRUCT(char, sz);
2261
2262 devstatp = &dstatp->s_stat[0];
2263
2264 dstatp->s_board = bp->b_num;
2265
2266 /*
2267 * Detect transitions between empty and disconnected.
2268 */
2269 if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2270 bp->b_rstate = SBD_STAT_DISCONNECTED;
2271 else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2272 bp->b_rstate = SBD_STAT_EMPTY;
2273
2274 dstatp->s_rstate = bp->b_rstate;
2275 dstatp->s_ostate = bp->b_ostate;
2276 dstatp->s_cond = bp->b_cond = pstat.cond;
2277 dstatp->s_busy = bp->b_busy | pstat.busy;
2278 dstatp->s_time = bp->b_time;
2279 dstatp->s_power = pstat.powered;
2280 dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2281 dstatp->s_nstat = nstat = 0;
2282 bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2283 bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2284
2285 devset &= DR_DEVS_PRESENT(bp);
2286 if (devset == 0) {
2287 /*
2288 * No device chosen.
2289 */
2290 PR_ALL("%s: no device present\n", f);
2291 }
2292
2293 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2294 if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2295 dstatp->s_nstat += nstat;
2296 devstatp += nstat;
2297 }
2298
2299 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2300 if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2301 dstatp->s_nstat += nstat;
2302 devstatp += nstat;
2303 }
2304
2305 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2306 if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2307 dstatp->s_nstat += nstat;
2308 devstatp += nstat;
2309 }
2310
2311 /*
2312 * Due to a possible change in number of components between
2313 * the time of plugin's GETNCM call and now, there may be
2314 * more or less components than the plugin's buffer can
2315 * hold. Adjust s_nstat accordingly.
2316 */
2317
2318 dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2319
2320
2321 #ifdef _MULTI_DATAMODEL
2322 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2323 int i, j;
2324 sbd_stat32_t *dstat32p;
2325
2326 dstat32p = (sbd_stat32_t *)devstatp;
2327
2328 /* Alignment Paranoia */
2329 if ((ulong_t)dstat32p & 0x1) {
2330 PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2331 f, sizeof (sbd_stat32_t), (void *)dstat32p);
2332 DR_OP_INTERNAL_ERROR(hp);
2333 rv = EINVAL;
2334 goto status_done;
2335 }
2336
2337 /* paranoia: detect buffer overrun */
2338 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2339 ((caddr_t)dstatp) + sz) {
2340 DR_OP_INTERNAL_ERROR(hp);
2341 rv = EINVAL;
2342 goto status_done;
2343 }
2344
2345 /* copy sbd_stat_t structure members */
2346 #define _SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2347 _SBD_STAT(int32_t, s_board);
2348 _SBD_STAT(int32_t, s_rstate);
2349 _SBD_STAT(int32_t, s_ostate);
2350 _SBD_STAT(int32_t, s_cond);
2351 _SBD_STAT(int32_t, s_busy);
2352 _SBD_STAT(time32_t, s_time);
2353 _SBD_STAT(uint32_t, s_power);
2354 _SBD_STAT(uint32_t, s_assigned);
2355 _SBD_STAT(int32_t, s_nstat);
2356 bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2357 SBD_TYPE_LEN);
2358 bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2359 SBD_MAX_INFO);
2360 #undef _SBD_STAT
2361
2362 for (i = 0; i < dstatp->s_nstat; i++) {
2363 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
2364 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
2365 #define _SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2366
2367 /* copy sbd_cm_stat_t structure members */
2368 _SBD_DEV_STAT(int32_t, ds_type);
2369 _SBD_DEV_STAT(int32_t, ds_unit);
2370 _SBD_DEV_STAT(int32_t, ds_ostate);
2371 _SBD_DEV_STAT(int32_t, ds_cond);
2372 _SBD_DEV_STAT(int32_t, ds_busy);
2373 _SBD_DEV_STAT(int32_t, ds_suspend);
2374 _SBD_DEV_STAT(time32_t, ds_time);
2375 bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2376 OBP_MAXPROPNAME);
2377
2378 switch (dsp->ds_type) {
2379 case SBD_COMP_CPU:
2380 /* copy sbd_cpu_stat_t structure members */
2381 _SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2382 _SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2383 _SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2384 _SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2385 break;
2386
2387 case SBD_COMP_MEM:
2388 /* copy sbd_mem_stat_t structure members */
2389 _SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2390 _SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2391 _SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2392 _SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2393 _SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2394 _SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2395 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2396 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2397 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2398 _SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2399 _SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2400 bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2401 &ds32p->d_mem.ms_peer_ap_id[0],
2402 sizeof (ds32p->d_mem.ms_peer_ap_id));
2403 break;
2404
2405 case SBD_COMP_IO:
2406 /* copy sbd_io_stat_t structure members */
2407 _SBD_DEV_STAT(int32_t, d_io.is_referenced);
2408 _SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2409
2410 for (j = 0; j < SBD_MAX_UNSAFE; j++)
2411 _SBD_DEV_STAT(int32_t,
2412 d_io.is_unsafe_list[j]);
2413
2414 bcopy(&dsp->d_io.is_pathname[0],
2415 &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2416 break;
2417
2418 case SBD_COMP_CMP:
2419 /* copy sbd_cmp_stat_t structure members */
2420 bcopy(&dsp->d_cmp.ps_cpuid[0],
2421 &ds32p->d_cmp.ps_cpuid[0],
2422 sizeof (ds32p->d_cmp.ps_cpuid));
2423 _SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2424 _SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2425 _SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2426 break;
2427
2428 default:
2429 cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2430 f, (int)dsp->ds_type);
2431 rv = EFAULT;
2432 goto status_done;
2433 }
2434 #undef _SBD_DEV_STAT
2435 }
2436
2437
2438 if (ddi_copyout((void *)dstat32p,
2439 hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2440 cmn_err(CE_WARN,
2441 "%s: failed to copyout status "
2442 "for board %d", f, bp->b_num);
2443 rv = EFAULT;
2444 goto status_done;
2445 }
2446 } else
2447 #endif /* _MULTI_DATAMODEL */
2448
2449 if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2450 pbsz, mode) != 0) {
2451 cmn_err(CE_WARN,
2452 "%s: failed to copyout status for board %d",
2453 f, bp->b_num);
2454 rv = EFAULT;
2455 goto status_done;
2456 }
2457
2458 status_done:
2459 if (dstatp != NULL)
2460 FREESTRUCT(dstatp, char, sz);
2461
2462 dr_unlock_status(bp);
2463
2464 return (rv);
2465 }
2466
2467 static int
dr_get_ncm(dr_handle_t * hp)2468 dr_get_ncm(dr_handle_t *hp)
2469 {
2470 int i;
2471 int ncm = 0;
2472 dr_devset_t devset;
2473
2474 devset = DR_DEVS_PRESENT(hp->h_bd);
2475 if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2476 devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2477 DEVSET_ANYUNIT);
2478
2479 /*
2480 * Handle CPUs first to deal with possible CMP
2481 * devices. If the CPU is a CMP, we need to only
2482 * increment ncm once even if there are multiple
2483 * cores for that CMP present in the devset.
2484 */
2485 for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2486 if (devset & DEVSET(SBD_COMP_CMP, i)) {
2487 ncm++;
2488 }
2489 }
2490
2491 /* eliminate the CPU information from the devset */
2492 devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2493
2494 for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2495 ncm += devset & 0x1;
2496 devset >>= 1;
2497 }
2498
2499 return (ncm);
2500 }
2501
/* used by dr_mem.c */
/* TODO: eliminate dr_boardlist */

/*
 * Return the dr_board_t for the given board number.
 * board_num must be a valid index (0 .. MAX_BOARDS-1); the entry's
 * b_num is expected to already match (checked under DEBUG only).
 */
dr_board_t *
dr_lookup_board(int board_num)
{
	dr_board_t	*bp;

	ASSERT(board_num >= 0 && board_num < MAX_BOARDS);

	bp = &dr_boardlist[board_num];
	ASSERT(bp->b_num == board_num);

	return (bp);
}
2516
/*
 * Look up the device unit structure for (nt, unit_num) on a board.
 * The DEBUG-only ASSERTs verify that the unit's common fields agree
 * with the requested coordinates, i.e. that the board's devlists have
 * been initialized (see dr_init_devlists()).
 */
static dr_dev_unit_t *
dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
{
	dr_dev_unit_t	*dp;

	dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
	ASSERT(dp->du_common.sbdev_bp == bp);
	ASSERT(dp->du_common.sbdev_unum == unit_num);
	ASSERT(dp->du_common.sbdev_type == nt);

	return (dp);
}
2529
/*
 * Return the CPU view of unit unit_num on board bp.
 * unit_num must be in 0 .. MAX_CPU_UNITS_PER_BOARD-1.
 */
dr_cpu_unit_t *
dr_get_cpu_unit(dr_board_t *bp, int unit_num)
{
	dr_dev_unit_t	*dp;

	ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);

	dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
	return (&dp->du_cpu);
}
2540
/*
 * Return the memory view of unit unit_num on board bp.
 * unit_num must be in 0 .. MAX_MEM_UNITS_PER_BOARD-1.
 */
dr_mem_unit_t *
dr_get_mem_unit(dr_board_t *bp, int unit_num)
{
	dr_dev_unit_t	*dp;

	ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);

	dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
	return (&dp->du_mem);
}
2551
/*
 * Return the I/O view of unit unit_num on board bp.
 * unit_num must be in 0 .. MAX_IO_UNITS_PER_BOARD-1.
 */
dr_io_unit_t *
dr_get_io_unit(dr_board_t *bp, int unit_num)
{
	dr_dev_unit_t	*dp;

	ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);

	dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
	return (&dp->du_io);
}
2562
2563 dr_common_unit_t *
dr_get_common_unit(dr_board_t * bp,sbd_comp_type_t nt,int unum)2564 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2565 {
2566 dr_dev_unit_t *dp;
2567
2568 dp = dr_get_dev_unit(bp, nt, unum);
2569 return (&dp->du_common);
2570 }
2571
2572 static dr_devset_t
dr_dev2devset(sbd_comp_id_t * cid)2573 dr_dev2devset(sbd_comp_id_t *cid)
2574 {
2575 static fn_t f = "dr_dev2devset";
2576
2577 dr_devset_t devset;
2578 int unit = cid->c_unit;
2579
2580 switch (cid->c_type) {
2581 case SBD_COMP_NONE:
2582 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2583 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2584 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
2585 PR_ALL("%s: COMP_NONE devset = 0x%lx\n", f, devset);
2586 break;
2587
2588 case SBD_COMP_CPU:
2589 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2590 cmn_err(CE_WARN,
2591 "%s: invalid cpu unit# = %d",
2592 f, unit);
2593 devset = 0;
2594 } else {
2595 /*
2596 * Generate a devset that includes all the
2597 * cores of a CMP device. If this is not a
2598 * CMP, the extra cores will be eliminated
2599 * later since they are not present. This is
2600 * also true for CMP devices that do not have
2601 * all cores active.
2602 */
2603 devset = DEVSET(SBD_COMP_CMP, unit);
2604 }
2605
2606 PR_ALL("%s: CPU devset = 0x%lx\n", f, devset);
2607 break;
2608
2609 case SBD_COMP_MEM:
2610 if (unit == SBD_NULL_UNIT) {
2611 unit = 0;
2612 cid->c_unit = 0;
2613 }
2614
2615 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2616 cmn_err(CE_WARN,
2617 "%s: invalid mem unit# = %d",
2618 f, unit);
2619 devset = 0;
2620 } else
2621 devset = DEVSET(cid->c_type, unit);
2622
2623 PR_ALL("%s: MEM devset = 0x%lx\n", f, devset);
2624 break;
2625
2626 case SBD_COMP_IO:
2627 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2628 cmn_err(CE_WARN,
2629 "%s: invalid io unit# = %d",
2630 f, unit);
2631 devset = 0;
2632 } else
2633 devset = DEVSET(cid->c_type, unit);
2634
2635 PR_ALL("%s: IO devset = 0x%lx\n", f, devset);
2636 break;
2637
2638 default:
2639 case SBD_COMP_UNKNOWN:
2640 devset = 0;
2641 break;
2642 }
2643
2644 return (devset);
2645 }
2646
2647 /*
2648 * Converts a dynamic attachment point name to a SBD_COMP_* type.
2649 * Returns SDB_COMP_UNKNOWN if name is not recognized.
2650 */
2651 static int
dr_dev_type_to_nt(char * type)2652 dr_dev_type_to_nt(char *type)
2653 {
2654 int i;
2655
2656 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2657 if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2658 break;
2659
2660 return (dr_devattr[i].s_nodetype);
2661 }
2662
2663 /*
2664 * Converts a SBD_COMP_* type to a dynamic attachment point name.
2665 * Return NULL if SBD_COMP_ type is not recognized.
2666 */
2667 char *
dr_nt_to_dev_type(int nt)2668 dr_nt_to_dev_type(int nt)
2669 {
2670 int i;
2671
2672 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2673 if (dr_devattr[i].s_nodetype == nt)
2674 break;
2675
2676 return (dr_devattr[i].s_devtype);
2677 }
2678
2679
2680 /*
2681 * State transition policy is that if there is some component for which
2682 * the state transition is valid, then let it through. The exception is
2683 * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2684 * for ALL components.
2685 * Returns the state that is in error, if any.
2686 */
2687 static int
dr_check_transition(dr_board_t * bp,dr_devset_t * devsetp,struct dr_state_trans * transp,int cmd)2688 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2689 struct dr_state_trans *transp, int cmd)
2690 {
2691 int s, ut;
2692 int state_err = 0;
2693 dr_devset_t devset;
2694 dr_common_unit_t *cp;
2695 static fn_t f = "dr_check_transition";
2696
2697 devset = *devsetp;
2698
2699 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2700 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2701 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2702 continue;
2703
2704 cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2705 s = (int)cp->sbdev_state;
2706 if (!DR_DEV_IS_PRESENT(cp)) {
2707 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2708 } else {
2709 if (transp->x_op[s].x_rv) {
2710 if (!state_err)
2711 state_err = s;
2712 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2713 }
2714 }
2715 }
2716 }
2717 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2718 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2719 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2720 continue;
2721
2722 cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2723 s = (int)cp->sbdev_state;
2724 if (!DR_DEV_IS_PRESENT(cp)) {
2725 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2726 } else {
2727 if (transp->x_op[s].x_rv) {
2728 if (!state_err)
2729 state_err = s;
2730 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2731 }
2732 }
2733 }
2734 }
2735 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2736 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2737 if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2738 continue;
2739
2740 cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2741 s = (int)cp->sbdev_state;
2742 if (!DR_DEV_IS_PRESENT(cp)) {
2743 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2744 } else {
2745 if (transp->x_op[s].x_rv) {
2746 if (!state_err)
2747 state_err = s;
2748 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2749 }
2750 }
2751 }
2752 }
2753
2754 PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2755 f, (uint_t)*devsetp, (uint_t)devset);
2756
2757 *devsetp = devset;
2758 /*
2759 * If there are some remaining components for which
2760 * this state transition is valid, then allow them
2761 * through, otherwise if none are left then return
2762 * the state error. The exception is SBD_CMD_DISCONNECT.
2763 * On disconnect, the state transition must be valid for ALL
2764 * components.
2765 */
2766 if (cmd == SBD_CMD_DISCONNECT)
2767 return (state_err);
2768 return (devset ? 0 : state_err);
2769 }
2770
/*
 * Move a component to a new DR state, keeping its occupant state
 * (sbdev_ostate) and timestamp in sync.  The first device to become
 * configured also marks the whole board configured.
 */
void
dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
{
	PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
	    cp->sbdev_path,
	    state_str[cp->sbdev_state], cp->sbdev_state,
	    state_str[st], st);

	cp->sbdev_state = st;
	if (st == DR_STATE_CONFIGURED) {
		cp->sbdev_ostate = SBD_STAT_CONFIGURED;
		/* board follows the first device into the configured state */
		if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
			cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
			(void) drv_getparm(TIME,
			    (void *) &cp->sbdev_bp->b_time);
		}
	} else
		cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;

	(void) drv_getparm(TIME, (void *) &cp->sbdev_time);
}
2792
/*
 * Record a board DR state change (b_state only; receptacle/occupant
 * state and timestamps are maintained by the callers).
 */
static void
dr_board_transition(dr_board_t *bp, dr_state_t st)
{
	PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
	    bp->b_num,
	    state_str[bp->b_state], bp->b_state,
	    state_str[st], st);

	bp->b_state = st;
}
2803
/*
 * Record an operation-level error on the handle.  Builds an
 * sbd_error_t from the printf-style arguments, logs it unless ce is
 * CE_IGNORE, and attaches it to hp->h_err.
 */
void
dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
{
	sbd_error_t	*err;
	va_list		args;

	va_start(args, fmt);
	err = drerr_new_v(code, fmt, args);
	va_end(args);

	/* CE_IGNORE suppresses the log entry, not the error itself */
	if (ce != CE_IGNORE)
		sbd_err_log(err, ce);

	DRERR_SET_C(&hp->h_err, &err);
}
2819
/*
 * Record a device-level error, keyed by the unit's attachment point
 * path, on the unit itself (sbdev_error).  ce is treated as in
 * dr_op_err().
 */
void
dr_dev_err(int ce, dr_common_unit_t *cp, int code)
{
	sbd_error_t	*err;

	err = drerr_new(0, code, cp->sbdev_path, NULL);

	if (ce != CE_IGNORE)
		sbd_err_log(err, ce);

	DRERR_SET_C(&cp->sbdev_error, &err);
}
2832
2833 /*
2834 * A callback routine. Called from the drmach layer as a result of
2835 * call to drmach_board_find_devices from dr_init_devlists.
2836 */
2837 static sbd_error_t *
dr_dev_found(void * data,const char * name,int unum,drmachid_t id)2838 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2839 {
2840 dr_board_t *bp = data;
2841 dr_dev_unit_t *dp;
2842 int nt;
2843 static fn_t f = "dr_dev_found";
2844
2845 PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2846 f, bp->b_num, name, unum, id);
2847
2848 nt = dr_dev_type_to_nt((char *)name);
2849 if (nt == SBD_COMP_UNKNOWN) {
2850 /*
2851 * this should not happen. When it does, it indicates
2852 * a missmatch in devices supported by the drmach layer
2853 * vs devices supported by this layer.
2854 */
2855 return (DR_INTERNAL_ERROR());
2856 }
2857
2858 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2859
2860 /* sanity check */
2861 ASSERT(dp->du_common.sbdev_bp == bp);
2862 ASSERT(dp->du_common.sbdev_unum == unum);
2863 ASSERT(dp->du_common.sbdev_type == nt);
2864
2865 /* render dynamic attachment point path of this unit */
2866 (void) snprintf(dp->du_common.sbdev_path,
2867 sizeof (dp->du_common.sbdev_path),
2868 (nt == SBD_COMP_MEM ? "%s::%s" : "%s::%s%d"),
2869 bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
2870
2871 dp->du_common.sbdev_id = id;
2872 DR_DEV_SET_PRESENT(&dp->du_common);
2873
2874 bp->b_ndev++;
2875
2876 return (NULL);
2877 }
2878
2879 static sbd_error_t *
dr_init_devlists(dr_board_t * bp)2880 dr_init_devlists(dr_board_t *bp)
2881 {
2882 int i;
2883 sbd_error_t *err;
2884 dr_dev_unit_t *dp;
2885 static fn_t f = "dr_init_devlists";
2886
2887 PR_ALL("%s (%s)...\n", f, bp->b_path);
2888
2889 /* sanity check */
2890 ASSERT(bp->b_ndev == 0);
2891
2892 DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2893
2894 /*
2895 * This routine builds the board's devlist and initializes
2896 * the common portion of the unit data structures.
2897 * Note: because the common portion is considered
2898 * uninitialized, the dr_get_*_unit() routines can not
2899 * be used.
2900 */
2901
2902 /*
2903 * Clear out old entries, if any.
2904 */
2905 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2906 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2907
2908 bzero(dp, sizeof (*dp));
2909 dp->du_common.sbdev_bp = bp;
2910 dp->du_common.sbdev_unum = i;
2911 dp->du_common.sbdev_type = SBD_COMP_CPU;
2912 }
2913
2914 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2915 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2916
2917 bzero(dp, sizeof (*dp));
2918 dp->du_common.sbdev_bp = bp;
2919 dp->du_common.sbdev_unum = i;
2920 dp->du_common.sbdev_type = SBD_COMP_MEM;
2921 }
2922
2923 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2924 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2925
2926 bzero(dp, sizeof (*dp));
2927 dp->du_common.sbdev_bp = bp;
2928 dp->du_common.sbdev_unum = i;
2929 dp->du_common.sbdev_type = SBD_COMP_IO;
2930 }
2931
2932 err = NULL;
2933 if (bp->b_id) {
2934 /* find devices on this board */
2935 err = drmach_board_find_devices(
2936 bp->b_id, bp, dr_dev_found);
2937 }
2938
2939 return (err);
2940 }
2941
2942 /*
2943 * Return the unit number of the respective drmachid if
2944 * it's found to be attached.
2945 */
2946 static int
dr_check_unit_attached(dr_common_unit_t * cp)2947 dr_check_unit_attached(dr_common_unit_t *cp)
2948 {
2949 int rv = 0;
2950 processorid_t cpuid;
2951 uint64_t basepa, endpa;
2952 struct memlist *ml;
2953 extern struct memlist *phys_install;
2954 sbd_error_t *err;
2955 int yes;
2956 static fn_t f = "dr_check_unit_attached";
2957
2958 switch (cp->sbdev_type) {
2959 case SBD_COMP_CPU:
2960 err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2961 if (err) {
2962 DRERR_SET_C(&cp->sbdev_error, &err);
2963 rv = -1;
2964 break;
2965 }
2966 mutex_enter(&cpu_lock);
2967 if (cpu_get(cpuid) == NULL)
2968 rv = -1;
2969 mutex_exit(&cpu_lock);
2970 break;
2971
2972 case SBD_COMP_MEM:
2973 err = drmach_mem_get_base_physaddr(cp->sbdev_id, &basepa);
2974 if (err) {
2975 DRERR_SET_C(&cp->sbdev_error, &err);
2976 rv = -1;
2977 break;
2978 }
2979
2980 /*
2981 * basepa may not be on a alignment boundary, make it so.
2982 */
2983 err = drmach_mem_get_slice_size(cp->sbdev_id, &endpa);
2984 if (err) {
2985 DRERR_SET_C(&cp->sbdev_error, &err);
2986 rv = -1;
2987 break;
2988 }
2989
2990 basepa &= ~(endpa - 1);
2991 endpa += basepa;
2992
2993 /*
2994 * Check if base address is in phys_install.
2995 */
2996 memlist_read_lock();
2997 for (ml = phys_install; ml; ml = ml->ml_next)
2998 if ((endpa <= ml->ml_address) ||
2999 (basepa >= (ml->ml_address + ml->ml_size)))
3000 continue;
3001 else
3002 break;
3003 memlist_read_unlock();
3004 if (ml == NULL)
3005 rv = -1;
3006 break;
3007
3008 case SBD_COMP_IO:
3009 err = drmach_io_is_attached(cp->sbdev_id, &yes);
3010 if (err) {
3011 DRERR_SET_C(&cp->sbdev_error, &err);
3012 rv = -1;
3013 break;
3014 } else if (!yes)
3015 rv = -1;
3016 break;
3017
3018 default:
3019 PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
3020 f, cp->sbdev_type, cp->sbdev_id);
3021 rv = -1;
3022 break;
3023 }
3024
3025 return (rv);
3026 }
3027
3028 /*
3029 * See if drmach recognizes the passthru command. DRMACH expects the
3030 * id to identify the thing to which the command is being applied. Using
3031 * nonsense SBD terms, that information has been perversely encoded in the
3032 * c_id member of the sbd_cmd_t structure. This logic reads those tea
3033 * leaves, finds the associated drmach id, then calls drmach to process
3034 * the passthru command.
3035 */
3036 static int
dr_pt_try_drmach(dr_handle_t * hp)3037 dr_pt_try_drmach(dr_handle_t *hp)
3038 {
3039 dr_board_t *bp = hp->h_bd;
3040 sbd_comp_id_t *comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
3041 drmachid_t id;
3042
3043 if (comp_id->c_type == SBD_COMP_NONE) {
3044 id = bp->b_id;
3045 } else {
3046 sbd_comp_type_t nt;
3047
3048 nt = dr_dev_type_to_nt(comp_id->c_name);
3049 if (nt == SBD_COMP_UNKNOWN) {
3050 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3051 id = 0;
3052 } else {
3053 /* pt command applied to dynamic attachment point */
3054 dr_common_unit_t *cp;
3055 cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3056 id = cp->sbdev_id;
3057 }
3058 }
3059
3060 if (hp->h_err == NULL)
3061 hp->h_err = drmach_passthru(id, &hp->h_opts);
3062
3063 return (hp->h_err == NULL ? 0 : -1);
3064 }
3065
3066 static int
dr_pt_ioctl(dr_handle_t * hp)3067 dr_pt_ioctl(dr_handle_t *hp)
3068 {
3069 int cmd, rv, len;
3070 int32_t sz;
3071 int found;
3072 char *copts;
3073 static fn_t f = "dr_pt_ioctl";
3074
3075 PR_ALL("%s...\n", f);
3076
3077 sz = hp->h_opts.size;
3078 copts = hp->h_opts.copts;
3079
3080 if (sz == 0 || copts == (char *)NULL) {
3081 cmn_err(CE_WARN, "%s: invalid passthru args", f);
3082 return (EINVAL);
3083 }
3084
3085 found = 0;
3086 for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3087 len = strlen(pt_arr[cmd].pt_name);
3088 found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3089 if (found)
3090 break;
3091 }
3092
3093 if (found)
3094 rv = (*pt_arr[cmd].pt_func)(hp);
3095 else
3096 rv = dr_pt_try_drmach(hp);
3097
3098 return (rv);
3099 }
3100
3101 /*
3102 * Called at driver load time to determine the state and condition
3103 * of an existing board in the system.
3104 */
3105 static void
dr_board_discovery(dr_board_t * bp)3106 dr_board_discovery(dr_board_t *bp)
3107 {
3108 int i;
3109 dr_devset_t devs_lost, devs_attached = 0;
3110 dr_cpu_unit_t *cp;
3111 dr_mem_unit_t *mp;
3112 dr_io_unit_t *ip;
3113 static fn_t f = "dr_board_discovery";
3114
3115 if (DR_DEVS_PRESENT(bp) == 0) {
3116 PR_ALL("%s: board %d has no devices present\n",
3117 f, bp->b_num);
3118 return;
3119 }
3120
3121 /*
3122 * Check for existence of cpus.
3123 */
3124 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3125 cp = dr_get_cpu_unit(bp, i);
3126
3127 if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3128 continue;
3129
3130 if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3131 DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3132 DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3133 PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3134 f, bp->b_num, i);
3135 }
3136 dr_init_cpu_unit(cp);
3137 }
3138
3139 /*
3140 * Check for existence of memory.
3141 */
3142 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3143 mp = dr_get_mem_unit(bp, i);
3144
3145 if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3146 continue;
3147
3148 if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3149 DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3150 DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3151 PR_ALL("%s: board %d, mem-unit %d - attached\n",
3152 f, bp->b_num, i);
3153 }
3154 dr_init_mem_unit(mp);
3155 }
3156
3157 /*
3158 * Check for i/o state.
3159 */
3160 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3161 ip = dr_get_io_unit(bp, i);
3162
3163 if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3164 continue;
3165
3166 if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3167 /*
3168 * Found it!
3169 */
3170 DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3171 DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3172 PR_ALL("%s: board %d, io-unit %d - attached\n",
3173 f, bp->b_num, i);
3174 }
3175 dr_init_io_unit(ip);
3176 }
3177
3178 DR_DEVS_CONFIGURE(bp, devs_attached);
3179 if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3180 int ut;
3181 /*
3182 * It is not legal on board discovery to have a
3183 * board that is only partially attached. A board
3184 * is either all attached or all connected. If a
3185 * board has at least one attached device, then
3186 * the the remaining devices, if any, must have
3187 * been lost or disconnected. These devices can
3188 * only be recovered by a full attach from scratch.
3189 * Note that devices previously in the unreferenced
3190 * state are subsequently lost until the next full
3191 * attach. This is necessary since the driver unload
3192 * that must have occurred would have wiped out the
3193 * information necessary to re-configure the device
3194 * back online, e.g. memlist.
3195 */
3196 PR_ALL("%s: some devices LOST (0x%lx)...\n", f, devs_lost);
3197
3198 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3199 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3200 continue;
3201
3202 cp = dr_get_cpu_unit(bp, ut);
3203 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3204 }
3205
3206 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3207 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3208 continue;
3209
3210 mp = dr_get_mem_unit(bp, ut);
3211 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3212 }
3213
3214 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3215 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3216 continue;
3217
3218 ip = dr_get_io_unit(bp, ut);
3219 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3220 }
3221
3222 DR_DEVS_DISCONNECT(bp, devs_lost);
3223 }
3224 }
3225
/*
 * One-time initialization of a board structure at driver attach:
 * set up locks and status fields, allocate the per-type unit arrays,
 * build the devlists and derive the board's initial DR state from
 * what is already attached.  Returns 0 on success, -1 if the
 * devlists could not be built (the board is torn down again in that
 * case).
 */
static int
dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
{
	sbd_error_t	*err;

	mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
	bp->b_rstate = SBD_STAT_EMPTY;
	bp->b_ostate = SBD_STAT_UNCONFIGURED;
	bp->b_cond = SBD_COND_UNKNOWN;
	(void) drv_getparm(TIME, (void *)&bp->b_time);

	(void) drmach_board_lookup(bd, &bp->b_id);
	bp->b_num = bd;
	bp->b_dip = dip;

	/* per-type unit arrays; freed again by dr_board_destroy() */
	bp->b_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
	    MAX_CPU_UNITS_PER_BOARD);

	bp->b_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
	    MAX_MEM_UNITS_PER_BOARD);

	bp->b_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
	    MAX_IO_UNITS_PER_BOARD);

	/*
	 * Initialize the devlists
	 */
	err = dr_init_devlists(bp);
	if (err) {
		sbd_err_clear(&err);
		dr_board_destroy(bp);
		return (-1);
	} else if (bp->b_ndev == 0) {
		dr_board_transition(bp, DR_STATE_EMPTY);
	} else {
		/*
		 * Couldn't have made it down here without
		 * having found at least one device.
		 */
		ASSERT(DR_DEVS_PRESENT(bp) != 0);
		/*
		 * Check the state of any possible devices on the
		 * board.
		 */
		dr_board_discovery(bp);

		bp->b_assigned = 1;

		if (DR_DEVS_UNATTACHED(bp) == 0) {
			/*
			 * The board has no unattached devices, therefore
			 * by reason of insanity it must be configured!
			 */
			dr_board_transition(bp, DR_STATE_CONFIGURED);
			bp->b_ostate = SBD_STAT_CONFIGURED;
			bp->b_rstate = SBD_STAT_CONNECTED;
			bp->b_cond = SBD_COND_OK;
			(void) drv_getparm(TIME, (void *)&bp->b_time);
		} else if (DR_DEVS_ATTACHED(bp)) {
			/* some, but not all, devices attached */
			dr_board_transition(bp, DR_STATE_PARTIAL);
			bp->b_ostate = SBD_STAT_CONFIGURED;
			bp->b_rstate = SBD_STAT_CONNECTED;
			bp->b_cond = SBD_COND_OK;
			(void) drv_getparm(TIME, (void *)&bp->b_time);
		} else {
			/* devices present but none attached */
			dr_board_transition(bp, DR_STATE_CONNECTED);
			bp->b_rstate = SBD_STAT_CONNECTED;
			(void) drv_getparm(TIME, (void *)&bp->b_time);
		}
	}

	return (0);
}
3301
/*
 * Undo dr_board_init(): transition the board to empty, free the
 * per-type unit arrays and destroy the board's locks.
 */
static void
dr_board_destroy(dr_board_t *bp)
{
	PR_ALL("dr_board_destroy: num %d, path %s\n",
	    bp->b_num, bp->b_path);

	dr_board_transition(bp, DR_STATE_EMPTY);
	bp->b_rstate = SBD_STAT_EMPTY;
	(void) drv_getparm(TIME, (void *)&bp->b_time);

	/*
	 * Free up MEM unit structs.
	 */
	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_MEM)],
	    dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
	bp->b_dev[NIX(SBD_COMP_MEM)] = NULL;
	/*
	 * Free up CPU unit structs.
	 */
	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_CPU)],
	    dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
	bp->b_dev[NIX(SBD_COMP_CPU)] = NULL;
	/*
	 * Free up IO unit structs.
	 */
	FREESTRUCT(bp->b_dev[NIX(SBD_COMP_IO)],
	    dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
	bp->b_dev[NIX(SBD_COMP_IO)] = NULL;

	mutex_destroy(&bp->b_lock);
	mutex_destroy(&bp->b_slock);
	cv_destroy(&bp->b_scv);
}
3335
/*
 * Acquire exclusive use of the board's status area, blocking
 * (cv_wait) until any current holder releases it via
 * dr_unlock_status().
 */
void
dr_lock_status(dr_board_t *bp)
{
	mutex_enter(&bp->b_slock);
	while (bp->b_sflags & DR_BSLOCK)
		cv_wait(&bp->b_scv, &bp->b_slock);
	bp->b_sflags |= DR_BSLOCK;
	mutex_exit(&bp->b_slock);
}
3345
/*
 * Release the status lock taken by dr_lock_status() and wake one
 * waiter, if any.
 */
void
dr_unlock_status(dr_board_t *bp)
{
	mutex_enter(&bp->b_slock);
	bp->b_sflags &= ~DR_BSLOCK;
	cv_signal(&bp->b_scv);
	mutex_exit(&bp->b_slock);
}
3354
3355 /*
3356 * Extract flags passed via ioctl.
3357 */
3358 int
dr_cmd_flags(dr_handle_t * hp)3359 dr_cmd_flags(dr_handle_t *hp)
3360 {
3361 return (hp->h_sbdcmd.cmd_cm.c_flags);
3362 }
3363