1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26 /*
27 * Copyright (c) 2010, Intel Corporation.
28 * All rights reserved.
29 */
30
31 /*
32  * PIM-DR layer of the DR driver. Provides the interface between
33  * user-level applications and the PSM-DR layer.
34 */
35
36 #include <sys/note.h>
37 #include <sys/debug.h>
38 #include <sys/types.h>
39 #include <sys/errno.h>
40 #include <sys/cred.h>
41 #include <sys/dditypes.h>
42 #include <sys/devops.h>
43 #include <sys/modctl.h>
44 #include <sys/poll.h>
45 #include <sys/conf.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/sunndi.h>
49 #include <sys/stat.h>
50 #include <sys/kmem.h>
51 #include <sys/processor.h>
52 #include <sys/cpuvar.h>
53 #include <sys/mem_config.h>
54
55 #include <sys/autoconf.h>
56 #include <sys/cmn_err.h>
57
58 #include <sys/ddi_impldefs.h>
59 #include <sys/promif.h>
60 #include <sys/machsystm.h>
61
62 #include <sys/dr.h>
63 #include <sys/drmach.h>
64 #include <sys/dr_util.h>
65
66 extern int nulldev();
67 extern int nodev();
68 extern struct memlist *phys_install;
69
70 #ifdef DEBUG
71 uint_t dr_debug = 0; /* dr.h for bit values */
72 #endif /* DEBUG */
73
74 static int dr_dev_type_to_nt(char *);
75
76 /*
77 * NOTE: state_str, nt_str and SBD_CMD_STR are only used in a debug
78 * kernel. They are, however, referenced during both debug and non-debug
79 * compiles.
80 */
81
82 static char *state_str[] = {
83 "EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
84 "PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
85 "FATAL"
86 };
87
88 #define SBD_CMD_STR(c) \
89 (((c) == SBD_CMD_ASSIGN) ? "ASSIGN" : \
90 ((c) == SBD_CMD_UNASSIGN) ? "UNASSIGN" : \
91 ((c) == SBD_CMD_POWERON) ? "POWERON" : \
92 ((c) == SBD_CMD_POWEROFF) ? "POWEROFF" : \
93 ((c) == SBD_CMD_TEST) ? "TEST" : \
94 ((c) == SBD_CMD_CONNECT) ? "CONNECT" : \
95 ((c) == SBD_CMD_DISCONNECT) ? "DISCONNECT" : \
96 ((c) == SBD_CMD_CONFIGURE) ? "CONFIGURE" : \
97 ((c) == SBD_CMD_UNCONFIGURE) ? "UNCONFIGURE" : \
98 ((c) == SBD_CMD_GETNCM) ? "GETNCM" : \
99 ((c) == SBD_CMD_PASSTHRU) ? "PASSTHRU" : \
100 ((c) == SBD_CMD_STATUS) ? "STATUS" : "unknown")
101
102 #define DR_GET_BOARD_DEVUNIT(sb, ut, un) (&((sb)->b_dev[DEVSET_NIX(ut)][un]))
103
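/*
 * A dr minor number encodes both the driver instance and the board
 * number: the instance occupies the bits above bit 15 and the board
 * number the low 16 bits.  For example, instance 1 with board 3 yields
 * minor ((1 << 16) | 3), from which DR_MINOR2INST() and DR_MINOR2BNUM()
 * recover the two fields.
 */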
104 #define DR_MAKE_MINOR(i, b) (((i) << 16) | (b))
105 #define DR_MINOR2INST(m) (((m) >> 16) & 0xffff)
106 #define DR_MINOR2BNUM(m) ((m) & 0xffff)
107
108 /* for the DR*INTERNAL_ERROR macros. see sys/dr.h. */
109 static char *dr_ie_fmt = "dr.c %d";
110
111 /* struct for drmach device name to sbd_comp_type_t mapping */
112 typedef struct {
113 char *s_devtype;
114 sbd_comp_type_t s_nodetype;
115 } dr_devname_t;
116
117 /* struct to map starfire device attributes - name:sbd_comp_type_t */
118 static dr_devname_t dr_devattr[] = {
119 { DRMACH_DEVTYPE_MEM, SBD_COMP_MEM },
120 { DRMACH_DEVTYPE_CPU, SBD_COMP_CPU },
121 { DRMACH_DEVTYPE_PCI, SBD_COMP_IO },
122 #if defined(DRMACH_DEVTYPE_SBUS)
123 { DRMACH_DEVTYPE_SBUS, SBD_COMP_IO },
124 #endif
125 #if defined(DRMACH_DEVTYPE_WCI)
126 { DRMACH_DEVTYPE_WCI, SBD_COMP_IO },
127 #endif
128 /* last s_devtype must be NULL, s_nodetype must be SBD_COMP_UNKNOWN */
129 { NULL, SBD_COMP_UNKNOWN }
130 };
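/*
 * dr_dev_type_to_nt() presumably walks this table to translate the
 * canonical device-type name supplied in an ioctl into the matching
 * sbd_comp_type_t; the { NULL, SBD_COMP_UNKNOWN } entry terminates
 * the search.
 */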
131
132 /*
133 * Per instance soft-state structure.
134 */
135 typedef struct dr_softstate {
136 dev_info_t *dip;
137 dr_board_t *boards;
138 kmutex_t i_lock;
139 int dr_initialized;
140 } dr_softstate_t;
141
142 /*
143 * dr Global data elements
144 */
145 struct dr_global {
146 	dr_softstate_t	*softsp;	/* soft-state handle from ddi_soft_state_init */
147 kmutex_t lock;
148 } dr_g;
149
150 dr_unsafe_devs_t dr_unsafe_devs;
151
152 /*
153 * Table of known passthru commands.
154 */
155 struct {
156 char *pt_name;
157 int (*pt_func)(dr_handle_t *);
158 } pt_arr[] = {
159 "quiesce", dr_pt_test_suspend,
160 };
161
162 int dr_modunload_okay = 0; /* set to non-zero to allow unload */
163
164 /*
165  * State transition table. Specifies the valid transitions for the
166  * "board" state. Recall that a non-zero return value terminates the
167  * operation; the associated error value (x_err) indicates the actual error, if any.
168 */
169 static int
170 _cmd2index(int c)
171 {
172 /*
173 * Translate DR CMD to index into dr_state_transition.
174 */
175 switch (c) {
176 case SBD_CMD_CONNECT: return (0);
177 case SBD_CMD_DISCONNECT: return (1);
178 case SBD_CMD_CONFIGURE: return (2);
179 case SBD_CMD_UNCONFIGURE: return (3);
180 case SBD_CMD_ASSIGN: return (4);
181 case SBD_CMD_UNASSIGN: return (5);
182 case SBD_CMD_POWERON: return (6);
183 case SBD_CMD_POWEROFF: return (7);
184 case SBD_CMD_TEST: return (8);
185 default: return (-1);
186 }
187 }
188
189 #define CMD2INDEX(c) _cmd2index(c)
190
191 static struct dr_state_trans {
192 int x_cmd;
193 struct {
194 int x_rv; /* return value of pre_op */
195 int x_err; /* error, if any */
196 } x_op[DR_STATE_MAX];
197 } dr_state_transition[] = {
198 { SBD_CMD_CONNECT,
199 {
200 { 0, 0 }, /* empty */
201 { 0, 0 }, /* occupied */
202 { -1, ESBD_STATE }, /* connected */
203 { -1, ESBD_STATE }, /* unconfigured */
204 { -1, ESBD_STATE }, /* partial */
205 { -1, ESBD_STATE }, /* configured */
206 { -1, ESBD_STATE }, /* release */
207 { -1, ESBD_STATE }, /* unreferenced */
208 { -1, ESBD_FATAL_STATE }, /* fatal */
209 }
210 },
211 { SBD_CMD_DISCONNECT,
212 {
213 { -1, ESBD_STATE }, /* empty */
214 { 0, 0 }, /* occupied */
215 { 0, 0 }, /* connected */
216 { 0, 0 }, /* unconfigured */
217 { -1, ESBD_STATE }, /* partial */
218 { -1, ESBD_STATE }, /* configured */
219 { -1, ESBD_STATE }, /* release */
220 { -1, ESBD_STATE }, /* unreferenced */
221 { -1, ESBD_FATAL_STATE }, /* fatal */
222 }
223 },
224 { SBD_CMD_CONFIGURE,
225 {
226 { -1, ESBD_STATE }, /* empty */
227 { -1, ESBD_STATE }, /* occupied */
228 { 0, 0 }, /* connected */
229 { 0, 0 }, /* unconfigured */
230 { 0, 0 }, /* partial */
231 { 0, 0 }, /* configured */
232 { -1, ESBD_STATE }, /* release */
233 { -1, ESBD_STATE }, /* unreferenced */
234 { -1, ESBD_FATAL_STATE }, /* fatal */
235 }
236 },
237 { SBD_CMD_UNCONFIGURE,
238 {
239 { -1, ESBD_STATE }, /* empty */
240 { -1, ESBD_STATE }, /* occupied */
241 { -1, ESBD_STATE }, /* connected */
242 { -1, ESBD_STATE }, /* unconfigured */
243 { 0, 0 }, /* partial */
244 { 0, 0 }, /* configured */
245 { 0, 0 }, /* release */
246 { 0, 0 }, /* unreferenced */
247 { -1, ESBD_FATAL_STATE }, /* fatal */
248 }
249 },
250 { SBD_CMD_ASSIGN,
251 {
252 { 0, 0 }, /* empty */
253 { 0, 0 }, /* occupied */
254 { -1, ESBD_STATE }, /* connected */
255 { -1, ESBD_STATE }, /* unconfigured */
256 { -1, ESBD_STATE }, /* partial */
257 { -1, ESBD_STATE }, /* configured */
258 { -1, ESBD_STATE }, /* release */
259 { -1, ESBD_STATE }, /* unreferenced */
260 { -1, ESBD_FATAL_STATE }, /* fatal */
261 }
262 },
263 { SBD_CMD_UNASSIGN,
264 {
265 { 0, 0 }, /* empty */
266 { 0, 0 }, /* occupied */
267 { -1, ESBD_STATE }, /* connected */
268 { -1, ESBD_STATE }, /* unconfigured */
269 { -1, ESBD_STATE }, /* partial */
270 { -1, ESBD_STATE }, /* configured */
271 { -1, ESBD_STATE }, /* release */
272 { -1, ESBD_STATE }, /* unreferenced */
273 { -1, ESBD_FATAL_STATE }, /* fatal */
274 }
275 },
276 { SBD_CMD_POWERON,
277 {
278 { 0, 0 }, /* empty */
279 { 0, 0 }, /* occupied */
280 { -1, ESBD_STATE }, /* connected */
281 { -1, ESBD_STATE }, /* unconfigured */
282 { -1, ESBD_STATE }, /* partial */
283 { -1, ESBD_STATE }, /* configured */
284 { -1, ESBD_STATE }, /* release */
285 { -1, ESBD_STATE }, /* unreferenced */
286 { -1, ESBD_FATAL_STATE }, /* fatal */
287 }
288 },
289 { SBD_CMD_POWEROFF,
290 {
291 { 0, 0 }, /* empty */
292 { 0, 0 }, /* occupied */
293 { -1, ESBD_STATE }, /* connected */
294 { -1, ESBD_STATE }, /* unconfigured */
295 { -1, ESBD_STATE }, /* partial */
296 { -1, ESBD_STATE }, /* configured */
297 { -1, ESBD_STATE }, /* release */
298 { -1, ESBD_STATE }, /* unreferenced */
299 { -1, ESBD_FATAL_STATE }, /* fatal */
300 }
301 },
302 { SBD_CMD_TEST,
303 {
304 { 0, 0 }, /* empty */
305 { 0, 0 }, /* occupied */
306 { -1, ESBD_STATE }, /* connected */
307 { -1, ESBD_STATE }, /* unconfigured */
308 { -1, ESBD_STATE }, /* partial */
309 { -1, ESBD_STATE }, /* configured */
310 { -1, ESBD_STATE }, /* release */
311 { -1, ESBD_STATE }, /* unreferenced */
312 { -1, ESBD_FATAL_STATE }, /* fatal */
313 }
314 },
315 };
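/*
 * Each row above is selected via CMD2INDEX(cmd) and each column
 * corresponds to one DR_STATE_* board state, in the same order as
 * state_str[]; dr_check_transition() consults this table from
 * dr_pre_op() to validate the requested transition.
 */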
316
317 /*
318 * Global R/W lock to synchronize access across
319 * multiple boards. Users wanting multi-board access
320 * must grab WRITE lock, others must grab READ lock.
321 */
322 krwlock_t dr_grwlock;
323
324 /*
325 * Head of the boardlist used as a reference point for
326 * locating board structs.
327 * TODO: eliminate dr_boardlist
328 */
329 dr_board_t *dr_boardlist;
330
331 /*
332 * DR support functions.
333 */
334 static dr_devset_t dr_dev2devset(sbd_comp_id_t *cid);
335 static int dr_check_transition(dr_board_t *bp,
336 dr_devset_t *devsetp,
337 struct dr_state_trans *transp,
338 int cmd);
339 static int dr_check_unit_attached(dr_common_unit_t *dp);
340 static sbd_error_t *dr_init_devlists(dr_board_t *bp);
341 static void dr_board_discovery(dr_board_t *bp);
342 static int dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd);
343 static void dr_board_destroy(dr_board_t *bp);
344 static void dr_board_transition(dr_board_t *bp, dr_state_t st);
345
346 /*
347 * DR driver (DDI) entry points.
348 */
349 static int dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
350 void *arg, void **result);
351 static int dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
352 static int dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
353 static int dr_probe(dev_info_t *dip);
354 static int dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
355 cred_t *cred_p, int *rval_p);
356 static int dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p);
357 static int dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p);
358
359 /*
360 * DR command processing operations.
361 */
362 static int dr_copyin_iocmd(dr_handle_t *hp);
363 static int dr_copyout_iocmd(dr_handle_t *hp);
364 static int dr_copyout_errs(dr_handle_t *hp);
365 static int dr_pre_op(dr_handle_t *hp);
366 static int dr_post_op(dr_handle_t *hp, int rv);
367 static int dr_exec_op(dr_handle_t *hp);
368 static void dr_assign_board(dr_handle_t *hp);
369 static void dr_unassign_board(dr_handle_t *hp);
370 static void dr_connect(dr_handle_t *hp);
371 static int dr_disconnect(dr_handle_t *hp);
372 static void dr_dev_configure(dr_handle_t *hp);
373 static void dr_dev_release(dr_handle_t *hp);
374 static int dr_dev_unconfigure(dr_handle_t *hp);
375 static void dr_dev_cancel(dr_handle_t *hp);
376 static int dr_dev_status(dr_handle_t *hp);
377 static int dr_get_ncm(dr_handle_t *hp);
378 static int dr_pt_ioctl(dr_handle_t *hp);
379 static void dr_poweron_board(dr_handle_t *hp);
380 static void dr_poweroff_board(dr_handle_t *hp);
381 static void dr_test_board(dr_handle_t *hp);
382
383 /*
384 * Autoconfiguration data structures
385 */
386 struct cb_ops dr_cb_ops = {
387 dr_open, /* open */
388 dr_close, /* close */
389 nodev, /* strategy */
390 nodev, /* print */
391 nodev, /* dump */
392 nodev, /* read */
393 nodev, /* write */
394 dr_ioctl, /* ioctl */
395 nodev, /* devmap */
396 nodev, /* mmap */
397 nodev, /* segmap */
398 nochpoll, /* chpoll */
399 ddi_prop_op, /* cb_prop_op */
400 NULL, /* struct streamtab */
401 D_NEW | D_MP | D_MTSAFE, /* compatibility flags */
402 CB_REV, /* Rev */
403 nodev, /* cb_aread */
404 nodev /* cb_awrite */
405 };
406
407 struct dev_ops dr_dev_ops = {
408 DEVO_REV, /* build version */
409 0, /* dev ref count */
410 dr_getinfo, /* getinfo */
411 nulldev, /* identify */
412 dr_probe, /* probe */
413 dr_attach, /* attach */
414 dr_detach, /* detach */
415 nodev, /* reset */
416 &dr_cb_ops, /* cb_ops */
417 (struct bus_ops *)NULL, /* bus ops */
418 NULL, /* power */
419 ddi_quiesce_not_needed, /* quiesce */
420 };
421
422 extern struct mod_ops mod_driverops;
423
424 static struct modldrv modldrv = {
425 &mod_driverops,
426 "Dynamic Reconfiguration",
427 &dr_dev_ops
428 };
429
430 static struct modlinkage modlinkage = {
431 MODREV_1,
432 (void *)&modldrv,
433 NULL
434 };
435
436 /*
437 * Driver entry points.
438 */
439 int
440 _init(void)
441 {
442 int err;
443
444 /*
445 * If you need to support multiple nodes (instances), then
446 * whatever the maximum number of supported nodes is would
447  * need to be passed as the third parameter to ddi_soft_state_init().
448  * An alternative would be to dynamically fini and re-init the
449 * soft state structure each time a node is attached.
450 */
451 err = ddi_soft_state_init((void **)&dr_g.softsp,
452 sizeof (dr_softstate_t), 1);
453 if (err)
454 return (err);
455
456 mutex_init(&dr_g.lock, NULL, MUTEX_DRIVER, NULL);
457 rw_init(&dr_grwlock, NULL, RW_DEFAULT, NULL);
458
459 return (mod_install(&modlinkage));
460 }
461
462 int
463 _fini(void)
464 {
465 int err;
466
467 if ((err = mod_remove(&modlinkage)) != 0)
468 return (err);
469
470 mutex_destroy(&dr_g.lock);
471 rw_destroy(&dr_grwlock);
472
473 ddi_soft_state_fini((void **)&dr_g.softsp);
474
475 return (0);
476 }
477
478 int
479 _info(struct modinfo *modinfop)
480 {
481 return (mod_info(&modlinkage, modinfop));
482 }
483
484 /*ARGSUSED1*/
485 static int
486 dr_open(dev_t *dev, int flag, int otyp, cred_t *cred_p)
487 {
488 int instance;
489 dr_softstate_t *softsp;
490 dr_board_t *bp;
491
492 /*
493 * Don't open unless we've attached.
494 */
495 instance = DR_MINOR2INST(getminor(*dev));
496 softsp = ddi_get_soft_state(dr_g.softsp, instance);
497 if (softsp == NULL)
498 return (ENXIO);
499
500 mutex_enter(&softsp->i_lock);
501 if (!softsp->dr_initialized) {
502 int bd;
503 int rv = 0;
504
505 bp = softsp->boards;
506
507 /* initialize each array element */
508 for (bd = 0; bd < MAX_BOARDS; bd++, bp++) {
509 rv = dr_board_init(bp, softsp->dip, bd);
510 if (rv)
511 break;
512 }
513
514 if (rv == 0) {
515 softsp->dr_initialized = 1;
516 } else {
517 /* destroy elements initialized thus far */
518 while (--bp >= softsp->boards)
519 dr_board_destroy(bp);
520
521 /* TODO: should this be another errno val ? */
522 mutex_exit(&softsp->i_lock);
523 return (ENXIO);
524 }
525 }
526 mutex_exit(&softsp->i_lock);
527
528 bp = &softsp->boards[DR_MINOR2BNUM(getminor(*dev))];
529
530 /*
531 * prevent opening of a dyn-ap for a board
532 * that does not exist
533 */
534 if (!bp->b_assigned) {
535 if (drmach_board_lookup(bp->b_num, &bp->b_id) != 0)
536 return (ENODEV);
537 }
538
539 return (0);
540 }
541
542 /*ARGSUSED*/
543 static int
544 dr_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
545 {
546 return (0);
547 }
548
549 /*
550 * Enable/disable DR features.
551 */
552 int dr_enable = 1;
553
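/*
 * Main ioctl entry point.  The flow is: copy in the sbd command
 * (dr_copyin_iocmd), derive the target devset, take the global and
 * board locks as needed (the global lock stays exclusive whenever
 * memory is involved), then run dr_pre_op() / dr_exec_op() /
 * dr_post_op().  Any recorded error is copied back to the caller
 * before the handle and option buffer are freed.
 */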
554 /*ARGSUSED3*/
555 static int
556 dr_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
557 cred_t *cred_p, int *rval_p)
558 {
559 int rv = 0;
560 int instance;
561 int bd;
562 dr_handle_t *hp;
563 dr_softstate_t *softsp;
564 static fn_t f = "dr_ioctl";
565
566 PR_ALL("%s...\n", f);
567
568 instance = DR_MINOR2INST(getminor(dev));
569 softsp = ddi_get_soft_state(dr_g.softsp, instance);
570 if (softsp == NULL) {
571 cmn_err(CE_WARN, "dr%d: module not yet attached", instance);
572 return (ENXIO);
573 }
574
575 if (!dr_enable) {
576 switch (cmd) {
577 case SBD_CMD_STATUS:
578 case SBD_CMD_GETNCM:
579 case SBD_CMD_PASSTHRU:
580 break;
581 default:
582 return (ENOTSUP);
583 }
584 }
585
586 bd = DR_MINOR2BNUM(getminor(dev));
587 if (bd >= MAX_BOARDS)
588 return (ENXIO);
589
590 /* get and initialize storage for new handle */
591 hp = GETSTRUCT(dr_handle_t, 1);
592 hp->h_bd = &softsp->boards[bd];
593 hp->h_err = NULL;
594 hp->h_dev = getminor(dev);
595 hp->h_cmd = cmd;
596 hp->h_mode = mode;
597 hp->h_iap = (sbd_ioctl_arg_t *)arg;
598
599 /* copy sbd command into handle */
600 rv = dr_copyin_iocmd(hp);
601 if (rv) {
602 FREESTRUCT(hp, dr_handle_t, 1);
603 return (EINVAL);
604 }
605
606 /* translate canonical name to component type */
607 if (hp->h_sbdcmd.cmd_cm.c_id.c_name[0] != '\0') {
608 hp->h_sbdcmd.cmd_cm.c_id.c_type =
609 dr_dev_type_to_nt(hp->h_sbdcmd.cmd_cm.c_id.c_name);
610
611 PR_ALL("%s: c_name = %s, c_type = %d\n",
612 f,
613 hp->h_sbdcmd.cmd_cm.c_id.c_name,
614 hp->h_sbdcmd.cmd_cm.c_id.c_type);
615 } else {
616 /*EMPTY*/
617 PR_ALL("%s: c_name is NULL\n", f);
618 }
619
620 /* determine scope of operation */
621 hp->h_devset = dr_dev2devset(&hp->h_sbdcmd.cmd_cm.c_id);
622
623 switch (hp->h_cmd) {
624 case SBD_CMD_STATUS:
625 case SBD_CMD_GETNCM:
626 /* no locks needed for these commands */
627 break;
628
629 default:
630 rw_enter(&dr_grwlock, RW_WRITER);
631 mutex_enter(&hp->h_bd->b_lock);
632
633 /*
634 * If we're dealing with memory at all, then we have
635 * to keep the "exclusive" global lock held. This is
636 * necessary since we will probably need to look at
637 * multiple board structs. Otherwise, we only have
638 * to deal with the board in question and so can drop
639 * the global lock to "shared".
640 */
641 rv = DEVSET_IN_SET(hp->h_devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
642 if (rv == 0)
643 rw_downgrade(&dr_grwlock);
644 break;
645 }
646 rv = 0;
647
648 if (rv == 0)
649 rv = dr_pre_op(hp);
650 if (rv == 0) {
651 rv = dr_exec_op(hp);
652 rv = dr_post_op(hp, rv);
653 }
654
655 if (rv == -1)
656 rv = EIO;
657
658 if (hp->h_err != NULL)
659 if (!(rv = dr_copyout_errs(hp)))
660 rv = EIO;
661
662 /* undo locking, if any, done before dr_pre_op */
663 switch (hp->h_cmd) {
664 case SBD_CMD_STATUS:
665 case SBD_CMD_GETNCM:
666 break;
667
668 case SBD_CMD_ASSIGN:
669 case SBD_CMD_UNASSIGN:
670 case SBD_CMD_POWERON:
671 case SBD_CMD_POWEROFF:
672 case SBD_CMD_CONNECT:
673 case SBD_CMD_CONFIGURE:
674 case SBD_CMD_UNCONFIGURE:
675 case SBD_CMD_DISCONNECT:
676 /* Board changed state. Log a sysevent. */
677 if (rv == 0)
678 (void) drmach_log_sysevent(hp->h_bd->b_num, "",
679 SE_SLEEP, 0);
680 /* Fall through */
681
682 default:
683 mutex_exit(&hp->h_bd->b_lock);
684 rw_exit(&dr_grwlock);
685 }
686
687 if (hp->h_opts.size != 0)
688 FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
689
690 FREESTRUCT(hp, dr_handle_t, 1);
691
692 return (rv);
693 }
694
695 /*ARGSUSED*/
696 static int
697 dr_probe(dev_info_t *dip)
698 {
699 return (DDI_PROBE_SUCCESS);
700 }
701
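/*
 * DDI_ATTACH: allocate the per-instance soft state, build the board
 * array, and create one minor node per possible board (named via
 * drmach_board_name()) so that each board appears as its own
 * attachment point.  The optional "unsupported-io-drivers" property
 * is read here to seed the list of unsafe devices.
 */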
702 static int
703 dr_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
704 {
705 int rv, rv2;
706 int bd;
707 int instance;
708 sbd_error_t *err;
709 dr_softstate_t *softsp;
710
711 instance = ddi_get_instance(dip);
712
713 switch (cmd) {
714 case DDI_ATTACH:
715 rw_enter(&dr_grwlock, RW_WRITER);
716
717 rv = ddi_soft_state_zalloc(dr_g.softsp, instance);
718 if (rv != DDI_SUCCESS) {
719 cmn_err(CE_WARN, "dr%d: failed to alloc soft-state",
720 instance);
721 return (DDI_FAILURE);
722 }
723
724 /* initialize softstate structure */
725 softsp = ddi_get_soft_state(dr_g.softsp, instance);
726 softsp->dip = dip;
727
728 mutex_init(&softsp->i_lock, NULL, MUTEX_DRIVER, NULL);
729
730 /* allocate board array (aka boardlist) */
731 softsp->boards = GETSTRUCT(dr_board_t, MAX_BOARDS);
732
733 /* TODO: eliminate dr_boardlist */
734 dr_boardlist = softsp->boards;
735
736 /* initialize each array element */
737 rv = DDI_SUCCESS;
738 for (bd = 0; bd < MAX_BOARDS; bd++) {
739 dr_board_t *bp = &softsp->boards[bd];
740 char *p, *name;
741 int l, minor_num;
742
743 /*
744 			 * Initialize the board attachment point path
745 			 * (relative to pseudo) in a form immediately
746 			 * reusable as a cfgadm command argument.
747 * TODO: clean this up
748 */
749 p = bp->b_path;
750 l = sizeof (bp->b_path);
751 (void) snprintf(p, l, "dr@%d:", instance);
752 while (*p != '\0') {
753 l--;
754 p++;
755 }
756
757 name = p;
758 err = drmach_board_name(bd, p, l);
759 if (err) {
760 sbd_err_clear(&err);
761 rv = DDI_FAILURE;
762 break;
763 }
764
765 minor_num = DR_MAKE_MINOR(instance, bd);
766 rv = ddi_create_minor_node(dip, name, S_IFCHR,
767 minor_num, DDI_NT_SBD_ATTACHMENT_POINT, 0);
768 if (rv != DDI_SUCCESS)
769 rv = DDI_FAILURE;
770 }
771
772 if (rv == DDI_SUCCESS) {
773 /*
774 * Announce the node's presence.
775 */
776 ddi_report_dev(dip);
777 } else {
778 ddi_remove_minor_node(dip, NULL);
779 }
780 /*
781 * Init registered unsafe devs.
782 */
783 dr_unsafe_devs.devnames = NULL;
784 rv2 = ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
785 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
786 "unsupported-io-drivers", &dr_unsafe_devs.devnames,
787 &dr_unsafe_devs.ndevs);
788
789 if (rv2 != DDI_PROP_SUCCESS)
790 dr_unsafe_devs.ndevs = 0;
791
792 rw_exit(&dr_grwlock);
793 return (rv);
794
795 default:
796 return (DDI_FAILURE);
797 }
798
799 /*NOTREACHED*/
800 }
801
802 static int
803 dr_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
804 {
805 int instance;
806 dr_softstate_t *softsp;
807
808 switch (cmd) {
809 case DDI_DETACH:
810 if (!dr_modunload_okay)
811 return (DDI_FAILURE);
812
813 rw_enter(&dr_grwlock, RW_WRITER);
814
815 instance = ddi_get_instance(dip);
816 softsp = ddi_get_soft_state(dr_g.softsp, instance);
817
818 /* TODO: eliminate dr_boardlist */
819 ASSERT(softsp->boards == dr_boardlist);
820
821 /* remove all minor nodes */
822 ddi_remove_minor_node(dip, NULL);
823
824 if (softsp->dr_initialized) {
825 int bd;
826
827 for (bd = 0; bd < MAX_BOARDS; bd++)
828 dr_board_destroy(&softsp->boards[bd]);
829 }
830
831 FREESTRUCT(softsp->boards, dr_board_t, MAX_BOARDS);
832 mutex_destroy(&softsp->i_lock);
833 ddi_soft_state_free(dr_g.softsp, instance);
834
835 rw_exit(&dr_grwlock);
836 return (DDI_SUCCESS);
837
838 default:
839 return (DDI_FAILURE);
840 }
841 /*NOTREACHED*/
842 }
843
844 static int
845 dr_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
846 {
847 _NOTE(ARGUNUSED(dip))
848
849 dev_t dev = (dev_t)arg;
850 int instance, error;
851 dr_softstate_t *softsp;
852
853 *result = NULL;
854 error = DDI_SUCCESS;
855 instance = DR_MINOR2INST(getminor(dev));
856
857 switch (cmd) {
858 case DDI_INFO_DEVT2DEVINFO:
859 softsp = ddi_get_soft_state(dr_g.softsp, instance);
860 if (softsp == NULL)
861 return (DDI_FAILURE);
862 *result = (void *)softsp->dip;
863 break;
864
865 case DDI_INFO_DEVT2INSTANCE:
866 *result = (void *)(uintptr_t)instance;
867 break;
868
869 default:
870 error = DDI_FAILURE;
871 break;
872 }
873
874 return (error);
875 }
876
877 /*
878 * DR operations.
879 */
880
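/*
 * Copy the sbd_cmd_t from user space into the handle.  Under
 * _MULTI_DATAMODEL an ILP32 caller's sbd_cmd32_t is converted field
 * by field; the option string (c_opts/c_len), if any, is copied into
 * a separate buffer one byte longer than c_len.
 */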
881 static int
882 dr_copyin_iocmd(dr_handle_t *hp)
883 {
884 static fn_t f = "dr_copyin_iocmd";
885 sbd_cmd_t *scp = &hp->h_sbdcmd;
886
887 if (hp->h_iap == NULL)
888 return (EINVAL);
889
890 bzero((caddr_t)scp, sizeof (sbd_cmd_t));
891
892 #ifdef _MULTI_DATAMODEL
893 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
894 sbd_cmd32_t scmd32;
895
896 bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));
897
898 if (ddi_copyin((void *)hp->h_iap, (void *)&scmd32,
899 sizeof (sbd_cmd32_t), hp->h_mode)) {
900 cmn_err(CE_WARN,
901 "%s: (32bit) failed to copyin "
902 "sbdcmd-struct", f);
903 return (EFAULT);
904 }
905 scp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
906 scp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
907 bcopy(&scmd32.cmd_cm.c_id.c_name[0],
908 &scp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
909 scp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
910 scp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
911 scp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;
912
913 switch (hp->h_cmd) {
914 case SBD_CMD_STATUS:
915 scp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
916 scp->cmd_stat.s_statp =
917 (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
918 break;
919 default:
920 break;
921
922 }
923 } else
924 #endif /* _MULTI_DATAMODEL */
925 if (ddi_copyin((void *)hp->h_iap, (void *)scp,
926 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
927 cmn_err(CE_WARN,
928 "%s: failed to copyin sbdcmd-struct", f);
929 return (EFAULT);
930 }
931
932 if ((hp->h_opts.size = scp->cmd_cm.c_len) != 0) {
933 hp->h_opts.copts = GETSTRUCT(char, scp->cmd_cm.c_len + 1);
934 ++hp->h_opts.size;
935 if (ddi_copyin((void *)scp->cmd_cm.c_opts,
936 (void *)hp->h_opts.copts,
937 scp->cmd_cm.c_len, hp->h_mode) != 0) {
938 cmn_err(CE_WARN, "%s: failed to copyin options", f);
939 return (EFAULT);
940 }
941 }
942
943 return (0);
944 }
945
946 static int
947 dr_copyout_iocmd(dr_handle_t *hp)
948 {
949 static fn_t f = "dr_copyout_iocmd";
950 sbd_cmd_t *scp = &hp->h_sbdcmd;
951
952 if (hp->h_iap == NULL)
953 return (EINVAL);
954
955 #ifdef _MULTI_DATAMODEL
956 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
957 sbd_cmd32_t scmd32;
958
959 scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
960 scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
961 bcopy(&scp->cmd_cm.c_id.c_name[0],
962 &scmd32.cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
963
964 scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
965 scmd32.cmd_cm.c_len = scp->cmd_cm.c_len;
966 scmd32.cmd_cm.c_opts = (caddr32_t)(uintptr_t)scp->cmd_cm.c_opts;
967
968 switch (hp->h_cmd) {
969 case SBD_CMD_GETNCM:
970 scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
971 break;
972 default:
973 break;
974 }
975
976 if (ddi_copyout((void *)&scmd32, (void *)hp->h_iap,
977 sizeof (sbd_cmd32_t), hp->h_mode)) {
978 cmn_err(CE_WARN,
979 "%s: (32bit) failed to copyout "
980 "sbdcmd-struct", f);
981 return (EFAULT);
982 }
983 } else
984 #endif /* _MULTI_DATAMODEL */
985 if (ddi_copyout((void *)scp, (void *)hp->h_iap,
986 sizeof (sbd_cmd_t), hp->h_mode) != 0) {
987 cmn_err(CE_WARN,
988 "%s: failed to copyout sbdcmd-struct", f);
989 return (EFAULT);
990 }
991
992 return (0);
993 }
994
995 static int
996 dr_copyout_errs(dr_handle_t *hp)
997 {
998 static fn_t f = "dr_copyout_errs";
999
1000 if (hp->h_err == NULL)
1001 return (0);
1002
1003 if (hp->h_err->e_code) {
1004 PR_ALL("%s: error %d %s",
1005 f, hp->h_err->e_code, hp->h_err->e_rsc);
1006 }
1007
1008 #ifdef _MULTI_DATAMODEL
1009 if (ddi_model_convert_from(hp->h_mode & FMODELS) == DDI_MODEL_ILP32) {
1010 sbd_error32_t *serr32p;
1011
1012 serr32p = GETSTRUCT(sbd_error32_t, 1);
1013
1014 serr32p->e_code = hp->h_err->e_code;
1015 bcopy(&hp->h_err->e_rsc[0], &serr32p->e_rsc[0],
1016 MAXPATHLEN);
1017 if (ddi_copyout((void *)serr32p,
1018 (void *)&((sbd_ioctl_arg32_t *)hp->h_iap)->i_err,
1019 sizeof (sbd_error32_t), hp->h_mode)) {
1020 cmn_err(CE_WARN,
1021 "%s: (32bit) failed to copyout", f);
1022 return (EFAULT);
1023 }
1024 FREESTRUCT(serr32p, sbd_error32_t, 1);
1025 } else
1026 #endif /* _MULTI_DATAMODEL */
1027 if (ddi_copyout((void *)hp->h_err,
1028 (void *)&hp->h_iap->i_err,
1029 sizeof (sbd_error_t), hp->h_mode)) {
1030 cmn_err(CE_WARN,
1031 "%s: failed to copyout", f);
1032 return (EFAULT);
1033 }
1034
1035 sbd_err_clear(&hp->h_err);
1036
1037 return (0);
1038
1039 }
1040
1041 /*
1042  * The pre-op entry point must call sbd_err_set_c(), if needed.
1043  * A non-zero return value indicates failure.
1044 */
1045 static int
1046 dr_pre_op(dr_handle_t *hp)
1047 {
1048 int rv = 0, t;
1049 int cmd, serr = 0;
1050 dr_devset_t devset;
1051 dr_board_t *bp = hp->h_bd;
1052 dr_handle_t *shp = hp;
1053 static fn_t f = "dr_pre_op";
1054
1055 cmd = hp->h_cmd;
1056 devset = shp->h_devset;
1057
1058 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1059
1060 devset = DEVSET_AND(devset, DR_DEVS_PRESENT(bp));
1061 hp->h_err = drmach_pre_op(cmd, bp->b_id, &hp->h_opts, &devset);
1062 if (hp->h_err != NULL) {
1063 PR_ALL("drmach_pre_op failed for cmd %s(%d)\n",
1064 SBD_CMD_STR(cmd), cmd);
1065 return (-1);
1066 }
1067
1068 /*
1069 * Check for valid state transitions.
1070 */
1071 if ((t = CMD2INDEX(cmd)) != -1) {
1072 struct dr_state_trans *transp;
1073 int state_err;
1074
1075 transp = &dr_state_transition[t];
1076 ASSERT(transp->x_cmd == cmd);
1077
1078 state_err = dr_check_transition(bp, &devset, transp, cmd);
1079
1080 if (state_err < 0) {
1081 /*
1082 * Invalidate device.
1083 */
1084 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, NULL);
1085 serr = -1;
1086 PR_ALL("%s: invalid devset (0x%x)\n",
1087 f, (uint_t)devset);
1088 } else if (state_err != 0) {
1089 /*
1090 * State transition is not a valid one.
1091 */
1092 dr_op_err(CE_IGNORE, hp,
1093 transp->x_op[state_err].x_err, NULL);
1094
1095 serr = transp->x_op[state_err].x_rv;
1096
1097 PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
1098 f, state_str[state_err], state_err,
1099 SBD_CMD_STR(cmd), cmd);
1100 } else {
1101 shp->h_devset = devset;
1102 }
1103 }
1104
1105 if (serr) {
1106 rv = -1;
1107 }
1108
1109 return (rv);
1110 }
1111
1112 static int
1113 dr_post_op(dr_handle_t *hp, int rv)
1114 {
1115 int cmd;
1116 sbd_error_t *err;
1117 dr_board_t *bp = hp->h_bd;
1118 static fn_t f = "dr_post_op";
1119
1120 cmd = hp->h_cmd;
1121
1122 PR_ALL("%s (cmd = %s)...\n", f, SBD_CMD_STR(cmd));
1123
1124 err = drmach_post_op(cmd, bp->b_id, &hp->h_opts, rv);
1125 if (err != NULL) {
1126 PR_ALL("drmach_post_op failed for cmd %s(%d)\n",
1127 SBD_CMD_STR(cmd), cmd);
1128 if (rv == 0) {
1129 ASSERT(hp->h_err == NULL);
1130 hp->h_err = err;
1131 rv = -1;
1132 } else if (hp->h_err == NULL) {
1133 hp->h_err = err;
1134 } else {
1135 sbd_err_clear(&err);
1136 }
1137 }
1138
1139 return (rv);
1140 }
1141
1142 static int
1143 dr_exec_op(dr_handle_t *hp)
1144 {
1145 int rv = 0;
1146 static fn_t f = "dr_exec_op";
1147
1148 /* errors should have been caught by now */
1149 ASSERT(hp->h_err == NULL);
1150
1151 switch (hp->h_cmd) {
1152 case SBD_CMD_ASSIGN:
1153 dr_assign_board(hp);
1154 break;
1155
1156 case SBD_CMD_UNASSIGN:
1157 dr_unassign_board(hp);
1158 break;
1159
1160 case SBD_CMD_POWEROFF:
1161 dr_poweroff_board(hp);
1162 break;
1163
1164 case SBD_CMD_POWERON:
1165 dr_poweron_board(hp);
1166 break;
1167
1168 case SBD_CMD_TEST:
1169 dr_test_board(hp);
1170 break;
1171
1172 case SBD_CMD_CONNECT:
1173 dr_connect(hp);
1174 break;
1175
1176 case SBD_CMD_CONFIGURE:
1177 dr_dev_configure(hp);
1178 break;
1179
1180 case SBD_CMD_UNCONFIGURE:
1181 dr_dev_release(hp);
1182 if (hp->h_err == NULL)
1183 rv = dr_dev_unconfigure(hp);
1184 else
1185 dr_dev_cancel(hp);
1186 break;
1187
1188 case SBD_CMD_DISCONNECT:
1189 rv = dr_disconnect(hp);
1190 break;
1191
1192 case SBD_CMD_STATUS:
1193 rv = dr_dev_status(hp);
1194 break;
1195
1196 case SBD_CMD_GETNCM:
1197 hp->h_sbdcmd.cmd_getncm.g_ncm = dr_get_ncm(hp);
1198 rv = dr_copyout_iocmd(hp);
1199 break;
1200
1201 case SBD_CMD_PASSTHRU:
1202 rv = dr_pt_ioctl(hp);
1203 break;
1204
1205 default:
1206 cmn_err(CE_WARN,
1207 "%s: unknown command (%d)",
1208 f, hp->h_cmd);
1209 break;
1210 }
1211
1212 if (hp->h_err != NULL) {
1213 rv = -1;
1214 }
1215
1216 return (rv);
1217 }
1218
1219 static void
1220 dr_assign_board(dr_handle_t *hp)
1221 {
1222 dr_board_t *bp = hp->h_bd;
1223
1224 hp->h_err = drmach_board_assign(bp->b_num, &bp->b_id);
1225 if (hp->h_err == NULL) {
1226 bp->b_assigned = 1;
1227 }
1228 }
1229
1230 static void
1231 dr_unassign_board(dr_handle_t *hp)
1232 {
1233 dr_board_t *bp = hp->h_bd;
1234
1235 /*
1236 * Block out status during unassign.
1237 	 * Not doing cv_wait_sig here, as the starfire SSP software
1238 	 * ignores unassign failures and removes the board from the
1239 	 * domain mask, causing a system panic.
1240 * TODO: Change cv_wait to cv_wait_sig when SSP software
1241 * handles unassign failure.
1242 */
1243 dr_lock_status(bp);
1244
1245 hp->h_err = drmach_board_unassign(bp->b_id);
1246 if (hp->h_err == NULL) {
1247 /*
1248 * clear drmachid_t handle; not valid after board unassign
1249 */
1250 bp->b_id = 0;
1251 bp->b_assigned = 0;
1252 }
1253
1254 dr_unlock_status(bp);
1255 }
1256
1257 static void
1258 dr_poweron_board(dr_handle_t *hp)
1259 {
1260 dr_board_t *bp = hp->h_bd;
1261
1262 hp->h_err = drmach_board_poweron(bp->b_id);
1263 }
1264
1265 static void
1266 dr_poweroff_board(dr_handle_t *hp)
1267 {
1268 dr_board_t *bp = hp->h_bd;
1269
1270 hp->h_err = drmach_board_poweroff(bp->b_id);
1271 }
1272
1273 static void
1274 dr_test_board(dr_handle_t *hp)
1275 {
1276 dr_board_t *bp = hp->h_bd;
1277 hp->h_err = drmach_board_test(bp->b_id, &hp->h_opts,
1278 dr_cmd_flags(hp) & SBD_FLAG_FORCE);
1279 }
1280
1281 /*
1282 * Create and populate the component nodes for a board. Assumes that the
1283 * devlists for the board have been initialized.
1284 */
1285 static void
1286 dr_make_comp_nodes(dr_board_t *bp)
1287 {
1288 int i;
1289
1290 /*
1291 * Make nodes for the individual components on the board.
1292 	 * First we need to initialize the memory unit data structures
1293 	 * of the board structure.
1294 */
1295 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1296 dr_mem_unit_t *mp;
1297
1298 mp = dr_get_mem_unit(bp, i);
1299 dr_init_mem_unit(mp);
1300 }
1301
1302 /*
1303 * Initialize cpu unit data structures.
1304 */
1305 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1306 dr_cpu_unit_t *cp;
1307
1308 cp = dr_get_cpu_unit(bp, i);
1309 dr_init_cpu_unit(cp);
1310 }
1311
1312 /*
1313 * Initialize io unit data structures.
1314 */
1315 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1316 dr_io_unit_t *ip;
1317
1318 ip = dr_get_io_unit(bp, i);
1319 dr_init_io_unit(ip);
1320 }
1321
1322 dr_board_transition(bp, DR_STATE_CONNECTED);
1323
1324 bp->b_rstate = SBD_STAT_CONNECTED;
1325 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1326 bp->b_cond = SBD_COND_OK;
1327 (void) drv_getparm(TIME, (void *)&bp->b_time);
1328
1329 }
1330
1331 /*
1332 * Only do work if called to operate on an entire board
1333 * which doesn't already have components present.
1334 */
1335 static void
1336 dr_connect(dr_handle_t *hp)
1337 {
1338 dr_board_t *bp = hp->h_bd;
1339 static fn_t f = "dr_connect";
1340
1341 PR_ALL("%s...\n", f);
1342
1343 if (DR_DEVS_PRESENT(bp)) {
1344 /*
1345 * Board already has devices present.
1346 */
1347 PR_ALL("%s: devices already present (" DEVSET_FMT_STR ")\n",
1348 f, DEVSET_FMT_ARG(DR_DEVS_PRESENT(bp)));
1349 return;
1350 }
1351
1352 hp->h_err = drmach_board_connect(bp->b_id, &hp->h_opts);
1353 if (hp->h_err)
1354 return;
1355
1356 hp->h_err = dr_init_devlists(bp);
1357 if (hp->h_err)
1358 return;
1359 else if (bp->b_ndev == 0) {
1360 dr_op_err(CE_WARN, hp, ESBD_EMPTY_BD, bp->b_path);
1361 return;
1362 } else {
1363 dr_make_comp_nodes(bp);
1364 return;
1365 }
1366 /*NOTREACHED*/
1367 }
1368
1369 static int
1370 dr_disconnect(dr_handle_t *hp)
1371 {
1372 int i;
1373 dr_devset_t devset;
1374 dr_board_t *bp = hp->h_bd;
1375 static fn_t f = "dr_disconnect";
1376
1377 PR_ALL("%s...\n", f);
1378
1379 /*
1380 * Only devices which are present, but
1381 * unattached can be disconnected.
1382 */
1383 devset = hp->h_devset & DR_DEVS_PRESENT(bp) &
1384 DR_DEVS_UNATTACHED(bp);
1385
1386 if ((devset == 0) && DR_DEVS_PRESENT(bp)) {
1387 dr_op_err(CE_IGNORE, hp, ESBD_EMPTY_BD, bp->b_path);
1388 return (0);
1389 }
1390
1391 /*
1392 * Block out status during disconnect.
1393 */
1394 mutex_enter(&bp->b_slock);
1395 while (bp->b_sflags & DR_BSLOCK) {
1396 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1397 mutex_exit(&bp->b_slock);
1398 return (EINTR);
1399 }
1400 }
1401 bp->b_sflags |= DR_BSLOCK;
1402 mutex_exit(&bp->b_slock);
1403
1404 hp->h_err = drmach_board_disconnect(bp->b_id, &hp->h_opts);
1405 if (hp->h_err && hp->h_err->e_code == EX86_WALK_DEPENDENCY) {
1406 /*
1407 		 * Other boards have a dependency on this board. No device nodes
1408 		 * have been destroyed, so keep the current board status.
1409 */
1410 goto disconnect_done;
1411 }
1412
1413 DR_DEVS_DISCONNECT(bp, devset);
1414
1415 ASSERT((DR_DEVS_ATTACHED(bp) & devset) == 0);
1416
1417 /*
1418 * Update per-device state transitions.
1419 */
1420 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
1421 dr_cpu_unit_t *cp;
1422
1423 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
1424 continue;
1425
1426 cp = dr_get_cpu_unit(bp, i);
1427 if (dr_disconnect_cpu(cp) == 0)
1428 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
1429 else if (cp->sbc_cm.sbdev_error != NULL)
1430 DRERR_SET_C(&hp->h_err, &cp->sbc_cm.sbdev_error);
1431
1432 ASSERT(cp->sbc_cm.sbdev_error == NULL);
1433 }
1434
1435 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
1436 dr_mem_unit_t *mp;
1437
1438 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
1439 continue;
1440
1441 mp = dr_get_mem_unit(bp, i);
1442 if (dr_disconnect_mem(mp) == 0)
1443 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
1444 else if (mp->sbm_cm.sbdev_error != NULL)
1445 DRERR_SET_C(&hp->h_err, &mp->sbm_cm.sbdev_error);
1446
1447 ASSERT(mp->sbm_cm.sbdev_error == NULL);
1448 }
1449
1450 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
1451 dr_io_unit_t *ip;
1452
1453 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
1454 continue;
1455
1456 ip = dr_get_io_unit(bp, i);
1457 if (dr_disconnect_io(ip) == 0)
1458 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
1459 else if (ip->sbi_cm.sbdev_error != NULL)
1460 DRERR_SET_C(&hp->h_err, &ip->sbi_cm.sbdev_error);
1461
1462 ASSERT(ip->sbi_cm.sbdev_error == NULL);
1463 }
1464
1465 if (hp->h_err) {
1466 /*
1467 * For certain errors, drmach_board_disconnect will mark
1468 * the board as unusable; in these cases the devtree must
1469 * be purged so that status calls will succeed.
1470 * XXX
1471 * This implementation checks for discrete error codes -
1472 * someday, the i/f to drmach_board_disconnect should be
1473 * changed to avoid the e_code testing.
1474 */
1475 if (hp->h_err->e_code == EX86_DEPROBE) {
1476 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1477 bp->b_busy = 0;
1478 (void) drv_getparm(TIME, (void *)&bp->b_time);
1479
1480 if (drmach_board_deprobe(bp->b_id))
1481 goto disconnect_done;
1482 else
1483 bp->b_ndev = 0;
1484 }
1485 }
1486
1487 /*
1488 	 * Once all the components on a board have been disconnected,
1489 * the board's state can transition to disconnected and
1490 * we can allow the deprobe to take place.
1491 */
1492 if (hp->h_err == NULL && DR_DEVS_PRESENT(bp) == 0) {
1493 dr_board_transition(bp, DR_STATE_OCCUPIED);
1494 bp->b_rstate = SBD_STAT_DISCONNECTED;
1495 bp->b_ostate = SBD_STAT_UNCONFIGURED;
1496 bp->b_busy = 0;
1497 (void) drv_getparm(TIME, (void *)&bp->b_time);
1498
1499 hp->h_err = drmach_board_deprobe(bp->b_id);
1500
1501 if (hp->h_err == NULL) {
1502 bp->b_ndev = 0;
1503 dr_board_transition(bp, DR_STATE_EMPTY);
1504 bp->b_rstate = SBD_STAT_EMPTY;
1505 (void) drv_getparm(TIME, (void *)&bp->b_time);
1506 }
1507 }
1508
1509 disconnect_done:
1510 dr_unlock_status(bp);
1511
1512 return (0);
1513 }
1514
1515 /*
1516 * Check if a particular device is a valid target of the current
1517 * operation. Return 1 if it is a valid target, and 0 otherwise.
1518 */
1519 static int
1520 dr_dev_is_target(dr_dev_unit_t *dp, int present_only, uint_t uset)
1521 {
1522 dr_common_unit_t *cp;
1523 int is_present;
1524 int is_attached;
1525
1526 cp = &dp->du_common;
1527
1528 /* check if the user requested this device */
1529 if ((uset & (1 << cp->sbdev_unum)) == 0) {
1530 return (0);
1531 }
1532
1533 is_present = DR_DEV_IS_PRESENT(cp) ? 1 : 0;
1534 is_attached = DR_DEV_IS_ATTACHED(cp) ? 1 : 0;
1535
1536 /*
1537 * If the present_only flag is set, a valid target
1538 * must be present but not attached. Otherwise, it
1539 * must be both present and attached.
1540 */
1541 if (is_present && (present_only ^ is_attached)) {
1542 /* sanity check */
1543 ASSERT(cp->sbdev_id != (drmachid_t)0);
1544
1545 return (1);
1546 }
1547
1548 return (0);
1549 }
1550
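/*
 * Build the list of common-unit pointers targeted by the current
 * operation.  The allocation length is stored in the slot immediately
 * preceding the returned list pointer so that dr_dev_clean_up() can
 * free the buffer later.  For CPUs, all cores of a CMP are placed
 * adjacently so they are sequenced together during the operation.
 */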
1551 static void
1552 dr_dev_make_list(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1553 dr_common_unit_t ***devlist, int *devnum)
1554 {
1555 dr_board_t *bp = hp->h_bd;
1556 int unum;
1557 int nunits;
1558 uint_t uset;
1559 int len;
1560 dr_common_unit_t **list, **wp;
1561
1562 switch (type) {
1563 case SBD_COMP_CPU:
1564 nunits = MAX_CPU_UNITS_PER_BOARD;
1565 break;
1566 case SBD_COMP_MEM:
1567 nunits = MAX_MEM_UNITS_PER_BOARD;
1568 break;
1569 case SBD_COMP_IO:
1570 nunits = MAX_IO_UNITS_PER_BOARD;
1571 break;
1572 default:
1573 nunits = 0;
1574 /* catch this in debug kernels */
1575 ASSERT(0);
1576 break;
1577 }
1578
1579 /* allocate list storage. */
1580 len = sizeof (dr_common_unit_t *) * (nunits + 1);
1581 list = kmem_zalloc(len, KM_SLEEP);
1582
1583 /* record length of storage in first element */
1584 *list++ = (dr_common_unit_t *)(uintptr_t)len;
1585
1586 /* get bit array signifying which units are to be involved */
1587 uset = DEVSET_GET_UNITSET(hp->h_devset, type);
1588
1589 /*
1590 * Adjust the loop count for CPU devices since all cores
1591 * in a CMP will be examined in a single iteration.
1592 */
1593 if (type == SBD_COMP_CPU) {
1594 nunits = MAX_CMP_UNITS_PER_BOARD;
1595 }
1596
1597 /* populate list */
1598 for (wp = list, unum = 0; unum < nunits; unum++) {
1599 dr_dev_unit_t *dp;
1600 int core;
1601 int cunum;
1602
1603 dp = DR_GET_BOARD_DEVUNIT(bp, type, unum);
1604 if (dr_dev_is_target(dp, present_only, uset)) {
1605 *wp++ = &dp->du_common;
1606 }
1607
1608 /* further processing is only required for CPUs */
1609 if (type != SBD_COMP_CPU) {
1610 continue;
1611 }
1612
1613 /*
1614 * Add any additional cores from the current CPU
1615 * device. This is to ensure that all the cores
1616 * are grouped together in the device list, and
1617 * consequently sequenced together during the actual
1618 * operation.
1619 */
1620 for (core = 1; core < MAX_CORES_PER_CMP; core++) {
1621 cunum = DR_CMP_CORE_UNUM(unum, core);
1622 dp = DR_GET_BOARD_DEVUNIT(bp, type, cunum);
1623
1624 if (dr_dev_is_target(dp, present_only, uset)) {
1625 *wp++ = &dp->du_common;
1626 }
1627 }
1628 }
1629
1630 /* calculate number of units in list, return result and list pointer */
1631 *devnum = wp - list;
1632 *devlist = list;
1633 }
1634
1635 static void
1636 dr_dev_clean_up(dr_handle_t *hp, dr_common_unit_t **list, int devnum)
1637 {
1638 int len;
1639 int n = 0;
1640 dr_common_unit_t *cp, **rp = list;
1641
1642 /*
1643 * move first encountered unit error to handle if handle
1644 * does not yet have a recorded error.
1645 */
1646 if (hp->h_err == NULL) {
1647 while (n++ < devnum) {
1648 cp = *rp++;
1649 if (cp->sbdev_error != NULL) {
1650 hp->h_err = cp->sbdev_error;
1651 cp->sbdev_error = NULL;
1652 break;
1653 }
1654 }
1655 }
1656
1657 /* free remaining unit errors */
1658 while (n++ < devnum) {
1659 cp = *rp++;
1660 if (cp->sbdev_error != NULL) {
1661 sbd_err_clear(&cp->sbdev_error);
1662 cp->sbdev_error = NULL;
1663 }
1664 }
1665
1666 /* free list */
1667 list -= 1;
1668 len = (int)(uintptr_t)list[0];
1669 kmem_free(list, len);
1670 }
1671
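/*
 * Generic per-type walker: build the device list, invoke pre_op once,
 * apply op to each unit, then run post_op and board_op, and finally
 * let dr_dev_clean_up() propagate the first unit error (if any) back
 * to the handle.  A non-zero return from pre_op skips the remaining
 * callbacks.
 */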
1672 static int
1673 dr_dev_walk(dr_handle_t *hp, sbd_comp_type_t type, int present_only,
1674 int (*pre_op)(dr_handle_t *, dr_common_unit_t **, int),
1675 void (*op)(dr_handle_t *, dr_common_unit_t *),
1676 int (*post_op)(dr_handle_t *, dr_common_unit_t **, int),
1677 void (*board_op)(dr_handle_t *, dr_common_unit_t **, int))
1678 {
1679 int devnum, rv;
1680 dr_common_unit_t **devlist;
1681
1682 dr_dev_make_list(hp, type, present_only, &devlist, &devnum);
1683
1684 rv = 0;
1685 if (devnum > 0) {
1686 rv = (*pre_op)(hp, devlist, devnum);
1687 if (rv == 0) {
1688 int n;
1689
1690 for (n = 0; n < devnum; n++)
1691 (*op)(hp, devlist[n]);
1692
1693 rv = (*post_op)(hp, devlist, devnum);
1694
1695 (*board_op)(hp, devlist, devnum);
1696 }
1697 }
1698
1699 dr_dev_clean_up(hp, devlist, devnum);
1700 return (rv);
1701 }
1702
1703 /*ARGSUSED*/
1704 static int
1705 dr_dev_noop(dr_handle_t *hp, dr_common_unit_t **devlist, int devnum)
1706 {
1707 return (0);
1708 }
1709
1710 static void
1711 dr_attach_update_state(dr_handle_t *hp,
1712 dr_common_unit_t **devlist, int devnum)
1713 {
1714 dr_board_t *bp = hp->h_bd;
1715 int i;
1716 dr_devset_t devs_unattached, devs_present;
1717 static fn_t f = "dr_attach_update_state";
1718
1719 for (i = 0; i < devnum; i++) {
1720 dr_common_unit_t *cp = devlist[i];
1721
1722 if (dr_check_unit_attached(cp) == -1) {
1723 PR_ALL("%s: ERROR %s not attached\n",
1724 f, cp->sbdev_path);
1725 continue;
1726 }
1727
1728 DR_DEV_SET_ATTACHED(cp);
1729
1730 dr_device_transition(cp, DR_STATE_CONFIGURED);
1731 cp->sbdev_cond = SBD_COND_OK;
1732 }
1733
1734 devs_present = DR_DEVS_PRESENT(bp);
1735 devs_unattached = DR_DEVS_UNATTACHED(bp);
1736
1737 switch (bp->b_state) {
1738 case DR_STATE_CONNECTED:
1739 case DR_STATE_UNCONFIGURED:
1740 ASSERT(devs_present);
1741
1742 if (devs_unattached == 0) {
1743 /*
1744 * All devices finally attached.
1745 */
1746 dr_board_transition(bp, DR_STATE_CONFIGURED);
1747 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1748 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1749 hp->h_bd->b_cond = SBD_COND_OK;
1750 hp->h_bd->b_busy = 0;
1751 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1752 } else if (devs_present != devs_unattached) {
1753 /*
1754 * Only some devices are fully attached.
1755 */
1756 dr_board_transition(bp, DR_STATE_PARTIAL);
1757 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1758 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1759 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1760 }
1761 break;
1762
1763 case DR_STATE_PARTIAL:
1764 ASSERT(devs_present);
1765 /*
1766 * All devices finally attached.
1767 */
1768 if (devs_unattached == 0) {
1769 dr_board_transition(bp, DR_STATE_CONFIGURED);
1770 hp->h_bd->b_rstate = SBD_STAT_CONNECTED;
1771 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
1772 hp->h_bd->b_cond = SBD_COND_OK;
1773 hp->h_bd->b_busy = 0;
1774 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1775 }
1776 break;
1777
1778 default:
1779 break;
1780 }
1781 }
1782
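/*
 * Configure (attach) the requested devices, walking CPUs, then
 * memory, then I/O.  present_only is set, so only units that are
 * present but not yet attached are considered.
 */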
1783 static void
1784 dr_dev_configure(dr_handle_t *hp)
1785 {
1786 int rv;
1787
1788 rv = dr_dev_walk(hp, SBD_COMP_CPU, 1,
1789 dr_pre_attach_cpu,
1790 dr_attach_cpu,
1791 dr_post_attach_cpu,
1792 dr_attach_update_state);
1793
1794 if (rv >= 0) {
1795 rv = dr_dev_walk(hp, SBD_COMP_MEM, 1,
1796 dr_pre_attach_mem,
1797 dr_attach_mem,
1798 dr_post_attach_mem,
1799 dr_attach_update_state);
1800 }
1801
1802 if (rv >= 0) {
1803 (void) dr_dev_walk(hp, SBD_COMP_IO, 1,
1804 dr_pre_attach_io,
1805 dr_attach_io,
1806 dr_post_attach_io,
1807 dr_attach_update_state);
1808 }
1809 }
1810
1811 static void
1812 dr_release_update_state(dr_handle_t *hp,
1813 dr_common_unit_t **devlist, int devnum)
1814 {
1815 _NOTE(ARGUNUSED(devlist))
1816 _NOTE(ARGUNUSED(devnum))
1817
1818 dr_board_t *bp = hp->h_bd;
1819
1820 /*
1821 	 * If the board is not yet in the RELEASE state and all attached
1822 	 * components have now been released, transfer it to the RELEASE state.
1823 */
1824 if ((bp->b_state != DR_STATE_RELEASE) &&
1825 (DR_DEVS_RELEASED(bp) == DR_DEVS_ATTACHED(bp))) {
1826 dr_board_transition(bp, DR_STATE_RELEASE);
1827 hp->h_bd->b_busy = 1;
1828 }
1829 }
1830
1831 /* called by dr_release_done [below] and dr_release_mem_done [dr_mem.c] */
1832 int
1833 dr_release_dev_done(dr_common_unit_t *cp)
1834 {
1835 if (cp->sbdev_state == DR_STATE_RELEASE) {
1836 ASSERT(DR_DEV_IS_RELEASED(cp));
1837
1838 DR_DEV_SET_UNREFERENCED(cp);
1839
1840 dr_device_transition(cp, DR_STATE_UNREFERENCED);
1841
1842 return (0);
1843 } else {
1844 return (-1);
1845 }
1846 }
1847
1848 static void
1849 dr_release_done(dr_handle_t *hp, dr_common_unit_t *cp)
1850 {
1851 _NOTE(ARGUNUSED(hp))
1852
1853 dr_board_t *bp;
1854 static fn_t f = "dr_release_done";
1855
1856 PR_ALL("%s...\n", f);
1857
1858 /* get board pointer & sanity check */
1859 bp = cp->sbdev_bp;
1860 ASSERT(bp == hp->h_bd);
1861
1862 /*
1863 * Transfer the device which just completed its release
1864 * to the UNREFERENCED state.
1865 */
1866 switch (cp->sbdev_type) {
1867 case SBD_COMP_MEM:
1868 dr_release_mem_done(cp);
1869 break;
1870
1871 default:
1872 DR_DEV_SET_RELEASED(cp);
1873
1874 dr_device_transition(cp, DR_STATE_RELEASE);
1875
1876 (void) dr_release_dev_done(cp);
1877 break;
1878 }
1879
1880 /*
1881 	 * If this board is in the RELEASE state and everything that
1882 	 * was released is now unreferenced, then transfer the board
1883 	 * to the UNREFERENCED state.
1884 */
1885 if ((bp->b_state == DR_STATE_RELEASE) &&
1886 (DR_DEVS_RELEASED(bp) == DR_DEVS_UNREFERENCED(bp))) {
1887 dr_board_transition(bp, DR_STATE_UNREFERENCED);
1888 bp->b_busy = 1;
1889 (void) drv_getparm(TIME, (void *)&bp->b_time);
1890 }
1891 }
1892
1893 static void
1894 dr_dev_release_mem(dr_handle_t *hp, dr_common_unit_t *dv)
1895 {
1896 dr_release_mem(dv);
1897 dr_release_done(hp, dv);
1898 }
1899
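/*
 * Release the requested devices (CPUs, then memory, then I/O) prior
 * to unconfiguration.  The board is marked busy for the duration; on
 * failure b_busy is cleared here, otherwise it remains set until
 * dr_detach_update_state() clears it.
 */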
1900 static void
1901 dr_dev_release(dr_handle_t *hp)
1902 {
1903 int rv;
1904
1905 hp->h_bd->b_busy = 1;
1906
1907 rv = dr_dev_walk(hp, SBD_COMP_CPU, 0,
1908 dr_pre_release_cpu,
1909 dr_release_done,
1910 dr_dev_noop,
1911 dr_release_update_state);
1912
1913 if (rv >= 0) {
1914 rv = dr_dev_walk(hp, SBD_COMP_MEM, 0,
1915 dr_pre_release_mem,
1916 dr_dev_release_mem,
1917 dr_dev_noop,
1918 dr_release_update_state);
1919 }
1920
1921 if (rv >= 0) {
1922 rv = dr_dev_walk(hp, SBD_COMP_IO, 0,
1923 dr_pre_release_io,
1924 dr_release_done,
1925 dr_dev_noop,
1926 dr_release_update_state);
1927
1928 }
1929
1930 if (rv < 0)
1931 hp->h_bd->b_busy = 0;
1932 /* else, b_busy will be cleared in dr_detach_update_state() */
1933 }
1934
1935 static void
1936 dr_detach_update_state(dr_handle_t *hp,
1937 dr_common_unit_t **devlist, int devnum)
1938 {
1939 dr_board_t *bp = hp->h_bd;
1940 int i;
1941 dr_state_t bstate;
1942 static fn_t f = "dr_detach_update_state";
1943
1944 for (i = 0; i < devnum; i++) {
1945 dr_common_unit_t *cp = devlist[i];
1946
1947 if (dr_check_unit_attached(cp) >= 0) {
1948 /*
1949 * Device is still attached probably due
1950 * to an error. Need to keep track of it.
1951 */
1952 PR_ALL("%s: ERROR %s not detached\n",
1953 f, cp->sbdev_path);
1954
1955 continue;
1956 }
1957
1958 DR_DEV_CLR_ATTACHED(cp);
1959 DR_DEV_CLR_RELEASED(cp);
1960 DR_DEV_CLR_UNREFERENCED(cp);
1961 dr_device_transition(cp, DR_STATE_UNCONFIGURED);
1962 }
1963
1964 bstate = bp->b_state;
1965 if (bstate != DR_STATE_UNCONFIGURED) {
1966 if (DR_DEVS_PRESENT(bp) == DR_DEVS_UNATTACHED(bp)) {
1967 /*
1968 * All devices are finally detached.
1969 */
1970 dr_board_transition(bp, DR_STATE_UNCONFIGURED);
1971 hp->h_bd->b_ostate = SBD_STAT_UNCONFIGURED;
1972 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1973 } else if ((bp->b_state != DR_STATE_PARTIAL) &&
1974 (DR_DEVS_ATTACHED(bp) !=
1975 DR_DEVS_PRESENT(bp))) {
1976 /*
1977 * Some devices remain attached.
1978 */
1979 dr_board_transition(bp, DR_STATE_PARTIAL);
1980 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
1981 }
1982
1983 if ((hp->h_devset & DR_DEVS_UNATTACHED(bp)) == hp->h_devset)
1984 hp->h_bd->b_busy = 0;
1985 }
1986 }
1987
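/*
 * Detach the released devices.  Status is blocked out (DR_BSLOCK)
 * while the I/O units are detached; CPUs and memory are then detached
 * with the status lock dropped.
 */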
1988 static int
1989 dr_dev_unconfigure(dr_handle_t *hp)
1990 {
1991 dr_board_t *bp = hp->h_bd;
1992
1993 /*
1994 * Block out status during IO unconfig.
1995 */
1996 mutex_enter(&bp->b_slock);
1997 while (bp->b_sflags & DR_BSLOCK) {
1998 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
1999 mutex_exit(&bp->b_slock);
2000 return (EINTR);
2001 }
2002 }
2003 bp->b_sflags |= DR_BSLOCK;
2004 mutex_exit(&bp->b_slock);
2005
2006 (void) dr_dev_walk(hp, SBD_COMP_IO, 0,
2007 dr_pre_detach_io,
2008 dr_detach_io,
2009 dr_post_detach_io,
2010 dr_detach_update_state);
2011
2012 dr_unlock_status(bp);
2013
2014 (void) dr_dev_walk(hp, SBD_COMP_CPU, 0,
2015 dr_pre_detach_cpu,
2016 dr_detach_cpu,
2017 dr_post_detach_cpu,
2018 dr_detach_update_state);
2019
2020 (void) dr_dev_walk(hp, SBD_COMP_MEM, 0,
2021 dr_pre_detach_mem,
2022 dr_detach_mem,
2023 dr_post_detach_mem,
2024 dr_detach_update_state);
2025
2026 return (0);
2027 }
2028
2029 static void
2030 dr_dev_cancel(dr_handle_t *hp)
2031 {
2032 int i;
2033 dr_devset_t devset;
2034 dr_board_t *bp = hp->h_bd;
2035 static fn_t f = "dr_dev_cancel";
2036
2037 PR_ALL("%s...\n", f);
2038
2039 /*
2040 * Only devices which have been "released" are
2041 * subject to cancellation.
2042 */
2043 devset = hp->h_devset & DR_DEVS_RELEASED(bp);
2044
2045 /*
2046 * Nothing to do for CPUs or IO other than change back
2047 * their state.
2048 */
2049 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2050 dr_cpu_unit_t *cp;
2051 dr_state_t nstate;
2052
2053 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
2054 continue;
2055
2056 cp = dr_get_cpu_unit(bp, i);
2057 if (dr_cancel_cpu(cp) == 0)
2058 nstate = DR_STATE_CONFIGURED;
2059 else
2060 nstate = DR_STATE_FATAL;
2061
2062 dr_device_transition(&cp->sbc_cm, nstate);
2063 }
2064
2065 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2066 dr_io_unit_t *ip;
2067
2068 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
2069 continue;
2070 ip = dr_get_io_unit(bp, i);
2071 dr_device_transition(&ip->sbi_cm, DR_STATE_CONFIGURED);
2072 }
2073 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2074 dr_mem_unit_t *mp;
2075 dr_state_t nstate;
2076
2077 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
2078 continue;
2079
2080 mp = dr_get_mem_unit(bp, i);
2081 if (dr_cancel_mem(mp) == 0)
2082 nstate = DR_STATE_CONFIGURED;
2083 else
2084 nstate = DR_STATE_FATAL;
2085
2086 dr_device_transition(&mp->sbm_cm, nstate);
2087 }
2088
2089 PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
2090
2091 DR_DEVS_CANCEL(bp, devset);
2092
2093 if (DR_DEVS_RELEASED(bp) == 0) {
2094 dr_state_t new_state;
2095 /*
2096 * If the board no longer has any released devices
2097 		 * then transfer it back to the CONFIG/PARTIAL state.
2098 */
2099 if (DR_DEVS_ATTACHED(bp) == DR_DEVS_PRESENT(bp))
2100 new_state = DR_STATE_CONFIGURED;
2101 else
2102 new_state = DR_STATE_PARTIAL;
2103 if (bp->b_state != new_state) {
2104 dr_board_transition(bp, new_state);
2105 }
2106 hp->h_bd->b_ostate = SBD_STAT_CONFIGURED;
2107 hp->h_bd->b_busy = 0;
2108 (void) drv_getparm(TIME, (void *)&hp->h_bd->b_time);
2109 }
2110 }
2111
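/*
 * Gather board and component status.  A scratch buffer sized for the
 * worst-case component count is allocated locally; the number of
 * per-component entries returned to the caller is bounded by the
 * caller-supplied buffer size (s_nbytes), with separate sizing for
 * ILP32 callers.
 */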
2112 static int
2113 dr_dev_status(dr_handle_t *hp)
2114 {
2115 int nstat, mode, ncm, sz, pbsz, pnstat;
2116 dr_handle_t *shp;
2117 dr_devset_t devset = 0;
2118 sbd_stat_t *dstatp = NULL;
2119 sbd_dev_stat_t *devstatp;
2120 dr_board_t *bp;
2121 drmach_status_t pstat;
2122 int rv = 0;
2123
2124 #ifdef _MULTI_DATAMODEL
2125 int sz32 = 0;
2126 #endif /* _MULTI_DATAMODEL */
2127
2128 static fn_t f = "dr_dev_status";
2129
2130 PR_ALL("%s...\n", f);
2131
2132 mode = hp->h_mode;
2133 shp = hp;
2134 devset = shp->h_devset;
2135 bp = hp->h_bd;
2136
2137 /*
2138 * Block out disconnect, unassign, IO unconfigure and
2139 * devinfo branch creation during status.
2140 */
2141 mutex_enter(&bp->b_slock);
2142 while (bp->b_sflags & DR_BSLOCK) {
2143 if (cv_wait_sig(&bp->b_scv, &bp->b_slock) == 0) {
2144 mutex_exit(&bp->b_slock);
2145 return (EINTR);
2146 }
2147 }
2148 bp->b_sflags |= DR_BSLOCK;
2149 mutex_exit(&bp->b_slock);
2150
2151 ncm = 1;
2152 if (hp->h_sbdcmd.cmd_cm.c_id.c_type == SBD_COMP_NONE) {
2153 if (dr_cmd_flags(hp) & SBD_FLAG_ALLCMP) {
2154 /*
2155 * Calculate the maximum number of components possible
2156 * for a board. This number will be used to size the
2157 * status scratch buffer used by board and component
2158 * status functions.
2159 * This buffer may differ in size from what is provided
2160 * by the plugin, since the known component set on the
2161 	 * board may change between the plugin's GETNCM call and
2162 * the status call. Sizing will be adjusted to the plugin's
2163 * receptacle buffer at copyout time.
2164 */
2165 ncm = MAX_CPU_UNITS_PER_BOARD +
2166 MAX_MEM_UNITS_PER_BOARD +
2167 MAX_IO_UNITS_PER_BOARD;
2168
2169 } else {
2170 /*
2171 * In the case of c_type == SBD_COMP_NONE, and
2172 * SBD_FLAG_ALLCMP not specified, only the board
2173 * info is to be returned, no components.
2174 */
2175 ncm = 0;
2176 devset = 0;
2177 }
2178 }
2179
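	/*
	 * Size the scratch buffer. sbd_stat_t already embeds one
	 * sbd_dev_stat_t in s_stat[0], so only (ncm - 1) additional
	 * device status entries need to be appended.
	 */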
2180 sz = sizeof (sbd_stat_t);
2181 if (ncm > 1)
2182 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
2183
2184
2185 pbsz = (int)hp->h_sbdcmd.cmd_stat.s_nbytes;
2186 pnstat = (pbsz - sizeof (sbd_stat_t)) / sizeof (sbd_dev_stat_t);
2187
2188 /*
2189 * s_nbytes describes the size of the preallocated user
2190 	 * buffer into which the application is expecting to
2191 * receive the sbd_stat_t and sbd_dev_stat_t structures.
2192 */
2193
2194 #ifdef _MULTI_DATAMODEL
2195
2196 /*
2197 * More buffer space is required for the 64bit to 32bit
2198 * conversion of data structures.
2199 */
2200 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2201 sz32 = sizeof (sbd_stat32_t);
2202 if (ncm > 1)
2203 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
2204 pnstat = (pbsz - sizeof (sbd_stat32_t))/
2205 sizeof (sbd_dev_stat32_t);
2206 }
2207
2208 sz += sz32;
2209 #endif
2210 /*
2211 * Since one sbd_dev_stat_t is included in the sbd_stat_t,
2212 * increment the plugin's nstat count.
2213 */
2214 ++pnstat;
2215
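	/*
	 * If no drmach board id exists, there is nothing to query;
	 * report an all-zero platform status rather than calling
	 * drmach_status().
	 */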
2216 if (bp->b_id == 0) {
2217 bzero(&pstat, sizeof (pstat));
2218 } else {
2219 sbd_error_t *err;
2220
2221 err = drmach_status(bp->b_id, &pstat);
2222 if (err) {
2223 DRERR_SET_C(&hp->h_err, &err);
2224 rv = EIO;
2225 goto status_done;
2226 }
2227 }
2228
2229 dstatp = (sbd_stat_t *)(void *)GETSTRUCT(char, sz);
2230
2231 devstatp = &dstatp->s_stat[0];
2232
2233 dstatp->s_board = bp->b_num;
2234
2235 /*
2236 * Detect transitions between empty and disconnected.
2237 */
2238 if (!pstat.empty && (bp->b_rstate == SBD_STAT_EMPTY))
2239 bp->b_rstate = SBD_STAT_DISCONNECTED;
2240 else if (pstat.empty && (bp->b_rstate == SBD_STAT_DISCONNECTED))
2241 bp->b_rstate = SBD_STAT_EMPTY;
2242
2243 dstatp->s_rstate = bp->b_rstate;
2244 dstatp->s_ostate = bp->b_ostate;
2245 dstatp->s_cond = bp->b_cond = pstat.cond;
2246 dstatp->s_busy = bp->b_busy | pstat.busy;
2247 dstatp->s_time = bp->b_time;
2248 dstatp->s_power = pstat.powered;
2249 dstatp->s_assigned = bp->b_assigned = pstat.assigned;
2250 dstatp->s_nstat = nstat = 0;
2251 bcopy(&pstat.type[0], &dstatp->s_type[0], SBD_TYPE_LEN);
2252 bcopy(&pstat.info[0], &dstatp->s_info[0], SBD_MAX_INFO);
2253
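	/*
	 * Limit the requested devset to devices actually present
	 * on the board.
	 */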
2254 devset &= DR_DEVS_PRESENT(bp);
2255 if (devset == 0) {
2256 /*
2257 * No device chosen.
2258 */
2259 PR_ALL("%s: no device present\n", f);
2260 }
2261
2262 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
2263 if ((nstat = dr_cpu_status(hp, devset, devstatp)) > 0) {
2264 dstatp->s_nstat += nstat;
2265 devstatp += nstat;
2266 }
2267
2268 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
2269 if ((nstat = dr_mem_status(hp, devset, devstatp)) > 0) {
2270 dstatp->s_nstat += nstat;
2271 devstatp += nstat;
2272 }
2273
2274 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
2275 if ((nstat = dr_io_status(hp, devset, devstatp)) > 0) {
2276 dstatp->s_nstat += nstat;
2277 devstatp += nstat;
2278 }
2279
2280 /*
2281 * Due to a possible change in number of components between
2282 * the time of plugin's GETNCM call and now, there may be
2283 * more or less components than the plugin's buffer can
2284 * hold. Adjust s_nstat accordingly.
2285 */
2286
2287 dstatp->s_nstat = dstatp->s_nstat > pnstat ? pnstat : dstatp->s_nstat;
2288
2289 #ifdef _MULTI_DATAMODEL
2290 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
2291 int i, j;
2292 sbd_stat32_t *dstat32p;
2293
2294 dstat32p = (sbd_stat32_t *)devstatp;
2295
2296 /* Alignment Paranoia */
2297 if ((ulong_t)dstat32p & 0x1) {
2298 PR_ALL("%s: alignment: sz=0x%lx dstat32p=0x%p\n",
2299 f, sizeof (sbd_stat32_t), (void *)dstat32p);
2300 DR_OP_INTERNAL_ERROR(hp);
2301 rv = EINVAL;
2302 goto status_done;
2303 }
2304
2305 /* paranoia: detect buffer overrun */
2306 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
2307 ((caddr_t)dstatp) + sz) {
2308 DR_OP_INTERNAL_ERROR(hp);
2309 rv = EINVAL;
2310 goto status_done;
2311 }
2312
2313 /* copy sbd_stat_t structure members */
2314 #define _SBD_STAT(t, m) dstat32p->m = (t)dstatp->m
2315 _SBD_STAT(int32_t, s_board);
2316 _SBD_STAT(int32_t, s_rstate);
2317 _SBD_STAT(int32_t, s_ostate);
2318 _SBD_STAT(int32_t, s_cond);
2319 _SBD_STAT(int32_t, s_busy);
2320 _SBD_STAT(time32_t, s_time);
2321 _SBD_STAT(uint32_t, s_power);
2322 _SBD_STAT(uint32_t, s_assigned);
2323 _SBD_STAT(int32_t, s_nstat);
2324 bcopy(&dstatp->s_type[0], &dstat32p->s_type[0],
2325 SBD_TYPE_LEN);
2326 bcopy(&dstatp->s_info[0], &dstat32p->s_info[0],
2327 SBD_MAX_INFO);
2328 #undef _SBD_STAT
2329
2330 for (i = 0; i < dstatp->s_nstat; i++) {
2331 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
2332 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
2333 #define _SBD_DEV_STAT(t, m) ds32p->m = (t)dsp->m
2334
2335 /* copy sbd_cm_stat_t structure members */
2336 _SBD_DEV_STAT(int32_t, ds_type);
2337 _SBD_DEV_STAT(int32_t, ds_unit);
2338 _SBD_DEV_STAT(int32_t, ds_ostate);
2339 _SBD_DEV_STAT(int32_t, ds_cond);
2340 _SBD_DEV_STAT(int32_t, ds_busy);
2341 _SBD_DEV_STAT(int32_t, ds_suspend);
2342 _SBD_DEV_STAT(time32_t, ds_time);
2343 bcopy(&dsp->ds_name[0], &ds32p->ds_name[0],
2344 OBP_MAXPROPNAME);
2345
2346 switch (dsp->ds_type) {
2347 case SBD_COMP_CPU:
2348 /* copy sbd_cpu_stat_t structure members */
2349 _SBD_DEV_STAT(int32_t, d_cpu.cs_isbootproc);
2350 _SBD_DEV_STAT(int32_t, d_cpu.cs_cpuid);
2351 _SBD_DEV_STAT(int32_t, d_cpu.cs_speed);
2352 _SBD_DEV_STAT(int32_t, d_cpu.cs_ecache);
2353 break;
2354
2355 case SBD_COMP_MEM:
2356 /* copy sbd_mem_stat_t structure members */
2357 _SBD_DEV_STAT(int32_t, d_mem.ms_interleave);
2358 _SBD_DEV_STAT(uint32_t, d_mem.ms_basepfn);
2359 _SBD_DEV_STAT(uint32_t, d_mem.ms_totpages);
2360 _SBD_DEV_STAT(uint32_t, d_mem.ms_detpages);
2361 _SBD_DEV_STAT(int32_t, d_mem.ms_pageslost);
2362 _SBD_DEV_STAT(uint32_t, d_mem.ms_managed_pages);
2363 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_pages);
2364 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_first);
2365 _SBD_DEV_STAT(uint32_t, d_mem.ms_noreloc_last);
2366 _SBD_DEV_STAT(int32_t, d_mem.ms_cage_enabled);
2367 _SBD_DEV_STAT(int32_t, d_mem.ms_peer_is_target);
2368 bcopy(&dsp->d_mem.ms_peer_ap_id[0],
2369 &ds32p->d_mem.ms_peer_ap_id[0],
2370 sizeof (ds32p->d_mem.ms_peer_ap_id));
2371 break;
2372
2373 case SBD_COMP_IO:
2374 /* copy sbd_io_stat_t structure members */
2375 _SBD_DEV_STAT(int32_t, d_io.is_referenced);
2376 _SBD_DEV_STAT(int32_t, d_io.is_unsafe_count);
2377
2378 for (j = 0; j < SBD_MAX_UNSAFE; j++)
2379 _SBD_DEV_STAT(int32_t,
2380 d_io.is_unsafe_list[j]);
2381
2382 bcopy(&dsp->d_io.is_pathname[0],
2383 &ds32p->d_io.is_pathname[0], MAXPATHLEN);
2384 break;
2385
2386 case SBD_COMP_CMP:
2387 /* copy sbd_cmp_stat_t structure members */
2388 bcopy(&dsp->d_cmp.ps_cpuid[0],
2389 &ds32p->d_cmp.ps_cpuid[0],
2390 sizeof (ds32p->d_cmp.ps_cpuid));
2391 _SBD_DEV_STAT(int32_t, d_cmp.ps_ncores);
2392 _SBD_DEV_STAT(int32_t, d_cmp.ps_speed);
2393 _SBD_DEV_STAT(int32_t, d_cmp.ps_ecache);
2394 break;
2395
2396 default:
2397 cmn_err(CE_WARN, "%s: unknown dev type (%d)",
2398 f, (int)dsp->ds_type);
2399 rv = EFAULT;
2400 goto status_done;
2401 }
2402 #undef _SBD_DEV_STAT
2403 }
2404
2405
2406 if (ddi_copyout((void *)dstat32p,
2407 hp->h_sbdcmd.cmd_stat.s_statp, pbsz, mode) != 0) {
2408 cmn_err(CE_WARN,
2409 "%s: failed to copyout status "
2410 "for board %d", f, bp->b_num);
2411 rv = EFAULT;
2412 goto status_done;
2413 }
2414 } else
2415 #endif /* _MULTI_DATAMODEL */
2416
2417 if (ddi_copyout((void *)dstatp, hp->h_sbdcmd.cmd_stat.s_statp,
2418 pbsz, mode) != 0) {
2419 cmn_err(CE_WARN,
2420 "%s: failed to copyout status for board %d",
2421 f, bp->b_num);
2422 rv = EFAULT;
2423 goto status_done;
2424 }
2425
2426 status_done:
2427 if (dstatp != NULL)
2428 FREESTRUCT(dstatp, char, sz);
2429
2430 dr_unlock_status(bp);
2431
2432 return (rv);
2433 }
2434
2435 static int
2436 dr_get_ncm(dr_handle_t *hp)
2437 {
2438 int i;
2439 int ncm = 0;
2440 dr_devset_t devset;
2441
2442 devset = DR_DEVS_PRESENT(hp->h_bd);
2443 if (hp->h_sbdcmd.cmd_cm.c_id.c_type != SBD_COMP_NONE)
2444 devset &= DEVSET(hp->h_sbdcmd.cmd_cm.c_id.c_type,
2445 DEVSET_ANYUNIT);
2446
2447 /*
2448 * Handle CPUs first to deal with possible CMP
2449 	 * devices. If the CPU is a CMP, we only need to
2450 * increment ncm once even if there are multiple
2451 * cores for that CMP present in the devset.
2452 */
2453 for (i = 0; i < MAX_CMP_UNITS_PER_BOARD; i++) {
2454 if (devset & DEVSET(SBD_COMP_CMP, i)) {
2455 ncm++;
2456 }
2457 }
2458
2459 /* eliminate the CPU information from the devset */
2460 devset &= ~(DEVSET(SBD_COMP_CMP, DEVSET_ANYUNIT));
2461
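	/* count the remaining (non-CMP) devices by summing the set bits */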
2462 for (i = 0; i < (sizeof (dr_devset_t) * 8); i++) {
2463 ncm += devset & 0x1;
2464 devset >>= 1;
2465 }
2466
2467 return (ncm);
2468 }
2469
2470 /* used by dr_mem.c */
2471 /* TODO: eliminate dr_boardlist */
2472 dr_board_t *
2473 dr_lookup_board(int board_num)
2474 {
2475 dr_board_t *bp;
2476
2477 ASSERT(board_num >= 0 && board_num < MAX_BOARDS);
2478
2479 bp = &dr_boardlist[board_num];
2480 ASSERT(bp->b_num == board_num);
2481
2482 return (bp);
2483 }
2484
2485 static dr_dev_unit_t *
2486 dr_get_dev_unit(dr_board_t *bp, sbd_comp_type_t nt, int unit_num)
2487 {
2488 dr_dev_unit_t *dp;
2489
2490 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unit_num);
2491 ASSERT(dp->du_common.sbdev_bp == bp);
2492 ASSERT(dp->du_common.sbdev_unum == unit_num);
2493 ASSERT(dp->du_common.sbdev_type == nt);
2494
2495 return (dp);
2496 }
2497
2498 dr_cpu_unit_t *
2499 dr_get_cpu_unit(dr_board_t *bp, int unit_num)
2500 {
2501 dr_dev_unit_t *dp;
2502
2503 ASSERT(unit_num >= 0 && unit_num < MAX_CPU_UNITS_PER_BOARD);
2504
2505 dp = dr_get_dev_unit(bp, SBD_COMP_CPU, unit_num);
2506 return (&dp->du_cpu);
2507 }
2508
2509 dr_mem_unit_t *
2510 dr_get_mem_unit(dr_board_t *bp, int unit_num)
2511 {
2512 dr_dev_unit_t *dp;
2513
2514 ASSERT(unit_num >= 0 && unit_num < MAX_MEM_UNITS_PER_BOARD);
2515
2516 dp = dr_get_dev_unit(bp, SBD_COMP_MEM, unit_num);
2517 return (&dp->du_mem);
2518 }
2519
2520 dr_io_unit_t *
2521 dr_get_io_unit(dr_board_t *bp, int unit_num)
2522 {
2523 dr_dev_unit_t *dp;
2524
2525 ASSERT(unit_num >= 0 && unit_num < MAX_IO_UNITS_PER_BOARD);
2526
2527 dp = dr_get_dev_unit(bp, SBD_COMP_IO, unit_num);
2528 return (&dp->du_io);
2529 }
2530
2531 dr_common_unit_t *
2532 dr_get_common_unit(dr_board_t *bp, sbd_comp_type_t nt, int unum)
2533 {
2534 dr_dev_unit_t *dp;
2535
2536 dp = dr_get_dev_unit(bp, nt, unum);
2537 return (&dp->du_common);
2538 }
2539
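/*
 * Convert an sbd component id into the devset of board devices it
 * denotes. SBD_COMP_NONE selects every unit of every type; CPU ids
 * are expanded to cover all cores of the corresponding CMP.
 */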
2540 static dr_devset_t
2541 dr_dev2devset(sbd_comp_id_t *cid)
2542 {
2543 static fn_t f = "dr_dev2devset";
2544
2545 dr_devset_t devset;
2546 int unit = cid->c_unit;
2547
2548 switch (cid->c_type) {
2549 case SBD_COMP_NONE:
2550 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
2551 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
2552 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
2553 PR_ALL("%s: COMP_NONE devset = " DEVSET_FMT_STR "\n",
2554 f, DEVSET_FMT_ARG(devset));
2555 break;
2556
2557 case SBD_COMP_CPU:
2558 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
2559 cmn_err(CE_WARN,
2560 "%s: invalid cpu unit# = %d",
2561 f, unit);
2562 devset = 0;
2563 } else {
2564 /*
2565 * Generate a devset that includes all the
2566 * cores of a CMP device. If this is not a
2567 * CMP, the extra cores will be eliminated
2568 * later since they are not present. This is
2569 * also true for CMP devices that do not have
2570 * all cores active.
2571 */
2572 devset = DEVSET(SBD_COMP_CMP, unit);
2573 }
2574
2575 PR_ALL("%s: CPU devset = " DEVSET_FMT_STR "\n",
2576 f, DEVSET_FMT_ARG(devset));
2577 break;
2578
2579 case SBD_COMP_MEM:
2580 if (unit == SBD_NULL_UNIT) {
2581 unit = 0;
2582 cid->c_unit = 0;
2583 }
2584
2585 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
2586 cmn_err(CE_WARN,
2587 "%s: invalid mem unit# = %d",
2588 f, unit);
2589 devset = 0;
2590 } else
2591 devset = DEVSET(cid->c_type, unit);
2592
2593 PR_ALL("%s: MEM devset = " DEVSET_FMT_STR "\n",
2594 f, DEVSET_FMT_ARG(devset));
2595 break;
2596
2597 case SBD_COMP_IO:
2598 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
2599 cmn_err(CE_WARN,
2600 "%s: invalid io unit# = %d",
2601 f, unit);
2602 devset = 0;
2603 } else
2604 devset = DEVSET(cid->c_type, unit);
2605
2606 PR_ALL("%s: IO devset = " DEVSET_FMT_STR "\n",
2607 f, DEVSET_FMT_ARG(devset));
2608 break;
2609
2610 default:
2611 case SBD_COMP_UNKNOWN:
2612 devset = 0;
2613 break;
2614 }
2615
2616 return (devset);
2617 }
2618
2619 /*
2620 * Converts a dynamic attachment point name to a SBD_COMP_* type.
2621  * Returns SBD_COMP_UNKNOWN if name is not recognized.
2622 */
2623 static int
2624 dr_dev_type_to_nt(char *type)
2625 {
2626 int i;
2627
2628 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2629 if (strcmp(dr_devattr[i].s_devtype, type) == 0)
2630 break;
2631
2632 return (dr_devattr[i].s_nodetype);
2633 }
2634
2635 /*
2636 * Converts a SBD_COMP_* type to a dynamic attachment point name.
2637  * Returns NULL if the SBD_COMP_* type is not recognized.
2638 */
2639 char *
2640 dr_nt_to_dev_type(int nt)
2641 {
2642 int i;
2643
2644 for (i = 0; dr_devattr[i].s_nodetype != SBD_COMP_UNKNOWN; i++)
2645 if (dr_devattr[i].s_nodetype == nt)
2646 break;
2647
2648 return (dr_devattr[i].s_devtype);
2649 }
2650
2651 /*
2652 * State transition policy is that if there is some component for which
2653 * the state transition is valid, then let it through. The exception is
2654 * SBD_CMD_DISCONNECT. On disconnect, the state transition must be valid
2655 * for ALL components.
2656 * Returns the state that is in error, if any.
2657 */
2658 static int
2659 dr_check_transition(dr_board_t *bp, dr_devset_t *devsetp,
2660 struct dr_state_trans *transp, int cmd)
2661 {
2662 int s, ut;
2663 int state_err = 0;
2664 dr_devset_t devset;
2665 dr_common_unit_t *cp;
2666 static fn_t f = "dr_check_transition";
2667
2668 devset = *devsetp;
2669
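	/*
	 * For each device type, prune from the working devset any unit
	 * that is not present or whose current state does not permit
	 * this transition; remember the first offending state in
	 * state_err.
	 */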
2670 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
2671 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
2672 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
2673 continue;
2674
2675 cp = dr_get_common_unit(bp, SBD_COMP_CPU, ut);
2676 s = (int)cp->sbdev_state;
2677 if (!DR_DEV_IS_PRESENT(cp)) {
2678 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2679 } else {
2680 if (transp->x_op[s].x_rv) {
2681 if (!state_err)
2682 state_err = s;
2683 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
2684 }
2685 }
2686 }
2687 }
2688 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
2689 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
2690 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
2691 continue;
2692
2693 cp = dr_get_common_unit(bp, SBD_COMP_MEM, ut);
2694 s = (int)cp->sbdev_state;
2695 if (!DR_DEV_IS_PRESENT(cp)) {
2696 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2697 } else {
2698 if (transp->x_op[s].x_rv) {
2699 if (!state_err)
2700 state_err = s;
2701 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
2702 }
2703 }
2704 }
2705 }
2706 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
2707 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
2708 if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
2709 continue;
2710
2711 cp = dr_get_common_unit(bp, SBD_COMP_IO, ut);
2712 s = (int)cp->sbdev_state;
2713 if (!DR_DEV_IS_PRESENT(cp)) {
2714 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2715 } else {
2716 if (transp->x_op[s].x_rv) {
2717 if (!state_err)
2718 state_err = s;
2719 DEVSET_DEL(devset, SBD_COMP_IO, ut);
2720 }
2721 }
2722 }
2723 }
2724
2725 PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
2726 f, (uint_t)*devsetp, (uint_t)devset);
2727
2728 *devsetp = devset;
2729 /*
2730 * If there are some remaining components for which
2731 * this state transition is valid, then allow them
2732 	 * through; otherwise, if none are left, return
2733 * the state error. The exception is SBD_CMD_DISCONNECT.
2734 * On disconnect, the state transition must be valid for ALL
2735 * components.
2736 */
2737 if (cmd == SBD_CMD_DISCONNECT)
2738 return (state_err);
2739 return (devset ? 0 : state_err);
2740 }
2741
2742 void
2743 dr_device_transition(dr_common_unit_t *cp, dr_state_t st)
2744 {
2745 PR_STATE("%s STATE %s(%d) -> %s(%d)\n",
2746 cp->sbdev_path,
2747 state_str[cp->sbdev_state], cp->sbdev_state,
2748 state_str[st], st);
2749
2750 cp->sbdev_state = st;
2751 if (st == DR_STATE_CONFIGURED) {
2752 cp->sbdev_ostate = SBD_STAT_CONFIGURED;
2753 if (cp->sbdev_bp->b_ostate != SBD_STAT_CONFIGURED) {
2754 cp->sbdev_bp->b_ostate = SBD_STAT_CONFIGURED;
2755 (void) drv_getparm(TIME,
2756 (void *) &cp->sbdev_bp->b_time);
2757 }
2758 } else
2759 cp->sbdev_ostate = SBD_STAT_UNCONFIGURED;
2760
2761 (void) drv_getparm(TIME, (void *) &cp->sbdev_time);
2762 }
2763
2764 static void
2765 dr_board_transition(dr_board_t *bp, dr_state_t st)
2766 {
2767 PR_STATE("BOARD %d STATE: %s(%d) -> %s(%d)\n",
2768 bp->b_num,
2769 state_str[bp->b_state], bp->b_state,
2770 state_str[st], st);
2771
2772 bp->b_state = st;
2773 }
2774
2775 void
2776 dr_op_err(int ce, dr_handle_t *hp, int code, char *fmt, ...)
2777 {
2778 sbd_error_t *err;
2779 va_list args;
2780
2781 va_start(args, fmt);
2782 err = drerr_new_v(code, fmt, args);
2783 va_end(args);
2784
2785 if (ce != CE_IGNORE)
2786 sbd_err_log(err, ce);
2787
2788 DRERR_SET_C(&hp->h_err, &err);
2789 }
2790
2791 void
2792 dr_dev_err(int ce, dr_common_unit_t *cp, int code)
2793 {
2794 sbd_error_t *err;
2795
2796 err = drerr_new(0, code, cp->sbdev_path, NULL);
2797
2798 if (ce != CE_IGNORE)
2799 sbd_err_log(err, ce);
2800
2801 DRERR_SET_C(&cp->sbdev_error, &err);
2802 }
2803
2804 /*
2805 * A callback routine. Called from the drmach layer as a result of
2806  * a call to drmach_board_find_devices from dr_init_devlists.
2807 */
2808 static sbd_error_t *
2809 dr_dev_found(void *data, const char *name, int unum, drmachid_t id)
2810 {
2811 dr_board_t *bp = data;
2812 dr_dev_unit_t *dp;
2813 int nt;
2814 static fn_t f = "dr_dev_found";
2815
2816 PR_ALL("%s (board = %d, name = %s, unum = %d, id = %p)...\n",
2817 f, bp->b_num, name, unum, id);
2818
2819 nt = dr_dev_type_to_nt((char *)name);
2820 if (nt == SBD_COMP_UNKNOWN) {
2821 /*
2822 		 * This should not happen. When it does, it indicates
2823 		 * a mismatch in devices supported by the drmach layer
2824 * vs devices supported by this layer.
2825 */
2826 return (DR_INTERNAL_ERROR());
2827 }
2828
2829 dp = DR_GET_BOARD_DEVUNIT(bp, nt, unum);
2830
2831 /* sanity check */
2832 ASSERT(dp->du_common.sbdev_bp == bp);
2833 ASSERT(dp->du_common.sbdev_unum == unum);
2834 ASSERT(dp->du_common.sbdev_type == nt);
2835
2836 /* render dynamic attachment point path of this unit */
2837 (void) snprintf(dp->du_common.sbdev_path,
2838 sizeof (dp->du_common.sbdev_path), "%s::%s%d",
2839 bp->b_path, name, DR_UNUM2SBD_UNUM(unum, nt));
2840
2841 dp->du_common.sbdev_id = id;
2842 DR_DEV_SET_PRESENT(&dp->du_common);
2843
2844 bp->b_ndev++;
2845
2846 return (NULL);
2847 }
2848
2849 static sbd_error_t *
2850 dr_init_devlists(dr_board_t *bp)
2851 {
2852 int i;
2853 sbd_error_t *err;
2854 dr_dev_unit_t *dp;
2855 static fn_t f = "dr_init_devlists";
2856
2857 PR_ALL("%s (%s)...\n", f, bp->b_path);
2858
2859 /* sanity check */
2860 ASSERT(bp->b_ndev == 0);
2861
2862 DR_DEVS_DISCONNECT(bp, (uint_t)-1);
2863
2864 /*
2865 * This routine builds the board's devlist and initializes
2866 * the common portion of the unit data structures.
2867 * Note: because the common portion is considered
2868 	 * uninitialized, the dr_get_*_unit() routines cannot
2869 * be used.
2870 */
2871
2872 /*
2873 * Clear out old entries, if any.
2874 */
2875 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
2876 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_CPU, i);
2877
2878 bzero(dp, sizeof (*dp));
2879 dp->du_common.sbdev_bp = bp;
2880 dp->du_common.sbdev_unum = i;
2881 dp->du_common.sbdev_type = SBD_COMP_CPU;
2882 }
2883
2884 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
2885 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_MEM, i);
2886
2887 bzero(dp, sizeof (*dp));
2888 dp->du_common.sbdev_bp = bp;
2889 dp->du_common.sbdev_unum = i;
2890 dp->du_common.sbdev_type = SBD_COMP_MEM;
2891 }
2892
2893 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
2894 dp = DR_GET_BOARD_DEVUNIT(bp, SBD_COMP_IO, i);
2895
2896 bzero(dp, sizeof (*dp));
2897 dp->du_common.sbdev_bp = bp;
2898 dp->du_common.sbdev_unum = i;
2899 dp->du_common.sbdev_type = SBD_COMP_IO;
2900 }
2901
2902 err = NULL;
2903 if (bp->b_id) {
2904 /* find devices on this board */
2905 err = drmach_board_find_devices(
2906 bp->b_id, bp, dr_dev_found);
2907 }
2908
2909 return (err);
2910 }
2911
2912 /*
2913  * Returns 0 if the device associated with the given drmachid is
2914  * found to be attached, -1 otherwise.
2915 */
2916 static int
2917 dr_check_unit_attached(dr_common_unit_t *cp)
2918 {
2919 int rv = 0;
2920 processorid_t cpuid;
2921 uint64_t basepa, endpa;
2922 struct memlist *ml;
2923 extern struct memlist *phys_install;
2924 sbd_error_t *err;
2925 int yes;
2926 static fn_t f = "dr_check_unit_attached";
2927
2928 switch (cp->sbdev_type) {
2929 case SBD_COMP_CPU:
2930 err = drmach_cpu_get_id(cp->sbdev_id, &cpuid);
2931 if (err) {
2932 DRERR_SET_C(&cp->sbdev_error, &err);
2933 rv = -1;
2934 break;
2935 }
2936 mutex_enter(&cpu_lock);
2937 if (cpu_get(cpuid) == NULL)
2938 rv = -1;
2939 mutex_exit(&cpu_lock);
2940 break;
2941
2942 case SBD_COMP_MEM:
2943 err = drmach_mem_get_slice_info(cp->sbdev_id,
2944 &basepa, &endpa, NULL);
2945 if (err) {
2946 DRERR_SET_C(&cp->sbdev_error, &err);
2947 rv = -1;
2948 break;
2949 }
2950
2951 /*
2952 * Check if base address is in phys_install.
2953 */
2954 memlist_read_lock();
2955 for (ml = phys_install; ml; ml = ml->ml_next)
2956 if ((endpa <= ml->ml_address) ||
2957 (basepa >= (ml->ml_address + ml->ml_size)))
2958 continue;
2959 else
2960 break;
2961 memlist_read_unlock();
2962 if (ml == NULL)
2963 rv = -1;
2964 break;
2965
2966 case SBD_COMP_IO:
2967 err = drmach_io_is_attached(cp->sbdev_id, &yes);
2968 if (err) {
2969 DRERR_SET_C(&cp->sbdev_error, &err);
2970 rv = -1;
2971 break;
2972 } else if (!yes)
2973 rv = -1;
2974 break;
2975
2976 default:
2977 PR_ALL("%s: unexpected nodetype(%d) for id 0x%p\n",
2978 f, cp->sbdev_type, cp->sbdev_id);
2979 rv = -1;
2980 break;
2981 }
2982
2983 return (rv);
2984 }
2985
2986 /*
2987 * See if drmach recognizes the passthru command. DRMACH expects the
2988 * id to identify the thing to which the command is being applied. Using
2989 * nonsense SBD terms, that information has been perversely encoded in the
2990 * c_id member of the sbd_cmd_t structure. This logic reads those tea
2991 * leaves, finds the associated drmach id, then calls drmach to process
2992 * the passthru command.
2993 */
2994 static int
2995 dr_pt_try_drmach(dr_handle_t *hp)
2996 {
2997 dr_board_t *bp = hp->h_bd;
2998 sbd_comp_id_t *comp_id = &hp->h_sbdcmd.cmd_cm.c_id;
2999 drmachid_t id;
3000
3001 if (comp_id->c_type == SBD_COMP_NONE) {
3002 id = bp->b_id;
3003 } else {
3004 sbd_comp_type_t nt;
3005
3006 nt = dr_dev_type_to_nt(comp_id->c_name);
3007 if (nt == SBD_COMP_UNKNOWN) {
3008 dr_op_err(CE_IGNORE, hp, ESBD_INVAL, comp_id->c_name);
3009 id = 0;
3010 } else {
3011 /* pt command applied to dynamic attachment point */
3012 dr_common_unit_t *cp;
3013 cp = dr_get_common_unit(bp, nt, comp_id->c_unit);
3014 id = cp->sbdev_id;
3015 }
3016 }
3017
3018 if (hp->h_err == NULL)
3019 hp->h_err = drmach_passthru(id, &hp->h_opts);
3020
3021 return (hp->h_err == NULL ? 0 : -1);
3022 }
3023
3024 static int
3025 dr_pt_ioctl(dr_handle_t *hp)
3026 {
3027 int cmd, rv, len;
3028 int32_t sz;
3029 int found;
3030 char *copts;
3031 static fn_t f = "dr_pt_ioctl";
3032
3033 PR_ALL("%s...\n", f);
3034
3035 sz = hp->h_opts.size;
3036 copts = hp->h_opts.copts;
3037
3038 if (sz == 0 || copts == (char *)NULL) {
3039 cmn_err(CE_WARN, "%s: invalid passthru args", f);
3040 return (EINVAL);
3041 }
3042
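	/*
	 * Look for a built-in passthru handler whose name prefixes the
	 * option string; if none matches, let drmach try the command.
	 */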
3043 found = 0;
3044 for (cmd = 0; cmd < (sizeof (pt_arr) / sizeof (pt_arr[0])); cmd++) {
3045 len = strlen(pt_arr[cmd].pt_name);
3046 found = (strncmp(pt_arr[cmd].pt_name, copts, len) == 0);
3047 if (found)
3048 break;
3049 }
3050
3051 if (found)
3052 rv = (*pt_arr[cmd].pt_func)(hp);
3053 else
3054 rv = dr_pt_try_drmach(hp);
3055
3056 return (rv);
3057 }
3058
3059 /*
3060 * Called at driver load time to determine the state and condition
3061 * of an existing board in the system.
3062 */
3063 static void
3064 dr_board_discovery(dr_board_t *bp)
3065 {
3066 int i;
3067 dr_devset_t devs_lost, devs_attached = 0;
3068 dr_cpu_unit_t *cp;
3069 dr_mem_unit_t *mp;
3070 dr_io_unit_t *ip;
3071 static fn_t f = "dr_board_discovery";
3072
3073 if (DR_DEVS_PRESENT(bp) == 0) {
3074 PR_ALL("%s: board %d has no devices present\n",
3075 f, bp->b_num);
3076 return;
3077 }
3078
3079 /*
3080 * Check for existence of cpus.
3081 */
3082 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
3083 cp = dr_get_cpu_unit(bp, i);
3084
3085 if (!DR_DEV_IS_PRESENT(&cp->sbc_cm))
3086 continue;
3087
3088 if (dr_check_unit_attached(&cp->sbc_cm) >= 0) {
3089 DR_DEV_SET_ATTACHED(&cp->sbc_cm);
3090 DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
3091 PR_ALL("%s: board %d, cpu-unit %d - attached\n",
3092 f, bp->b_num, i);
3093 }
3094 dr_init_cpu_unit(cp);
3095 }
3096
3097 /*
3098 * Check for existence of memory.
3099 */
3100 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
3101 mp = dr_get_mem_unit(bp, i);
3102
3103 if (!DR_DEV_IS_PRESENT(&mp->sbm_cm))
3104 continue;
3105
3106 if (dr_check_unit_attached(&mp->sbm_cm) >= 0) {
3107 DR_DEV_SET_ATTACHED(&mp->sbm_cm);
3108 DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
3109 PR_ALL("%s: board %d, mem-unit %d - attached\n",
3110 f, bp->b_num, i);
3111 }
3112 dr_init_mem_unit(mp);
3113 }
3114
3115 /*
3116 * Check for i/o state.
3117 */
3118 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
3119 ip = dr_get_io_unit(bp, i);
3120
3121 if (!DR_DEV_IS_PRESENT(&ip->sbi_cm))
3122 continue;
3123
3124 if (dr_check_unit_attached(&ip->sbi_cm) >= 0) {
3125 /*
3126 * Found it!
3127 */
3128 DR_DEV_SET_ATTACHED(&ip->sbi_cm);
3129 DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
3130 PR_ALL("%s: board %d, io-unit %d - attached\n",
3131 f, bp->b_num, i);
3132 }
3133 dr_init_io_unit(ip);
3134 }
3135
3136 DR_DEVS_CONFIGURE(bp, devs_attached);
3137 if (devs_attached && ((devs_lost = DR_DEVS_UNATTACHED(bp)) != 0)) {
3138 int ut;
3139
3140 /*
3141 * It is not legal on board discovery to have a
3142 * board that is only partially attached. A board
3143 * is either all attached or all connected. If a
3144 * board has at least one attached device, then
3145 		 * the remaining devices, if any, must have
3146 * been lost or disconnected. These devices can
3147 * only be recovered by a full attach from scratch.
3148 * Note that devices previously in the unreferenced
3149 * state are subsequently lost until the next full
3150 * attach. This is necessary since the driver unload
3151 * that must have occurred would have wiped out the
3152 * information necessary to re-configure the device
3153 * back online, e.g. memlist.
3154 */
3155 PR_ALL("%s: some devices LOST (" DEVSET_FMT_STR ")...\n",
3156 f, DEVSET_FMT_ARG(devs_lost));
3157
3158 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
3159 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut))
3160 continue;
3161
3162 cp = dr_get_cpu_unit(bp, ut);
3163 dr_device_transition(&cp->sbc_cm, DR_STATE_EMPTY);
3164 }
3165
3166 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
3167 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut))
3168 continue;
3169
3170 mp = dr_get_mem_unit(bp, ut);
3171 dr_device_transition(&mp->sbm_cm, DR_STATE_EMPTY);
3172 }
3173
3174 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
3175 if (!DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut))
3176 continue;
3177
3178 ip = dr_get_io_unit(bp, ut);
3179 dr_device_transition(&ip->sbi_cm, DR_STATE_EMPTY);
3180 }
3181
3182 DR_DEVS_DISCONNECT(bp, devs_lost);
3183 }
3184 }
3185
3186 static int
3187 dr_board_init(dr_board_t *bp, dev_info_t *dip, int bd)
3188 {
3189 sbd_error_t *err;
3190
3191 mutex_init(&bp->b_lock, NULL, MUTEX_DRIVER, NULL);
3192 mutex_init(&bp->b_slock, NULL, MUTEX_DRIVER, NULL);
3193 cv_init(&bp->b_scv, NULL, CV_DRIVER, NULL);
3194 bp->b_rstate = SBD_STAT_EMPTY;
3195 bp->b_ostate = SBD_STAT_UNCONFIGURED;
3196 bp->b_cond = SBD_COND_UNKNOWN;
3197 (void) drv_getparm(TIME, (void *)&bp->b_time);
3198
3199 (void) drmach_board_lookup(bd, &bp->b_id);
3200 bp->b_num = bd;
3201 bp->b_dip = dip;
3202
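	/*
	 * Allocate the per-type device unit arrays that back this
	 * board's device lists.
	 */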
3203 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = GETSTRUCT(dr_dev_unit_t,
3204 MAX_CPU_UNITS_PER_BOARD);
3205
3206 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = GETSTRUCT(dr_dev_unit_t,
3207 MAX_MEM_UNITS_PER_BOARD);
3208
3209 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = GETSTRUCT(dr_dev_unit_t,
3210 MAX_IO_UNITS_PER_BOARD);
3211
3212 /*
3213 * Initialize the devlists
3214 */
3215 err = dr_init_devlists(bp);
3216 if (err) {
3217 sbd_err_clear(&err);
3218 dr_board_destroy(bp);
3219 return (-1);
3220 } else if (bp->b_ndev == 0) {
3221 dr_board_transition(bp, DR_STATE_EMPTY);
3222 } else {
3223 /*
3224 * Couldn't have made it down here without
3225 * having found at least one device.
3226 */
3227 ASSERT(DR_DEVS_PRESENT(bp) != 0);
3228 /*
3229 * Check the state of any possible devices on the
3230 * board.
3231 */
3232 dr_board_discovery(bp);
3233
3234 bp->b_assigned = 1;
3235
3236 if (DR_DEVS_UNATTACHED(bp) == 0) {
3237 /*
3238 * The board has no unattached devices, therefore
3239 * by reason of insanity it must be configured!
3240 */
3241 dr_board_transition(bp, DR_STATE_CONFIGURED);
3242 bp->b_ostate = SBD_STAT_CONFIGURED;
3243 bp->b_rstate = SBD_STAT_CONNECTED;
3244 bp->b_cond = SBD_COND_OK;
3245 (void) drv_getparm(TIME, (void *)&bp->b_time);
3246 } else if (DR_DEVS_ATTACHED(bp)) {
3247 dr_board_transition(bp, DR_STATE_PARTIAL);
3248 bp->b_ostate = SBD_STAT_CONFIGURED;
3249 bp->b_rstate = SBD_STAT_CONNECTED;
3250 bp->b_cond = SBD_COND_OK;
3251 (void) drv_getparm(TIME, (void *)&bp->b_time);
3252 } else {
3253 dr_board_transition(bp, DR_STATE_CONNECTED);
3254 bp->b_rstate = SBD_STAT_CONNECTED;
3255 (void) drv_getparm(TIME, (void *)&bp->b_time);
3256 }
3257 }
3258
3259 return (0);
3260 }
3261
3262 static void
3263 dr_board_destroy(dr_board_t *bp)
3264 {
3265 PR_ALL("dr_board_destroy: num %d, path %s\n",
3266 bp->b_num, bp->b_path);
3267
3268 dr_board_transition(bp, DR_STATE_EMPTY);
3269 bp->b_rstate = SBD_STAT_EMPTY;
3270 (void) drv_getparm(TIME, (void *)&bp->b_time);
3271
3272 /*
3273 * Free up MEM unit structs.
3274 */
3275 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)],
3276 dr_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
3277 bp->b_dev[DEVSET_NIX(SBD_COMP_MEM)] = NULL;
3278 /*
3279 * Free up CPU unit structs.
3280 */
3281 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)],
3282 dr_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
3283 bp->b_dev[DEVSET_NIX(SBD_COMP_CPU)] = NULL;
3284 /*
3285 * Free up IO unit structs.
3286 */
3287 FREESTRUCT(bp->b_dev[DEVSET_NIX(SBD_COMP_IO)],
3288 dr_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
3289 bp->b_dev[DEVSET_NIX(SBD_COMP_IO)] = NULL;
3290
3291 mutex_destroy(&bp->b_lock);
3292 mutex_destroy(&bp->b_slock);
3293 cv_destroy(&bp->b_scv);
3294
3295 /*
3296 	 * Reset the board structure to its initial state; otherwise it will
3297 	 * cause trouble on the next call to dr_board_init() for the same board.
3298 	 * dr_board_init() may be called multiple times for the same board
3299 	 * if the DR driver fails to initialize some boards.
3300 */
3301 bzero(bp, sizeof (*bp));
3302 }
3303
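/*
 * Acquire exclusive use of the board status lock, waiting
 * (uninterruptibly) for any other holder of DR_BSLOCK to call
 * dr_unlock_status().
 */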
3304 void
3305 dr_lock_status(dr_board_t *bp)
3306 {
3307 mutex_enter(&bp->b_slock);
3308 while (bp->b_sflags & DR_BSLOCK)
3309 cv_wait(&bp->b_scv, &bp->b_slock);
3310 bp->b_sflags |= DR_BSLOCK;
3311 mutex_exit(&bp->b_slock);
3312 }
3313
3314 void
3315 dr_unlock_status(dr_board_t *bp)
3316 {
3317 mutex_enter(&bp->b_slock);
3318 bp->b_sflags &= ~DR_BSLOCK;
3319 cv_signal(&bp->b_scv);
3320 mutex_exit(&bp->b_slock);
3321 }
3322
3323 /*
3324 * Extract flags passed via ioctl.
3325 */
3326 int
3327 dr_cmd_flags(dr_handle_t *hp)
3328 {
3329 return (hp->h_sbdcmd.cmd_cm.c_flags);
3330 }
3331