1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * safari system board DR module.
29 */
30
31 #include <sys/debug.h>
32 #include <sys/types.h>
33 #include <sys/errno.h>
34 #include <sys/cred.h>
35 #include <sys/dditypes.h>
36 #include <sys/devops.h>
37 #include <sys/modctl.h>
38 #include <sys/poll.h>
39 #include <sys/conf.h>
40 #include <sys/ddi.h>
41 #include <sys/sunddi.h>
42 #include <sys/sunndi.h>
43 #include <sys/ndi_impldefs.h>
44 #include <sys/stat.h>
45 #include <sys/kmem.h>
46 #include <sys/cpuvar.h>
47 #include <sys/mem_config.h>
48 #include <sys/mem_cage.h>
49
50 #include <sys/autoconf.h>
51 #include <sys/cmn_err.h>
52
53 #include <sys/ddi_impldefs.h>
54 #include <sys/machsystm.h>
55 #include <sys/param.h>
56
57 #include <sys/sbdpriv.h>
58 #include <sys/sbd_io.h>
59
60 /* start sbd includes */
61
62 #include <sys/systm.h>
63 #include <sys/sysmacros.h>
64 #include <sys/x_call.h>
65 #include <sys/membar.h>
66 #include <vm/seg_kmem.h>
67
68 extern int nulldev();
69 extern int nodev();
70
typedef struct {		/* arg to sbd_get_handle */
	dev_t		dev;	/* device the ioctl arrived on */
	int		cmd;	/* SBD_CMD_* command code */
	int		mode;	/* ioctl mode bits */
	sbd_ioctl_arg_t	*ioargp; /* user address of the ioctl argument */
} sbd_init_arg_t;
77
78
79 /*
80 * sbd support operations.
81 */
82 static void sbd_exec_op(sbd_handle_t *hp);
83 static void sbd_dev_configure(sbd_handle_t *hp);
84 static int sbd_dev_release(sbd_handle_t *hp);
85 static int sbd_dev_unconfigure(sbd_handle_t *hp);
86 static void sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep,
87 dev_info_t *dip, int unit);
88 static void sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep,
89 dev_info_t *dip, int unit);
90 static int sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit);
91 static void sbd_cancel(sbd_handle_t *hp);
92 void sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip);
93 int sbd_dealloc_instance(sbd_board_t *sbp, int max_boards);
94 int sbd_errno2ecode(int error);
95 #pragma weak sbdp_cpu_get_impl
96
#ifdef DEBUG
/* debug output control mask; presumably read by the PR_* macros — confirm */
uint_t	sbd_debug = (uint_t)0x0;

#ifdef SBD_DEBUG_ERRS
/* controls which errors are injected */
uint_t	sbd_err_debug = (uint_t)0x0;

/* controls printing about error injection */
uint_t	sbd_print_errs = (uint_t)0x0;

#endif /* SBD_DEBUG_ERRS */

#endif /* DEBUG */
110
/* Printable names for the internal board states (see rstate_cvt()). */
char *sbd_state_str[] = {
	"EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
	"PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
	"FATAL"
};

/* Printable names for component types (SBD_COMP_*). */
/* Note: this must be changed in tandem with sbd_ioctl.h */
char *sbd_ct_str[] = {
	"NONE", "CPU", "MEM", "IO", "UNKNOWN"
};
121
/* Map an SBD_CMD_* code to its printable name (diagnostics only). */
/* Note: this must also be changed in tandem with sbd_ioctl.h */
#define	SBD_CMD_STR(c) \
	(((c) == SBD_CMD_ASSIGN)	? "ASSIGN"	: \
	((c) == SBD_CMD_UNASSIGN)	? "UNASSIGN"	: \
	((c) == SBD_CMD_POWERON)	? "POWERON"	: \
	((c) == SBD_CMD_POWEROFF)	? "POWEROFF"	: \
	((c) == SBD_CMD_TEST)		? "TEST"	: \
	((c) == SBD_CMD_CONNECT)	? "CONNECT"	: \
	((c) == SBD_CMD_CONFIGURE)	? "CONFIGURE"	: \
	((c) == SBD_CMD_UNCONFIGURE)	? "UNCONFIGURE"	: \
	((c) == SBD_CMD_DISCONNECT)	? "DISCONNECT"	: \
	((c) == SBD_CMD_STATUS)		? "STATUS"	: \
	((c) == SBD_CMD_GETNCM)		? "GETNCM"	: \
	((c) == SBD_CMD_PASSTHRU)	? "PASSTHRU"	: "unknown")
136
137 /*
138 * Defines and structures for device tree naming and mapping
139 * to node types
140 */
141
/* Platform-provided device attribute table; filled in by _init(). */
sbd_devattr_t	*sbd_devattr;

/* defines to access the attribute struct */
#define	SBD_DEVNAME(i)		sbd_devattr[i].s_devname
#define	SBD_OTYPE(i)		sbd_devattr[(i)].s_obp_type
#define	SBD_COMP(i)		sbd_devattr[i].s_dnodetype
148
149 /*
 * State transition table.  Gives the valid transitions for each "board"
 * state.  Recall that a non-zero return value terminates the operation;
 * however, the errno value is what really indicates an error, if any.
153 */
154 static int
_cmd2index(int c)155 _cmd2index(int c)
156 {
157 /*
158 * Translate DR CMD to index into sbd_state_transition.
159 */
160 switch (c) {
161 case SBD_CMD_CONNECT: return (0);
162 case SBD_CMD_DISCONNECT: return (1);
163 case SBD_CMD_CONFIGURE: return (2);
164 case SBD_CMD_UNCONFIGURE: return (3);
165 case SBD_CMD_POWEROFF: return (4);
166 case SBD_CMD_POWERON: return (5);
167 case SBD_CMD_UNASSIGN: return (6);
168 case SBD_CMD_ASSIGN: return (7);
169 case SBD_CMD_TEST: return (8);
170 default: return (-1);
171 }
172 }
173
174 #define CMD2INDEX(c) _cmd2index(c)
175
/*
 * Per-command state transition matrix.  Rows are indexed via
 * CMD2INDEX(cmd) (see _cmd2index() above) and columns by the board's
 * current internal state.  A non-zero x_rv terminates the operation;
 * x_err is the errno reported in that case (an x_rv of 1 with x_err 0
 * means "nothing to do", not an error).
 */
static struct sbd_state_trans {
	int	x_cmd;
	struct {
		int	x_rv;		/* return value of pre_op */
		int	x_err;		/* errno, if any */
	} x_op[SBD_NUM_STATES];
} sbd_state_transition[] = {
	{ SBD_CMD_CONNECT,
		{
			{ 0, 0 },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_DISCONNECT,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_CONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 0, 0 },	/* partial */
			{ 1, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNCONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 0, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWEROFF,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWERON,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_ASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_TEST,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
};
301
302 /*
303 * Global R/W lock to synchronize access across
304 * multiple boards. Users wanting multi-board access
305 * must grab WRITE lock, others must grab READ lock.
306 */
307 krwlock_t sbd_grwlock;
308
309 /*
310 * Global to determine if an event needs to be sent
311 */
312 char send_event = 0;
313
314 /*
315 * Required/Expected functions.
316 */
317
318 static sbd_handle_t *sbd_get_handle(dev_t dev, sbd_softstate_t *softsp,
319 intptr_t arg, sbd_init_arg_t *iap);
320 static void sbd_release_handle(sbd_handle_t *hp);
321 static int sbd_pre_op(sbd_handle_t *hp);
322 static void sbd_post_op(sbd_handle_t *hp);
323 static int sbd_probe_board(sbd_handle_t *hp);
324 static int sbd_deprobe_board(sbd_handle_t *hp);
325 static void sbd_connect(sbd_handle_t *hp);
326 static void sbd_assign_board(sbd_handle_t *hp);
327 static void sbd_unassign_board(sbd_handle_t *hp);
328 static void sbd_poweron_board(sbd_handle_t *hp);
329 static void sbd_poweroff_board(sbd_handle_t *hp);
330 static void sbd_test_board(sbd_handle_t *hp);
331
332 static int sbd_disconnect(sbd_handle_t *hp);
333 static sbd_devlist_t *sbd_get_attach_devlist(sbd_handle_t *hp,
334 int32_t *devnump, int32_t pass);
335 static int sbd_pre_attach_devlist(sbd_handle_t *hp,
336 sbd_devlist_t *devlist, int32_t devnum);
337 static int sbd_post_attach_devlist(sbd_handle_t *hp,
338 sbd_devlist_t *devlist, int32_t devnum);
339 static sbd_devlist_t *sbd_get_release_devlist(sbd_handle_t *hp,
340 int32_t *devnump, int32_t pass);
341 static int sbd_pre_release_devlist(sbd_handle_t *hp,
342 sbd_devlist_t *devlist, int32_t devnum);
343 static int sbd_post_release_devlist(sbd_handle_t *hp,
344 sbd_devlist_t *devlist, int32_t devnum);
345 static void sbd_release_done(sbd_handle_t *hp,
346 sbd_comp_type_t nodetype,
347 dev_info_t *dip);
348 static sbd_devlist_t *sbd_get_detach_devlist(sbd_handle_t *hp,
349 int32_t *devnump, int32_t pass);
350 static int sbd_pre_detach_devlist(sbd_handle_t *hp,
351 sbd_devlist_t *devlist, int32_t devnum);
352 static int sbd_post_detach_devlist(sbd_handle_t *hp,
353 sbd_devlist_t *devlist, int32_t devnum);
354 static void sbd_status(sbd_handle_t *hp);
355 static void sbd_get_ncm(sbd_handle_t *hp);
356
357
358 /*
359 * Support functions.
360 */
361 static sbd_devset_t sbd_dev2devset(sbd_comp_id_t *cid);
362 static int sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd,
363 sbd_cmd_t *cmdp, sbd_ioctl_arg_t *iap);
364 static int sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap,
365 void *arg);
366 static int sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp,
367 sbd_ioctl_arg_t *iap);
368 static int sbd_check_transition(sbd_board_t *sbp,
369 sbd_devset_t *devsetp,
370 struct sbd_state_trans *transp);
371 static sbd_devlist_t *sbd_get_devlist(sbd_handle_t *hp,
372 sbd_board_t *sbp,
373 sbd_comp_type_t nodetype,
374 int max_units, uint_t uset,
375 int *count, int present_only);
376 static int sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset,
377 sbd_dev_stat_t *dsp);
378
379 static int sbd_init_devlists(sbd_board_t *sbp);
380 static int sbd_name_to_idx(char *name);
381 static int sbd_otype_to_idx(char *otpye);
382 static int sbd_setup_devlists(dev_info_t *dip, void *arg);
383 static void sbd_init_mem_devlists(sbd_board_t *sbp);
384 static void sbd_init_cpu_unit(sbd_board_t *sbp, int unit);
385 static void sbd_board_discovery(sbd_board_t *sbp);
386 static void sbd_board_init(sbd_board_t *sbp,
387 sbd_softstate_t *softsp,
388 int bd, dev_info_t *dip, int wnode);
389 static void sbd_board_destroy(sbd_board_t *sbp);
390 static int sbd_check_unit_attached(sbd_board_t *sbp,
391 dev_info_t *dip, int unit,
392 sbd_comp_type_t nodetype, sbderror_t *ep);
393
394 static sbd_state_t rstate_cvt(sbd_istate_t state);
395
396 /*
397 * Autoconfiguration data structures
398 */
399
400 extern struct mod_ops mod_miscops;
401
/* Linkage: sbd loads as a miscellaneous (non-device) kernel module. */
static struct modlmisc modlmisc = {
	&mod_miscops,
	"System Board DR"
};

/* Single-entry linkage referencing the modlmisc above. */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};
412
413 static int sbd_instances = 0;
414
415 /*
416 * dr Global data elements
417 */
418 sbd_global sbd_g;
419
420 /*
421 * We want to be able to unload the module when we wish to do so, but we don't
422 * want anything else to unload it. Unloading cannot occur until
423 * sbd_teardown_instance is called by an explicit IOCTL into the parent node.
 * This support is for debugging purposes and, should it be expected to work
 * in the field, it should be enhanced:
 * Currently, there is still a window where sbd_teardown_instance gets called,
 * sbd_prevent_unloading is now 0, the driver doesn't get unloaded, and
 * sbd_setup_instance gets called. This may cause a panic.
429 */
430 int sbd_prevent_unloading = 1;
431
432 /*
433 * Driver entry points.
434 */
435 int
_init(void)436 _init(void)
437 {
438 int err;
439
440 /*
441 * If you need to support multiple nodes (instances), then
442 * whatever the maximum number of supported nodes is would
443 * need to passed as the third parameter to ddi_soft_state_init().
444 * Alternative would be to dynamically fini and re-init the
445 * soft state structure each time a node is attached.
446 */
447 err = ddi_soft_state_init((void **)&sbd_g.softsp,
448 sizeof (sbd_softstate_t), SBD_MAX_INSTANCES);
449 if (err)
450 return (err);
451
452 if ((err = mod_install(&modlinkage)) != 0) {
453 ddi_soft_state_fini((void **)&sbd_g.softsp);
454 return (err);
455 }
456
457 /* Get the array of names from platform helper routine */
458 sbd_devattr = sbdp_get_devattr();
459
460 return (err);
461 }
462
463 int
_fini(void)464 _fini(void)
465 {
466 int err;
467
468 if (sbd_prevent_unloading)
469 return (DDI_FAILURE);
470
471 ASSERT(sbd_instances == 0);
472
473 if ((err = mod_remove(&modlinkage)) != 0)
474 return (err);
475
476 ddi_soft_state_fini((void **)&sbd_g.softsp);
477
478 return (0);
479 }
480
int
_info(struct modinfo *modinfop)
{
	/* Standard modctl hook: report module information. */
	return (mod_info(&modlinkage, modinfop));
}
486
/*
 * Common entry point for all sbd ioctl commands.
 *
 *	dev	minor device; encodes the driver instance
 *	cmd	SBD_CMD_* command code (anything else returns ENOTTY)
 *	arg	user address of the sbd_ioctl_arg_t for this command
 *	mode	ioctl mode bits, used later for copyin/copyout
 *	event	out parameter: set non-zero when the executed operation
 *		requires an event to be sent (mirrors send_event)
 *
 * Returns 0 on success or an errno value.
 */
int
sbd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, char *event)
{
	int		rv = 0, instance;
	sbd_handle_t	*hp;
	sbd_softstate_t	*softsp;
	sbd_init_arg_t	init_arg;
	static fn_t	f = "sbd_ioctl";
	int		dr_avail;

	PR_BYP("sbd_ioctl cmd=%x, arg=%lx\n", cmd, arg);

	/* Note: this must also be changed in tandem with sbd_ioctl.h */
	switch (cmd) {
	case SBD_CMD_ASSIGN:
	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_TEST:
	case SBD_CMD_CONNECT:
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_DISCONNECT:
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		return (ENOTTY);
	}

	/* Look up this instance's soft-state; fail if not yet attached. */
	instance = SBD_GET_MINOR2INST(getminor(dev));
	if ((softsp = (sbd_softstate_t *)GET_SOFTC(instance)) == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: module not yet attached",
		    f, instance);
		return (ENXIO);
	}

	init_arg.dev = dev;
	init_arg.cmd = cmd;
	init_arg.mode = mode;
	init_arg.ioargp = (sbd_ioctl_arg_t *)arg;

	hp = sbd_get_handle(dev, softsp, arg, &init_arg);
	/* Check to see if we support dr */
	dr_avail = sbdp_dr_avail();
	if (dr_avail != 1) {
		/* Only the passive commands work without DR support. */
		switch (hp->h_cmd) {
		case SBD_CMD_STATUS:
		case SBD_CMD_GETNCM:
		case SBD_CMD_PASSTHRU:
			break;
		default:
			sbd_release_handle(hp);
			return (ENOTSUP);
		}
	}

	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		/* no locks needed for these commands */
		break;

	default:
		rw_enter(&sbd_grwlock, RW_WRITER);
		mutex_enter(&SBDH2BD(hp->h_sbd)->sb_mutex);

		/*
		 * If we're dealing with memory at all, then we have
		 * to keep the "exclusive" global lock held. This is
		 * necessary since we will probably need to look at
		 * multiple board structs. Otherwise, we only have
		 * to deal with the board in question and so can drop
		 * the global lock to "shared".
		 */
		/*
		 * XXX This is incorrect. The sh_devset has not
		 * been set at this point - it is 0.
		 */
		rv = DEVSET_IN_SET(HD2MACHHD(hp)->sh_devset,
		    SBD_COMP_MEM, DEVSET_ANYUNIT);
		if (rv == 0)
			rw_downgrade(&sbd_grwlock);
		break;
	}

	/*
	 * Before any operations happen, reset the event flag
	 */
	send_event = 0;

	/* Execute the command between the pre-/post-op hooks. */
	if (sbd_pre_op(hp) == 0) {
		sbd_exec_op(hp);
		sbd_post_op(hp);
	}

	rv = SBD_GET_ERRNO(SBD_HD2ERR(hp));
	*event = send_event;

	/* undo locking, if any, done before sbd_pre_op */
	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		mutex_exit(&SBDH2BD(hp->h_sbd)->sb_mutex);
		rw_exit(&sbd_grwlock);
	}

	sbd_release_handle(hp);

	return (rv);
}
604
/*
 * Initialize one driver instance: the platform layer, the soft-state,
 * and one sbd_board_t per potential board under 'root'.  Returns
 * DDI_SUCCESS, or DDI_FAILURE with all partial setup undone.
 */
int
sbd_setup_instance(int instance, dev_info_t *root, int max_boards, int wnode,
	caddr_t sbdp_arg)
{
	int		b;
	sbd_softstate_t	*softsp;
	sbd_board_t	*sbd_boardlist;
	static fn_t	f = "sbd_setup_instance";

	sbd_instances++;

	/* The platform-specific layer must come up first. */
	if (sbdp_setup_instance(sbdp_arg) != DDI_SUCCESS) {
		sbd_instances--;
		return (DDI_FAILURE);
	}

	if (ALLOC_SOFTC(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: failed to alloc soft-state",
		    f, instance);
		(void) sbdp_teardown_instance(sbdp_arg);
		sbd_instances--;
		return (DDI_FAILURE);
	}

	softsp = (sbd_softstate_t *)GET_SOFTC(instance);

	if (softsp == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: failed to get soft-state instance",
		    f, instance);
		goto exit;
	}

	/* Allocate the per-board state array for this instance. */
	sbd_boardlist = GETSTRUCT(sbd_board_t, max_boards);
	if (sbd_boardlist == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s: failed to alloc board list %d",
		    f, instance);
		goto exit;
	}


	softsp->sbd_boardlist = (void *)sbd_boardlist;
	softsp->max_boards = max_boards;
	softsp->wnode = wnode;


	for (b = 0; b < max_boards; b++) {
		sbd_board_init(sbd_boardlist++, softsp, b, root, wnode);
	}


	return (DDI_SUCCESS);
exit:
	/* Failure: unwind the platform setup and the soft-state. */
	(void) sbdp_teardown_instance(sbdp_arg);
	FREE_SOFTC(instance);
	sbd_instances--;
	return (DDI_FAILURE);
}
665
666 int
sbd_teardown_instance(int instance,caddr_t sbdp_arg)667 sbd_teardown_instance(int instance, caddr_t sbdp_arg)
668 {
669 sbd_softstate_t *softsp;
670
671 if (sbdp_teardown_instance(sbdp_arg) != DDI_SUCCESS)
672 return (DDI_FAILURE);
673
674 softsp = (sbd_softstate_t *)GET_SOFTC(instance);
675 if (softsp == NULL) {
676 return (DDI_FAILURE);
677 }
678
679 (void) sbd_dealloc_instance((sbd_board_t *)softsp->sbd_boardlist,
680 softsp->max_boards);
681
682 FREE_SOFTC(instance);
683 sbd_instances--;
684 sbd_prevent_unloading = 0;
685
686 return (DDI_SUCCESS);
687 }
688
/*
 * Dispatch hp->h_cmd to its implementation.  Errors are reported
 * through the handle's error structure (SBD_HD2ERR(hp)), not a
 * return value.
 */
static void
sbd_exec_op(sbd_handle_t *hp)
{
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_exec_op";

	switch (hp->h_cmd) {
	int	dev_canceled;

	case SBD_CMD_CONNECT:
		/* A connect first requires a successful board probe. */
		if (sbd_probe_board(hp))
			break;

		sbd_connect(hp);
		break;

	case SBD_CMD_CONFIGURE:
		sbd_dev_configure(hp);
		break;

	case SBD_CMD_UNCONFIGURE:
		/*
		 * Release first; only unconfigure if the release was
		 * not canceled and left no error in the handle.
		 */
		if (((dev_canceled = sbd_dev_release(hp)) == 0) &&
		    (SBD_GET_ERRNO(SBD_HD2ERR(hp)) == 0 &&
		    SBD_GET_ERR(SBD_HD2ERR(hp)) == 0))
			dev_canceled = sbd_dev_unconfigure(hp);

		if (dev_canceled)
			sbd_cancel(hp);
		break;

	case SBD_CMD_DISCONNECT:
		mutex_enter(&sbp->sb_slock);
		if (sbd_disconnect(hp) == 0)
			(void) sbd_deprobe_board(hp);
		mutex_exit(&sbp->sb_slock);
		break;

	case SBD_CMD_STATUS:
		sbd_status(hp);
		break;

	case SBD_CMD_GETNCM:
		sbd_get_ncm(hp);
		break;

	case SBD_CMD_ASSIGN:
		sbd_assign_board(hp);
		break;

	case SBD_CMD_UNASSIGN:
		sbd_unassign_board(hp);
		break;

	case SBD_CMD_POWEROFF:
		sbd_poweroff_board(hp);
		break;

	case SBD_CMD_POWERON:
		sbd_poweron_board(hp);
		break;

	case SBD_CMD_TEST:
		sbd_test_board(hp);
		break;

	case SBD_CMD_PASSTHRU:
	{
		int		rv;
		sbdp_handle_t	*hdp;
		sbderror_t	*ep = SBD_HD2ERR(hp);
		sbdp_ioctl_arg_t ia, *iap;

		iap = &ia;

		/* Repackage the ioctl arguments for the platform layer. */
		iap->h_dev = hp->h_dev;
		iap->h_cmd = hp->h_cmd;
		iap->h_iap = (intptr_t)hp->h_iap;
		iap->h_mode = hp->h_mode;

		hdp = sbd_get_sbdp_handle(sbp, hp);
		rv = sbdp_ioctl(hdp, iap);
		if (rv != 0) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			ep->e_errno = rv;
		}
		sbd_release_sbdp_handle(hdp);
		break;
	}

	default:
		SBD_SET_ERRNO(SBD_HD2ERR(hp), ENOTTY);
		cmn_err(CE_WARN,
		    "sbd:%s: unknown command (%d)",
		    f, hp->h_cmd);
		break;

	}

	if (SBD_GET_ERR(SBD_HD2ERR(hp)))
		PR_BYP("XXX e_code=%d", SBD_GET_ERR(SBD_HD2ERR(hp)));
	if (SBD_GET_ERRNO(SBD_HD2ERR(hp)))
		PR_BYP("XXX errno=%d", SBD_GET_ERRNO(SBD_HD2ERR(hp)));
}
792
/*
 * Determine the component type (MEM/CPU/IO) of 'dip'.  If the board is
 * connected/configured, the board's cached devlists are searched first;
 * otherwise — or when the dip is in none of them — the node's OBP
 * "device type" property is matched against the platform attribute
 * table.  Returns SBD_COMP_UNKNOWN when no match is found.
 */
sbd_comp_type_t
sbd_get_devtype(sbd_handle_t *hp, dev_info_t *dip)
{
	sbd_board_t	*sbp = hp ? SBDH2BD(hp->h_sbd) : NULL;
	sbd_istate_t	bstate;
	dev_info_t	**devlist;
	int		i;
	char		device[OBP_MAXDRVNAME];
	int		devicelen;

	devicelen = sizeof (device);

	bstate = sbp ? SBD_BOARD_STATE(sbp) : SBD_STATE_EMPTY;
	/*
	 * if the board's connected or configured, search the
	 * devlists. Otherwise check the device tree
	 */
	switch (bstate) {

	case SBD_STATE_CONNECTED:
	case SBD_STATE_CONFIGURED:
	case SBD_STATE_UNREFERENCED:
	case SBD_STATE_UNCONFIGURED:
		devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_MEM);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_CPU)];
		for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_CPU);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_IO)];
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_IO);
		/*FALLTHROUGH*/

	default:
		/* Fall back to the node's device-type property. */
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    OBP_DEVICETYPE, (caddr_t)device, &devicelen))
			break;

		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (strcmp(device, SBD_OTYPE(i)) != 0)
				continue;
			return (SBD_COMP(i));
		}

		break;
	}
	return (SBD_COMP_UNKNOWN);
}
847
/*
 * Attach (configure) all devices on the board, pass by pass.  Each
 * pass's devlist is bracketed by the pre-/post-attach hooks, and each
 * device is dispatched to its type-specific attach routine.  Per-device
 * errors are recorded in the devlist entry and folded into the handle
 * via sbd_set_err_in_hdl().
 */
static void
sbd_dev_configure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	pass = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	while ((devlist = sbd_get_attach_devlist(hp, &devnum, pass)) != NULL) {
		int	err;

		err = sbd_pre_attach_devlist(hp, devlist, devnum);
		if (err < 0) {
			/* Fatal pre-attach failure: stop entirely. */
			break;
		} else if (err > 0) {
			/* Nothing to do for this pass; try the next. */
			pass++;
			continue;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				sbd_attach_mem(hp, ep);
				/*
				 * NOTE(review): an ESBD_CPUONLINE error
				 * from a *memory* attach aborts the whole
				 * configure, freeing the devlist — confirm
				 * this cross-component check is intended.
				 */
				if (SBD_GET_ERR(ep) == ESBD_CPUONLINE) {
					FREESTRUCT(devlist, sbd_devlist_t,
					    MAX_MEM_UNITS_PER_BOARD);
					sbd_release_sbdp_handle(hdp);
					return;
				}
				break;

			case SBD_COMP_CPU:
				sbd_attach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_attach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			/*
			 * This 'continue' is effectively a no-op (it is
			 * the last statement of the loop body); only the
			 * side effect of sbd_set_err_in_hdl() matters.
			 */
			if (sbd_set_err_in_hdl(hp, ep) == 0)
				continue;
		}

		err = sbd_post_attach_devlist(hp, devlist, devnum);
		if (err < 0)
			break;

		pass++;
	}
	sbd_release_sbdp_handle(hdp);
}
924
925 static int
sbd_dev_release(sbd_handle_t * hp)926 sbd_dev_release(sbd_handle_t *hp)
927 {
928 int n, unit;
929 int32_t pass, devnum;
930 dev_info_t *dip;
931 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
932 sbdp_handle_t *hdp;
933 sbd_devlist_t *devlist;
934 sbd_comp_type_t nodetype;
935 int err = 0;
936 int dev_canceled;
937
938 pass = 1;
939 hdp = sbd_get_sbdp_handle(sbp, hp);
940
941 sbp->sb_busy = 1;
942 while ((devlist =
943 sbd_get_release_devlist(hp, &devnum, pass)) != NULL) {
944
945 err = sbd_pre_release_devlist(hp, devlist, devnum);
946 if (err < 0) {
947 dev_canceled = 1;
948 break;
949 } else if (err > 0) {
950 pass++;
951 continue;
952 }
953
954 dev_canceled = 0;
955 for (n = 0; n < devnum; n++) {
956 dip = devlist[n].dv_dip;
957 nodetype = sbd_get_devtype(hp, dip);
958
959 unit = sbdp_get_unit_num(hdp, dip);
960 if (unit < 0) {
961 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
962 break;
963 }
964
965 if ((nodetype == SBD_COMP_MEM) &&
966 sbd_release_mem(hp, dip, unit)) {
967
968 dev_canceled++;
969 }
970
971 sbd_release_done(hp, nodetype, dip);
972 }
973
974 err = sbd_post_release_devlist(hp, devlist, devnum);
975
976 if (err < 0)
977 break;
978
979 if (dev_canceled)
980 break;
981
982 pass++;
983 }
984 sbp->sb_busy = 0;
985
986 sbd_release_sbdp_handle(hdp);
987
988 if (dev_canceled)
989 return (dev_canceled);
990
991 return (err);
992 }
993
/*
 * Detach (unconfigure) all devices on the board, pass by pass — the
 * mirror image of sbd_dev_configure().  Returns non-zero if the
 * operation was canceled; cancellation only happens for memory
 * failures.
 */
static int
sbd_dev_unconfigure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		dev_canceled = 0;
	static fn_t	f = "sbd_dev_unconfigure";

	PR_ALL("%s...\n", f);

	pass = 1;
	hdp = sbd_get_sbdp_handle(sbp, hp);

	while ((devlist = sbd_get_detach_devlist(hp, &devnum, pass)) != NULL) {
		int	err, detach_err = 0;

		err = sbd_pre_detach_devlist(hp, devlist, devnum);
		if (err) {
			/*
			 * Only cancel the operation for memory in
			 * case of failure.
			 */
			nodetype = sbd_get_devtype(hp, devlist->dv_dip);
			if (nodetype == SBD_COMP_MEM)
				dev_canceled = 1;
			(void) sbd_post_detach_devlist(hp, devlist, devnum);
			break;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				dev_canceled = sbd_detach_mem(hp, ep, unit);
				break;

			case SBD_COMP_CPU:
				sbd_detach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_detach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			/*
			 * NOTE(review): a zero return from
			 * sbd_set_err_in_hdl() aborts the detach pass
			 * here, while sbd_dev_configure() continues in
			 * the same case — confirm the asymmetry.
			 */
			if (sbd_set_err_in_hdl(hp, ep) == 0) {
				detach_err = -1;
				break;
			}

		}
		err = sbd_post_detach_devlist(hp, devlist, devnum);
		if ((err < 0) || (detach_err < 0))
			break;

		pass++;
	}

	sbd_release_sbdp_handle(hdp);
	return (dev_canceled);
}
1077
1078 int
sbd_errno2ecode(int error)1079 sbd_errno2ecode(int error)
1080 {
1081 int rv;
1082
1083 switch (error) {
1084 case EBUSY:
1085 rv = ESBD_BUSY;
1086 break;
1087 case EINVAL:
1088 rv = ESBD_INVAL;
1089 break;
1090 case EALREADY:
1091 rv = ESBD_ALREADY;
1092 break;
1093 case ENODEV:
1094 rv = ESBD_NODEV;
1095 break;
1096 case ENOMEM:
1097 rv = ESBD_NOMEM;
1098 break;
1099 default:
1100 rv = ESBD_INVAL;
1101 }
1102
1103 return (rv);
1104 }
1105
/*
 * Configure a CPU into the system.  On success the CPU's device path
 * is cached in sb_cpupath[unit] for use by the detach path's error
 * reporting.  Failures (cpuid lookup, cpu_configure) are recorded in
 * 'ep'.  Caller must hold cpu_lock.
 */
static void
sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	int		rv = 0;
	processorid_t	cpuid;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_attach_cpu";
	char		*pathname;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);

	/*
	 * With the introduction of CMP devices, the CPU nodes
	 * are no longer directly under the top node. Since
	 * there is no plan to support CPU attach in the near
	 * future, a branch configure operation is not required.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		/* The platform could not resolve a cpuid for this node. */
		rv = -1;
		SBD_GET_PERR(hdp->h_err, ep);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: cpu_configure for cpuid %d failed",
		    f, cpuid);
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
	}
	sbd_release_sbdp_handle(hdp);

	if (rv == 0) {
		/* Cache the device path for later error reporting. */
		ASSERT(sbp->sb_cpupath[unit] != NULL);
		pathname = sbp->sb_cpupath[unit];
		(void) ddi_pathname(dip, pathname);
	}
}
1146
1147 /*
1148 * translate errno
1149 */
1150 void
sbd_errno_decode(int err,sbderror_t * ep,dev_info_t * dip)1151 sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip)
1152 {
1153 ASSERT(err != 0);
1154
1155 switch (err) {
1156 case ENOMEM:
1157 SBD_SET_ERR(ep, ESBD_NOMEM);
1158 break;
1159
1160 case EBUSY:
1161 SBD_SET_ERR(ep, ESBD_BUSY);
1162 break;
1163
1164 case EIO:
1165 SBD_SET_ERR(ep, ESBD_IO);
1166 break;
1167
1168 case ENXIO:
1169 SBD_SET_ERR(ep, ESBD_NODEV);
1170 break;
1171
1172 case EINVAL:
1173 SBD_SET_ERR(ep, ESBD_INVAL);
1174 break;
1175
1176 case EFAULT:
1177 default:
1178 SBD_SET_ERR(ep, ESBD_FAULT);
1179 break;
1180 }
1181
1182 (void) ddi_pathname(dip, SBD_GET_ERRSTR(ep));
1183 }
1184
/*
 * Unconfigure a CPU.  Errors are recorded in 'ep'; on cpu_unconfigure
 * failure the error string is set to the path cached by
 * sbd_attach_cpu().  Caller must hold cpu_lock.
 */
static void
sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	processorid_t	cpuid;
	int		rv;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_error_t	*spe;
	static fn_t	f = "sbd_detach_cpu";

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);
	hdp = sbd_get_sbdp_handle(sbp, hp);
	spe = hdp->h_err;
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		/* Could not resolve a cpuid; propagate platform error. */
		SBD_GET_PERR(spe, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}

	if ((rv = cpu_unconfigure(cpuid)) != 0) {
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
		SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
		cmn_err(CE_WARN,
		    "sbd:%s: cpu_unconfigure for cpu %d failed",
		    f, cpuid);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Since CPU nodes are no longer configured in CPU
	 * attach, the corresponding branch unconfigure
	 * operation that would be performed here is also
	 * no longer required.
	 */
}
1225
1226
/*
 * Detach a memory unit: first the memory itself via
 * sbd_detach_memory(), then each memory-controller devinfo branch with
 * the board status lock held.  Returns -1 if the memory detach failed,
 * 0 otherwise (branch-unconfigure errors are recorded in 'ep' but do
 * not change the return value).
 */
int
sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit)
{
	sbd_mem_unit_t	*mp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		i, rv;
	static fn_t	f = "sbd_detach_mem";

	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);

	if (sbd_detach_memory(hp, ep, mp, unit)) {
		cmn_err(CE_WARN, "%s: detach fail", f);
		return (-1);
	}

	/*
	 * Now detach mem devinfo nodes with status lock held.
	 */
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		dev_info_t	*fdip = NULL;

		if (mp->sbm_dip[i] == NULL)
			continue;
		ASSERT(e_ddi_branch_held(mp->sbm_dip[i]));
		mutex_enter(&sbp->sb_slock);
		rv = e_ddi_branch_unconfigure(mp->sbm_dip[i], &fdip,
		    DEVI_BRANCH_EVENT);
		mutex_exit(&sbp->sb_slock);
		if (rv) {
			/*
			 * If non-NULL, fdip is returned held and must be
			 * released.
			 */
			if (fdip != NULL) {
				sbd_errno_decode(rv, ep, fdip);
				ddi_release_devi(fdip);
			} else {
				sbd_errno_decode(rv, ep, mp->sbm_dip[i]);
			}
		}
	}

	return (0);
}
1271
1272 /* start beginning of sbd.c */
1273
1274 /*
1275 * MDR memory support - somewhat disabled for now.
1276 * UNSAFE unsafe driver code - I don't think we want this.
1277 * need to check.
1278 * DEVNODE This driver creates attachment points for individual
1279 * components as well as boards. We only need board
1280 * support.
1281 * DEV2DEVSET Put only present devices in devset.
1282 */
1283
1284
1285 static sbd_state_t
rstate_cvt(sbd_istate_t state)1286 rstate_cvt(sbd_istate_t state)
1287 {
1288 sbd_state_t cs;
1289
1290 switch (state) {
1291 case SBD_STATE_EMPTY:
1292 cs = SBD_STAT_EMPTY;
1293 break;
1294 case SBD_STATE_OCCUPIED:
1295 case SBD_STATE_FATAL:
1296 cs = SBD_STAT_DISCONNECTED;
1297 break;
1298 case SBD_STATE_CONFIGURED:
1299 case SBD_STATE_CONNECTED:
1300 case SBD_STATE_UNCONFIGURED:
1301 case SBD_STATE_PARTIAL:
1302 case SBD_STATE_RELEASE:
1303 case SBD_STATE_UNREFERENCED:
1304 cs = SBD_STAT_CONNECTED;
1305 break;
1306 default:
1307 cs = SBD_STAT_NONE;
1308 break;
1309 }
1310
1311 return (cs);
1312 }
1313
1314
1315 sbd_state_t
ostate_cvt(sbd_istate_t state)1316 ostate_cvt(sbd_istate_t state)
1317 {
1318 sbd_state_t cs;
1319
1320 switch (state) {
1321 case SBD_STATE_EMPTY:
1322 case SBD_STATE_OCCUPIED:
1323 case SBD_STATE_UNCONFIGURED:
1324 case SBD_STATE_CONNECTED:
1325 case SBD_STATE_FATAL:
1326 cs = SBD_STAT_UNCONFIGURED;
1327 break;
1328 case SBD_STATE_PARTIAL:
1329 case SBD_STATE_CONFIGURED:
1330 case SBD_STATE_RELEASE:
1331 case SBD_STATE_UNREFERENCED:
1332 cs = SBD_STAT_CONFIGURED;
1333 break;
1334 default:
1335 cs = SBD_STAT_NONE;
1336 break;
1337 }
1338
1339 return (cs);
1340 }
1341
1342 int
sbd_dealloc_instance(sbd_board_t * sbp,int max_boards)1343 sbd_dealloc_instance(sbd_board_t *sbp, int max_boards)
1344 {
1345 int b;
1346 sbd_board_t *list = sbp;
1347 static fn_t f = "sbd_dealloc_instance";
1348
1349 PR_ALL("%s...\n", f);
1350
1351 if (sbp == NULL) {
1352 return (-1);
1353 }
1354
1355 for (b = 0; b < max_boards; b++) {
1356 sbd_board_destroy(sbp++);
1357 }
1358
1359 FREESTRUCT(list, sbd_board_t, max_boards);
1360
1361 return (0);
1362 }
1363
1364 static sbd_devset_t
sbd_dev2devset(sbd_comp_id_t * cid)1365 sbd_dev2devset(sbd_comp_id_t *cid)
1366 {
1367 static fn_t f = "sbd_dev2devset";
1368
1369 sbd_devset_t devset;
1370 int unit = cid->c_unit;
1371
1372 switch (cid->c_type) {
1373 case SBD_COMP_NONE:
1374 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
1375 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
1376 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
1377 break;
1378
1379 case SBD_COMP_CPU:
1380 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
1381 PR_ALL("%s: invalid cpu unit# = %d",
1382 f, unit);
1383 devset = 0;
1384 } else
1385 /*
1386 * Generate a devset that includes all the
1387 * cores of a CMP device. If this is not a
1388 * CMP, the extra cores will be eliminated
1389 * later since they are not present. This is
1390 * also true for CMP devices that do not have
1391 * all cores active.
1392 */
1393 devset = DEVSET(SBD_COMP_CMP, unit);
1394
1395 break;
1396
1397 case SBD_COMP_MEM:
1398
1399 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
1400 #ifdef XXX_jeffco
1401 PR_ALL("%s: invalid mem unit# = %d",
1402 f, unit);
1403 devset = 0;
1404 #endif
1405 devset = DEVSET(cid->c_type, 0);
1406 PR_ALL("%s: adjusted MEM devset = 0x%x\n",
1407 f, devset);
1408 } else
1409 devset = DEVSET(cid->c_type, unit);
1410 break;
1411
1412 case SBD_COMP_IO:
1413 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
1414 PR_ALL("%s: invalid io unit# = %d",
1415 f, unit);
1416 devset = 0;
1417 } else
1418 devset = DEVSET(cid->c_type, unit);
1419
1420 break;
1421
1422 default:
1423 case SBD_COMP_UNKNOWN:
1424 devset = 0;
1425 break;
1426 }
1427
1428 return (devset);
1429 }
1430
1431 /*
1432 * Simple mutex for covering handle list ops as it is only
1433 * used "infrequently". No need to add another mutex to the sbd_board_t.
1434 */
1435 static kmutex_t sbd_handle_list_mutex;
1436
/*
 * Allocate and initialize a new DR handle for the board selected by
 * the slot encoded in 'dev', seed it from the init args in 'iap', and
 * link it onto that board's handle list (under sbd_handle_list_mutex).
 * Returns the public sbd_handle_t embedded in the private handle.
 */
static sbd_handle_t *
sbd_get_handle(dev_t dev, sbd_softstate_t *softsp, intptr_t arg,
	sbd_init_arg_t *iap)
{
	sbd_handle_t		*hp;
	sbderror_t		*ep;
	sbd_priv_handle_t	*shp;
	sbd_board_t		*sbp = softsp->sbd_boardlist;
	int			board;

	board = SBDGETSLOT(dev);
	ASSERT(board < softsp->max_boards);
	sbp += board;

	/*
	 * Brand-new handle.
	 */
	shp = kmem_zalloc(sizeof (sbd_priv_handle_t), KM_SLEEP);
	shp->sh_arg = (void *)arg;

	hp = MACHHD2HD(shp);

	ep = &shp->sh_err;

	hp->h_err = ep;
	hp->h_sbd = (void *) sbp;
	hp->h_dev = iap->dev;
	hp->h_cmd = iap->cmd;
	hp->h_mode = iap->mode;
	sbd_init_err(ep);

	/* Push onto the head of the board's handle list. */
	mutex_enter(&sbd_handle_list_mutex);
	shp->sh_next = sbp->sb_handle;
	sbp->sb_handle = shp;
	mutex_exit(&sbd_handle_list_mutex);

	return (hp);
}
1475
1476 void
sbd_init_err(sbderror_t * ep)1477 sbd_init_err(sbderror_t *ep)
1478 {
1479 ep->e_errno = 0;
1480 ep->e_code = 0;
1481 ep->e_rsc[0] = '\0';
1482 }
1483
1484 int
sbd_set_err_in_hdl(sbd_handle_t * hp,sbderror_t * ep)1485 sbd_set_err_in_hdl(sbd_handle_t *hp, sbderror_t *ep)
1486 {
1487 sbderror_t *hep = SBD_HD2ERR(hp);
1488
1489 /*
1490 * If there is an error logged already, don't rewrite it
1491 */
1492 if (SBD_GET_ERR(hep) || SBD_GET_ERRNO(hep)) {
1493 return (0);
1494 }
1495
1496 if (SBD_GET_ERR(ep) || SBD_GET_ERRNO(ep)) {
1497 SBD_SET_ERR(hep, SBD_GET_ERR(ep));
1498 SBD_SET_ERRNO(hep, SBD_GET_ERRNO(ep));
1499 SBD_SET_ERRSTR(hep, SBD_GET_ERRSTR(ep));
1500 return (0);
1501 }
1502
1503 return (-1);
1504 }
1505
/*
 * Unlink 'hp' from its board's handle list and free it, together with
 * any option buffer copied in by sbd_copyin_ioarg().  Panics if the
 * handle is not found on the board's list, since that indicates list
 * corruption.  NULL is tolerated as a no-op.
 */
static void
sbd_release_handle(sbd_handle_t *hp)
{
	sbd_priv_handle_t	*shp, **shpp;
	sbd_board_t		*sbp;
	static fn_t		f = "sbd_release_handle";

	if (hp == NULL)
		return;

	sbp = SBDH2BD(hp->h_sbd);

	shp = HD2MACHHD(hp);

	mutex_enter(&sbd_handle_list_mutex);
	/*
	 * Locate the handle in the board's reference list.
	 */
	for (shpp = &sbp->sb_handle; (*shpp) && ((*shpp) != shp);
	    shpp = &((*shpp)->sh_next))
		/* empty */;

	if (*shpp == NULL) {
		cmn_err(CE_PANIC,
		    "sbd:%s: handle not found in board %d",
		    f, sbp->sb_num);
		/*NOTREACHED*/
	} else {
		*shpp = shp->sh_next;
	}
	mutex_exit(&sbd_handle_list_mutex);

	if (hp->h_opts.copts != NULL) {
		FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
	}

	FREESTRUCT(shp, sbd_priv_handle_t, 1);
}
1544
1545 sbdp_handle_t *
sbd_get_sbdp_handle(sbd_board_t * sbp,sbd_handle_t * hp)1546 sbd_get_sbdp_handle(sbd_board_t *sbp, sbd_handle_t *hp)
1547 {
1548 sbdp_handle_t *hdp;
1549
1550 hdp = kmem_zalloc(sizeof (sbdp_handle_t), KM_SLEEP);
1551 hdp->h_err = kmem_zalloc(sizeof (sbd_error_t), KM_SLEEP);
1552 if (sbp == NULL) {
1553 hdp->h_board = -1;
1554 hdp->h_wnode = -1;
1555 } else {
1556 hdp->h_board = sbp->sb_num;
1557 hdp->h_wnode = sbp->sb_wnode;
1558 }
1559
1560 if (hp == NULL) {
1561 hdp->h_flags = 0;
1562 hdp->h_opts = NULL;
1563 } else {
1564 hdp->h_flags = SBD_2_SBDP_FLAGS(hp->h_flags);
1565 hdp->h_opts = &hp->h_opts;
1566 }
1567
1568 return (hdp);
1569 }
1570
1571 void
sbd_release_sbdp_handle(sbdp_handle_t * hdp)1572 sbd_release_sbdp_handle(sbdp_handle_t *hdp)
1573 {
1574 if (hdp == NULL)
1575 return;
1576
1577 kmem_free(hdp->h_err, sizeof (sbd_error_t));
1578 kmem_free(hdp, sizeof (sbdp_handle_t));
1579 }
1580
1581 void
sbd_reset_error_sbdph(sbdp_handle_t * hdp)1582 sbd_reset_error_sbdph(sbdp_handle_t *hdp)
1583 {
1584 if ((hdp != NULL) && (hdp->h_err != NULL)) {
1585 bzero(hdp->h_err, sizeof (sbd_error_t));
1586 }
1587 }
1588
/*
 * Copy the user's ioctl argument into 'cmdp', converting from the
 * 32-bit layout when a 32-bit process calls into a 64-bit kernel
 * (_MULTI_DATAMODEL).  For commands other than SBD_CMD_STATUS, also
 * copy in the caller's platform-specific option string (c_opts of
 * length c_len) into hp->h_opts; that buffer is freed later by
 * sbd_release_handle().  Returns 0 on success or an errno (EINVAL,
 * EFAULT).
 */
static int
sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd, sbd_cmd_t *cmdp,
	sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyin_ioarg";

	if (iap == NULL)
		return (EINVAL);

	bzero((caddr_t)cmdp, sizeof (sbd_cmd_t));

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_cmd32_t	scmd32;

		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));

		if (ddi_copyin((void *)iap, (void *)&scmd32,
		    sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
			    "sbd:%s: (32bit) failed to copyin "
			    "sbdcmd-struct", f);
			return (EFAULT);
		}
		/* Widen the ILP32 command structure field by field. */
		cmdp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
		cmdp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
		    &cmdp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
		cmdp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
		cmdp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
		cmdp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;

		if (cmd == SBD_CMD_PASSTHRU) {
			PR_BYP("passthru copyin: iap=%p, sz=%ld", (void *)iap,
			    sizeof (sbd_cmd32_t));
			PR_BYP("passthru copyin: c_opts=%x, c_len=%d",
			    scmd32.cmd_cm.c_opts,
			    scmd32.cmd_cm.c_len);
		}

		switch (cmd) {
		case SBD_CMD_STATUS:
			/* Status is the only command with extra 32-bit fields */
			cmdp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
			cmdp->cmd_stat.s_statp =
			    (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
			break;
		default:
			break;

		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)iap, (void *)cmdp,
	    sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: failed to copyin sbd cmd_t struct", f);
		return (EFAULT);
	}
	/*
	 * A user may set platform specific options so we need to
	 * copy them in
	 */
	if ((cmd != SBD_CMD_STATUS) && ((hp->h_opts.size = cmdp->cmd_cm.c_len)
	    > 0)) {
		hp->h_opts.size += 1;	/* For null termination of string. */
		hp->h_opts.copts = GETSTRUCT(char, hp->h_opts.size);
		if (ddi_copyin((void *)cmdp->cmd_cm.c_opts,
		    (void *)hp->h_opts.copts,
		    cmdp->cmd_cm.c_len, hp->h_mode) != 0) {
			/* copts is freed in sbd_release_handle(). */
			cmn_err(CE_WARN,
			    "sbd:%s: failed to copyin options", f);
			return (EFAULT);
		}
	}

	return (0);
}
1667
/*
 * Copy the (possibly updated) command structure 'scp' back out to the
 * user's ioctl argument 'iap', narrowing to the 32-bit layout for
 * ILP32 callers.  Returns 0 on success or an errno (EINVAL, EFAULT).
 */
static int
sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp, sbd_ioctl_arg_t *iap)
{
	static fn_t	f = "sbd_copyout_ioarg";

	if ((iap == NULL) || (scp == NULL))
		return (EINVAL);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_cmd32_t	scmd32;

		/* Narrow the common header into the ILP32 structure. */
		scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
		scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
		bcopy(scp->cmd_cm.c_id.c_name,
		    scmd32.cmd_cm.c_id.c_name, OBP_MAXPROPNAME);

		scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;

		switch (cmd) {
		case SBD_CMD_GETNCM:
			/* GETNCM is the only command returning extra data */
			scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
			break;
		default:
			break;
		}

		if (ddi_copyout((void *)&scmd32, (void *)iap,
		    sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
			    "sbd:%s: (32bit) failed to copyout "
			    "sbdcmd struct", f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)scp, (void *)iap,
	    sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: failed to copyout sbdcmd struct", f);
		return (EFAULT);
	}

	return (0);
}
1713
/*
 * Copy the error information in 'iap' out to the user's ioctl
 * argument ('arg'), converting to the 32-bit error layout for ILP32
 * callers.  Returns 0 on success or EFAULT.
 */
static int
sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap, void *arg)
{
	static fn_t	f = "sbd_copyout_errs";
	sbd_ioctl_arg_t	*uap;

	uap = (sbd_ioctl_arg_t *)arg;

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		sbd_error32_t		err32;
		sbd_ioctl_arg32_t	*uap32;

		uap32 = (sbd_ioctl_arg32_t *)arg;

		err32.e_code = iap->ie_code;
		(void) strcpy(err32.e_rsc, iap->ie_rsc);

		if (ddi_copyout((void *)&err32, (void *)&uap32->i_err,
		    sizeof (sbd_error32_t), mode)) {
			cmn_err(CE_WARN,
			    "sbd:%s: failed to copyout ioctl32 errs",
			    f);
			return (EFAULT);
		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyout((void *)&iap->i_err, (void *)&uap->i_err,
	    sizeof (sbd_error_t), mode) != 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: failed to copyout ioctl errs", f);
		return (EFAULT);
	}

	return (0);
}
1750
1751 /*
1752 * State transition policy is that if at least one
1753 * device cannot make the transition, then none of
1754 * the requested devices are allowed to transition.
1755 *
1756 * Returns the state that is in error, if any.
1757 */
1758 static int
sbd_check_transition(sbd_board_t * sbp,sbd_devset_t * devsetp,struct sbd_state_trans * transp)1759 sbd_check_transition(sbd_board_t *sbp, sbd_devset_t *devsetp,
1760 struct sbd_state_trans *transp)
1761 {
1762 int s, ut;
1763 int state_err = 0;
1764 sbd_devset_t devset;
1765 static fn_t f = "sbd_check_transition";
1766
1767 devset = *devsetp;
1768
1769 if (!devset) {
1770 /*
1771 * Transition does not deal with any components.
1772 * This is the case for addboard/deleteboard.
1773 */
1774 PR_ALL("%s: no devs: requested devset = 0x%x,"
1775 " final devset = 0x%x\n",
1776 f, (uint_t)*devsetp, (uint_t)devset);
1777
1778 return (0);
1779 }
1780
1781 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
1782 for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
1783 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
1784 continue;
1785 s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, ut);
1786 if (transp->x_op[s].x_rv) {
1787 if (!state_err)
1788 state_err = s;
1789 DEVSET_DEL(devset, SBD_COMP_MEM, ut);
1790 }
1791 }
1792 }
1793
1794 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
1795 for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
1796 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
1797 continue;
1798 s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, ut);
1799 if (transp->x_op[s].x_rv) {
1800 if (!state_err)
1801 state_err = s;
1802 DEVSET_DEL(devset, SBD_COMP_CPU, ut);
1803 }
1804 }
1805 }
1806
1807 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
1808 for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
1809 if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
1810 continue;
1811 s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_IO, ut);
1812 if (transp->x_op[s].x_rv) {
1813 if (!state_err)
1814 state_err = s;
1815 DEVSET_DEL(devset, SBD_COMP_IO, ut);
1816 }
1817 }
1818 }
1819
1820 PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
1821 f, (uint_t)*devsetp, (uint_t)devset);
1822
1823 *devsetp = devset;
1824 /*
1825 * If there are some remaining components for which
1826 * this state transition is valid, then allow them
1827 * through, otherwise if none are left then return
1828 * the state error.
1829 */
1830 return (devset ? 0 : state_err);
1831 }
1832
1833 /*
1834 * pre-op entry point must SET_ERRNO(), if needed.
1835 * Return value of non-zero indicates failure.
1836 */
/*
 * Common preamble for all DR ioctls: enforce write permission for
 * state-changing commands, copy in the user's command structure,
 * derive the target devset from the component id, and validate the
 * requested state transition for each device.  On any failure the
 * handle error is filled in, copied back to the user where possible,
 * and -1 is returned; 0 means the operation may proceed with
 * shp->sh_devset narrowed to the permitted devices.
 */
static int
sbd_pre_op(sbd_handle_t *hp)
{
	int		rv = 0, t;
	int		cmd, serr = 0;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cmd_t	*cmdp;
	static fn_t	f = "sbd_pre_op";

	cmd = hp->h_cmd;
	devset = shp->sh_devset;

	switch (cmd) {
	case SBD_CMD_CONNECT:
	case SBD_CMD_DISCONNECT:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_ASSIGN:
	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_TEST:
		/* ioctls allowed if caller has write permission */
		if (!(hp->h_mode & FWRITE)) {
			SBD_SET_ERRNO(ep, EPERM);
			return (-1);
		}
		/*FALLTHROUGH*/

	default:
		break;
	}

	hp->h_iap = GETSTRUCT(sbd_ioctl_arg_t, 1);
	rv = sbd_copyin_ioarg(hp, hp->h_mode, cmd,
	    (sbd_cmd_t *)hp->h_iap, shp->sh_arg);
	if (rv) {
		SBD_SET_ERRNO(ep, rv);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		cmn_err(CE_WARN, "%s: copyin fail", f);
		return (-1);
	} else {
		cmdp =  (sbd_cmd_t *)hp->h_iap;
		if (cmdp->cmd_cm.c_id.c_name[0] != '\0') {

			/* A name was supplied; derive the type from it. */
			cmdp->cmd_cm.c_id.c_type = SBD_COMP(sbd_name_to_idx(
			    cmdp->cmd_cm.c_id.c_name));
			if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_MEM) {
				/* Memory is board-wide; unit -1 means unit 0 */
				if (cmdp->cmd_cm.c_id.c_unit == -1)
					cmdp->cmd_cm.c_id.c_unit = 0;
			}
		}
		devset = shp->sh_orig_devset = shp->sh_devset =
		    sbd_dev2devset(&cmdp->cmd_cm.c_id);
		if (devset == 0) {
			SBD_SET_ERRNO(ep, EINVAL);
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
			return (-1);
		}
	}

	/*
	 * Always turn on these bits ala Sunfire DR.
	 */
	hp->h_flags |= SBD_FLAG_DEVI_FORCE;

	if (cmdp->cmd_cm.c_flags & SBD_FLAG_FORCE)
		hp->h_flags |= SBD_IOCTL_FLAG_FORCE;

	/*
	 * Check for valid state transitions.
	 */
	if (!serr && ((t = CMD2INDEX(cmd)) != -1)) {
		struct sbd_state_trans	*transp;
		int			state_err;

		transp = &sbd_state_transition[t];
		ASSERT(transp->x_cmd == cmd);

		state_err = sbd_check_transition(sbp, &devset, transp);

		if (state_err < 0) {
			/*
			 * Invalidate device.
			 */
			SBD_SET_ERRNO(ep, ENOTTY);
			serr = -1;
			PR_ALL("%s: invalid devset (0x%x)\n",
			    f, (uint_t)devset);
		} else if (state_err != 0) {
			/*
			 * State transition is not a valid one.
			 */
			SBD_SET_ERRNO(ep, transp->x_op[state_err].x_err);
			serr = transp->x_op[state_err].x_rv;
			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
			    f, sbd_state_str[state_err], state_err,
			    SBD_CMD_STR(cmd), cmd);
		}
		if (serr && SBD_GET_ERRNO(ep) != 0) {
			/*
			 * A state transition error occurred.
			 */
			if (serr < 0) {
				SBD_SET_ERR(ep, ESBD_INVAL);
			} else {
				SBD_SET_ERR(ep, ESBD_STATE);
			}
			PR_ALL("%s: invalid state transition\n", f);
		} else {
			/* Transition allowed; commit the pruned devset. */
			shp->sh_devset = devset;
		}
	}

	if (serr && !rv && hp->h_iap) {

		/*
		 * There was a state error.  We successfully copied
		 * in the ioctl argument, so let's fill in the
		 * error and copy it back out.
		 */

		if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0)
			SBD_SET_ERRNO(ep, EIO);

		SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
		    ep->e_code,
		    ep->e_rsc);
		(void) sbd_copyout_errs(hp->h_mode, hp->h_iap, shp->sh_arg);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		rv = -1;
	}

	return (rv);
}
1977
/*
 * Common postamble for DR ioctls: timestamp the board for
 * state-changing commands, promote a code-only error to errno EIO,
 * copy any error back out to the user, and free the copied-in ioctl
 * argument.
 */
static void
sbd_post_op(sbd_handle_t *hp)
{
	int		cmd;
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	cmd = hp->h_cmd;

	switch (cmd) {
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_CONNECT:
	case SBD_CMD_DISCONNECT:
		/* Record when the board last changed state. */
		sbp->sb_time = gethrestime_sec();
		break;

	default:
		break;
	}

	/* An error code with no errno still must fail the ioctl. */
	if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0) {
		SBD_SET_ERRNO(ep, EIO);
	}

	if (shp->sh_arg != NULL) {

		if (SBD_GET_ERR(ep) != ESBD_NOERROR) {

			/*
			 * NOTE(review): assumes h_iap is still valid
			 * whenever sh_arg is set and an error occurred —
			 * confirm against callers.
			 */
			SBD_SET_IOCTL_ERR(&hp->h_iap->i_err,
			    ep->e_code,
			    ep->e_rsc);

			(void) sbd_copyout_errs(hp->h_mode, hp->h_iap,
			    shp->sh_arg);
		}

		if (hp->h_iap != NULL) {
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
		}
	}
}
2022
2023 static int
sbd_probe_board(sbd_handle_t * hp)2024 sbd_probe_board(sbd_handle_t *hp)
2025 {
2026 int rv;
2027 sbd_board_t *sbp;
2028 sbdp_handle_t *hdp;
2029 static fn_t f = "sbd_probe_board";
2030
2031 sbp = SBDH2BD(hp->h_sbd);
2032
2033 ASSERT(sbp != NULL);
2034 PR_ALL("%s for board %d", f, sbp->sb_num);
2035
2036
2037 hdp = sbd_get_sbdp_handle(sbp, hp);
2038
2039 if ((rv = sbdp_connect_board(hdp)) != 0) {
2040 sbderror_t *ep = SBD_HD2ERR(hp);
2041
2042 SBD_GET_PERR(hdp->h_err, ep);
2043 }
2044
2045 /*
2046 * We need to force a recache after the connect. The cached
2047 * info may be incorrect
2048 */
2049 mutex_enter(&sbp->sb_flags_mutex);
2050 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2051 mutex_exit(&sbp->sb_flags_mutex);
2052
2053 SBD_INJECT_ERR(SBD_PROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2054 ESGT_PROBE, NULL);
2055
2056 sbd_release_sbdp_handle(hdp);
2057
2058 return (rv);
2059 }
2060
2061 static int
sbd_deprobe_board(sbd_handle_t * hp)2062 sbd_deprobe_board(sbd_handle_t *hp)
2063 {
2064 int rv;
2065 sbdp_handle_t *hdp;
2066 sbd_board_t *sbp;
2067 static fn_t f = "sbd_deprobe_board";
2068
2069 PR_ALL("%s...\n", f);
2070
2071 sbp = SBDH2BD(hp->h_sbd);
2072
2073 hdp = sbd_get_sbdp_handle(sbp, hp);
2074
2075 if ((rv = sbdp_disconnect_board(hdp)) != 0) {
2076 sbderror_t *ep = SBD_HD2ERR(hp);
2077
2078 SBD_GET_PERR(hdp->h_err, ep);
2079 }
2080
2081 mutex_enter(&sbp->sb_flags_mutex);
2082 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2083 mutex_exit(&sbp->sb_flags_mutex);
2084
2085 SBD_INJECT_ERR(SBD_DEPROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2086 ESGT_DEPROBE, NULL);
2087
2088 sbd_release_sbdp_handle(hdp);
2089 return (rv);
2090 }
2091
2092 /*
2093 * Check if a CPU node is part of a CMP.
2094 */
2095 int
sbd_is_cmp_child(dev_info_t * dip)2096 sbd_is_cmp_child(dev_info_t *dip)
2097 {
2098 dev_info_t *pdip;
2099
2100 if (strcmp(ddi_node_name(dip), "cpu") != 0) {
2101 return (0);
2102 }
2103
2104 pdip = ddi_get_parent(dip);
2105
2106 ASSERT(pdip);
2107
2108 if (strcmp(ddi_node_name(pdip), "cmp") == 0) {
2109 return (1);
2110 }
2111
2112 return (0);
2113 }
2114
2115 /*
2116 * Returns the nodetype if dip is a top dip on the board of
2117 * interest or SBD_COMP_UNKNOWN otherwise
2118 */
static sbd_comp_type_t
get_node_type(sbd_board_t *sbp, dev_info_t *dip, int *unitp)
{
	int		idx, unit;
	sbd_handle_t	*hp;
	sbdp_handle_t	*hdp;
	char		otype[OBP_MAXDRVNAME];
	int		otypelen;

	ASSERT(sbp);

	/* Default to "no unit" so callers see -1 on every early return. */
	if (unitp)
		*unitp = -1;

	hp = MACHBD2HD(sbp);

	hdp = sbd_get_sbdp_handle(sbp, hp);
	/* Ignore nodes that belong to some other board. */
	if (sbdp_get_board_num(hdp, dip) != sbp->sb_num) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	/*
	 * sbdp_get_unit_num will return (-1) for cmp as there
	 * is no "device_type" property associated with cmp.
	 * Therefore we will just skip getting unit number for
	 * cmp.  Callers of this function need to check the
	 * value set in unitp before using it to dereference
	 * an array.
	 */
	if (strcmp(ddi_node_name(dip), "cmp") == 0) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_CMP);
	}

	/* Classify the node by its OBP "device_type" property. */
	otypelen = sizeof (otype);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    OBP_DEVICETYPE,  (caddr_t)otype, &otypelen)) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	idx = sbd_otype_to_idx(otype);

	if (SBD_COMP(idx) == SBD_COMP_UNKNOWN) {
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	unit = sbdp_get_unit_num(hdp, dip);
	if (unit == -1) {
		cmn_err(CE_WARN,
		    "get_node_type: %s unit fail %p", otype, (void *)dip);
		sbd_release_sbdp_handle(hdp);
		return (SBD_COMP_UNKNOWN);
	}

	sbd_release_sbdp_handle(hdp);

	if (unitp)
		*unitp = unit;

	return (SBD_COMP(idx));
}
2183
/* Argument block passed to the sbd_setup_devlists() devinfo-tree walk. */
typedef struct {
	sbd_board_t	*sbp;	/* board whose device lists are being built */
	int		nmc;	/* count of memory-controller dips found */
	int		hold;	/* NOTE(review): not referenced in this */
				/* chunk — confirm use elsewhere in file */
} walk_tree_t;
2189
/*
 * ddi_walk_devs() callback that populates a board's per-type device
 * lists (CPU, memory, IO) and pathname caches, marks each discovered
 * unit present, and records memory-controller dips on the board's
 * first memory unit.  'arg' is a walk_tree_t.
 */
static int
sbd_setup_devlists(dev_info_t *dip, void *arg)
{
	walk_tree_t	*wp;
	dev_info_t	**devlist = NULL;
	char		*pathname = NULL;
	sbd_mem_unit_t	*mp;
	static fn_t	f = "sbd_setup_devlists";
	sbd_board_t	*sbp;
	int		unit;
	sbd_comp_type_t	nodetype;

	ASSERT(dip);

	wp = (walk_tree_t *)arg;

	if (wp == NULL) {
		PR_ALL("%s:bad arg\n", f);
		return (DDI_WALK_TERMINATE);
	}

	sbp = wp->sbp;

	nodetype = get_node_type(sbp, dip, &unit);

	/* Select the board's pathname cache slot for this node type. */
	switch (nodetype) {

	case SBD_COMP_CPU:
		pathname = sbp->sb_cpupath[unit];
		break;

	case SBD_COMP_MEM:
		pathname = sbp->sb_mempath[unit];
		break;

	case SBD_COMP_IO:
		pathname = sbp->sb_iopath[unit];
		break;

	case SBD_COMP_CMP:
	case SBD_COMP_UNKNOWN:
		/*
		 * This dip is not of interest to us
		 */
		return (DDI_WALK_CONTINUE);

	default:
		ASSERT(0);
		return (DDI_WALK_CONTINUE);
	}

	/*
	 * dip's parent is being held busy by ddi_walk_devs(),
	 * so dip doesn't have to be held while calling ddi_pathname()
	 */
	if (pathname) {
		(void) ddi_pathname(dip, pathname);
	}

	devlist = sbp->sb_devlist[NIX(nodetype)];

	/*
	 * The branch rooted at dip should already be held,
	 * unless we are dealing with a core of a CMP.
	 */
	ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));
	devlist[unit] = dip;

	/*
	 * This test is required if multiple devices are considered
	 * as one. This is the case for memory-controller nodes.
	 */
	if (!SBD_DEV_IS_PRESENT(sbp, nodetype, unit)) {
		sbp->sb_ndev++;
		SBD_DEV_SET_PRESENT(sbp, nodetype, unit);
	}

	if (nodetype == SBD_COMP_MEM) {
		/* All MCs hang off memory unit 0; record this one. */
		mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
		ASSERT(wp->nmc < SBD_NUM_MC_PER_BOARD);
		mp->sbm_dip[wp->nmc++] = dip;
	}

	return (DDI_WALK_CONTINUE);
}
2275
2276 /*
2277 * This routine is used to construct the memory devlist.
2278 * In Starcat and Serengeti platforms, a system board can contain up to
2279 * four memory controllers (MC). The MCs have been programmed by POST for
2280 * optimum memory interleaving amongst their peers on the same board.
2281 * This DR driver does not support deinterleaving. Therefore, the smallest
2282 * unit of memory that can be manipulated by this driver is all of the
2283 * memory on a board. Because of this restriction, a board's memory devlist
2284 * is populated with only one of the four (possible) MC dnodes on that board.
2285 * Care must be taken to ensure that the selected MC dnode represents the
2286 * lowest physical address to which memory on the board will respond to.
2287 * This is required in order to preserve the semantics of
2288 * sbdp_get_base_physaddr() when applied to a MC dnode stored in the
2289 * memory devlist.
2290 */
static void
sbd_init_mem_devlists(sbd_board_t *sbp)
{
	dev_info_t	**devlist;
	sbd_mem_unit_t	*mp;
	dev_info_t	*mc_dip;
	sbdp_handle_t	*hdp;
	uint64_t	mc_pa, lowest_pa;
	int		i;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];

	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);

	mc_dip = mp->sbm_dip[0];
	if (mc_dip == NULL)
		return;		/* No MC dips found for this board */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
		/* TODO: log complaint about dnode */

pretend_no_mem:
		/*
		 * We are here because sbdphw_get_base_physaddr() failed.
		 * Although it is very unlikely to happen, it did.  Lucky us.
		 * Since we can no longer examine _all_ of the MCs on this
		 * board to determine which one is programmed to the lowest
		 * physical address, we cannot involve any of the MCs on
		 * this board in DR operations.  To ensure this, we pretend
		 * that this board does not contain any memory.
		 *
		 * Paranoia: clear the dev_present mask.
		 */
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, 0)) {
			ASSERT(sbp->sb_ndev != 0);
			SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, 0);
			sbp->sb_ndev--;
		}

		/* Forget every MC we found during the tree walk. */
		for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
			mp->sbm_dip[i] = NULL;
		}

		sbd_release_sbdp_handle(hdp);
		return;
	}

	/* assume this one will win. */
	devlist[0] = mc_dip;
	mp->sbm_cm.sbdev_dip = mc_dip;
	lowest_pa = mc_pa;

	/*
	 * We know the base physical address of one of the MC devices.  Now
	 * we will enumerate through all of the remaining MC devices on
	 * the board to find which of them is programmed to the lowest
	 * physical address.
	 */
	for (i = 1; i < SBD_NUM_MC_PER_BOARD; i++) {
		mc_dip = mp->sbm_dip[i];
		if (mc_dip == NULL) {
			/* MC dips are packed; the first NULL ends the list */
			break;
		}

		if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
			cmn_err(CE_NOTE, "No mem on board %d unit %d",
			    sbp->sb_num, i);
			break;
		}
		if (mc_pa < lowest_pa) {
			/* New lowest-addressed MC; it represents the board. */
			mp->sbm_cm.sbdev_dip = mc_dip;
			devlist[0] = mc_dip;
			lowest_pa = mc_pa;
		}
	}

	sbd_release_sbdp_handle(hdp);
}
2372
2373 static int
sbd_name_to_idx(char * name)2374 sbd_name_to_idx(char *name)
2375 {
2376 int idx;
2377
2378 for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2379 if (strcmp(name, SBD_DEVNAME(idx)) == 0) {
2380 break;
2381 }
2382 }
2383
2384 return (idx);
2385 }
2386
2387 static int
sbd_otype_to_idx(char * otype)2388 sbd_otype_to_idx(char *otype)
2389 {
2390 int idx;
2391
2392 for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2393
2394 if (strcmp(otype, SBD_OTYPE(idx)) == 0) {
2395 break;
2396 }
2397 }
2398
2399 return (idx);
2400 }
2401
/*
 * (Re)build all of a board's device lists: reset every CPU, memory
 * and IO unit descriptor, walk the devinfo tree below sb_topdip to
 * discover present devices, bring up the CPUs, and then select the
 * representative memory-controller dip.  Returns the number of
 * devices found (sb_ndev).
 */
static int
sbd_init_devlists(sbd_board_t *sbp)
{
	int		i;
	sbd_dev_unit_t	*dp;
	sbd_mem_unit_t	*mp;
	walk_tree_t	*wp, walk = {0};
	dev_info_t	*pdip;
	static fn_t	f = "sbd_init_devlists";

	PR_ALL("%s (board = %d)...\n", f, sbp->sb_num);

	wp = &walk;

	SBD_DEVS_DISCONNECT(sbp, (uint_t)-1);

	/*
	 * Clear out old entries, if any.
	 */

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_MEMUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_MEM;
	}

	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);
	ASSERT(mp != NULL);
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		mp->sbm_dip[i] = NULL;
	}

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_CPU)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_CPUUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_CPU;
	}
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_IO)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_IOUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_IO;
	}

	wp->sbp = sbp;
	wp->nmc = 0;
	sbp->sb_ndev = 0;

	/*
	 * ddi_walk_devs() requires that topdip's parent be held.
	 */
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip) {
		ndi_hold_devi(pdip);
		ndi_devi_enter(pdip, &i);
	}
	ddi_walk_devs(sbp->sb_topdip, sbd_setup_devlists, (void *) wp);
	if (pdip) {
		ndi_devi_exit(pdip, i);
		ndi_rele_devi(pdip);
	}

	/*
	 * There is no point checking all the components if there
	 * are no devices.
	 */
	if (sbp->sb_ndev == 0) {
		sbp->sb_memaccess_ok = 0;
		return (sbp->sb_ndev);
	}

	/*
	 * Initialize cpu sections before calling sbd_init_mem_devlists
	 * which will access the mmus.
	 */
	sbp->sb_memaccess_ok = 1;
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i)) {
			sbd_init_cpu_unit(sbp, i);
			if (sbd_connect_cpu(sbp, i)) {
				/* Record failure; continue with other CPUs. */
				SBD_SET_ERR(HD2MACHERR(MACHBD2HD(sbp)),
				    ESBD_CPUSTART);
			}

		}
	}

	if (sbp->sb_memaccess_ok) {
		sbd_init_mem_devlists(sbp);
	} else {
		cmn_err(CE_WARN, "unable to access memory on board %d",
		    sbp->sb_num);
	}

	return (sbp->sb_ndev);
}
2503
/*
 * Initialize the cpu unit structure for 'unit' on board 'sbp'
 * (cpuid, implementation, flags, properties, condition) and then
 * transition the unit to the state implied by its present/attached
 * status: CONFIGURED, CONNECTED or EMPTY.
 */
static void
sbd_init_cpu_unit(sbd_board_t *sbp, int unit)
{
	sbd_istate_t	new_state;
	sbd_cpu_unit_t	*cp;
	int		cpuid;
	dev_info_t	*dip;
	sbdp_handle_t	*hdp;
	sbd_handle_t	*hp = MACHBD2HD(sbp);
	extern kmutex_t	cpu_lock;

	/* Derive the target state from the unit's current status. */
	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONFIGURED;
	} else if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONNECTED;
	} else {
		new_state = SBD_STATE_EMPTY;
	}

	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	cpuid = sbdp_get_cpuid(hdp, dip);

	cp->sbc_cpu_id = cpuid;

	/*
	 * sbdp_cpu_get_impl may be unresolved in some platform
	 * modules; the address test checks whether the symbol is
	 * actually present before calling it.
	 */
	if (&sbdp_cpu_get_impl)
		cp->sbc_cpu_impl = sbdp_cpu_get_impl(hdp, dip);
	else
		cp->sbc_cpu_impl = -1;

	/* Snapshot the cpu's flags under cpu_lock, if the cpu exists. */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = CPU_OFFLINE | CPU_POWEROFF;
	mutex_exit(&cpu_lock);

	sbd_cpu_set_prop(cp, dip);

	cp->sbc_cm.sbdev_cond = sbd_get_comp_cond(dip);
	sbd_release_sbdp_handle(hdp);

	/*
	 * Any changes to the cpu should be performed above
	 * this call to ensure the cpu is fully initialized
	 * before transitioning to the new state.
	 */
	SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, unit, new_state);
}
2557
2558 /*
2559 * Only do work if called to operate on an entire board
2560 * which doesn't already have components present.
2561 */
/*
 * Connect the board: discover its devices via sbd_init_devlists(),
 * initialize the mem and io unit sections, and transition the
 * board to CONNECTED/UNCONFIGURED.  Fails with EINVAL if devices
 * are already present, or ESBD_NODEV if none are found; errors are
 * reported through the handle's error structure.
 */
static void
sbd_connect(sbd_handle_t *hp)
{
	sbd_board_t	*sbp;
	sbderror_t	*ep;
	static fn_t	f = "sbd_connect";

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("%s board %d\n", f, sbp->sb_num);

	ep = HD2MACHERR(hp);

	if (SBD_DEVS_PRESENT(sbp)) {
		/*
		 * Board already has devices present.
		 */
		PR_ALL("%s: devices already present (0x%x)\n",
		    f, SBD_DEVS_PRESENT(sbp));
		SBD_SET_ERRNO(ep, EINVAL);
		return;
	}

	if (sbd_init_devlists(sbp) == 0) {
		cmn_err(CE_WARN, "%s: no devices present on board %d",
		    f, sbp->sb_num);
		SBD_SET_ERR(ep, ESBD_NODEV);
		return;
	} else {
		int	i;

		/*
		 * Initialize mem-unit section of board structure.
		 */
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
				sbd_init_mem_unit(sbp, i, SBD_HD2ERR(hp));

		/*
		 * Initialize sb_io sections.
		 */
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
				sbd_init_io_unit(sbp, i);

		/* Board is now connected but not yet configured. */
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		sbp->sb_rstate = SBD_STAT_CONNECTED;
		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		SBD_INJECT_ERR(SBD_CONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		    ESBD_INTERNAL, NULL);
	}
}
2615
/*
 * Disconnect the requested devices that are present but not
 * attached.  Each successfully disconnected unit is transitioned
 * to EMPTY and cleared from the present set.  When no devices
 * remain present the board moves to OCCUPIED/DISCONNECTED.
 *
 * Returns 0 when the board has been fully disconnected, -1 when
 * some devices could not be disconnected.
 */
static int
sbd_disconnect(sbd_handle_t *hp)
{
	int		i;
	sbd_devset_t	devset;
	sbd_board_t	*sbp;
	static fn_t	f = "sbd_disconnect it";

	PR_ALL("%s ...\n", f);

	sbp = SBDH2BD(hp->h_sbd);

	/*
	 * Only devices which are present, but
	 * unattached can be disconnected.
	 */
	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_PRESENT(sbp) &
	    SBD_DEVS_UNATTACHED(sbp);

	ASSERT((SBD_DEVS_ATTACHED(sbp) & devset) == 0);

	/*
	 * Update per-device state transitions.
	 */

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
			if (sbd_disconnect_mem(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
				    SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
			}
		}

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, i)) {
			if (sbd_disconnect_cpu(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
				    SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_CPU, i);
			}
		}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, i)) {
			if (sbd_disconnect_io(hp, i) == 0) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
				    SBD_STATE_EMPTY);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_IO, i);
			}
		}

	/*
	 * Once all the components on a board have been disconnect
	 * the board's state can transition to disconnected and
	 * we can allow the deprobe to take place.
	 */
	if (SBD_DEVS_PRESENT(sbp) == 0) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_OCCUPIED);
		sbp->sb_rstate = SBD_STAT_DISCONNECTED;
		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		SBD_INJECT_ERR(SBD_DISCONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		    ESBD_INTERNAL, NULL);
		return (0);
	} else {
		cmn_err(CE_WARN, "%s: could not disconnect devices on board %d",
		    f, sbp->sb_num);
		return (-1);
	}
}
2687
2688 static void
sbd_test_board(sbd_handle_t * hp)2689 sbd_test_board(sbd_handle_t *hp)
2690 {
2691 sbd_board_t *sbp;
2692 sbdp_handle_t *hdp;
2693
2694 sbp = SBDH2BD(hp->h_sbd);
2695
2696 PR_ALL("sbd_test_board: board %d\n", sbp->sb_num);
2697
2698
2699 hdp = sbd_get_sbdp_handle(sbp, hp);
2700
2701 if (sbdp_test_board(hdp, &hp->h_opts) != 0) {
2702 sbderror_t *ep = SBD_HD2ERR(hp);
2703
2704 SBD_GET_PERR(hdp->h_err, ep);
2705 }
2706
2707 SBD_INJECT_ERR(SBD_TEST_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2708 ESBD_INTERNAL, NULL);
2709
2710 sbd_release_sbdp_handle(hdp);
2711 }
2712
2713 static void
sbd_assign_board(sbd_handle_t * hp)2714 sbd_assign_board(sbd_handle_t *hp)
2715 {
2716 sbd_board_t *sbp;
2717 sbdp_handle_t *hdp;
2718
2719 sbp = SBDH2BD(hp->h_sbd);
2720
2721 PR_ALL("sbd_assign_board: board %d\n", sbp->sb_num);
2722
2723 hdp = sbd_get_sbdp_handle(sbp, hp);
2724
2725 if (sbdp_assign_board(hdp) != 0) {
2726 sbderror_t *ep = SBD_HD2ERR(hp);
2727
2728 SBD_GET_PERR(hdp->h_err, ep);
2729 }
2730
2731 SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2732 ESBD_INTERNAL, NULL);
2733
2734 sbd_release_sbdp_handle(hdp);
2735 }
2736
/*
 * Ask the platform to unassign the board from this domain.  A
 * platform failure is copied into the handle's error structure.
 */
static void
sbd_unassign_board(sbd_handle_t *hp)
{
	sbd_board_t	*sbp;
	sbdp_handle_t	*hdp;

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("sbd_unassign_board: board %d\n", sbp->sb_num);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdp_unassign_board(hdp) != 0) {
		sbderror_t *ep = SBD_HD2ERR(hp);

		SBD_GET_PERR(hdp->h_err, ep);
	}

	/*
	 * NOTE(review): this injects SBD_ASSIGN_BOARD_PSEUDO_ERR rather
	 * than an unassign-specific tag — looks like a copy-paste from
	 * sbd_assign_board; confirm against the pseudo-error definitions
	 * before changing.
	 */
	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
	    ESBD_INTERNAL, NULL);

	sbd_release_sbdp_handle(hdp);
}
2760
2761 static void
sbd_poweron_board(sbd_handle_t * hp)2762 sbd_poweron_board(sbd_handle_t *hp)
2763 {
2764 sbd_board_t *sbp;
2765 sbdp_handle_t *hdp;
2766
2767 sbp = SBDH2BD(hp->h_sbd);
2768
2769 PR_ALL("sbd_poweron_board: %d\n", sbp->sb_num);
2770
2771 hdp = sbd_get_sbdp_handle(sbp, hp);
2772
2773 if (sbdp_poweron_board(hdp) != 0) {
2774 sbderror_t *ep = SBD_HD2ERR(hp);
2775
2776 SBD_GET_PERR(hdp->h_err, ep);
2777 }
2778
2779 SBD_INJECT_ERR(SBD_POWERON_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2780 ESBD_INTERNAL, NULL);
2781
2782 sbd_release_sbdp_handle(hdp);
2783 }
2784
2785 static void
sbd_poweroff_board(sbd_handle_t * hp)2786 sbd_poweroff_board(sbd_handle_t *hp)
2787 {
2788 sbd_board_t *sbp;
2789 sbdp_handle_t *hdp;
2790
2791 sbp = SBDH2BD(hp->h_sbd);
2792
2793 PR_ALL("sbd_poweroff_board: %d\n", sbp->sb_num);
2794
2795 hdp = sbd_get_sbdp_handle(sbp, hp);
2796
2797 if (sbdp_poweroff_board(hdp) != 0) {
2798 sbderror_t *ep = SBD_HD2ERR(hp);
2799
2800 SBD_GET_PERR(hdp->h_err, ep);
2801 }
2802
2803 SBD_INJECT_ERR(SBD_POWEROFF_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2804 ESBD_INTERNAL, NULL);
2805
2806 sbd_release_sbdp_handle(hdp);
2807 }
2808
2809
2810 /*
2811 * Return a list of the dip's of devices that are
2812 * either present and attached, or present only but
2813 * not yet attached for the given board.
2814 */
/*
 * Build a freshly allocated devlist (up to max_units entries) for
 * the units selected by 'uset' of type 'nodetype' on board 'sbp'.
 * If present_only is set, only devices present but NOT yet attached
 * qualify; otherwise only devices present AND attached.  *count
 * receives the number of entries; returns NULL (after freeing the
 * list) when nothing qualifies.  Caller frees the returned list.
 */
sbd_devlist_t *
sbd_get_devlist(sbd_handle_t *hp, sbd_board_t *sbp, sbd_comp_type_t nodetype,
	int max_units, uint_t uset, int *count, int present_only)
{
	int		i, ix;
	sbd_devlist_t	*ret_devlist;
	dev_info_t	**devlist;
	sbdp_handle_t	*hdp;

	*count = 0;
	ret_devlist = GETSTRUCT(sbd_devlist_t, max_units);
	devlist = sbp->sb_devlist[NIX(nodetype)];
	/*
	 * Turn into binary value since we're going
	 * to be using XOR for a comparison.
	 * if (present_only) then
	 *	dev must be PRESENT, but NOT ATTACHED.
	 * else
	 *	dev must be PRESENT AND ATTACHED.
	 * endif
	 */
	if (present_only)
		present_only = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = ix = 0; (i < max_units) && uset; i++) {
		int		ut, is_present, is_attached;
		dev_info_t	*dip;
		sbderror_t	*ep = SBD_HD2ERR(hp);
		int		nunits, distance, j;

		/*
		 * For CMPs, we would like to perform DR operation on
		 * all the cores before moving onto the next chip.
		 * Therefore, when constructing the devlist, we process
		 * all the cores together.
		 */
		if (nodetype == SBD_COMP_CPU) {
			/*
			 * Number of units to process in the inner loop
			 */
			nunits = MAX_CORES_PER_CMP;
			/*
			 * The distance between the units in the
			 * board's sb_devlist structure.
			 */
			distance = MAX_CMP_UNITS_PER_BOARD;
		} else {
			nunits = 1;
			distance = 0;
		}

		for (j = 0; j < nunits; j++) {
			if ((dip = devlist[i + j * distance]) == NULL)
				continue;

			ut = sbdp_get_unit_num(hdp, dip);

			if (ut == -1) {
				SBD_GET_PERR(hdp->h_err, ep);
				PR_ALL("sbd_get_devlist bad unit %d"
				    " code %d errno %d",
				    i, ep->e_code, ep->e_errno);
			}

			/* Skip units the caller did not ask for. */
			if ((uset & (1 << ut)) == 0)
				continue;
			uset &= ~(1 << ut);
			is_present = SBD_DEV_IS_PRESENT(sbp, nodetype, ut) ?
			    1 : 0;
			is_attached = SBD_DEV_IS_ATTACHED(sbp, nodetype, ut) ?
			    1 : 0;

			/*
			 * XOR implements the selection described above:
			 * present_only wants !attached, otherwise attached.
			 */
			if (is_present && (present_only ^ is_attached)) {
				ret_devlist[ix].dv_dip = dip;
				sbd_init_err(&ret_devlist[ix].dv_error);
				ix++;
			}
		}
	}
	sbd_release_sbdp_handle(hdp);

	if ((*count = ix) == 0) {
		FREESTRUCT(ret_devlist, sbd_devlist_t, max_units);
		ret_devlist = NULL;
	}

	return (ret_devlist);
}
2905
/*
 * Return the devlist of present-but-unattached devices to attach
 * on this pass: cpus first, then memory, then io.  The static
 * next_pass records where the previous call left off so a
 * component type is never returned twice (see comment below).
 * Returns NULL with *devnump == 0 when all passes are exhausted.
 */
static sbd_devlist_t *
sbd_get_attach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*attach_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_attach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	attach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD,
			    uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 2;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD,
			    uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 3;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD,
			    uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 4;
				return (attach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3006
/*
 * Pre-attach pass over a homogeneous devlist: dispatch to the
 * component-specific pre-attach routine and, on failure, free the
 * devlist allocated by sbd_get_attach_devlist().  Returns 0 on
 * success, -1 on failure (so the caller can continue with other
 * components).
 */
static int
sbd_pre_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int		max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	static fn_t	f = "sbd_pre_attach_devlist";

	/*
	 * In this driver, all entries in a devlist[] are
	 * of the same nodetype.
	 */
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_attach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_pre_attach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		/* io has no pre-attach work; just note the list size */
		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int i;
		/*
		 * Need to clean up devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	/*
	 * If an error occurred, return "continue"
	 * indication so that we can continue attaching
	 * as much as possible.
	 */
	return (rv ? -1 : 0);
}
3068
3069 static int
sbd_post_attach_devlist(sbd_handle_t * hp,sbd_devlist_t * devlist,int32_t devnum)3070 sbd_post_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3071 int32_t devnum)
3072 {
3073 int i, max_units = 0, rv = 0;
3074 sbd_devset_t devs_unattached, devs_present;
3075 sbd_comp_type_t nodetype;
3076 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
3077 sbdp_handle_t *hdp;
3078 static fn_t f = "sbd_post_attach_devlist";
3079
3080 sbp = SBDH2BD(hp->h_sbd);
3081 nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3082
3083 PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3084 f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3085
3086 hdp = sbd_get_sbdp_handle(sbp, hp);
3087
3088 /*
3089 * Need to free up devlist[] created earlier in
3090 * sbd_get_attach_devlist().
3091 */
3092 switch (nodetype) {
3093 case SBD_COMP_CPU:
3094 max_units = MAX_CPU_UNITS_PER_BOARD;
3095 rv = sbd_post_attach_cpu(hp, devlist, devnum);
3096 break;
3097
3098
3099 case SBD_COMP_MEM:
3100 max_units = MAX_MEM_UNITS_PER_BOARD;
3101
3102 rv = sbd_post_attach_mem(hp, devlist, devnum);
3103 break;
3104
3105 case SBD_COMP_IO:
3106 max_units = MAX_IO_UNITS_PER_BOARD;
3107 break;
3108
3109 default:
3110 rv = -1;
3111 break;
3112 }
3113
3114
3115 for (i = 0; i < devnum; i++) {
3116 int unit;
3117 dev_info_t *dip;
3118 sbderror_t *ep;
3119
3120 ep = &devlist[i].dv_error;
3121
3122 if (sbd_set_err_in_hdl(hp, ep) == 0)
3123 continue;
3124
3125 dip = devlist[i].dv_dip;
3126 nodetype = sbd_get_devtype(hp, dip);
3127 unit = sbdp_get_unit_num(hdp, dip);
3128
3129 if (unit == -1) {
3130 SBD_GET_PERR(hdp->h_err, ep);
3131 continue;
3132 }
3133
3134 unit = sbd_check_unit_attached(sbp, dip, unit, nodetype, ep);
3135
3136 if (unit == -1) {
3137 PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not attached\n",
3138 f, sbd_ct_str[(int)nodetype], sbp->sb_num, i);
3139 continue;
3140 }
3141
3142 SBD_DEV_SET_ATTACHED(sbp, nodetype, unit);
3143 SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
3144 SBD_STATE_CONFIGURED);
3145 }
3146 sbd_release_sbdp_handle(hdp);
3147
3148 if (rv) {
3149 PR_ALL("%s: errno %d, ecode %d during attach\n",
3150 f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
3151 SBD_GET_ERR(HD2MACHERR(hp)));
3152 }
3153
3154 devs_present = SBD_DEVS_PRESENT(sbp);
3155 devs_unattached = SBD_DEVS_UNATTACHED(sbp);
3156
3157 switch (SBD_BOARD_STATE(sbp)) {
3158 case SBD_STATE_CONNECTED:
3159 case SBD_STATE_UNCONFIGURED:
3160 ASSERT(devs_present);
3161
3162 if (devs_unattached == 0) {
3163 /*
3164 * All devices finally attached.
3165 */
3166 SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3167 sbp->sb_rstate = SBD_STAT_CONNECTED;
3168 sbp->sb_ostate = SBD_STAT_CONFIGURED;
3169 } else if (devs_present != devs_unattached) {
3170 /*
3171 * Only some devices are fully attached.
3172 */
3173 SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
3174 sbp->sb_rstate = SBD_STAT_CONNECTED;
3175 sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
3176 }
3177 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
3178 break;
3179
3180 case SBD_STATE_PARTIAL:
3181 ASSERT(devs_present);
3182 /*
3183 * All devices finally attached.
3184 */
3185 if (devs_unattached == 0) {
3186 SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3187 sbp->sb_rstate = SBD_STAT_CONNECTED;
3188 sbp->sb_ostate = SBD_STAT_CONFIGURED;
3189 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
3190 }
3191 break;
3192
3193 default:
3194 break;
3195 }
3196
3197 if (max_units && devlist) {
3198 int i;
3199
3200 for (i = 0; i < max_units; i++) {
3201 if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
3202 SBD_FREE_ERR(&devlist[i].dv_error);
3203 } else {
3204 break;
3205 }
3206 }
3207 FREESTRUCT(devlist, sbd_devlist_t, max_units);
3208 }
3209
3210 /*
3211 * Our policy is to attach all components that are
3212 * possible, thus we always return "success" on the
3213 * pre and post operations.
3214 */
3215 return (0);
3216 }
3217
3218 /*
3219 * We only need to "release" cpu and memory devices.
3220 */
/*
 * Return the devlist of attached devices to release on this pass:
 * memory first (so it is quiesced before its cpus), then cpus,
 * then io.  The static next_pass records where the previous call
 * left off so a component type is never returned twice.  Returns
 * NULL with *devnump == 0 when all passes are exhausted.
 */
static sbd_devlist_t *
sbd_get_release_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*release_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_release_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	release_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 2;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 3;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 4;
				return (release_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3325
/*
 * Pre-release pass over a homogeneous devlist: dispatch to the
 * component-specific pre-release routine.  For cpus, first refuse
 * the release (ESBD_MEMONLINE) if any memory unit that the caller
 * did NOT also request is still attached — memory must come down
 * before its cpus.  On failure the devlist allocated by
 * sbd_get_release_devlist() is freed.  Returns 0 or -1.
 */
static int
sbd_pre_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int		max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	static fn_t	f = "sbd_pre_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {
	case SBD_COMP_CPU: {
		int		i, mem_present = 0;
		sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
		sbd_devset_t	devset;
		sbd_priv_handle_t	*shp = HD2MACHHD(hp);

		max_units = MAX_CPU_UNITS_PER_BOARD;

		devset = shp->sh_orig_devset;

		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
			/*
			 * if client also requested to unconfigure memory
			 * the we allow the operation. Therefore
			 * we need to warranty that memory gets unconfig
			 * before cpus
			 */

			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
				continue;
			}
			if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_MEM, i)) {
				mem_present = 1;
				break;
			}
		}
		/*
		 * 'i' here is the index of the offending memory unit;
		 * it is only used when the loop broke out early.
		 */
		if (mem_present) {
			sbderror_t *ep = SBD_HD2ERR(hp);
			SBD_SET_ERR(ep, ESBD_MEMONLINE);
			SBD_SET_ERRSTR(ep, sbp->sb_mempath[i]);
			rv = -1;
		} else {
			rv = sbd_pre_release_cpu(hp, devlist, devnum);
		}

		break;

	}
	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_release_mem(hp, devlist, devnum);
		break;


	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_pre_release_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int i;

		/*
		 * the individual pre_release component routines should
		 * have set the error in the handle. No need to set it
		 * here
		 *
		 * Need to clean up dynamically allocated devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (rv ? -1 : 0);
}
3417
/*
 * Post-release pass over a homogeneous devlist: propagate per-unit
 * errors into the handle, verify unit numbers, check for lingering
 * I/O references, and free the devlist[] allocated by
 * sbd_get_release_devlist().  Returns 0 on success, -1 when an
 * errno has been recorded in the handle.
 */
static int
sbd_post_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int		i, max_units = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbdp_handle_t	*hdp;
	sbd_error_t	*spe;
	static fn_t	f = "sbd_post_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
	ASSERT(nodetype >= SBD_COMP_CPU && nodetype <= SBD_COMP_IO);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_release_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		break;

	case SBD_COMP_IO:
		/*
		 * Need to check if specific I/O is referenced and
		 * fail post-op.
		 */

		if (sbd_check_io_refs(hp, devlist, devnum) > 0) {
			PR_IO("%s: error - I/O devices ref'd\n", f);
		}

		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		{
			cmn_err(CE_WARN, "%s: invalid nodetype (%d)",
			    f, (int)nodetype);
			SBD_SET_ERR(HD2MACHERR(hp), ESBD_INVAL);
		}
		break;
	}
	hdp = sbd_get_sbdp_handle(sbp, hp);
	/* keep a pointer to the platform error for the diagnostics below */
	spe = hdp->h_err;

	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;

		ep = &devlist[i].dv_error;

		if (sbd_set_err_in_hdl(hp, ep) == 0) {
			continue;
		}

		unit = sbdp_get_unit_num(hdp, devlist[i].dv_dip);
		if (unit == -1) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			PR_ALL("%s bad unit num: %d code %d",
			    f, unit, spe->e_code);
			continue;
		}
	}
	sbd_release_sbdp_handle(hdp);

	if (SBD_GET_ERRNO(SBD_HD2ERR(hp))) {
		PR_ALL("%s: errno %d, ecode %d during release\n",
		    f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
		    SBD_GET_ERR(SBD_HD2ERR(hp)));
	}

	if (max_units && devlist) {
		int i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3513
/*
 * Mark the given unit unreferenced and transition it to the
 * UNREFERENCED state after a completed release.
 */
static void
sbd_release_dev_done(sbd_board_t *sbp, sbd_comp_type_t nodetype, int unit)
{
	SBD_DEV_SET_UNREFERENCED(sbp, nodetype, unit);
	SBD_DEVICE_TRANSITION(sbp, nodetype, unit, SBD_STATE_UNREFERENCED);
}
3520
/*
 * Completion callback for a device release: move the device (by
 * dip) to the UNREFERENCED state — via the memory-specific path
 * for mem units — and, once every released device on the board is
 * unreferenced, transition the whole board to UNREFERENCED.
 */
static void
sbd_release_done(sbd_handle_t *hp, sbd_comp_type_t nodetype, dev_info_t *dip)
{
	int		unit;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbderror_t	*ep;
	static fn_t	f = "sbd_release_done";
	sbdp_handle_t	*hdp;

	PR_ALL("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);
	ep = SBD_HD2ERR(hp);

	/* Resolve the dip to a unit number; bail out if that fails. */
	if ((unit = sbdp_get_unit_num(hdp, dip)) < 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: unable to get unit for dip (0x%p)",
		    f, (void *)dip);
		SBD_GET_PERR(hdp->h_err, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Transfer the device which just completed its release
	 * to the UNREFERENCED state.
	 */
	switch (nodetype) {

	case SBD_COMP_MEM:
		sbd_release_mem_done((void *)hp, unit);
		break;

	default:
		sbd_release_dev_done(sbp, nodetype, unit);
		break;
	}

	/*
	 * If the entire board was released and all components
	 * unreferenced then transfer it to the UNREFERENCED state.
	 */
	if (SBD_DEVS_RELEASED(sbp) == SBD_DEVS_UNREFERENCED(sbp)) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNREFERENCED);
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
3569
3570 static sbd_devlist_t *
sbd_get_detach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*detach_devlist;
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_detach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	detach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				next_pass = 2;
				return (detach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				/*
				 * Fix: advance to the I/O pass.  This
				 * previously re-armed pass 2, which would
				 * hand back the cpu nodes a second time on
				 * the next call -- exactly the duplication
				 * the next_pass comment above warns about.
				 */
				next_pass = 3;
				return (detach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				next_pass = 4;
				return (detach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3672
3673 static int
sbd_pre_detach_devlist(sbd_handle_t * hp,sbd_devlist_t * devlist,int32_t devnum)3674 sbd_pre_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3675 int32_t devnum)
3676 {
3677 int rv = 0;
3678 sbd_comp_type_t nodetype;
3679 static fn_t f = "sbd_pre_detach_devlist";
3680
3681 nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3682
3683 PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3684 f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3685
3686 switch (nodetype) {
3687 case SBD_COMP_CPU:
3688 rv = sbd_pre_detach_cpu(hp, devlist, devnum);
3689 break;
3690
3691 case SBD_COMP_MEM:
3692 rv = sbd_pre_detach_mem(hp, devlist, devnum);
3693 break;
3694
3695 case SBD_COMP_IO:
3696 rv = sbd_pre_detach_io(hp, devlist, devnum);
3697 break;
3698
3699 default:
3700 rv = -1;
3701 break;
3702 }
3703
3704 /*
3705 * We want to continue attempting to detach
3706 * other components.
3707 */
3708 return (rv);
3709 }
3710
3711 static int
sbd_post_detach_devlist(sbd_handle_t * hp,sbd_devlist_t * devlist,int32_t devnum)3712 sbd_post_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3713 int32_t devnum)
3714 {
3715 int i, max_units = 0, rv = 0;
3716 sbd_comp_type_t nodetype;
3717 sbd_board_t *sbp;
3718 sbd_istate_t bstate;
3719 static fn_t f = "sbd_post_detach_devlist";
3720 sbdp_handle_t *hdp;
3721
3722 sbp = SBDH2BD(hp->h_sbd);
3723 nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3724
3725 hdp = sbd_get_sbdp_handle(sbp, hp);
3726
3727 PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3728 f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3729
3730 /*
3731 * Need to free up devlist[] created earlier in
3732 * sbd_get_detach_devlist().
3733 */
3734 switch (nodetype) {
3735 case SBD_COMP_CPU:
3736 max_units = MAX_CPU_UNITS_PER_BOARD;
3737 rv = sbd_post_detach_cpu(hp, devlist, devnum);
3738 break;
3739
3740 case SBD_COMP_MEM:
3741 max_units = MAX_MEM_UNITS_PER_BOARD;
3742 rv = sbd_post_detach_mem(hp, devlist, devnum);
3743 break;
3744
3745 case SBD_COMP_IO:
3746 max_units = MAX_IO_UNITS_PER_BOARD;
3747 rv = sbd_post_detach_io(hp, devlist, devnum);
3748 break;
3749
3750 default:
3751 rv = -1;
3752 break;
3753 }
3754
3755
3756 for (i = 0; i < devnum; i++) {
3757 int unit;
3758 sbderror_t *ep;
3759 dev_info_t *dip;
3760
3761 ep = &devlist[i].dv_error;
3762
3763 if (sbd_set_err_in_hdl(hp, ep) == 0)
3764 continue;
3765
3766 dip = devlist[i].dv_dip;
3767 unit = sbdp_get_unit_num(hdp, dip);
3768 if (unit == -1) {
3769 if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
3770 continue;
3771 else {
3772 SBD_GET_PERR(hdp->h_err, ep);
3773 break;
3774 }
3775 }
3776 nodetype = sbd_get_devtype(hp, dip);
3777
3778 if (sbd_check_unit_attached(sbp, dip, unit, nodetype,
3779 ep) >= 0) {
3780 /*
3781 * Device is still attached probably due
3782 * to an error. Need to keep track of it.
3783 */
3784 PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not detached\n",
3785 f, sbd_ct_str[(int)nodetype], sbp->sb_num,
3786 unit);
3787 continue;
3788 }
3789
3790 SBD_DEV_CLR_ATTACHED(sbp, nodetype, unit);
3791 SBD_DEV_CLR_RELEASED(sbp, nodetype, unit);
3792 SBD_DEV_CLR_UNREFERENCED(sbp, nodetype, unit);
3793 SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
3794 SBD_STATE_UNCONFIGURED);
3795 }
3796 sbd_release_sbdp_handle(hdp);
3797
3798 bstate = SBD_BOARD_STATE(sbp);
3799 if (bstate != SBD_STATE_UNCONFIGURED) {
3800 if (SBD_DEVS_PRESENT(sbp) == SBD_DEVS_UNATTACHED(sbp)) {
3801 /*
3802 * All devices are finally detached.
3803 */
3804 SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNCONFIGURED);
3805 } else if ((SBD_BOARD_STATE(sbp) != SBD_STATE_PARTIAL) &&
3806 SBD_DEVS_ATTACHED(sbp)) {
3807 /*
3808 * Some devices remain attached.
3809 */
3810 SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
3811 }
3812 }
3813
3814 if (rv) {
3815 PR_ALL("%s: errno %d, ecode %d during detach\n",
3816 f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
3817 SBD_GET_ERR(HD2MACHERR(hp)));
3818 }
3819
3820 if (max_units && devlist) {
3821 int i;
3822
3823 for (i = 0; i < max_units; i++) {
3824 if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
3825 SBD_FREE_ERR(&devlist[i].dv_error);
3826 } else {
3827 break;
3828 }
3829 }
3830 FREESTRUCT(devlist, sbd_devlist_t, max_units);
3831 }
3832
3833 return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
3834 }
3835
3836 /*
3837 * Return the unit number of the respective dip if
3838 * it's found to be attached.
3839 */
static int
sbd_check_unit_attached(sbd_board_t *sbp, dev_info_t *dip, int unit,
	sbd_comp_type_t nodetype, sbderror_t *ep)
{
	int		rv = -1;	/* -1 = not attached (or error) */
	processorid_t	cpuid;
	uint64_t	basepa, endpa;
	struct memlist	*ml;
	extern struct memlist	*phys_install;
	sbdp_handle_t	*hdp;
	sbd_handle_t	*hp = MACHBD2HD(sbp);
	static fn_t	f = "sbd_check_unit_attached";

	hdp = sbd_get_sbdp_handle(sbp, hp);

	switch (nodetype) {

	case SBD_COMP_CPU:
		/* Attached iff the cpu is known to cpu_get(). */
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			break;
		}
		mutex_enter(&cpu_lock);
		if (cpu_get(cpuid) != NULL)
			rv = unit;
		mutex_exit(&cpu_lock);
		break;

	case SBD_COMP_MEM:
		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
			break;
		}
		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
			break;
		}

		/*
		 * Round basepa down to the alignment boundary and make
		 * endpa the exclusive end of the aligned span.
		 */
		basepa &= ~(endpa - 1);
		endpa += basepa;
		/*
		 * Check if base address is in phys_install.
		 * The loop exits with ml != NULL iff the span
		 * [basepa, endpa) overlaps an installed chunk.
		 */
		memlist_read_lock();
		for (ml = phys_install; ml; ml = ml->ml_next)
			if ((endpa <= ml->ml_address) ||
			    (basepa >= (ml->ml_address + ml->ml_size)))
				continue;
			else
				break;
		memlist_read_unlock();
		if (ml != NULL)
			rv = unit;
		break;

	case SBD_COMP_IO:
	{
		dev_info_t	*tdip, *pdip;

		tdip = dip;

		/*
		 * ddi_walk_devs() requires that topdip's parent be held.
		 */
		pdip = ddi_get_parent(sbp->sb_topdip);
		if (pdip) {
			ndi_hold_devi(pdip);
			/*
			 * NOTE(review): rv doubles here as the
			 * circular-count out-parameter for
			 * ndi_devi_enter(); it is overwritten below
			 * before being used as the return value.
			 */
			ndi_devi_enter(pdip, &rv);
		}
		/*
		 * Walk the tree looking for dip; the callback
		 * presumably NULLs tdip when it finds the device
		 * attached -- callback not visible in this chunk,
		 * confirm against sbd_check_io_attached().
		 */
		ddi_walk_devs(sbp->sb_topdip, sbd_check_io_attached,
		    (void *)&tdip);
		if (pdip) {
			ndi_devi_exit(pdip, rv);
			ndi_rele_devi(pdip);
		}

		if (tdip == NULL)
			rv = unit;
		else
			rv = -1;
		break;
	}

	default:
		PR_ALL("%s: unexpected nodetype(%d) for dip 0x%p\n",
		    f, nodetype, (void *)dip);
		rv = -1;
		break;
	}

	/*
	 * Save the error that sbdp sent us and report it
	 */
	if (rv == -1)
		SBD_GET_PERR(hdp->h_err, ep);

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
3939
3940 /*
3941 * Return memhandle, if in fact, this memunit is the owner of
3942 * a scheduled memory delete.
3943 */
3944 int
sbd_get_memhandle(sbd_handle_t * hp,dev_info_t * dip,memhandle_t * mhp)3945 sbd_get_memhandle(sbd_handle_t *hp, dev_info_t *dip, memhandle_t *mhp)
3946 {
3947 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
3948 sbd_mem_unit_t *mp;
3949 sbdp_handle_t *hdp;
3950 int unit;
3951 static fn_t f = "sbd_get_memhandle";
3952
3953 PR_MEM("%s...\n", f);
3954
3955 hdp = sbd_get_sbdp_handle(sbp, hp);
3956
3957 unit = sbdp_get_unit_num(hdp, dip);
3958 if (unit == -1) {
3959 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
3960 sbd_release_sbdp_handle(hdp);
3961 return (-1);
3962 }
3963 sbd_release_sbdp_handle(hdp);
3964
3965 mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
3966
3967 if (mp->sbm_flags & SBD_MFLAG_RELOWNER) {
3968 *mhp = mp->sbm_memhandle;
3969 return (0);
3970 } else {
3971 SBD_SET_ERR(SBD_HD2ERR(hp), ESBD_INTERNAL);
3972 SBD_SET_ERRSTR(SBD_HD2ERR(hp), sbp->sb_mempath[unit]);
3973 return (-1);
3974 }
3975 /*NOTREACHED*/
3976 }
3977
3978
3979 static int
sbd_cpu_cnt(sbd_handle_t * hp,sbd_devset_t devset)3980 sbd_cpu_cnt(sbd_handle_t *hp, sbd_devset_t devset)
3981 {
3982 int c, cix;
3983 sbd_board_t *sbp;
3984
3985 sbp = SBDH2BD(hp->h_sbd);
3986
3987 /*
3988 * Only look for requested devices that are actually present.
3989 */
3990 devset &= SBD_DEVS_PRESENT(sbp);
3991
3992 for (c = cix = 0; c < MAX_CMP_UNITS_PER_BOARD; c++) {
3993 /*
3994 * Index for core 1 , if exists.
3995 * With the current implementation it is
3996 * MAX_CMP_UNITS_PER_BOARD off from core 0.
3997 * The calculation will need to change if
3998 * the assumption is no longer true.
3999 */
4000 int c1 = c + MAX_CMP_UNITS_PER_BOARD;
4001
4002 if (DEVSET_IN_SET(devset, SBD_COMP_CMP, c) == 0) {
4003 continue;
4004 }
4005
4006 /*
4007 * Check to see if the dip(s) exist for this chip
4008 */
4009 if ((sbp->sb_devlist[NIX(SBD_COMP_CMP)][c] == NULL) &&
4010 (sbp->sb_devlist[NIX(SBD_COMP_CMP)][c1] == NULL))
4011 continue;
4012
4013 cix++;
4014 }
4015
4016 return (cix);
4017 }
4018
4019 static int
sbd_mem_cnt(sbd_handle_t * hp,sbd_devset_t devset)4020 sbd_mem_cnt(sbd_handle_t *hp, sbd_devset_t devset)
4021 {
4022 int i, ix;
4023 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
4024
4025 /*
4026 * Only look for requested devices that are actually present.
4027 */
4028 devset &= SBD_DEVS_PRESENT(sbp);
4029
4030 for (i = ix = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4031 dev_info_t *dip;
4032
4033 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i) == 0) {
4034 continue;
4035 }
4036
4037 dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
4038 if (dip == NULL)
4039 continue;
4040
4041 ix++;
4042 }
4043
4044 return (ix);
4045 }
4046
4047 /*
4048 * NOTE: This routine is only partially smart about multiple
4049 * mem-units. Need to make mem-status structure smart
4050 * about them also.
4051 */
static int
sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		m, mix, rv;
	memdelstat_t	mdst;
	memquery_t	mq;
	sbd_board_t	*sbp;
	sbd_mem_unit_t	*mp;
	sbd_mem_stat_t	*msp;
	extern int	kcage_on;
	int		i;
	static fn_t	f = "sbd_mem_status";

	sbp = SBDH2BD(hp->h_sbd);

	/*
	 * Check the present devset and access the dip with
	 * status lock held to protect against a concurrent
	 * unconfigure or disconnect thread.
	 */
	mutex_enter(&sbp->sb_slock);

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= SBD_DEVS_PRESENT(sbp);

	/* mix counts the status records filled in; dsp advances per unit */
	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
		dev_info_t	*dip;


		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
			continue;

		/*
		 * Check to make sure the memory unit is in a state
		 * where its fully initialized.
		 */
		if (SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, m) == SBD_STATE_EMPTY)
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][m];
		if (dip == NULL)
			continue;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, m);

		msp = &dsp->d_mem;

		bzero((caddr_t)msp, sizeof (*msp));
		msp->ms_type = SBD_COMP_MEM;

		/*
		 * The plugin expects -1 for the mem unit
		 */
		msp->ms_cm.c_id.c_unit = -1;

		/*
		 * Get the memory name from what sbdp gave us
		 */
		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (SBD_COMP(i) == SBD_COMP_MEM) {
				(void) strcpy(msp->ms_name, SBD_DEVNAME(i));
			}
		}
		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy;
		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;

		/* XXX revisit this after memory conversion */
		msp->ms_ostate = ostate_cvt(SBD_DEVICE_STATE(
		    sbp, SBD_COMP_MEM, m));

		msp->ms_basepfn = mp->sbm_basepfn;
		msp->ms_pageslost = mp->sbm_pageslost;
		msp->ms_cage_enabled = kcage_on;
		msp->ms_interleave = mp->sbm_interleave;

		/*
		 * If this unit owns an in-progress memory delete,
		 * report live delete statistics; otherwise take the
		 * static fallback path below.
		 */
		if (mp->sbm_flags & SBD_MFLAG_RELOWNER)
			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
		else
			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */

		if (rv == KPHYSM_OK) {
			msp->ms_totpages += mdst.phys_pages;

			/*
			 * Any pages above managed is "free",
			 * i.e. it's collected.
			 */
			msp->ms_detpages += (uint_t)(mdst.collected +
			    mdst.phys_pages -
			    mdst.managed);
		} else {
			msp->ms_totpages += (uint_t)mp->sbm_npages;

			/*
			 * If we're UNREFERENCED or UNCONFIGURED,
			 * then the number of detached pages is
			 * however many pages are on the board.
			 * I.e. detached = not in use by OS.
			 *
			 * NOTE(review): ms_cm.c_ostate is still zero
			 * from the bzero above (only ms_ostate was
			 * assigned), so this switch matches only if
			 * SBD_STAT_UNCONFIGURED == 0 -- verify intent.
			 */
			switch (msp->ms_cm.c_ostate) {
			/*
			 * changed to use cfgadm states
			 *
			 * was:
			 *	case SFDR_STATE_UNREFERENCED:
			 *	case SFDR_STATE_UNCONFIGURED:
			 */
			case SBD_STAT_UNCONFIGURED:
				msp->ms_detpages = msp->ms_totpages;
				break;

			default:
				break;
			}
		}

		/*
		 * Summarize the span: managed and nonrelocatable page
		 * counts.  A nonrelocatable range means an unconfigure
		 * would require a suspend (quiesce).
		 */
		rv = kphysm_del_span_query(mp->sbm_basepfn,
		    mp->sbm_npages, &mq);
		if (rv == KPHYSM_OK) {
			msp->ms_managed_pages = mq.managed;
			msp->ms_noreloc_pages = mq.nonrelocatable;
			msp->ms_noreloc_first = mq.first_nonrelocatable;
			msp->ms_noreloc_last = mq.last_nonrelocatable;
			msp->ms_cm.c_sflags = 0;
			if (mq.nonrelocatable) {
				SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
				    dsp->ds_suspend);
			}
		} else {
			PR_MEM("%s: kphysm_del_span_query() = %d\n", f, rv);
		}

		mix++;
		dsp++;
	}

	mutex_exit(&sbp->sb_slock);

	/* Return the number of sbd_dev_stat_t records produced. */
	return (mix);
}
4195
/*
 * Undo a previous release: return all devices on the board that are
 * both selected in the handle's devset and currently "released"
 * (unreferenced) to the CONFIGURED state.  Units whose cancel fails
 * fatally are marked FATAL instead.
 */
static void
sbd_cancel(sbd_handle_t *hp)
{
	int		i;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_cancel";
	int		rv;

	PR_ALL("%s...\n", f);

	/*
	 * Only devices which have been "released" are
	 * subject to cancellation.
	 */
	devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_UNREFERENCED(sbp);

	/*
	 * Nothing to do for CPUs or IO other than change back
	 * their state.
	 */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
			continue;
		if (sbd_cancel_cpu(hp, i) != SBD_CPUERR_FATAL) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
			    SBD_STATE_CONFIGURED);
		} else {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
			    SBD_STATE_FATAL);
		}
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
			continue;
		SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
		    SBD_STATE_CONFIGURED);
	}

	/*
	 * Memory: only 0 (success) and -1 (fatal) change the unit
	 * state; any other return leaves the unit's state untouched.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
			continue;
		if ((rv = sbd_cancel_mem(hp, i)) == 0) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
			    SBD_STATE_CONFIGURED);
		} else if (rv == -1) {
			SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
			    SBD_STATE_FATAL);
		}
	}

	PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);

	/* Clear the "released" bits for everything we just canceled. */
	SBD_DEVS_CANCEL(sbp, devset);

	if (SBD_DEVS_UNREFERENCED(sbp) == 0) {
		sbd_istate_t	new_state;
		/*
		 * If the board no longer has any released devices
		 * than transfer it back to the CONFIG/PARTIAL state.
		 */
		if (SBD_DEVS_ATTACHED(sbp) == SBD_DEVS_PRESENT(sbp))
			new_state = SBD_STATE_CONFIGURED;
		else
			new_state = SBD_STATE_PARTIAL;
		if (SBD_BOARD_STATE(sbp) != new_state) {
			SBD_BOARD_TRANSITION(sbp, new_state);
		}
		sbp->sb_ostate = SBD_STAT_CONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
4269
4270 static void
sbd_get_ncm(sbd_handle_t * hp)4271 sbd_get_ncm(sbd_handle_t *hp)
4272 {
4273 sbd_devset_t devset;
4274 sbd_priv_handle_t *shp = HD2MACHHD(hp);
4275 sbd_cmd_t *cmdp = (sbd_cmd_t *)hp->h_iap;
4276 int error;
4277
4278 /* pre_op restricted the devices to those selected by the ioctl */
4279 devset = shp->sh_devset;
4280
4281 cmdp->cmd_getncm.g_ncm = sbd_cpu_cnt(hp, devset)
4282 + sbd_io_cnt(hp, devset) + sbd_mem_cnt(hp, devset);
4283
4284 error = sbd_copyout_ioarg(hp->h_mode, hp->h_cmd, cmdp,
4285 (sbd_ioctl_arg_t *)shp->sh_arg);
4286
4287 if (error != 0)
4288 SBD_SET_ERRNO(SBD_HD2ERR(hp), error);
4289 }
4290
4291 static void
sbd_status(sbd_handle_t * hp)4292 sbd_status(sbd_handle_t *hp)
4293 {
4294 int nstat, mode, ncm, sz, cksz;
4295 sbd_priv_handle_t *shp = HD2MACHHD(hp);
4296 sbd_devset_t devset;
4297 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
4298 sbd_stat_t *dstatp;
4299 sbd_cmd_t *cmdp = (sbd_cmd_t *)hp->h_iap;
4300 sbdp_handle_t *hdp;
4301 sbd_dev_stat_t *devstatp;
4302
4303 #ifdef _MULTI_DATAMODEL
4304 int sz32;
4305 sbd_stat32_t *dstat32p;
4306 #endif /* _MULTI_DATAMODEL */
4307
4308 static fn_t f = "sbd_status";
4309
4310 mode = hp->h_mode;
4311 devset = shp->sh_devset;
4312
4313 devset &= SBD_DEVS_PRESENT(sbp);
4314
4315 if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_NONE) {
4316 if (cmdp->cmd_cm.c_flags & SBD_FLAG_ALLCMP) {
4317 /*
4318 * Get the number of components "ncm" on the board.
4319 * Calculate size of buffer required to store one
4320 * sbd_stat_t structure plus ncm-1 sbd_dev_stat_t
4321 * structures. Note that sbd_stat_t already contains
4322 * one sbd_dev_stat_t, so only an additional ncm-1
4323 * sbd_dev_stat_t structures need to be accounted for
4324 * in the calculation when more than one component
4325 * is present.
4326 */
4327 ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4328 sbd_mem_cnt(hp, devset);
4329
4330 } else {
4331 /*
4332 * In the case of c_type == SBD_COMP_NONE, and
4333 * SBD_FLAG_ALLCMP not specified, only the board
4334 * info is to be returned, no components.
4335 */
4336 ncm = 0;
4337 devset = 0;
4338 }
4339 } else {
4340 /* Confirm that only one component is selected. */
4341 ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4342 sbd_mem_cnt(hp, devset);
4343 if (ncm != 1) {
4344 PR_ALL("%s: expected ncm of 1, got %d, devset 0x%x\n",
4345 f, ncm, devset);
4346 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4347 return;
4348 }
4349 }
4350
4351 sz = sizeof (sbd_stat_t);
4352 if (ncm > 1)
4353 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
4354
4355 cksz = sz;
4356
4357 /*
4358 * s_nbytes describes the size of the preallocated user
4359 * buffer into which the application is executing to
4360 * receive the sbd_stat_t and sbd_dev_stat_t structures.
4361 * This buffer must be at least the required (sz) size.
4362 */
4363
4364 #ifdef _MULTI_DATAMODEL
4365
4366 /*
4367 * More buffer space is required for the 64bit to 32bit
4368 * conversion of data structures.
4369 */
4370 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4371 sz32 = sizeof (sbd_stat32_t);
4372 if (ncm > 1)
4373 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
4374 cksz = sz32;
4375 } else
4376 sz32 = 0;
4377 #endif
4378
4379 if ((int)cmdp->cmd_stat.s_nbytes < cksz) {
4380 PR_ALL("%s: ncm=%d s_nbytes = 0x%x\n", f, ncm,
4381 cmdp->cmd_stat.s_nbytes);
4382 PR_ALL("%s: expected size of 0x%x\n", f, cksz);
4383 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4384 return;
4385 }
4386
4387 dstatp = kmem_zalloc(sz, KM_SLEEP);
4388 devstatp = &dstatp->s_stat[0];
4389
4390 #ifdef _MULTI_DATAMODEL
4391 if (sz32 != 0)
4392 dstat32p = kmem_zalloc(sz32, KM_SLEEP);
4393 #endif
4394
4395 /*
4396 * if connected or better, provide cached status if available,
4397 * otherwise call sbdp for status
4398 */
4399 mutex_enter(&sbp->sb_flags_mutex);
4400 switch (sbp->sb_state) {
4401
4402 case SBD_STATE_CONNECTED:
4403 case SBD_STATE_PARTIAL:
4404 case SBD_STATE_CONFIGURED:
4405 if (sbp->sb_flags & SBD_BOARD_STATUS_CACHED) {
4406 bcopy(&sbp->sb_stat, dstatp, sizeof (sbd_stat_t));
4407 dstatp->s_rstate = rstate_cvt(sbp->sb_state);
4408 dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4409 dstatp->s_busy = sbp->sb_busy;
4410 dstatp->s_time = sbp->sb_time;
4411 dstatp->s_cond = sbp->sb_cond;
4412 break;
4413 }
4414 /*FALLTHROUGH*/
4415
4416 default:
4417 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
4418 dstatp->s_board = sbp->sb_num;
4419 dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4420 dstatp->s_time = sbp->sb_time;
4421
4422 hdp = sbd_get_sbdp_handle(sbp, hp);
4423
4424 if (sbdp_get_board_status(hdp, dstatp) != 0) {
4425 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
4426 sbd_release_sbdp_handle(hdp);
4427 #ifdef _MULTI_DATAMODEL
4428 if (sz32 != 0)
4429 kmem_free(dstat32p, sz32);
4430 #endif
4431 kmem_free(dstatp, sz);
4432 mutex_exit(&sbp->sb_flags_mutex);
4433 return;
4434 }
4435 /*
4436 * Do not cache status if the busy flag has
4437 * been set by the call to sbdp_get_board_status().
4438 */
4439 if (!dstatp->s_busy) {
4440 /* Can get board busy flag now */
4441 dstatp->s_busy = sbp->sb_busy;
4442 sbp->sb_cond = (sbd_cond_t)dstatp->s_cond;
4443 bcopy(dstatp, &sbp->sb_stat,
4444 sizeof (sbd_stat_t));
4445 sbp->sb_flags |= SBD_BOARD_STATUS_CACHED;
4446 }
4447 sbd_release_sbdp_handle(hdp);
4448 break;
4449 }
4450 mutex_exit(&sbp->sb_flags_mutex);
4451
4452 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
4453 if ((nstat = sbd_cpu_flags(hp, devset, devstatp)) > 0) {
4454 dstatp->s_nstat += nstat;
4455 devstatp += nstat;
4456 }
4457
4458 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
4459 if ((nstat = sbd_mem_status(hp, devset, devstatp)) > 0) {
4460 dstatp->s_nstat += nstat;
4461 devstatp += nstat;
4462 }
4463
4464 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
4465 if ((nstat = sbd_io_status(hp, devset, devstatp)) > 0) {
4466 dstatp->s_nstat += nstat;
4467 devstatp += nstat;
4468 }
4469
4470 /* paranoia: detect buffer overrun */
4471 if ((caddr_t)devstatp > ((caddr_t)dstatp) + sz) {
4472 PR_ALL("%s: buffer overrun\n", f);
4473 #ifdef _MULTI_DATAMODEL
4474 if (sz32 != 0)
4475 kmem_free(dstat32p, sz32);
4476 #endif
4477 kmem_free(dstatp, sz);
4478 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4479 return;
4480 }
4481
4482 /* if necessary, move data into intermediate device status buffer */
4483 #ifdef _MULTI_DATAMODEL
4484 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4485 int i, j;
4486
4487 ASSERT(sz32 != 0);
4488 /* paranoia: detect buffer overrun */
4489 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
4490 ((caddr_t)dstat32p) + sz32) {
4491 cmn_err(CE_WARN,
4492 "sbd:%s: buffer32 overrun", f);
4493 #ifdef _MULTI_DATAMODEL
4494 if (sz32 != 0)
4495 kmem_free(dstat32p, sz32);
4496 #endif
4497 kmem_free(dstatp, sz);
4498 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4499 return;
4500 }
4501
4502 /*
4503 * initialize 32 bit sbd board status structure
4504 */
4505 dstat32p->s_board = (int32_t)dstatp->s_board;
4506 dstat32p->s_nstat = (int32_t)dstatp->s_nstat;
4507 dstat32p->s_rstate = dstatp->s_rstate;
4508 dstat32p->s_ostate = dstatp->s_ostate;
4509 dstat32p->s_cond = dstatp->s_cond;
4510 dstat32p->s_busy = dstatp->s_busy;
4511 dstat32p->s_time = dstatp->s_time;
4512 dstat32p->s_assigned = dstatp->s_assigned;
4513 dstat32p->s_power = dstatp->s_power;
4514 dstat32p->s_platopts = (int32_t)dstatp->s_platopts;
4515 (void) strcpy(dstat32p->s_type, dstatp->s_type);
4516
4517 for (i = 0; i < dstatp->s_nstat; i++) {
4518 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
4519 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
4520
4521 /*
4522 * copy common data for the device
4523 */
4524 ds32p->d_cm.ci_type = (int32_t)dsp->d_cm.ci_type;
4525 ds32p->d_cm.ci_unit = (int32_t)dsp->d_cm.ci_unit;
4526 ds32p->d_cm.c_ostate = (int32_t)dsp->d_cm.c_ostate;
4527 ds32p->d_cm.c_cond = (int32_t)dsp->d_cm.c_cond;
4528 ds32p->d_cm.c_busy = (int32_t)dsp->d_cm.c_busy;
4529 ds32p->d_cm.c_time = (time32_t)dsp->d_cm.c_time;
4530 ds32p->d_cm.c_sflags = (int32_t)dsp->d_cm.c_sflags;
4531 (void) strcpy(ds32p->d_cm.ci_name, dsp->d_cm.ci_name);
4532
4533 /* copy type specific data for the device */
4534 switch (dsp->d_cm.ci_type) {
4535
4536 case SBD_COMP_CPU:
4537 ds32p->d_cpu.cs_isbootproc =
4538 (int32_t)dsp->d_cpu.cs_isbootproc;
4539 ds32p->d_cpu.cs_cpuid =
4540 (int32_t)dsp->d_cpu.cs_cpuid;
4541 ds32p->d_cpu.cs_speed =
4542 (int32_t)dsp->d_cpu.cs_speed;
4543 ds32p->d_cpu.cs_ecache =
4544 (int32_t)dsp->d_cpu.cs_ecache;
4545 break;
4546
4547 case SBD_COMP_MEM:
4548 ds32p->d_mem.ms_type =
4549 (int32_t)dsp->d_mem.ms_type;
4550 ds32p->d_mem.ms_ostate =
4551 (int32_t)dsp->d_mem.ms_ostate;
4552 ds32p->d_mem.ms_cond =
4553 (int32_t)dsp->d_mem.ms_cond;
4554 ds32p->d_mem.ms_interleave =
4555 (uint32_t)dsp->d_mem.ms_interleave;
4556 ds32p->d_mem.ms_basepfn =
4557 (uint32_t)dsp->d_mem.ms_basepfn;
4558 ds32p->d_mem.ms_totpages =
4559 (uint32_t)dsp->d_mem.ms_totpages;
4560 ds32p->d_mem.ms_detpages =
4561 (uint32_t)dsp->d_mem.ms_detpages;
4562 ds32p->d_mem.ms_pageslost =
4563 (int32_t)dsp->d_mem.ms_pageslost;
4564 ds32p->d_mem.ms_managed_pages =
4565 (int32_t)dsp->d_mem.ms_managed_pages;
4566 ds32p->d_mem.ms_noreloc_pages =
4567 (int32_t)dsp->d_mem.ms_noreloc_pages;
4568 ds32p->d_mem.ms_noreloc_first =
4569 (int32_t)dsp->d_mem.ms_noreloc_first;
4570 ds32p->d_mem.ms_noreloc_last =
4571 (int32_t)dsp->d_mem.ms_noreloc_last;
4572 ds32p->d_mem.ms_cage_enabled =
4573 (int32_t)dsp->d_mem.ms_cage_enabled;
4574 ds32p->d_mem.ms_peer_is_target =
4575 (int32_t)dsp->d_mem.ms_peer_is_target;
4576 (void) strcpy(ds32p->d_mem.ms_peer_ap_id,
4577 dsp->d_mem.ms_peer_ap_id);
4578 break;
4579
4580
4581 case SBD_COMP_IO:
4582
4583 ds32p->d_io.is_type =
4584 (int32_t)dsp->d_io.is_type;
4585 ds32p->d_io.is_unsafe_count =
4586 (int32_t)dsp->d_io.is_unsafe_count;
4587 ds32p->d_io.is_referenced =
4588 (int32_t)dsp->d_io.is_referenced;
4589 for (j = 0; j < SBD_MAX_UNSAFE; j++)
4590 ds32p->d_io.is_unsafe_list[j] =
4591 (int32_t)
4592 ds32p->d_io.is_unsafe_list[j];
4593 bcopy(dsp->d_io.is_pathname,
4594 ds32p->d_io.is_pathname, MAXPATHLEN);
4595 break;
4596
4597 case SBD_COMP_CMP:
4598 /* copy sbd_cmp_stat_t structure members */
4599 bcopy(&dsp->d_cmp.ps_cpuid[0],
4600 &ds32p->d_cmp.ps_cpuid[0],
4601 sizeof (ds32p->d_cmp.ps_cpuid));
4602 ds32p->d_cmp.ps_ncores =
4603 (int32_t)dsp->d_cmp.ps_ncores;
4604 ds32p->d_cmp.ps_speed =
4605 (int32_t)dsp->d_cmp.ps_speed;
4606 ds32p->d_cmp.ps_ecache =
4607 (int32_t)dsp->d_cmp.ps_ecache;
4608 break;
4609
4610 default:
4611 cmn_err(CE_WARN,
4612 "sbd:%s: unknown dev type (%d)", f,
4613 (int)dsp->d_cm.c_id.c_type);
4614 break;
4615 }
4616 }
4617
4618 if (ddi_copyout((void *)dstat32p,
4619 cmdp->cmd_stat.s_statp, sz32, mode) != 0) {
4620 cmn_err(CE_WARN,
4621 "sbd:%s: failed to copyout status "
4622 "for board %d", f, sbp->sb_num);
4623 SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4624 }
4625 } else
4626 #endif /* _MULTI_DATAMODEL */
4627 if (ddi_copyout((void *)dstatp, cmdp->cmd_stat.s_statp,
4628 sz, mode) != 0) {
4629 cmn_err(CE_WARN,
4630 "sbd:%s: failed to copyout status for board %d",
4631 f, sbp->sb_num);
4632 SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4633 }
4634
4635 #ifdef _MULTI_DATAMODEL
4636 if (sz32 != 0)
4637 kmem_free(dstat32p, sz32);
4638 #endif
4639 kmem_free(dstatp, sz);
4640 }
4641
4642 /*
4643 * Called at driver load time to determine the state and condition
4644 * of an existing board in the system.
4645 */
static void
sbd_board_discovery(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*dip;
	sbd_devset_t	devs_lost, devs_attached = 0;
	extern kmutex_t	cpu_lock;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_board_discovery";
	sbderror_t	error, *ep;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	if (SBD_DEVS_PRESENT(sbp) == 0) {
		PR_ALL("%s: board %d has no devices present\n",
		    f, sbp->sb_num);
		return;
	}

	/* Local error accumulator; logged (not fatal) at the end. */
	ep = &error;
	bzero(ep, sizeof (sbderror_t));

	/*
	 * Check for existence of cpus.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		processorid_t	cpuid;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][i];

		if (dip != NULL) {
			cpuid = sbdp_get_cpuid(hdp, dip);

			if (cpuid < 0) {
				/* remember the error; keep scanning */
				SBD_GET_PERR(hdp->h_err,
				    ep);
				continue;
			}

			mutex_enter(&cpu_lock); /* needed to call cpu_get() */
			if (cpu_get(cpuid)) {
				SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_CPU, i);
				DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
				PR_ALL("%s: board %d, cpuid %d - attached\n",
				    f, sbp->sb_num, cpuid);
			}
			mutex_exit(&cpu_lock);
			sbd_init_cpu_unit(sbp, i);
		}
	}

	/*
	 * Check for existence of memory.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		uint64_t	basepa, endpa;
		struct memlist	*ml;
		extern struct memlist	*phys_install;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
		if (dip == NULL)
			continue;

		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
			/* omit phantom memory controllers on I/O boards */
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i)) {
				ASSERT(sbp->sb_ndev != 0);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
				sbp->sb_ndev--;
			}
			sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
			continue;
		}

		/*
		 * basepa may not be on a alignment boundary, make it so.
		 */
		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
			continue;
		}

		/* endpa becomes the exclusive end of the aligned span */
		basepa &= ~(endpa - 1);
		endpa += basepa;

		/*
		 * Check if base address is in phys_install.
		 * The loop exits with ml != NULL iff the span
		 * overlaps an installed memory chunk.
		 */
		memlist_read_lock();
		for (ml = phys_install; ml; ml = ml->ml_next)
			if ((endpa <= ml->ml_address) ||
			    (basepa >= (ml->ml_address + ml->ml_size)))
				continue;
			else
				break;
		memlist_read_unlock();

		if (ml) {
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_MEM, i);
			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
			PR_ALL("%s: board %d, mem-unit %d - attached\n",
			    f, sbp->sb_num, i);
		}
		sbd_init_mem_unit(sbp, i, ep);
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * If so far we have found an error, we just log it but continue
	 */
	if (SBD_GET_ERRNO(ep) != 0)
		cmn_err(CE_WARN, "%s errno has occurred: errno %d", f,
		    SBD_GET_ERRNO(ep));

	/*
	 * Check for i/o state.
	 */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_IO)][i];
		if (dip == NULL)
			continue;

		ASSERT(e_ddi_branch_held(dip));

		/*
		 * XXX Is the devstate check needed ?
		 */
		if (i_ddi_devi_attached(dip) ||
		    ddi_get_devstate(dip) == DDI_DEVSTATE_UP) {

			/*
			 * Found it!
			 */
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_IO, i);
			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
			PR_ALL("%s: board %d, io-unit %d - attached\n",
			    f, sbp->sb_num, i);
		}
		sbd_init_io_unit(sbp, i);
	}

	/* Record the attached set; mark the rest UNCONFIGURED. */
	SBD_DEVS_CONFIGURE(sbp, devs_attached);
	if (devs_attached && ((devs_lost = SBD_DEVS_UNATTACHED(sbp)) != 0)) {
		int		ut;
		/*
		 * A prior comment stated that a partially configured
		 * board was not permitted.  The Serengeti architecture
		 * makes this possible, so the SB_DEVS_DISCONNECT
		 * at the end of this block has been removed.
		 */

		PR_ALL("%s: some devices not configured (0x%x)...\n",
		    f, devs_lost);

		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU,
				    ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM,
				    ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO,
				    ut, SBD_STATE_UNCONFIGURED);
			}
	}
}
4831
4832 static int
hold_rele_branch(dev_info_t * rdip,void * arg)4833 hold_rele_branch(dev_info_t *rdip, void *arg)
4834 {
4835 walk_tree_t *wp = (walk_tree_t *)arg;
4836
4837 ASSERT(wp && (wp->hold == 0 || wp->hold == 1));
4838
4839 switch (get_node_type(wp->sbp, rdip, NULL)) {
4840 case SBD_COMP_CMP:
4841 case SBD_COMP_MEM:
4842 case SBD_COMP_IO:
4843 break;
4844 case SBD_COMP_CPU:
4845
4846 /*
4847 * All CPU nodes under CMP nodes should have
4848 * gotten pruned when the CMP node was first
4849 * encountered.
4850 */
4851 ASSERT(!sbd_is_cmp_child(rdip));
4852
4853 break;
4854
4855 case SBD_COMP_UNKNOWN:
4856 /* Not of interest to us */
4857 return (DDI_WALK_CONTINUE);
4858 default:
4859 ASSERT(0);
4860 return (DDI_WALK_PRUNECHILD);
4861 }
4862
4863 if (wp->hold) {
4864 ASSERT(!e_ddi_branch_held(rdip));
4865 e_ddi_branch_hold(rdip);
4866 } else {
4867 ASSERT(e_ddi_branch_held(rdip));
4868 e_ddi_branch_rele(rdip);
4869 }
4870
4871 return (DDI_WALK_PRUNECHILD);
4872 }
4873
/*
 * Initialize a board structure at instance setup time: create the
 * board locks, record the board's identity, allocate the per-unit
 * device lists, unit state structs and pathname buffers, hold every
 * device branch rooted on this board, and derive the board's initial
 * DR state from which devices are already present and attached.
 */
static void
sbd_board_init(sbd_board_t *sbp, sbd_softstate_t *softsp,
	int bd, dev_info_t *top_dip, int wnode)
{
	int		i;
	dev_info_t	*pdip;
	int		circ;
	walk_tree_t	walk = {0};

	mutex_init(&sbp->sb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_flags_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_slock, NULL, MUTEX_DRIVER, NULL);

	sbp->sb_ref = 0;
	sbp->sb_num = bd;
	/* Timestamp the creation of this board structure. */
	sbp->sb_time = gethrestime_sec();
	/*
	 * For serengeti, top_dip doesn't need to be held because
	 * sbp i.e. sbd_board_t will be destroyed in sbd_teardown_instance()
	 * before top_dip detaches. For Daktari, top_dip is the
	 * root node which never has to be held.
	 */
	sbp->sb_topdip = top_dip;
	/* No CPU selected yet. */
	sbp->sb_cpuid = -1;
	sbp->sb_softsp = (void *) softsp;
	sbp->sb_cond = SBD_COND_UNKNOWN;
	sbp->sb_wnode = wnode;
	/* NOTE(review): starts optimistic about memory access — confirm. */
	sbp->sb_memaccess_ok = 1;

	/* The fixed-size unit arrays below rely on these bounds. */
	ASSERT(MAX_IO_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_CPU_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_MEM_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for cpus.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = GETSTRUCT(dev_info_t *,
		MAX_CPU_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for mem.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = GETSTRUCT(dev_info_t *,
		MAX_MEM_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for io.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = GETSTRUCT(dev_info_t *,
		MAX_IO_UNITS_PER_BOARD);

	/* Per-unit state structs, one array per component class. */
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = GETSTRUCT(sbd_dev_unit_t,
		MAX_CPU_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_MEM)] = GETSTRUCT(sbd_dev_unit_t,
		MAX_MEM_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_IO)] = GETSTRUCT(sbd_dev_unit_t,
		MAX_IO_UNITS_PER_BOARD);

	/* Pathname buffers for each possible unit of each class. */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_cpupath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_mempath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_iopath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	/*
	 * Walk the device tree, find all top dips on this board and
	 * hold the branches rooted at them
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip, &circ);
	walk.sbp = sbp;
	walk.hold = 1;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip, circ);

	/*
	 * Initialize the devlists
	 */
	if (sbd_init_devlists(sbp) == 0) {
		/* No devices found on this board. */
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);
	} else {
		/*
		 * Couldn't have made it down here without
		 * having found at least one device.
		 */
		ASSERT(SBD_DEVS_PRESENT(sbp) != 0);
		/*
		 * Check the state of any possible devices on the
		 * board.
		 */
		sbd_board_discovery(sbp);

		if (SBD_DEVS_UNATTACHED(sbp) == 0) {
			/*
			 * The board has no unattached devices, therefore
			 * by reason of insanity it must be configured!
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_cond = SBD_COND_OK;
		} else if (SBD_DEVS_ATTACHED(sbp)) {
			/* Some, but not all, devices attached. */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		} else {
			/* Devices present but none attached. */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		}
	}
}
4992
/*
 * Tear down a board structure: the mirror image of sbd_board_init().
 * Frees the per-unit state structs, pathname buffers and device
 * lists, releases every device branch held at init time, and
 * destroys the board locks last.
 */
static void
sbd_board_destroy(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*pdip;
	int		circ;
	walk_tree_t	walk = {0};

	SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);

#ifdef DEBUG
	/* All memory unit memlists must already have been freed. */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbd_mem_unit_t *mp;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, i);
		ASSERT(mp->sbm_mlist == NULL);
	}
#endif /* DEBUG */

	/*
	 * Free up MEM unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_MEM)],
		sbd_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * Free up CPU unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_CPU)],
		sbd_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * Free up IO unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_IO)],
		sbd_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * free up CPU devlists.
	 */

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_cpupath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_CPU)], dev_info_t *,
		MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * free up MEM devlists.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_mempath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_MEM)], dev_info_t *,
		MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * free up IO devlists.
	 */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_iopath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_IO)], dev_info_t *,
		MAX_IO_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * Release all branches held earlier
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip, &circ);
	walk.sbp = sbp;
	walk.hold = 0;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip, circ);

	/* Destroy locks last, after all users of the board are gone. */
	mutex_destroy(&sbp->sb_slock);
	mutex_destroy(&sbp->sb_flags_mutex);
	mutex_destroy(&sbp->sb_mutex);
}
5081
5082 sbd_comp_type_t
sbd_cm_type(char * name)5083 sbd_cm_type(char *name)
5084 {
5085 sbd_comp_type_t type = SBD_COMP_UNKNOWN;
5086 int i;
5087
5088 /* look up type in table */
5089 for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
5090 if (strcmp(name, SBD_OTYPE(i)) == 0) {
5091 type = SBD_COMP(i);
5092 break;
5093 }
5094 }
5095
5096 return (type);
5097 }
5098
5099 /*
5100 * There are certain cases where obp marks components as failed
5101 * If the status is ok the node won't have any status property. It
5102 * is only there if the status is other than ok.
5103 *
5104 * The translation is as follows:
5105 * If there is no status prop, the the cond is SBD_COND_OK
5106 * If we find a status prop but can't get to it then cond is SBD_COND_UNKNOWN
5107 * if we find a stat and it is failed the cond is SBD_COND_FAILED
5108 * If the stat is disabled, the cond is SBD_COND_UNUSABLE
5109 * Otherwise we return con as SBD_COND_OK
5110 */
5111 sbd_cond_t
sbd_get_comp_cond(dev_info_t * dip)5112 sbd_get_comp_cond(dev_info_t *dip)
5113 {
5114 int len;
5115 char *status_buf;
5116 static const char *status = "status";
5117 static const char *failed = "fail";
5118 static const char *disabled = "disabled";
5119
5120 if (dip == NULL) {
5121 PR_BYP("dip is NULL\n");
5122 return (SBD_COND_UNKNOWN);
5123 }
5124
5125 /*
5126 * If retired, return FAILED
5127 */
5128 if (DEVI(dip)->devi_flags & DEVI_RETIRED) {
5129 PR_CPU("dip is retired\n");
5130 return (SBD_COND_FAILED);
5131 }
5132
5133 if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5134 (char *)status, &len) != DDI_PROP_SUCCESS) {
5135 PR_CPU("status in sbd is ok\n");
5136 return (SBD_COND_OK);
5137 }
5138
5139 status_buf = kmem_zalloc(sizeof (char) * OBP_MAXPROPNAME, KM_SLEEP);
5140 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5141 (char *)status, status_buf, &len) != DDI_PROP_SUCCESS) {
5142 PR_CPU("status in sbd is unknown\n");
5143 return (SBD_COND_UNKNOWN);
5144 }
5145
5146 if (strncmp(status_buf, failed, strlen(failed)) == 0) {
5147 PR_CPU("status in sbd is failed\n");
5148 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5149 return (SBD_COND_FAILED);
5150 }
5151
5152 if (strcmp(status_buf, disabled) == 0) {
5153 PR_CPU("status in sbd is unusable\n");
5154 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5155 return (SBD_COND_UNUSABLE);
5156 }
5157
5158 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5159 return (SBD_COND_OK);
5160 }
5161
5162 #ifdef SBD_DEBUG_ERRS
5163
5164 /* function to simulate errors throughout the sbd code */
5165 void
sbd_inject_err(int error,sbderror_t * ep,int Errno,int ecode,char * rsc)5166 sbd_inject_err(int error, sbderror_t *ep, int Errno, int ecode,
5167 char *rsc)
5168 {
5169 static fn_t f = "sbd_inject_err";
5170
5171 if (sbd_err_debug == 0)
5172 return;
5173
5174 if (ep == NULL) {
5175 cmn_err(CE_WARN, "%s ep is NULL", f);
5176 return;
5177 }
5178
5179 if (SBD_GET_ERRNO(ep) != 0) {
5180 cmn_err(CE_WARN, "%s errno already set to %d", f,
5181 SBD_GET_ERRNO(ep));
5182 return;
5183 }
5184
5185 if (SBD_GET_ERR(ep) != 0) {
5186 cmn_err(CE_WARN, "%s code already set to %d", f,
5187 SBD_GET_ERR(ep));
5188 return;
5189 }
5190
5191 if ((sbd_err_debug & (1 << error)) != 0) {
5192 ep->e_errno = Errno;
5193 ep->e_code = ecode;
5194
5195 if (rsc != NULL)
5196 bcopy((caddr_t)rsc,
5197 (caddr_t)ep->e_rsc,
5198 sizeof (ep->e_rsc));
5199
5200 if (Errno != 0)
5201 PR_ERR_ERRNO("%s set errno to %d", f, ep->e_errno);
5202
5203 if (ecode != 0)
5204 PR_ERR_ECODE("%s set ecode to %d", f, ep->e_code);
5205
5206 if (rsc != NULL)
5207 PR_ERR_RSC("%s set rsc to %s", f, ep->e_rsc);
5208 }
5209 }
5210 #endif
5211