1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright 2023 Oxide Computer Company
29 */
30
31 /*
32 * safari system board DR module.
33 */
34
35 #include <sys/debug.h>
36 #include <sys/types.h>
37 #include <sys/errno.h>
38 #include <sys/cred.h>
39 #include <sys/dditypes.h>
40 #include <sys/devops.h>
41 #include <sys/modctl.h>
42 #include <sys/poll.h>
43 #include <sys/conf.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/sunndi.h>
47 #include <sys/ndi_impldefs.h>
48 #include <sys/stat.h>
49 #include <sys/kmem.h>
50 #include <sys/cpuvar.h>
51 #include <sys/mem_config.h>
52 #include <sys/mem_cage.h>
53
54 #include <sys/autoconf.h>
55 #include <sys/cmn_err.h>
56
57 #include <sys/ddi_impldefs.h>
58 #include <sys/machsystm.h>
59 #include <sys/param.h>
60
61 #include <sys/sbdpriv.h>
62 #include <sys/sbd_io.h>
63
64 /* start sbd includes */
65
66 #include <sys/systm.h>
67 #include <sys/sysmacros.h>
68 #include <sys/x_call.h>
69 #include <sys/membar.h>
70 #include <vm/seg_kmem.h>
71
72 extern int nulldev();
73 extern int nodev();
74
typedef struct {	/* arg to sbd_get_handle */
	dev_t		dev;	/* device the ioctl was issued on */
	int		cmd;	/* SBD_CMD_* ioctl command */
	int		mode;	/* ioctl mode, for ddi_copyin/ddi_copyout */
	sbd_ioctl_arg_t	*ioargp;	/* user ioctl argument pointer */
} sbd_init_arg_t;
81
82
83 /*
84 * sbd support operations.
85 */
86 static void sbd_exec_op(sbd_handle_t *hp);
87 static void sbd_dev_configure(sbd_handle_t *hp);
88 static int sbd_dev_release(sbd_handle_t *hp);
89 static int sbd_dev_unconfigure(sbd_handle_t *hp);
90 static void sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep,
91 dev_info_t *dip, int unit);
92 static void sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep,
93 dev_info_t *dip, int unit);
94 static int sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit);
95 static void sbd_cancel(sbd_handle_t *hp);
96 void sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip);
97 int sbd_dealloc_instance(sbd_board_t *sbp, int max_boards);
98 int sbd_errno2ecode(int error);
99 #pragma weak sbdp_cpu_get_impl
100
101 #ifdef DEBUG
102 uint_t sbd_debug = (uint_t)0x0;
103
104 #ifdef SBD_DEBUG_ERRS
105 /* controls which errors are injected */
106 uint_t sbd_err_debug = (uint_t)0x0;
107
108 /* controls printing about error injection */
109 uint_t sbd_print_errs = (uint_t)0x0;
110
111 #endif /* SBD_DEBUG_ERRS */
112
113 #endif /* DEBUG */
114
115 char *sbd_state_str[] = {
116 "EMPTY", "OCCUPIED", "CONNECTED", "UNCONFIGURED",
117 "PARTIAL", "CONFIGURED", "RELEASE", "UNREFERENCED",
118 "FATAL"
119 };
120
121 /* Note: this must be changed in tandem with sbd_ioctl.h */
122 char *sbd_ct_str[] = {
123 "NONE", "CPU", "MEM", "IO", "UNKNOWN"
124 };
125
126 /* Note: this must also be changed in tandem with sbd_ioctl.h */
127 #define SBD_CMD_STR(c) \
128 (((c) == SBD_CMD_ASSIGN) ? "ASSIGN" : \
129 ((c) == SBD_CMD_UNASSIGN) ? "UNASSIGN" : \
130 ((c) == SBD_CMD_POWERON) ? "POWERON" : \
131 ((c) == SBD_CMD_POWEROFF) ? "POWEROFF" : \
132 ((c) == SBD_CMD_TEST) ? "TEST" : \
133 ((c) == SBD_CMD_CONNECT) ? "CONNECT" : \
134 ((c) == SBD_CMD_CONFIGURE) ? "CONFIGURE" : \
135 ((c) == SBD_CMD_UNCONFIGURE) ? "UNCONFIGURE" : \
136 ((c) == SBD_CMD_DISCONNECT) ? "DISCONNECT" : \
137 ((c) == SBD_CMD_STATUS) ? "STATUS" : \
138 ((c) == SBD_CMD_GETNCM) ? "GETNCM" : \
139 ((c) == SBD_CMD_PASSTHRU) ? "PASSTHRU" : "unknown")
140
141 /*
142 * Defines and structures for device tree naming and mapping
143 * to node types
144 */
145
146 sbd_devattr_t *sbd_devattr;
147
148 /* defines to access the attribute struct */
149 #define SBD_DEVNAME(i) sbd_devattr[i].s_devname
150 #define SBD_OTYPE(i) sbd_devattr[(i)].s_obp_type
151 #define SBD_COMP(i) sbd_devattr[i].s_dnodetype
152
153 /*
154 * State transition table. States valid transitions for "board" state.
155 * Recall that non-zero return value terminates operation, however
156 * the herrno value is what really indicates an error , if any.
157 */
158 static int
_cmd2index(int c)159 _cmd2index(int c)
160 {
161 /*
162 * Translate DR CMD to index into sbd_state_transition.
163 */
164 switch (c) {
165 case SBD_CMD_CONNECT: return (0);
166 case SBD_CMD_DISCONNECT: return (1);
167 case SBD_CMD_CONFIGURE: return (2);
168 case SBD_CMD_UNCONFIGURE: return (3);
169 case SBD_CMD_POWEROFF: return (4);
170 case SBD_CMD_POWERON: return (5);
171 case SBD_CMD_UNASSIGN: return (6);
172 case SBD_CMD_ASSIGN: return (7);
173 case SBD_CMD_TEST: return (8);
174 default: return (-1);
175 }
176 }
177
178 #define CMD2INDEX(c) _cmd2index(c)
179
/*
 * One row per DR command (row order must match _cmd2index()); each row's
 * x_op[] is indexed by the current board state, in the same order as
 * sbd_state_str[].  A non-zero x_rv terminates the operation; x_err is
 * the errno reported in that case (x_rv 1 with x_err 0 terminates the
 * operation without reporting an error).
 */
static struct sbd_state_trans {
	int	x_cmd;
	struct {
		int	x_rv;	/* return value of pre_op */
		int	x_err;	/* errno, if any */
	} x_op[SBD_NUM_STATES];
} sbd_state_transition[] = {
	{ SBD_CMD_CONNECT,
		{
			{ 0, 0 },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_DISCONNECT,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_CONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 0, 0 },	/* connected */
			{ 0, 0 },	/* unconfigured */
			{ 0, 0 },	/* partial */
			{ 1, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNCONFIGURE,
		{
			{ 1, EIO },	/* empty */
			{ 1, EIO },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 0, 0 },	/* configured */
			{ 0, 0 },	/* release */
			{ 0, 0 },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWEROFF,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_POWERON,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_UNASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_ASSIGN,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
	{ SBD_CMD_TEST,
		{
			{ 1, EIO },	/* empty */
			{ 0, 0 },	/* occupied */
			{ 1, EIO },	/* connected */
			{ 1, EIO },	/* unconfigured */
			{ 1, EIO },	/* partial */
			{ 1, EIO },	/* configured */
			{ 1, EIO },	/* release */
			{ 1, EIO },	/* unreferenced */
			{ 1, EIO },	/* fatal */
		}
	},
};
305
306 /*
307 * Global R/W lock to synchronize access across
308 * multiple boards. Users wanting multi-board access
309 * must grab WRITE lock, others must grab READ lock.
310 */
311 krwlock_t sbd_grwlock;
312
313 /*
314 * Global to determine if an event needs to be sent
315 */
316 char send_event = 0;
317
318 /*
319 * Required/Expected functions.
320 */
321
322 static sbd_handle_t *sbd_get_handle(dev_t dev, sbd_softstate_t *softsp,
323 intptr_t arg, sbd_init_arg_t *iap);
324 static void sbd_release_handle(sbd_handle_t *hp);
325 static int sbd_pre_op(sbd_handle_t *hp);
326 static void sbd_post_op(sbd_handle_t *hp);
327 static int sbd_probe_board(sbd_handle_t *hp);
328 static int sbd_deprobe_board(sbd_handle_t *hp);
329 static void sbd_connect(sbd_handle_t *hp);
330 static void sbd_assign_board(sbd_handle_t *hp);
331 static void sbd_unassign_board(sbd_handle_t *hp);
332 static void sbd_poweron_board(sbd_handle_t *hp);
333 static void sbd_poweroff_board(sbd_handle_t *hp);
334 static void sbd_test_board(sbd_handle_t *hp);
335
336 static int sbd_disconnect(sbd_handle_t *hp);
337 static sbd_devlist_t *sbd_get_attach_devlist(sbd_handle_t *hp,
338 int32_t *devnump, int32_t pass);
339 static int sbd_pre_attach_devlist(sbd_handle_t *hp,
340 sbd_devlist_t *devlist, int32_t devnum);
341 static int sbd_post_attach_devlist(sbd_handle_t *hp,
342 sbd_devlist_t *devlist, int32_t devnum);
343 static sbd_devlist_t *sbd_get_release_devlist(sbd_handle_t *hp,
344 int32_t *devnump, int32_t pass);
345 static int sbd_pre_release_devlist(sbd_handle_t *hp,
346 sbd_devlist_t *devlist, int32_t devnum);
347 static int sbd_post_release_devlist(sbd_handle_t *hp,
348 sbd_devlist_t *devlist, int32_t devnum);
349 static void sbd_release_done(sbd_handle_t *hp,
350 sbd_comp_type_t nodetype,
351 dev_info_t *dip);
352 static sbd_devlist_t *sbd_get_detach_devlist(sbd_handle_t *hp,
353 int32_t *devnump, int32_t pass);
354 static int sbd_pre_detach_devlist(sbd_handle_t *hp,
355 sbd_devlist_t *devlist, int32_t devnum);
356 static int sbd_post_detach_devlist(sbd_handle_t *hp,
357 sbd_devlist_t *devlist, int32_t devnum);
358 static void sbd_status(sbd_handle_t *hp);
359 static void sbd_get_ncm(sbd_handle_t *hp);
360
361
362 /*
363 * Support functions.
364 */
365 static sbd_devset_t sbd_dev2devset(sbd_comp_id_t *cid);
366 static int sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd,
367 sbd_cmd_t *cmdp, sbd_ioctl_arg_t *iap);
368 static int sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap,
369 void *arg);
370 static int sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp,
371 sbd_ioctl_arg_t *iap);
372 static int sbd_check_transition(sbd_board_t *sbp,
373 sbd_devset_t *devsetp,
374 struct sbd_state_trans *transp);
375 static sbd_devlist_t *sbd_get_devlist(sbd_handle_t *hp,
376 sbd_board_t *sbp,
377 sbd_comp_type_t nodetype,
378 int max_units, uint_t uset,
379 int *count, int present_only);
380 static int sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset,
381 sbd_dev_stat_t *dsp);
382
383 static int sbd_init_devlists(sbd_board_t *sbp);
384 static int sbd_name_to_idx(char *name);
385 static int sbd_otype_to_idx(char *otpye);
386 static int sbd_setup_devlists(dev_info_t *dip, void *arg);
387 static void sbd_init_mem_devlists(sbd_board_t *sbp);
388 static void sbd_init_cpu_unit(sbd_board_t *sbp, int unit);
389 static void sbd_board_discovery(sbd_board_t *sbp);
390 static void sbd_board_init(sbd_board_t *sbp,
391 sbd_softstate_t *softsp,
392 int bd, dev_info_t *dip, int wnode);
393 static void sbd_board_destroy(sbd_board_t *sbp);
394 static int sbd_check_unit_attached(sbd_board_t *sbp,
395 dev_info_t *dip, int unit,
396 sbd_comp_type_t nodetype, sbderror_t *ep);
397
398 static sbd_state_t rstate_cvt(sbd_istate_t state);
399
400 /*
401 * Autoconfiguration data structures
402 */
403
404 extern struct mod_ops mod_miscops;
405
406 static struct modlmisc modlmisc = {
407 &mod_miscops,
408 "System Board DR"
409 };
410
411 static struct modlinkage modlinkage = {
412 MODREV_1,
413 (void *)&modlmisc,
414 NULL
415 };
416
417 static int sbd_instances = 0;
418
419 /*
420 * dr Global data elements
421 */
422 sbd_global sbd_g;
423
424 /*
425 * We want to be able to unload the module when we wish to do so, but we don't
426 * want anything else to unload it. Unloading cannot occur until
427 * sbd_teardown_instance is called by an explicit IOCTL into the parent node.
428 * This support is for debugging purposes and should it be expected to work
429 * on the field, it should be enhanced:
430 * Currently, there is still a window where sbd_teardow_instance gets called,
431 * sbd_prevent_unloading now = 0, the driver doesn't get unloaded, and
432 * sbd_setup_instance gets called. This may cause a panic.
433 */
434 int sbd_prevent_unloading = 1;
435
436 /*
437 * Driver entry points.
438 */
439 int
_init(void)440 _init(void)
441 {
442 int err;
443
444 /*
445 * If you need to support multiple nodes (instances), then
446 * whatever the maximum number of supported nodes is would
447 * need to passed as the third parameter to ddi_soft_state_init().
448 * Alternative would be to dynamically fini and re-init the
449 * soft state structure each time a node is attached.
450 */
451 err = ddi_soft_state_init((void **)&sbd_g.softsp,
452 sizeof (sbd_softstate_t), SBD_MAX_INSTANCES);
453 if (err)
454 return (err);
455
456 if ((err = mod_install(&modlinkage)) != 0) {
457 ddi_soft_state_fini((void **)&sbd_g.softsp);
458 return (err);
459 }
460
461 /* Get the array of names from platform helper routine */
462 sbd_devattr = sbdp_get_devattr();
463
464 return (err);
465 }
466
467 int
_fini(void)468 _fini(void)
469 {
470 int err;
471
472 if (sbd_prevent_unloading)
473 return (DDI_FAILURE);
474
475 ASSERT(sbd_instances == 0);
476
477 if ((err = mod_remove(&modlinkage)) != 0)
478 return (err);
479
480 ddi_soft_state_fini((void **)&sbd_g.softsp);
481
482 return (0);
483 }
484
/*
 * Report module information via the standard modctl interface.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
490
/*
 * Main DR ioctl entry point.  Validates the command, acquires the
 * appropriate global/board locks, runs the pre-op/exec/post-op sequence,
 * and reports back (via *event) whether an event should be generated.
 * Returns 0 or an errno.
 */
int
sbd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, char *event)
{
	int		rv = 0, instance;
	sbd_handle_t	*hp;
	sbd_softstate_t	*softsp;
	sbd_init_arg_t	init_arg;
	static fn_t	f = "sbd_ioctl";
	int		dr_avail;

	PR_BYP("sbd_ioctl cmd=%x, arg=%lx\n", cmd, arg);

	/* Note: this must also be changed in tandem with sbd_ioctl.h */
	switch (cmd) {
	case SBD_CMD_ASSIGN:
	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_TEST:
	case SBD_CMD_CONNECT:
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_DISCONNECT:
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		return (ENOTTY);
	}

	instance = SBD_GET_MINOR2INST(getminor(dev));
	if ((softsp = (sbd_softstate_t *)GET_SOFTC(instance)) == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: module not yet attached", f, instance);
		return (ENXIO);
	}

	init_arg.dev = dev;
	init_arg.cmd = cmd;
	init_arg.mode = mode;
	init_arg.ioargp = (sbd_ioctl_arg_t *)arg;

	hp = sbd_get_handle(dev, softsp, arg, &init_arg);
	/* Check to see if we support dr */
	dr_avail = sbdp_dr_avail();
	if (dr_avail != 1) {
		/* Only the passive query commands work without DR support. */
		switch (hp->h_cmd) {
		case SBD_CMD_STATUS:
		case SBD_CMD_GETNCM:
		case SBD_CMD_PASSTHRU:
			break;
		default:
			sbd_release_handle(hp);
			return (ENOTSUP);
		}
	}

	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		/* no locks needed for these commands */
		break;

	default:
		/* Lock order: global rwlock first, then the board mutex. */
		rw_enter(&sbd_grwlock, RW_WRITER);
		mutex_enter(&SBDH2BD(hp->h_sbd)->sb_mutex);

		/*
		 * If we're dealing with memory at all, then we have
		 * to keep the "exclusive" global lock held. This is
		 * necessary since we will probably need to look at
		 * multiple board structs. Otherwise, we only have
		 * to deal with the board in question and so can drop
		 * the global lock to "shared".
		 */
		/*
		 * XXX This is incorrect. The sh_devset has not
		 * been set at this point - it is 0.
		 */
		rv = DEVSET_IN_SET(HD2MACHHD(hp)->sh_devset,
		    SBD_COMP_MEM, DEVSET_ANYUNIT);
		if (rv == 0)
			rw_downgrade(&sbd_grwlock);
		break;
	}

	/*
	 * Before any operations happen, reset the event flag
	 */
	send_event = 0;

	if (sbd_pre_op(hp) == 0) {
		sbd_exec_op(hp);
		sbd_post_op(hp);
	}

	rv = SBD_GET_ERRNO(SBD_HD2ERR(hp));
	*event = send_event;

	/* undo locking, if any, done before sbd_pre_op */
	switch (hp->h_cmd) {
	case SBD_CMD_STATUS:
	case SBD_CMD_GETNCM:
	case SBD_CMD_PASSTHRU:
		break;
	default:
		/* Release in reverse acquisition order. */
		mutex_exit(&SBDH2BD(hp->h_sbd)->sb_mutex);
		rw_exit(&sbd_grwlock);
	}

	sbd_release_handle(hp);

	return (rv);
}
607
/*
 * Set up one driver instance: initialize the platform (sbdp) layer,
 * allocate the per-instance soft state and board list, and initialize
 * every board structure.  On any failure the partially-built state is
 * unwound (platform teardown, soft state free, instance count).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
sbd_setup_instance(int instance, dev_info_t *root, int max_boards, int wnode,
    caddr_t sbdp_arg)
{
	int		b;
	sbd_softstate_t	*softsp;
	sbd_board_t	*sbd_boardlist;
	static fn_t	f = "sbd_setup_instance";

	sbd_instances++;

	if (sbdp_setup_instance(sbdp_arg) != DDI_SUCCESS) {
		sbd_instances--;
		return (DDI_FAILURE);
	}

	if (ALLOC_SOFTC(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: failed to alloc soft-state", f, instance);
		(void) sbdp_teardown_instance(sbdp_arg);
		sbd_instances--;
		return (DDI_FAILURE);
	}

	softsp = (sbd_softstate_t *)GET_SOFTC(instance);

	if (softsp == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s:%d: failed to get soft-state instance",
		    f, instance);
		goto exit;
	}

	sbd_boardlist = GETSTRUCT(sbd_board_t, max_boards);
	if (sbd_boardlist == NULL) {
		cmn_err(CE_WARN,
		    "sbd:%s: failed to alloc board list %d", f, instance);
		goto exit;
	}


	softsp->sbd_boardlist  = (void *)sbd_boardlist;
	softsp->max_boards  = max_boards;
	softsp->wnode  = wnode;


	/* Initialize each board's state under this instance. */
	for (b = 0; b < max_boards; b++) {
		sbd_board_init(sbd_boardlist++, softsp, b, root, wnode);
	}


	return (DDI_SUCCESS);
exit:
	/* Common failure unwind: platform teardown, then soft state. */
	(void) sbdp_teardown_instance(sbdp_arg);
	FREE_SOFTC(instance);
	sbd_instances--;
	return (DDI_FAILURE);
}
666
667 int
sbd_teardown_instance(int instance,caddr_t sbdp_arg)668 sbd_teardown_instance(int instance, caddr_t sbdp_arg)
669 {
670 sbd_softstate_t *softsp;
671
672 if (sbdp_teardown_instance(sbdp_arg) != DDI_SUCCESS)
673 return (DDI_FAILURE);
674
675 softsp = (sbd_softstate_t *)GET_SOFTC(instance);
676 if (softsp == NULL) {
677 return (DDI_FAILURE);
678 }
679
680 (void) sbd_dealloc_instance((sbd_board_t *)softsp->sbd_boardlist,
681 softsp->max_boards);
682
683 FREE_SOFTC(instance);
684 sbd_instances--;
685 sbd_prevent_unloading = 0;
686
687 return (DDI_SUCCESS);
688 }
689
/*
 * Dispatch the DR command held in the handle to its implementation.
 * Any error is recorded in the handle's error structure rather than
 * returned; the trailing PR_BYP calls trace it for debugging.
 */
static void
sbd_exec_op(sbd_handle_t *hp)
{
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_exec_op";

	switch (hp->h_cmd) {
		/* scoped to the switch; used only by UNCONFIGURE below */
		int	dev_canceled;

	case SBD_CMD_CONNECT:
		if (sbd_probe_board(hp))
			break;

		sbd_connect(hp);
		break;

	case SBD_CMD_CONFIGURE:
		sbd_dev_configure(hp);
		break;

	case SBD_CMD_UNCONFIGURE:
		/* Release first; only unconfigure if release went cleanly. */
		if (((dev_canceled = sbd_dev_release(hp)) == 0) &&
		    (SBD_GET_ERRNO(SBD_HD2ERR(hp)) == 0 &&
		    SBD_GET_ERR(SBD_HD2ERR(hp)) == 0))
			dev_canceled = sbd_dev_unconfigure(hp);

		if (dev_canceled)
			sbd_cancel(hp);
		break;

	case SBD_CMD_DISCONNECT:
		mutex_enter(&sbp->sb_slock);
		if (sbd_disconnect(hp) == 0)
			(void) sbd_deprobe_board(hp);
		mutex_exit(&sbp->sb_slock);
		break;

	case SBD_CMD_STATUS:
		sbd_status(hp);
		break;

	case SBD_CMD_GETNCM:
		sbd_get_ncm(hp);
		break;

	case SBD_CMD_ASSIGN:
		sbd_assign_board(hp);
		break;

	case SBD_CMD_UNASSIGN:
		sbd_unassign_board(hp);
		break;

	case SBD_CMD_POWEROFF:
		sbd_poweroff_board(hp);
		break;

	case SBD_CMD_POWERON:
		sbd_poweron_board(hp);
		break;

	case SBD_CMD_TEST:
		sbd_test_board(hp);
		break;

	case SBD_CMD_PASSTHRU:
	{
		int		rv;
		sbdp_handle_t	*hdp;
		sbderror_t	*ep = SBD_HD2ERR(hp);
		sbdp_ioctl_arg_t ia, *iap;

		iap = &ia;

		/* Repackage the handle's ioctl context for the sbdp layer. */
		iap->h_dev = hp->h_dev;
		iap->h_cmd = hp->h_cmd;
		iap->h_iap = (intptr_t)hp->h_iap;
		iap->h_mode = hp->h_mode;

		hdp = sbd_get_sbdp_handle(sbp, hp);
		rv = sbdp_ioctl(hdp, iap);
		if (rv != 0) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			ep->e_errno = rv;
		}
		sbd_release_sbdp_handle(hdp);
		break;
	}

	default:
		SBD_SET_ERRNO(SBD_HD2ERR(hp), ENOTTY);
		cmn_err(CE_WARN, "sbd:%s: unknown command (%d)", f, hp->h_cmd);
		break;

	}

	/* Trace any error left behind by the operation. */
	if (SBD_GET_ERR(SBD_HD2ERR(hp)))
		PR_BYP("XXX e_code=%d", SBD_GET_ERR(SBD_HD2ERR(hp)));
	if (SBD_GET_ERRNO(SBD_HD2ERR(hp)))
		PR_BYP("XXX errno=%d", SBD_GET_ERRNO(SBD_HD2ERR(hp)));
}
791
/*
 * Classify a devinfo node as MEM, CPU, IO, or UNKNOWN.  If the board is
 * in a state where its devlists are populated, search those first; the
 * deliberate fallthrough then consults the OBP "device_type" property as
 * a fallback for nodes not found in any devlist.
 */
sbd_comp_type_t
sbd_get_devtype(sbd_handle_t *hp, dev_info_t *dip)
{
	sbd_board_t	*sbp = hp ? SBDH2BD(hp->h_sbd) : NULL;
	sbd_istate_t	bstate;
	dev_info_t	**devlist;
	int		i;
	char		device[OBP_MAXDRVNAME];
	int		devicelen;

	devicelen = sizeof (device);

	bstate = sbp ? SBD_BOARD_STATE(sbp) : SBD_STATE_EMPTY;
	/*
	 * if the board's connected or configured, search the
	 * devlists. Otherwise check the device tree
	 */
	switch (bstate) {

	case SBD_STATE_CONNECTED:
	case SBD_STATE_CONFIGURED:
	case SBD_STATE_UNREFERENCED:
	case SBD_STATE_UNCONFIGURED:
		devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_MEM);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_CPU)];
		for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_CPU);

		devlist = sbp->sb_devlist[NIX(SBD_COMP_IO)];
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (devlist[i] == dip)
				return (SBD_COMP_IO);
		/*FALLTHROUGH*/

	default:
		/* Fall back to the node's OBP "device_type" property. */
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
		    OBP_DEVICETYPE, (caddr_t)device, &devicelen))
			break;

		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (strcmp(device, SBD_OTYPE(i)) != 0)
				continue;
			return (SBD_COMP(i));
		}

		break;
	}
	return (SBD_COMP_UNKNOWN);
}
846
/*
 * Attach (configure) all devices on the board, in multiple passes as
 * directed by sbd_get_attach_devlist()/sbd_pre_attach_devlist().  Each
 * device's error is recorded in its devlist entry and folded into the
 * handle via sbd_set_err_in_hdl().
 */
static void
sbd_dev_configure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);

	pass = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);
	while ((devlist = sbd_get_attach_devlist(hp, &devnum, pass)) != NULL) {
		int	err;

		err = sbd_pre_attach_devlist(hp, devlist, devnum);
		if (err < 0) {
			/* hard pre-attach failure: stop all passes */
			break;
		} else if (err > 0) {
			/* skip this pass and move on to the next one */
			pass++;
			continue;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				sbd_attach_mem(hp, ep);
				if (SBD_GET_ERR(ep) == ESBD_CPUONLINE) {
					/* abort: free the list and bail out */
					FREESTRUCT(devlist, sbd_devlist_t,
					    MAX_MEM_UNITS_PER_BOARD);
					sbd_release_sbdp_handle(hdp);
					return;
				}
				break;

			case SBD_COMP_CPU:
				sbd_attach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_attach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			if (sbd_set_err_in_hdl(hp, ep) == 0)
				continue;
		}

		err = sbd_post_attach_devlist(hp, devlist, devnum);
		if (err < 0)
			break;

		pass++;
	}
	sbd_release_sbdp_handle(hdp);
}
923
924 static int
sbd_dev_release(sbd_handle_t * hp)925 sbd_dev_release(sbd_handle_t *hp)
926 {
927 int n, unit;
928 int32_t pass, devnum;
929 dev_info_t *dip;
930 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
931 sbdp_handle_t *hdp;
932 sbd_devlist_t *devlist;
933 sbd_comp_type_t nodetype;
934 int err = 0;
935 int dev_canceled;
936
937 pass = 1;
938 hdp = sbd_get_sbdp_handle(sbp, hp);
939
940 sbp->sb_busy = 1;
941 while ((devlist = sbd_get_release_devlist(hp, &devnum, pass)) != NULL) {
942
943 err = sbd_pre_release_devlist(hp, devlist, devnum);
944 if (err < 0) {
945 dev_canceled = 1;
946 break;
947 } else if (err > 0) {
948 pass++;
949 continue;
950 }
951
952 dev_canceled = 0;
953 for (n = 0; n < devnum; n++) {
954 dip = devlist[n].dv_dip;
955 nodetype = sbd_get_devtype(hp, dip);
956
957 unit = sbdp_get_unit_num(hdp, dip);
958 if (unit < 0) {
959 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
960 break;
961 }
962
963 if ((nodetype == SBD_COMP_MEM) &&
964 sbd_release_mem(hp, dip, unit)) {
965
966 dev_canceled++;
967 }
968
969 sbd_release_done(hp, nodetype, dip);
970 }
971
972 err = sbd_post_release_devlist(hp, devlist, devnum);
973
974 if (err < 0)
975 break;
976
977 if (dev_canceled)
978 break;
979
980 pass++;
981 }
982 sbp->sb_busy = 0;
983
984 sbd_release_sbdp_handle(hdp);
985
986 if (dev_canceled)
987 return (dev_canceled);
988
989 return (err);
990 }
991
/*
 * Detach (unconfigure) all devices on the board, pass by pass.  A
 * pre-detach failure cancels the operation only for memory; individual
 * detach errors are folded into the handle via sbd_set_err_in_hdl().
 * Returns non-zero if the memory detach was canceled, else 0.
 */
static int
sbd_dev_unconfigure(sbd_handle_t *hp)
{
	int		n, unit;
	int32_t		pass, devnum;
	dev_info_t	*dip;
	sbd_devlist_t	*devlist;
	sbdp_handle_t	*hdp;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		dev_canceled = 0;
	static fn_t	f = "sbd_dev_unconfigure";

	PR_ALL("%s...\n", f);

	pass = 1;
	hdp = sbd_get_sbdp_handle(sbp, hp);

	while ((devlist = sbd_get_detach_devlist(hp, &devnum, pass)) != NULL) {
		int	err, detach_err = 0;

		err = sbd_pre_detach_devlist(hp, devlist, devnum);
		if (err) {
			/*
			 * Only cancel the operation for memory in
			 * case of failure.
			 */
			nodetype = sbd_get_devtype(hp, devlist->dv_dip);
			if (nodetype == SBD_COMP_MEM)
				dev_canceled = 1;
			(void) sbd_post_detach_devlist(hp, devlist, devnum);
			break;
		}

		for (n = 0; n < devnum; n++) {
			sbderror_t	*ep;

			ep = &devlist[n].dv_error;
			SBD_SET_ERRNO(ep, 0);
			SBD_SET_ERR(ep, 0);
			dip = devlist[n].dv_dip;
			nodetype = sbd_get_devtype(hp, dip);

			unit = sbdp_get_unit_num(hdp, dip);
			if (unit < 0) {
				SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
				break;
			}

			switch (nodetype) {
			case SBD_COMP_MEM:
				dev_canceled = sbd_detach_mem(hp, ep, unit);
				break;

			case SBD_COMP_CPU:
				sbd_detach_cpu(hp, ep, dip, unit);
				break;

			case SBD_COMP_IO:
				sbd_detach_io(hp, ep, dip, unit);
				break;

			default:
				SBD_SET_ERRNO(ep, ENOTTY);
				break;
			}

			/* a fatal per-device error ends all passes */
			if (sbd_set_err_in_hdl(hp, ep) == 0) {
				detach_err = -1;
				break;
			}

		}
		err = sbd_post_detach_devlist(hp, devlist, devnum);
		if ((err < 0) || (detach_err < 0))
			break;

		pass++;
	}

	sbd_release_sbdp_handle(hdp);
	return (dev_canceled);
}
1075
1076 int
sbd_errno2ecode(int error)1077 sbd_errno2ecode(int error)
1078 {
1079 int rv;
1080
1081 switch (error) {
1082 case EBUSY:
1083 rv = ESBD_BUSY;
1084 break;
1085 case EINVAL:
1086 rv = ESBD_INVAL;
1087 break;
1088 case EALREADY:
1089 rv = ESBD_ALREADY;
1090 break;
1091 case ENODEV:
1092 rv = ESBD_NODEV;
1093 break;
1094 case ENOMEM:
1095 rv = ESBD_NOMEM;
1096 break;
1097 default:
1098 rv = ESBD_INVAL;
1099 }
1100
1101 return (rv);
1102 }
1103
/*
 * Attach (configure) one CPU.  Looks up the cpuid via the sbdp layer,
 * calls cpu_configure(), and on success records the CPU's devinfo
 * pathname in the board's sb_cpupath slot.  Errors are recorded in *ep.
 * Caller must hold cpu_lock.
 */
static void
sbd_attach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	int		rv = 0;
	processorid_t	cpuid;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	static fn_t	f = "sbd_attach_cpu";
	char		*pathname;

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);

	/*
	 * With the introduction of CMP devices, the CPU nodes
	 * are no longer directly under the top node. Since
	 * there is no plan to support CPU attach in the near
	 * future, a branch configure operation is not required.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		rv = -1;
		SBD_GET_PERR(hdp->h_err, ep);
	} else if ((rv = cpu_configure(cpuid)) != 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: cpu_configure for cpuid %d failed", f, cpuid);
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
	}
	sbd_release_sbdp_handle(hdp);

	if (rv == 0) {
		/* remember the full devinfo path for later detach/status */
		ASSERT(sbp->sb_cpupath[unit] != NULL);
		pathname = sbp->sb_cpupath[unit];
		(void) ddi_pathname(dip, pathname);
	}
}
1143
1144 /*
1145 * translate errno
1146 */
1147 void
sbd_errno_decode(int err,sbderror_t * ep,dev_info_t * dip)1148 sbd_errno_decode(int err, sbderror_t *ep, dev_info_t *dip)
1149 {
1150 ASSERT(err != 0);
1151
1152 switch (err) {
1153 case ENOMEM:
1154 SBD_SET_ERR(ep, ESBD_NOMEM);
1155 break;
1156
1157 case EBUSY:
1158 SBD_SET_ERR(ep, ESBD_BUSY);
1159 break;
1160
1161 case EIO:
1162 SBD_SET_ERR(ep, ESBD_IO);
1163 break;
1164
1165 case ENXIO:
1166 SBD_SET_ERR(ep, ESBD_NODEV);
1167 break;
1168
1169 case EINVAL:
1170 SBD_SET_ERR(ep, ESBD_INVAL);
1171 break;
1172
1173 case EFAULT:
1174 default:
1175 SBD_SET_ERR(ep, ESBD_FAULT);
1176 break;
1177 }
1178
1179 (void) ddi_pathname(dip, SBD_GET_ERRSTR(ep));
1180 }
1181
/*
 * Detach (unconfigure) one CPU.  Looks up the cpuid via the sbdp layer
 * and calls cpu_unconfigure(); any failure is recorded in *ep along with
 * the CPU's saved pathname.  Caller must hold cpu_lock.
 */
static void
sbd_detach_cpu(sbd_handle_t *hp, sbderror_t *ep, dev_info_t *dip, int unit)
{
	processorid_t	cpuid;
	int		rv;
	sbdp_handle_t	*hdp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_error_t	*spe;
	static fn_t	f = "sbd_detach_cpu";

	ASSERT(MUTEX_HELD(&cpu_lock));

	ASSERT(dip);
	hdp = sbd_get_sbdp_handle(sbp, hp);
	spe = hdp->h_err;
	cpuid = sbdp_get_cpuid(hdp, dip);
	if (cpuid < 0) {
		SBD_GET_PERR(spe, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}

	if ((rv = cpu_unconfigure(cpuid)) != 0) {
		SBD_SET_ERR(ep, sbd_errno2ecode(rv));
		/* use the pathname saved at attach time */
		SBD_SET_ERRSTR(ep, sbp->sb_cpupath[unit]);
		cmn_err(CE_WARN,
		    "sbd:%s: cpu_unconfigure for cpu %d failed", f, cpuid);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Since CPU nodes are no longer configured in CPU
	 * attach, the corresponding branch unconfigure
	 * operation that would be performed here is also
	 * no longer required.
	 */
}
1221
1222
/*
 * Detach the memory unit and then unconfigure its devinfo branches.
 * Returns -1 if sbd_detach_memory() fails (detach canceled), else 0;
 * branch-unconfigure errors are recorded in *ep but do not change the
 * return value.
 */
int
sbd_detach_mem(sbd_handle_t *hp, sbderror_t *ep, int unit)
{
	sbd_mem_unit_t	*mp;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	int		i, rv;
	static fn_t	f = "sbd_detach_mem";

	mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);

	if (sbd_detach_memory(hp, ep, mp, unit)) {
		cmn_err(CE_WARN, "%s: detach fail", f);
		return (-1);
	}

	/*
	 * Now detach mem devinfo nodes with status lock held.
	 */
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		dev_info_t	*fdip = NULL;

		if (mp->sbm_dip[i] == NULL)
			continue;
		ASSERT(e_ddi_branch_held(mp->sbm_dip[i]));
		mutex_enter(&sbp->sb_slock);
		rv = e_ddi_branch_unconfigure(mp->sbm_dip[i], &fdip,
		    DEVI_BRANCH_EVENT);
		mutex_exit(&sbp->sb_slock);
		if (rv) {
			/*
			 * If non-NULL, fdip is returned held and must be
			 * released.
			 */
			if (fdip != NULL) {
				sbd_errno_decode(rv, ep, fdip);
				ddi_release_devi(fdip);
			} else {
				sbd_errno_decode(rv, ep, mp->sbm_dip[i]);
			}
		}
	}

	return (0);
}
1267
1268 /* start beginning of sbd.c */
1269
1270 /*
1271 * MDR memory support - somewhat disabled for now.
1272 * UNSAFE unsafe driver code - I don't think we want this.
1273 * need to check.
1274 * DEVNODE This driver creates attachment points for individual
1275 * components as well as boards. We only need board
1276 * support.
1277 * DEV2DEVSET Put only present devices in devset.
1278 */
1279
1280
1281 static sbd_state_t
rstate_cvt(sbd_istate_t state)1282 rstate_cvt(sbd_istate_t state)
1283 {
1284 sbd_state_t cs;
1285
1286 switch (state) {
1287 case SBD_STATE_EMPTY:
1288 cs = SBD_STAT_EMPTY;
1289 break;
1290 case SBD_STATE_OCCUPIED:
1291 case SBD_STATE_FATAL:
1292 cs = SBD_STAT_DISCONNECTED;
1293 break;
1294 case SBD_STATE_CONFIGURED:
1295 case SBD_STATE_CONNECTED:
1296 case SBD_STATE_UNCONFIGURED:
1297 case SBD_STATE_PARTIAL:
1298 case SBD_STATE_RELEASE:
1299 case SBD_STATE_UNREFERENCED:
1300 cs = SBD_STAT_CONNECTED;
1301 break;
1302 default:
1303 cs = SBD_STAT_NONE;
1304 break;
1305 }
1306
1307 return (cs);
1308 }
1309
1310
1311 sbd_state_t
ostate_cvt(sbd_istate_t state)1312 ostate_cvt(sbd_istate_t state)
1313 {
1314 sbd_state_t cs;
1315
1316 switch (state) {
1317 case SBD_STATE_EMPTY:
1318 case SBD_STATE_OCCUPIED:
1319 case SBD_STATE_UNCONFIGURED:
1320 case SBD_STATE_CONNECTED:
1321 case SBD_STATE_FATAL:
1322 cs = SBD_STAT_UNCONFIGURED;
1323 break;
1324 case SBD_STATE_PARTIAL:
1325 case SBD_STATE_CONFIGURED:
1326 case SBD_STATE_RELEASE:
1327 case SBD_STATE_UNREFERENCED:
1328 cs = SBD_STAT_CONFIGURED;
1329 break;
1330 default:
1331 cs = SBD_STAT_NONE;
1332 break;
1333 }
1334
1335 return (cs);
1336 }
1337
1338 int
sbd_dealloc_instance(sbd_board_t * sbp,int max_boards)1339 sbd_dealloc_instance(sbd_board_t *sbp, int max_boards)
1340 {
1341 int b;
1342 sbd_board_t *list = sbp;
1343 static fn_t f = "sbd_dealloc_instance";
1344
1345 PR_ALL("%s...\n", f);
1346
1347 if (sbp == NULL) {
1348 return (-1);
1349 }
1350
1351 for (b = 0; b < max_boards; b++) {
1352 sbd_board_destroy(sbp++);
1353 }
1354
1355 FREESTRUCT(list, sbd_board_t, max_boards);
1356
1357 return (0);
1358 }
1359
1360 static sbd_devset_t
sbd_dev2devset(sbd_comp_id_t * cid)1361 sbd_dev2devset(sbd_comp_id_t *cid)
1362 {
1363 static fn_t f = "sbd_dev2devset";
1364
1365 sbd_devset_t devset;
1366 int unit = cid->c_unit;
1367
1368 switch (cid->c_type) {
1369 case SBD_COMP_NONE:
1370 devset = DEVSET(SBD_COMP_CPU, DEVSET_ANYUNIT);
1371 devset |= DEVSET(SBD_COMP_MEM, DEVSET_ANYUNIT);
1372 devset |= DEVSET(SBD_COMP_IO, DEVSET_ANYUNIT);
1373 break;
1374
1375 case SBD_COMP_CPU:
1376 if ((unit > MAX_CPU_UNITS_PER_BOARD) || (unit < 0)) {
1377 PR_ALL("%s: invalid cpu unit# = %d", f, unit);
1378 devset = 0;
1379 } else
1380 /*
1381 * Generate a devset that includes all the
1382 * cores of a CMP device. If this is not a
1383 * CMP, the extra cores will be eliminated
1384 * later since they are not present. This is
1385 * also true for CMP devices that do not have
1386 * all cores active.
1387 */
1388 devset = DEVSET(SBD_COMP_CMP, unit);
1389
1390 break;
1391
1392 case SBD_COMP_MEM:
1393
1394 if ((unit > MAX_MEM_UNITS_PER_BOARD) || (unit < 0)) {
1395 #ifdef XXX_jeffco
1396 PR_ALL("%s: invalid mem unit# = %d", f, unit);
1397 devset = 0;
1398 #endif
1399 devset = DEVSET(cid->c_type, 0);
1400 PR_ALL("%s: adjusted MEM devset = 0x%x\n",
1401 f, devset);
1402 } else
1403 devset = DEVSET(cid->c_type, unit);
1404 break;
1405
1406 case SBD_COMP_IO:
1407 if ((unit > MAX_IO_UNITS_PER_BOARD) || (unit < 0)) {
1408 PR_ALL("%s: invalid io unit# = %d",
1409 f, unit);
1410 devset = 0;
1411 } else
1412 devset = DEVSET(cid->c_type, unit);
1413
1414 break;
1415
1416 default:
1417 case SBD_COMP_UNKNOWN:
1418 devset = 0;
1419 break;
1420 }
1421
1422 return (devset);
1423 }
1424
1425 /*
1426 * Simple mutex for covering handle list ops as it is only
1427 * used "infrequently". No need to add another mutex to the sbd_board_t.
1428 */
1429 static kmutex_t sbd_handle_list_mutex;
1430
1431 static sbd_handle_t *
sbd_get_handle(dev_t dev,sbd_softstate_t * softsp,intptr_t arg,sbd_init_arg_t * iap)1432 sbd_get_handle(dev_t dev, sbd_softstate_t *softsp, intptr_t arg,
1433 sbd_init_arg_t *iap)
1434 {
1435 sbd_handle_t *hp;
1436 sbderror_t *ep;
1437 sbd_priv_handle_t *shp;
1438 sbd_board_t *sbp = softsp->sbd_boardlist;
1439 int board;
1440
1441 board = SBDGETSLOT(dev);
1442 ASSERT(board < softsp->max_boards);
1443 sbp += board;
1444
1445 /*
1446 * Brand-new handle.
1447 */
1448 shp = kmem_zalloc(sizeof (sbd_priv_handle_t), KM_SLEEP);
1449 shp->sh_arg = (void *)arg;
1450
1451 hp = MACHHD2HD(shp);
1452
1453 ep = &shp->sh_err;
1454
1455 hp->h_err = ep;
1456 hp->h_sbd = (void *) sbp;
1457 hp->h_dev = iap->dev;
1458 hp->h_cmd = iap->cmd;
1459 hp->h_mode = iap->mode;
1460 sbd_init_err(ep);
1461
1462 mutex_enter(&sbd_handle_list_mutex);
1463 shp->sh_next = sbp->sb_handle;
1464 sbp->sb_handle = shp;
1465 mutex_exit(&sbd_handle_list_mutex);
1466
1467 return (hp);
1468 }
1469
/*
 * Reset an sbd error descriptor to its initial "no error" state:
 * no errno, no error code, empty resource string.
 */
void
sbd_init_err(sbderror_t *ep)
{
	ep->e_errno = 0;
	ep->e_code = 0;
	ep->e_rsc[0] = '\0';
}
1477
1478 int
sbd_set_err_in_hdl(sbd_handle_t * hp,sbderror_t * ep)1479 sbd_set_err_in_hdl(sbd_handle_t *hp, sbderror_t *ep)
1480 {
1481 sbderror_t *hep = SBD_HD2ERR(hp);
1482
1483 /*
1484 * If there is an error logged already, don't rewrite it
1485 */
1486 if (SBD_GET_ERR(hep) || SBD_GET_ERRNO(hep)) {
1487 return (0);
1488 }
1489
1490 if (SBD_GET_ERR(ep) || SBD_GET_ERRNO(ep)) {
1491 SBD_SET_ERR(hep, SBD_GET_ERR(ep));
1492 SBD_SET_ERRNO(hep, SBD_GET_ERRNO(ep));
1493 SBD_SET_ERRSTR(hep, SBD_GET_ERRSTR(ep));
1494 return (0);
1495 }
1496
1497 return (-1);
1498 }
1499
1500 static void
sbd_release_handle(sbd_handle_t * hp)1501 sbd_release_handle(sbd_handle_t *hp)
1502 {
1503 sbd_priv_handle_t *shp, **shpp;
1504 sbd_board_t *sbp;
1505 static fn_t f = "sbd_release_handle";
1506
1507 if (hp == NULL)
1508 return;
1509
1510 sbp = SBDH2BD(hp->h_sbd);
1511
1512 shp = HD2MACHHD(hp);
1513
1514 mutex_enter(&sbd_handle_list_mutex);
1515 /*
1516 * Locate the handle in the board's reference list.
1517 */
1518 for (shpp = &sbp->sb_handle; (*shpp) && ((*shpp) != shp);
1519 shpp = &((*shpp)->sh_next))
1520 /* empty */;
1521
1522 if (*shpp == NULL) {
1523 cmn_err(CE_PANIC,
1524 "sbd:%s: handle not found in board %d", f, sbp->sb_num);
1525 /*NOTREACHED*/
1526 } else {
1527 *shpp = shp->sh_next;
1528 }
1529 mutex_exit(&sbd_handle_list_mutex);
1530
1531 if (hp->h_opts.copts != NULL) {
1532 FREESTRUCT(hp->h_opts.copts, char, hp->h_opts.size);
1533 }
1534
1535 FREESTRUCT(shp, sbd_priv_handle_t, 1);
1536 }
1537
1538 sbdp_handle_t *
sbd_get_sbdp_handle(sbd_board_t * sbp,sbd_handle_t * hp)1539 sbd_get_sbdp_handle(sbd_board_t *sbp, sbd_handle_t *hp)
1540 {
1541 sbdp_handle_t *hdp;
1542
1543 hdp = kmem_zalloc(sizeof (sbdp_handle_t), KM_SLEEP);
1544 hdp->h_err = kmem_zalloc(sizeof (sbd_error_t), KM_SLEEP);
1545 if (sbp == NULL) {
1546 hdp->h_board = -1;
1547 hdp->h_wnode = -1;
1548 } else {
1549 hdp->h_board = sbp->sb_num;
1550 hdp->h_wnode = sbp->sb_wnode;
1551 }
1552
1553 if (hp == NULL) {
1554 hdp->h_flags = 0;
1555 hdp->h_opts = NULL;
1556 } else {
1557 hdp->h_flags = SBD_2_SBDP_FLAGS(hp->h_flags);
1558 hdp->h_opts = &hp->h_opts;
1559 }
1560
1561 return (hdp);
1562 }
1563
1564 void
sbd_release_sbdp_handle(sbdp_handle_t * hdp)1565 sbd_release_sbdp_handle(sbdp_handle_t *hdp)
1566 {
1567 if (hdp == NULL)
1568 return;
1569
1570 kmem_free(hdp->h_err, sizeof (sbd_error_t));
1571 kmem_free(hdp, sizeof (sbdp_handle_t));
1572 }
1573
1574 void
sbd_reset_error_sbdph(sbdp_handle_t * hdp)1575 sbd_reset_error_sbdph(sbdp_handle_t *hdp)
1576 {
1577 if ((hdp != NULL) && (hdp->h_err != NULL)) {
1578 bzero(hdp->h_err, sizeof (sbd_error_t));
1579 }
1580 }
1581
/*
 * Copy the ioctl command structure in from userland into cmdp,
 * converting from the ILP32 layout when the caller is a 32-bit
 * process, and copy in any platform-specific option string the
 * command carries (everything except SBD_CMD_STATUS).
 *
 * Returns 0 on success, EINVAL for a NULL user argument, or EFAULT
 * if any copyin fails.  On an options copyin failure the already
 * allocated h_opts.copts buffer is intentionally left in place; it
 * is freed in sbd_release_handle().
 */
static int
sbd_copyin_ioarg(sbd_handle_t *hp, int mode, int cmd, sbd_cmd_t *cmdp,
    sbd_ioctl_arg_t *iap)
{
	static fn_t f = "sbd_copyin_ioarg";

	if (iap == NULL)
		return (EINVAL);

	bzero((caddr_t)cmdp, sizeof (sbd_cmd_t));

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		/* 32-bit caller: copy into the ILP32 image, then widen. */
		sbd_cmd32_t scmd32;

		bzero((caddr_t)&scmd32, sizeof (sbd_cmd32_t));

		if (ddi_copyin((void *)iap, (void *)&scmd32,
		    sizeof (sbd_cmd32_t), mode)) {
			cmn_err(CE_WARN,
			    "sbd:%s: (32bit) failed to copyin sbdcmd-struct",
			    f);
			return (EFAULT);
		}
		/* Widen field by field; pointers go via uintptr_t. */
		cmdp->cmd_cm.c_id.c_type = scmd32.cmd_cm.c_id.c_type;
		cmdp->cmd_cm.c_id.c_unit = scmd32.cmd_cm.c_id.c_unit;
		bcopy(&scmd32.cmd_cm.c_id.c_name[0],
		    &cmdp->cmd_cm.c_id.c_name[0], OBP_MAXPROPNAME);
		cmdp->cmd_cm.c_flags = scmd32.cmd_cm.c_flags;
		cmdp->cmd_cm.c_len = scmd32.cmd_cm.c_len;
		cmdp->cmd_cm.c_opts = (caddr_t)(uintptr_t)scmd32.cmd_cm.c_opts;

		if (cmd == SBD_CMD_PASSTHRU) {
			PR_BYP("passthru copyin: iap=%p, sz=%ld", (void *)iap,
			    sizeof (sbd_cmd32_t));
			PR_BYP("passthru copyin: c_opts=%x, c_len=%d",
			    scmd32.cmd_cm.c_opts, scmd32.cmd_cm.c_len);
		}

		switch (cmd) {
		case SBD_CMD_STATUS:
			/* Status also carries a user buffer pointer/size. */
			cmdp->cmd_stat.s_nbytes = scmd32.cmd_stat.s_nbytes;
			cmdp->cmd_stat.s_statp =
			    (caddr_t)(uintptr_t)scmd32.cmd_stat.s_statp;
			break;
		default:
			break;

		}
	} else
#endif /* _MULTI_DATAMODEL */
	if (ddi_copyin((void *)iap, (void *)cmdp,
	    sizeof (sbd_cmd_t), mode) != 0) {
		cmn_err(CE_WARN, "sbd:%s: failed to copyin sbd cmd_t struct",
		    f);
		return (EFAULT);
	}
	/*
	 * A user may set platform specific options so we need to
	 * copy them in
	 */
	if (cmd != SBD_CMD_STATUS &&
	    (hp->h_opts.size = cmdp->cmd_cm.c_len) > 0) {
		hp->h_opts.size += 1;	/* For null termination of string. */
		hp->h_opts.copts = GETSTRUCT(char, hp->h_opts.size);
		if (ddi_copyin((void *)cmdp->cmd_cm.c_opts,
		    (void *)hp->h_opts.copts,
		    cmdp->cmd_cm.c_len, hp->h_mode) != 0) {
			/* copts is freed in sbd_release_handle(). */
			cmn_err(CE_WARN, "sbd:%s: failed to copyin options", f);
			return (EFAULT);
		}
	}

	return (0);
}
1658
1659 static int
sbd_copyout_ioarg(int mode,int cmd,sbd_cmd_t * scp,sbd_ioctl_arg_t * iap)1660 sbd_copyout_ioarg(int mode, int cmd, sbd_cmd_t *scp, sbd_ioctl_arg_t *iap)
1661 {
1662 static fn_t f = "sbd_copyout_ioarg";
1663
1664 if ((iap == NULL) || (scp == NULL))
1665 return (EINVAL);
1666
1667 #ifdef _MULTI_DATAMODEL
1668 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
1669 sbd_cmd32_t scmd32;
1670
1671 scmd32.cmd_cm.c_id.c_type = scp->cmd_cm.c_id.c_type;
1672 scmd32.cmd_cm.c_id.c_unit = scp->cmd_cm.c_id.c_unit;
1673 bcopy(scp->cmd_cm.c_id.c_name,
1674 scmd32.cmd_cm.c_id.c_name, OBP_MAXPROPNAME);
1675
1676 scmd32.cmd_cm.c_flags = scp->cmd_cm.c_flags;
1677
1678 switch (cmd) {
1679 case SBD_CMD_GETNCM:
1680 scmd32.cmd_getncm.g_ncm = scp->cmd_getncm.g_ncm;
1681 break;
1682 default:
1683 break;
1684 }
1685
1686 if (ddi_copyout((void *)&scmd32, (void *)iap,
1687 sizeof (sbd_cmd32_t), mode)) {
1688 cmn_err(CE_WARN,
1689 "sbd:%s: (32bit) failed to copyout sbdcmd struct",
1690 f);
1691 return (EFAULT);
1692 }
1693 } else
1694 #endif /* _MULTI_DATAMODEL */
1695 if (ddi_copyout((void *)scp, (void *)iap,
1696 sizeof (sbd_cmd_t), mode) != 0) {
1697 cmn_err(CE_WARN, "sbd:%s: failed to copyout sbdcmd struct", f);
1698 return (EFAULT);
1699 }
1700
1701 return (0);
1702 }
1703
1704 static int
sbd_copyout_errs(int mode,sbd_ioctl_arg_t * iap,void * arg)1705 sbd_copyout_errs(int mode, sbd_ioctl_arg_t *iap, void *arg)
1706 {
1707 static fn_t f = "sbd_copyout_errs";
1708 sbd_ioctl_arg_t *uap;
1709
1710 uap = (sbd_ioctl_arg_t *)arg;
1711
1712 #ifdef _MULTI_DATAMODEL
1713 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
1714 sbd_error32_t err32;
1715 sbd_ioctl_arg32_t *uap32;
1716
1717 uap32 = (sbd_ioctl_arg32_t *)arg;
1718
1719 err32.e_code = iap->ie_code;
1720 (void) strcpy(err32.e_rsc, iap->ie_rsc);
1721
1722 if (ddi_copyout((void *)&err32, (void *)&uap32->i_err,
1723 sizeof (sbd_error32_t), mode)) {
1724 cmn_err(CE_WARN,
1725 "sbd:%s: failed to copyout ioctl32 errs", f);
1726 return (EFAULT);
1727 }
1728 } else
1729 #endif /* _MULTI_DATAMODEL */
1730 if (ddi_copyout((void *)&iap->i_err, (void *)&uap->i_err,
1731 sizeof (sbd_error_t), mode) != 0) {
1732 cmn_err(CE_WARN, "sbd:%s: failed to copyout ioctl errs", f);
1733 return (EFAULT);
1734 }
1735
1736 return (0);
1737 }
1738
1739 /*
1740 * State transition policy is that if at least one
1741 * device cannot make the transition, then none of
1742 * the requested devices are allowed to transition.
1743 *
1744 * Returns the state that is in error, if any.
1745 */
static int
sbd_check_transition(sbd_board_t *sbp, sbd_devset_t *devsetp,
    struct sbd_state_trans *transp)
{
	int s, ut;
	int state_err = 0;
	sbd_devset_t devset;
	static fn_t f = "sbd_check_transition";

	devset = *devsetp;

	if (!devset) {
		/*
		 * Transition does not deal with any components.
		 * This is the case for addboard/deleteboard.
		 */
		PR_ALL("%s: no devs: requested devset = 0x%x,"
		    " final devset = 0x%x\n",
		    f, (uint_t)*devsetp, (uint_t)devset);

		return (0);
	}

	/*
	 * For each component class (mem, cpu, io), drop from the devset
	 * every unit whose current state disallows this transition
	 * (x_rv nonzero in the transition table), remembering the first
	 * offending state in state_err.
	 */
	if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_MEM, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_CPU, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_CPU, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_CPU, ut);
			}
		}
	}

	if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++) {
			if (DEVSET_IN_SET(devset, SBD_COMP_IO, ut) == 0)
				continue;
			s = (int)SBD_DEVICE_STATE(sbp, SBD_COMP_IO, ut);
			if (transp->x_op[s].x_rv) {
				if (!state_err)
					state_err = s;
				DEVSET_DEL(devset, SBD_COMP_IO, ut);
			}
		}
	}

	PR_ALL("%s: requested devset = 0x%x, final devset = 0x%x\n",
	    f, (uint_t)*devsetp, (uint_t)devset);

	/* Hand the (possibly reduced) devset back to the caller. */
	*devsetp = devset;
	/*
	 * If there are some remaining components for which
	 * this state transition is valid, then allow them
	 * through, otherwise if none are left then return
	 * the state error.
	 */
	return (devset ? 0 : state_err);
}
1820
1821 /*
1822 * pre-op entry point must SET_ERRNO(), if needed.
1823 * Return value of non-zero indicates failure.
1824 */
static int
sbd_pre_op(sbd_handle_t *hp)
{
	int		rv = 0, t;
	int		cmd, serr = 0;
	sbd_devset_t	devset;
	sbd_board_t	*sbp = SBDH2BD(hp->h_sbd);
	sbd_priv_handle_t	*shp = HD2MACHHD(hp);
	sbderror_t	*ep = SBD_HD2ERR(hp);
	sbd_cmd_t	*cmdp;
	static fn_t	f = "sbd_pre_op";

	cmd = hp->h_cmd;
	devset = shp->sh_devset;

	switch (cmd) {
	case SBD_CMD_CONNECT:
	case SBD_CMD_DISCONNECT:
	case SBD_CMD_UNCONFIGURE:
	case SBD_CMD_CONFIGURE:
	case SBD_CMD_ASSIGN:
	case SBD_CMD_UNASSIGN:
	case SBD_CMD_POWERON:
	case SBD_CMD_POWEROFF:
	case SBD_CMD_TEST:
		/* ioctls allowed if caller has write permission */
		if (!(hp->h_mode & FWRITE)) {
			SBD_SET_ERRNO(ep, EPERM);
			return (-1);
		}
		/* FALLTHROUGH */

	default:
		break;
	}

	/* Copy in the user's command; h_iap is freed here on failure. */
	hp->h_iap = GETSTRUCT(sbd_ioctl_arg_t, 1);
	rv = sbd_copyin_ioarg(hp, hp->h_mode, cmd,
	    (sbd_cmd_t *)hp->h_iap, shp->sh_arg);
	if (rv) {
		SBD_SET_ERRNO(ep, rv);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		cmn_err(CE_WARN, "%s: copyin fail", f);
		return (-1);
	} else {
		cmdp = (sbd_cmd_t *)hp->h_iap;
		if (cmdp->cmd_cm.c_id.c_name[0] != '\0') {

			/*
			 * A component name was supplied: derive the
			 * component type from it, and default a memory
			 * unit of -1 to unit 0.
			 */
			cmdp->cmd_cm.c_id.c_type =
			    SBD_COMP(sbd_name_to_idx(cmdp->cmd_cm.c_id.c_name));
			if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_MEM) {
				if (cmdp->cmd_cm.c_id.c_unit == -1)
					cmdp->cmd_cm.c_id.c_unit = 0;
			}
		}
		devset = shp->sh_orig_devset = shp->sh_devset =
		    sbd_dev2devset(&cmdp->cmd_cm.c_id);
		if (devset == 0) {
			SBD_SET_ERRNO(ep, EINVAL);
			FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
			hp->h_iap = NULL;
			return (-1);
		}
	}

	/*
	 * Always turn on these bits ala Sunfire DR.
	 */
	hp->h_flags |= SBD_FLAG_DEVI_FORCE;

	if (cmdp->cmd_cm.c_flags & SBD_FLAG_FORCE)
		hp->h_flags |= SBD_IOCTL_FLAG_FORCE;

	/*
	 * Check for valid state transitions.
	 */
	if (!serr && ((t = CMD2INDEX(cmd)) != -1)) {
		struct sbd_state_trans	*transp;
		int			state_err;

		transp = &sbd_state_transition[t];
		ASSERT(transp->x_cmd == cmd);

		state_err = sbd_check_transition(sbp, &devset, transp);

		if (state_err < 0) {
			/*
			 * Invalidate device.
			 */
			SBD_SET_ERRNO(ep, ENOTTY);
			serr = -1;
			PR_ALL("%s: invalid devset (0x%x)\n",
			    f, (uint_t)devset);
		} else if (state_err != 0) {
			/*
			 * State transition is not a valid one.
			 */
			SBD_SET_ERRNO(ep, transp->x_op[state_err].x_err);
			serr = transp->x_op[state_err].x_rv;
			PR_ALL("%s: invalid state %s(%d) for cmd %s(%d)\n",
			    f, sbd_state_str[state_err], state_err,
			    SBD_CMD_STR(cmd), cmd);
		}
		if (serr && SBD_GET_ERRNO(ep) != 0) {
			/*
			 * A state transition error occurred.
			 */
			if (serr < 0) {
				SBD_SET_ERR(ep, ESBD_INVAL);
			} else {
				SBD_SET_ERR(ep, ESBD_STATE);
			}
			PR_ALL("%s: invalid state transition\n", f);
		} else {
			/* Transition allowed: commit the reduced devset. */
			shp->sh_devset = devset;
		}
	}

	if (serr && !rv && hp->h_iap) {

		/*
		 * There was a state error. We successfully copied
		 * in the ioctl argument, so let's fill in the
		 * error and copy it back out.
		 */

		if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0)
			SBD_SET_ERRNO(ep, EIO);

		SBD_SET_IOCTL_ERR(&hp->h_iap->i_err, ep->e_code, ep->e_rsc);
		(void) sbd_copyout_errs(hp->h_mode, hp->h_iap, shp->sh_arg);
		FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
		hp->h_iap = NULL;
		rv = -1;
	}

	return (rv);
}
1963
1964 static void
sbd_post_op(sbd_handle_t * hp)1965 sbd_post_op(sbd_handle_t *hp)
1966 {
1967 int cmd;
1968 sbderror_t *ep = SBD_HD2ERR(hp);
1969 sbd_priv_handle_t *shp = HD2MACHHD(hp);
1970 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
1971
1972 cmd = hp->h_cmd;
1973
1974 switch (cmd) {
1975 case SBD_CMD_CONFIGURE:
1976 case SBD_CMD_UNCONFIGURE:
1977 case SBD_CMD_CONNECT:
1978 case SBD_CMD_DISCONNECT:
1979 sbp->sb_time = gethrestime_sec();
1980 break;
1981
1982 default:
1983 break;
1984 }
1985
1986 if (SBD_GET_ERR(ep) && SBD_GET_ERRNO(ep) == 0) {
1987 SBD_SET_ERRNO(ep, EIO);
1988 }
1989
1990 if (shp->sh_arg != NULL) {
1991
1992 if (SBD_GET_ERR(ep) != ESBD_NOERROR) {
1993
1994 SBD_SET_IOCTL_ERR(&hp->h_iap->i_err, ep->e_code,
1995 ep->e_rsc);
1996
1997 (void) sbd_copyout_errs(hp->h_mode, hp->h_iap,
1998 shp->sh_arg);
1999 }
2000
2001 if (hp->h_iap != NULL) {
2002 FREESTRUCT(hp->h_iap, sbd_ioctl_arg_t, 1);
2003 hp->h_iap = NULL;
2004 }
2005 }
2006 }
2007
2008 static int
sbd_probe_board(sbd_handle_t * hp)2009 sbd_probe_board(sbd_handle_t *hp)
2010 {
2011 int rv;
2012 sbd_board_t *sbp;
2013 sbdp_handle_t *hdp;
2014 static fn_t f = "sbd_probe_board";
2015
2016 sbp = SBDH2BD(hp->h_sbd);
2017
2018 ASSERT(sbp != NULL);
2019 PR_ALL("%s for board %d", f, sbp->sb_num);
2020
2021
2022 hdp = sbd_get_sbdp_handle(sbp, hp);
2023
2024 if ((rv = sbdp_connect_board(hdp)) != 0) {
2025 sbderror_t *ep = SBD_HD2ERR(hp);
2026
2027 SBD_GET_PERR(hdp->h_err, ep);
2028 }
2029
2030 /*
2031 * We need to force a recache after the connect. The cached
2032 * info may be incorrect
2033 */
2034 mutex_enter(&sbp->sb_flags_mutex);
2035 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2036 mutex_exit(&sbp->sb_flags_mutex);
2037
2038 SBD_INJECT_ERR(SBD_PROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2039 ESGT_PROBE, NULL);
2040
2041 sbd_release_sbdp_handle(hdp);
2042
2043 return (rv);
2044 }
2045
2046 static int
sbd_deprobe_board(sbd_handle_t * hp)2047 sbd_deprobe_board(sbd_handle_t *hp)
2048 {
2049 int rv;
2050 sbdp_handle_t *hdp;
2051 sbd_board_t *sbp;
2052 static fn_t f = "sbd_deprobe_board";
2053
2054 PR_ALL("%s...\n", f);
2055
2056 sbp = SBDH2BD(hp->h_sbd);
2057
2058 hdp = sbd_get_sbdp_handle(sbp, hp);
2059
2060 if ((rv = sbdp_disconnect_board(hdp)) != 0) {
2061 sbderror_t *ep = SBD_HD2ERR(hp);
2062
2063 SBD_GET_PERR(hdp->h_err, ep);
2064 }
2065
2066 mutex_enter(&sbp->sb_flags_mutex);
2067 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
2068 mutex_exit(&sbp->sb_flags_mutex);
2069
2070 SBD_INJECT_ERR(SBD_DEPROBE_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2071 ESGT_DEPROBE, NULL);
2072
2073 sbd_release_sbdp_handle(hdp);
2074 return (rv);
2075 }
2076
2077 /*
2078 * Check if a CPU node is part of a CMP.
2079 */
2080 int
sbd_is_cmp_child(dev_info_t * dip)2081 sbd_is_cmp_child(dev_info_t *dip)
2082 {
2083 dev_info_t *pdip;
2084
2085 if (strcmp(ddi_node_name(dip), "cpu") != 0) {
2086 return (0);
2087 }
2088
2089 pdip = ddi_get_parent(dip);
2090
2091 ASSERT(pdip);
2092
2093 if (strcmp(ddi_node_name(pdip), "cmp") == 0) {
2094 return (1);
2095 }
2096
2097 return (0);
2098 }
2099
2100 /*
2101 * Returns the nodetype if dip is a top dip on the board of
2102 * interest or SBD_COMP_UNKNOWN otherwise
2103 */
2104 static sbd_comp_type_t
get_node_type(sbd_board_t * sbp,dev_info_t * dip,int * unitp)2105 get_node_type(sbd_board_t *sbp, dev_info_t *dip, int *unitp)
2106 {
2107 int idx, unit;
2108 sbd_handle_t *hp;
2109 sbdp_handle_t *hdp;
2110 char otype[OBP_MAXDRVNAME];
2111 int otypelen;
2112
2113 ASSERT(sbp);
2114
2115 if (unitp)
2116 *unitp = -1;
2117
2118 hp = MACHBD2HD(sbp);
2119
2120 hdp = sbd_get_sbdp_handle(sbp, hp);
2121 if (sbdp_get_board_num(hdp, dip) != sbp->sb_num) {
2122 sbd_release_sbdp_handle(hdp);
2123 return (SBD_COMP_UNKNOWN);
2124 }
2125
2126 /*
2127 * sbdp_get_unit_num will return (-1) for cmp as there
2128 * is no "device_type" property associated with cmp.
2129 * Therefore we will just skip getting unit number for
2130 * cmp. Callers of this function need to check the
2131 * value set in unitp before using it to dereference
2132 * an array.
2133 */
2134 if (strcmp(ddi_node_name(dip), "cmp") == 0) {
2135 sbd_release_sbdp_handle(hdp);
2136 return (SBD_COMP_CMP);
2137 }
2138
2139 otypelen = sizeof (otype);
2140 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2141 OBP_DEVICETYPE, (caddr_t)otype, &otypelen)) {
2142 sbd_release_sbdp_handle(hdp);
2143 return (SBD_COMP_UNKNOWN);
2144 }
2145
2146 idx = sbd_otype_to_idx(otype);
2147
2148 if (SBD_COMP(idx) == SBD_COMP_UNKNOWN) {
2149 sbd_release_sbdp_handle(hdp);
2150 return (SBD_COMP_UNKNOWN);
2151 }
2152
2153 unit = sbdp_get_unit_num(hdp, dip);
2154 if (unit == -1) {
2155 cmn_err(CE_WARN,
2156 "get_node_type: %s unit fail %p", otype, (void *)dip);
2157 sbd_release_sbdp_handle(hdp);
2158 return (SBD_COMP_UNKNOWN);
2159 }
2160
2161 sbd_release_sbdp_handle(hdp);
2162
2163 if (unitp)
2164 *unitp = unit;
2165
2166 return (SBD_COMP(idx));
2167 }
2168
/*
 * Walk context handed to sbd_setup_devlists() via ddi_walk_devs().
 */
typedef struct {
	sbd_board_t	*sbp;	/* board whose devlists are being built */
	int		nmc;	/* memory-controller dips seen so far */
	int		hold;	/* NOTE(review): not referenced in this chunk; confirm use */
} walk_tree_t;
2174
/*
 * ddi_walk_devs() callback that populates a board's per-component
 * device lists.  For every cpu/mem/io node on the board of interest
 * it records the devinfo pathname, stores the dip in the board's
 * devlist, marks the unit present, and for memory additionally
 * collects the memory-controller dips in the unit-0 mem unit.
 */
static int
sbd_setup_devlists(dev_info_t *dip, void *arg)
{
	walk_tree_t	*wp;
	dev_info_t	**devlist = NULL;
	char		*pathname = NULL;
	sbd_mem_unit_t	*mp;
	static fn_t	f = "sbd_setup_devlists";
	sbd_board_t	*sbp;
	int		unit;
	sbd_comp_type_t	nodetype;

	ASSERT(dip);

	wp = (walk_tree_t *)arg;

	if (wp == NULL) {
		PR_ALL("%s:bad arg\n", f);
		return (DDI_WALK_TERMINATE);
	}

	sbp = wp->sbp;

	/*
	 * get_node_type() only returns CPU/MEM/IO with a valid unit
	 * (>= 0); CMP and off-board/unknown nodes are filtered below.
	 */
	nodetype = get_node_type(sbp, dip, &unit);

	switch (nodetype) {

	case SBD_COMP_CPU:
		pathname = sbp->sb_cpupath[unit];
		break;

	case SBD_COMP_MEM:
		pathname = sbp->sb_mempath[unit];
		break;

	case SBD_COMP_IO:
		pathname = sbp->sb_iopath[unit];
		break;

	case SBD_COMP_CMP:
	case SBD_COMP_UNKNOWN:
		/*
		 * This dip is not of interest to us
		 */
		return (DDI_WALK_CONTINUE);

	default:
		ASSERT(0);
		return (DDI_WALK_CONTINUE);
	}

	/*
	 * dip's parent is being held busy by ddi_walk_devs(),
	 * so dip doesn't have to be held while calling ddi_pathname()
	 */
	if (pathname) {
		(void) ddi_pathname(dip, pathname);
	}

	devlist = sbp->sb_devlist[NIX(nodetype)];

	/*
	 * The branch rooted at dip should already be held,
	 * unless we are dealing with a core of a CMP.
	 */
	ASSERT(sbd_is_cmp_child(dip) || e_ddi_branch_held(dip));
	devlist[unit] = dip;

	/*
	 * This test is required if multiple devices are considered
	 * as one. This is the case for memory-controller nodes.
	 */
	if (!SBD_DEV_IS_PRESENT(sbp, nodetype, unit)) {
		sbp->sb_ndev++;
		SBD_DEV_SET_PRESENT(sbp, nodetype, unit);
	}

	if (nodetype == SBD_COMP_MEM) {
		/* All MC dips are collected under mem unit 0. */
		mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
		ASSERT(wp->nmc < SBD_NUM_MC_PER_BOARD);
		mp->sbm_dip[wp->nmc++] = dip;
	}

	return (DDI_WALK_CONTINUE);
}
2260
2261 /*
2262 * This routine is used to construct the memory devlist.
2263 * In Starcat and Serengeti platforms, a system board can contain up to
2264 * four memory controllers (MC). The MCs have been programmed by POST for
2265 * optimum memory interleaving amongst their peers on the same board.
2266 * This DR driver does not support deinterleaving. Therefore, the smallest
2267 * unit of memory that can be manipulated by this driver is all of the
2268 * memory on a board. Because of this restriction, a board's memory devlist
2269 * is populated with only one of the four (possible) MC dnodes on that board.
2270 * Care must be taken to ensure that the selected MC dnode represents the
2271 * lowest physical address to which memory on the board will respond to.
2272 * This is required in order to preserve the semantics of
2273 * sbdp_get_base_physaddr() when applied to a MC dnode stored in the
2274 * memory devlist.
2275 */
static void
sbd_init_mem_devlists(sbd_board_t *sbp)
{
	dev_info_t	**devlist;
	sbd_mem_unit_t	*mp;
	dev_info_t	*mc_dip;
	sbdp_handle_t	*hdp;
	uint64_t	mc_pa, lowest_pa;
	int		i;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	devlist = sbp->sb_devlist[NIX(SBD_COMP_MEM)];

	/* All MC dips were collected under mem unit 0 during the walk. */
	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);

	mc_dip = mp->sbm_dip[0];
	if (mc_dip == NULL)
		return;		/* No MC dips found for this board */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
		/* TODO: log complaint about dnode */

pretend_no_mem:
		/*
		 * We are here because sbdphw_get_base_physaddr() failed.
		 * Although it is very unlikely to happen, it did. Lucky us.
		 * Since we can no longer examine _all_ of the MCs on this
		 * board to determine which one is programmed to the lowest
		 * physical address, we cannot involve any of the MCs on
		 * this board in DR operations. To ensure this, we pretend
		 * that this board does not contain any memory.
		 *
		 * Paranoia: clear the dev_present mask.
		 */
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, 0)) {
			ASSERT(sbp->sb_ndev != 0);
			SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, 0);
			sbp->sb_ndev--;
		}

		for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
			mp->sbm_dip[i] = NULL;
		}

		sbd_release_sbdp_handle(hdp);
		return;
	}

	/* assume this one will win. */
	devlist[0] = mc_dip;
	mp->sbm_cm.sbdev_dip = mc_dip;
	lowest_pa = mc_pa;

	/*
	 * We know the base physical address of one of the MC devices. Now
	 * we will enumerate through all of the remaining MC devices on
	 * the board to find which of them is programmed to the lowest
	 * physical address.
	 *
	 * NOTE(review): the pretend_no_mem label above is only reached
	 * from the first sbdphw_get_base_physaddr() failure; a failure
	 * in this loop just stops the scan (see break below) — confirm
	 * that asymmetry is intentional.
	 */
	for (i = 1; i < SBD_NUM_MC_PER_BOARD; i++) {
		mc_dip = mp->sbm_dip[i];
		if (mc_dip == NULL) {
			/* dips are packed; first NULL ends the list. */
			break;
		}

		if (sbdphw_get_base_physaddr(hdp, mc_dip, &mc_pa)) {
			cmn_err(CE_NOTE, "No mem on board %d unit %d",
			    sbp->sb_num, i);
			break;
		}
		if (mc_pa < lowest_pa) {
			/* New lowest-addressed MC wins slot 0. */
			mp->sbm_cm.sbdev_dip = mc_dip;
			devlist[0] = mc_dip;
			lowest_pa = mc_pa;
		}
	}

	sbd_release_sbdp_handle(hdp);
}
2357
2358 static int
sbd_name_to_idx(char * name)2359 sbd_name_to_idx(char *name)
2360 {
2361 int idx;
2362
2363 for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2364 if (strcmp(name, SBD_DEVNAME(idx)) == 0) {
2365 break;
2366 }
2367 }
2368
2369 return (idx);
2370 }
2371
2372 static int
sbd_otype_to_idx(char * otype)2373 sbd_otype_to_idx(char *otype)
2374 {
2375 int idx;
2376
2377 for (idx = 0; SBD_COMP(idx) != SBD_COMP_UNKNOWN; idx++) {
2378
2379 if (strcmp(otype, SBD_OTYPE(idx)) == 0) {
2380 break;
2381 }
2382 }
2383
2384 return (idx);
2385 }
2386
/*
 * (Re)build a board's device lists: reset all per-unit bookkeeping,
 * walk the devinfo tree under the board's top dip to discover
 * cpu/mem/io nodes, bring up the CPUs, and select the memory
 * devlist entry.  Returns the number of devices found (sb_ndev).
 */
static int
sbd_init_devlists(sbd_board_t *sbp)
{
	int		i;
	sbd_dev_unit_t	*dp;
	sbd_mem_unit_t	*mp;
	walk_tree_t	*wp, walk = {0};
	dev_info_t	*pdip;
	static fn_t	f = "sbd_init_devlists";

	PR_ALL("%s (board = %d)...\n", f, sbp->sb_num);

	wp = &walk;

	SBD_DEVS_DISCONNECT(sbp, (uint_t)-1);

	/*
	 * Clear out old entries, if any.
	 */

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_MEMUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_MEM;
	}

	/* MC dips are all tracked in mem unit 0; clear that array too. */
	mp = SBD_GET_BOARD_MEMUNIT(sbp, 0);
	ASSERT(mp != NULL);
	for (i = 0; i < SBD_NUM_MC_PER_BOARD; i++) {
		mp->sbm_dip[i] = NULL;
	}

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_CPU)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_CPUUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_CPU;
	}
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_devlist[NIX(SBD_COMP_IO)][i] = NULL;
		dp = (sbd_dev_unit_t *)SBD_GET_BOARD_IOUNIT(sbp, i);
		dp->u_common.sbdev_sbp = sbp;
		dp->u_common.sbdev_unum = i;
		dp->u_common.sbdev_type = SBD_COMP_IO;
	}

	wp->sbp = sbp;
	wp->nmc = 0;
	sbp->sb_ndev = 0;

	/*
	 * ddi_walk_devs() requires that topdip's parent be held.
	 */
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip) {
		ndi_hold_devi(pdip);
		ndi_devi_enter(pdip);
	}
	ddi_walk_devs(sbp->sb_topdip, sbd_setup_devlists, (void *) wp);
	if (pdip) {
		ndi_devi_exit(pdip);
		ndi_rele_devi(pdip);
	}

	/*
	 * There is no point checking all the components if there
	 * are no devices.
	 */
	if (sbp->sb_ndev == 0) {
		sbp->sb_memaccess_ok = 0;
		return (sbp->sb_ndev);
	}

	/*
	 * Initialize cpu sections before calling sbd_init_mem_devlists
	 * which will access the mmus.
	 */
	sbp->sb_memaccess_ok = 1;
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i)) {
			sbd_init_cpu_unit(sbp, i);
			if (sbd_connect_cpu(sbp, i)) {
				/* Record the failure on the board handle. */
				SBD_SET_ERR(HD2MACHERR(MACHBD2HD(sbp)),
				    ESBD_CPUSTART);
			}

		}
	}

	if (sbp->sb_memaccess_ok) {
		sbd_init_mem_devlists(sbp);
	} else {
		cmn_err(CE_WARN, "unable to access memory on board %d",
		    sbp->sb_num);
	}

	return (sbp->sb_ndev);
}
2488
/*
 * Initialize the board's cpu-unit state structure for `unit':
 * capture the cpuid, implementation, cpu_flags and component
 * condition, then transition the unit to the state implied by its
 * current attach/present status.
 */
static void
sbd_init_cpu_unit(sbd_board_t *sbp, int unit)
{
	sbd_istate_t new_state;
	sbd_cpu_unit_t *cp;
	int cpuid;
	dev_info_t *dip;
	sbdp_handle_t *hdp;
	sbd_handle_t *hp = MACHBD2HD(sbp);
	extern kmutex_t cpu_lock;

	/* Derive the target state from the unit's current status. */
	if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONFIGURED;
	} else if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, unit)) {
		new_state = SBD_STATE_CONNECTED;
	} else {
		new_state = SBD_STATE_EMPTY;
	}

	dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][unit];

	cp = SBD_GET_BOARD_CPUUNIT(sbp, unit);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	cpuid = sbdp_get_cpuid(hdp, dip);

	cp->sbc_cpu_id = cpuid;

	/*
	 * NOTE(review): address-of-function test — presumably
	 * sbdp_cpu_get_impl is a weakly-bound platform entry point
	 * that may be absent; confirm against the sbdp module.
	 */
	if (&sbdp_cpu_get_impl)
		cp->sbc_cpu_impl = sbdp_cpu_get_impl(hdp, dip);
	else
		cp->sbc_cpu_impl = -1;

	/* cpu_lock protects the global cpu[] table. */
	mutex_enter(&cpu_lock);
	if ((cpuid >= 0) && cpu[cpuid])
		cp->sbc_cpu_flags = cpu[cpuid]->cpu_flags;
	else
		cp->sbc_cpu_flags = CPU_OFFLINE | CPU_POWEROFF;
	mutex_exit(&cpu_lock);

	sbd_cpu_set_prop(cp, dip);

	cp->sbc_cm.sbdev_cond = sbd_get_comp_cond(dip);
	sbd_release_sbdp_handle(hdp);

	/*
	 * Any changes to the cpu should be performed above
	 * this call to ensure the cpu is fully initialized
	 * before transitioning to the new state.
	 */
	SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, unit, new_state);
}
2542
/*
 * Connect a board: only do work if called to operate on an entire
 * board which doesn't already have components present.  On success
 * the board transitions to SBD_STATE_CONNECTED; errors are reported
 * through the handle's error structure.
 */
static void
sbd_connect(sbd_handle_t *hp)
{
	sbd_board_t *sbp;
	sbderror_t *ep;
	static fn_t f = "sbd_connect";

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("%s board %d\n", f, sbp->sb_num);

	ep = HD2MACHERR(hp);

	if (SBD_DEVS_PRESENT(sbp)) {
		/*
		 * Board already has devices present.
		 */
		PR_ALL("%s: devices already present (0x%x)\n",
		    f, SBD_DEVS_PRESENT(sbp));
		SBD_SET_ERRNO(ep, EINVAL);
		return;
	}

	if (sbd_init_devlists(sbp) == 0) {
		cmn_err(CE_WARN, "%s: no devices present on board %d",
		    f, sbp->sb_num);
		SBD_SET_ERR(ep, ESBD_NODEV);
		return;
	} else {
		int i;

		/*
		 * Initialize mem-unit section of board structure.
		 */
		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
				sbd_init_mem_unit(sbp, i, SBD_HD2ERR(hp));

		/*
		 * Initialize sb_io sections.
		 */
		for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
				sbd_init_io_unit(sbp, i);

		/*
		 * Board is connected but not configured; stamp the
		 * transition time for status reporting.
		 */
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		sbp->sb_rstate = SBD_STAT_CONNECTED;
		sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
		SBD_INJECT_ERR(SBD_CONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
		    ESBD_INTERNAL, NULL);
	}
}
2600
2601 static int
sbd_disconnect(sbd_handle_t * hp)2602 sbd_disconnect(sbd_handle_t *hp)
2603 {
2604 int i;
2605 sbd_devset_t devset;
2606 sbd_board_t *sbp;
2607 static fn_t f = "sbd_disconnect it";
2608
2609 PR_ALL("%s ...\n", f);
2610
2611 sbp = SBDH2BD(hp->h_sbd);
2612
2613 /*
2614 * Only devices which are present, but
2615 * unattached can be disconnected.
2616 */
2617 devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_PRESENT(sbp) &
2618 SBD_DEVS_UNATTACHED(sbp);
2619
2620 ASSERT((SBD_DEVS_ATTACHED(sbp) & devset) == 0);
2621
2622 /*
2623 * Update per-device state transitions.
2624 */
2625
2626 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++)
2627 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
2628 if (sbd_disconnect_mem(hp, i) == 0) {
2629 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
2630 SBD_STATE_EMPTY);
2631 SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
2632 }
2633 }
2634
2635 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++)
2636 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, i)) {
2637 if (sbd_disconnect_cpu(hp, i) == 0) {
2638 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
2639 SBD_STATE_EMPTY);
2640 SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_CPU, i);
2641 }
2642 }
2643
2644 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++)
2645 if (DEVSET_IN_SET(devset, SBD_COMP_IO, i)) {
2646 if (sbd_disconnect_io(hp, i) == 0) {
2647 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
2648 SBD_STATE_EMPTY);
2649 SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_IO, i);
2650 }
2651 }
2652
2653 /*
2654 * Once all the components on a board have been disconnect
2655 * the board's state can transition to disconnected and
2656 * we can allow the deprobe to take place.
2657 */
2658 if (SBD_DEVS_PRESENT(sbp) == 0) {
2659 SBD_BOARD_TRANSITION(sbp, SBD_STATE_OCCUPIED);
2660 sbp->sb_rstate = SBD_STAT_DISCONNECTED;
2661 sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
2662 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
2663 SBD_INJECT_ERR(SBD_DISCONNECT_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2664 ESBD_INTERNAL, NULL);
2665 return (0);
2666 } else {
2667 cmn_err(CE_WARN, "%s: could not disconnect devices on board %d",
2668 f, sbp->sb_num);
2669 return (-1);
2670 }
2671 }
2672
2673 static void
sbd_test_board(sbd_handle_t * hp)2674 sbd_test_board(sbd_handle_t *hp)
2675 {
2676 sbd_board_t *sbp;
2677 sbdp_handle_t *hdp;
2678
2679 sbp = SBDH2BD(hp->h_sbd);
2680
2681 PR_ALL("sbd_test_board: board %d\n", sbp->sb_num);
2682
2683
2684 hdp = sbd_get_sbdp_handle(sbp, hp);
2685
2686 if (sbdp_test_board(hdp, &hp->h_opts) != 0) {
2687 sbderror_t *ep = SBD_HD2ERR(hp);
2688
2689 SBD_GET_PERR(hdp->h_err, ep);
2690 }
2691
2692 SBD_INJECT_ERR(SBD_TEST_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2693 ESBD_INTERNAL, NULL);
2694
2695 sbd_release_sbdp_handle(hdp);
2696 }
2697
2698 static void
sbd_assign_board(sbd_handle_t * hp)2699 sbd_assign_board(sbd_handle_t *hp)
2700 {
2701 sbd_board_t *sbp;
2702 sbdp_handle_t *hdp;
2703
2704 sbp = SBDH2BD(hp->h_sbd);
2705
2706 PR_ALL("sbd_assign_board: board %d\n", sbp->sb_num);
2707
2708 hdp = sbd_get_sbdp_handle(sbp, hp);
2709
2710 if (sbdp_assign_board(hdp) != 0) {
2711 sbderror_t *ep = SBD_HD2ERR(hp);
2712
2713 SBD_GET_PERR(hdp->h_err, ep);
2714 }
2715
2716 SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2717 ESBD_INTERNAL, NULL);
2718
2719 sbd_release_sbdp_handle(hdp);
2720 }
2721
/*
 * Unassign the board from this domain via the platform layer,
 * copying any sbdp error into the handle on failure.
 */
static void
sbd_unassign_board(sbd_handle_t *hp)
{
	sbd_board_t *sbp;
	sbdp_handle_t *hdp;

	sbp = SBDH2BD(hp->h_sbd);

	PR_ALL("sbd_unassign_board: board %d\n", sbp->sb_num);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	if (sbdp_unassign_board(hdp) != 0) {
		sbderror_t *ep = SBD_HD2ERR(hp);

		SBD_GET_PERR(hdp->h_err, ep);
	}

	/*
	 * NOTE(review): this injects under the ASSIGN pseudo-error
	 * tag even though we are unassigning.  Possibly intentional
	 * (shared tag), possibly a copy/paste slip — confirm whether
	 * an SBD_UNASSIGN_BOARD_PSEUDO_ERR tag exists in sbdpriv.h.
	 */
	SBD_INJECT_ERR(SBD_ASSIGN_BOARD_PSEUDO_ERR, hp->h_err, EIO,
	    ESBD_INTERNAL, NULL);

	sbd_release_sbdp_handle(hdp);
}
2745
2746 static void
sbd_poweron_board(sbd_handle_t * hp)2747 sbd_poweron_board(sbd_handle_t *hp)
2748 {
2749 sbd_board_t *sbp;
2750 sbdp_handle_t *hdp;
2751
2752 sbp = SBDH2BD(hp->h_sbd);
2753
2754 PR_ALL("sbd_poweron_board: %d\n", sbp->sb_num);
2755
2756 hdp = sbd_get_sbdp_handle(sbp, hp);
2757
2758 if (sbdp_poweron_board(hdp) != 0) {
2759 sbderror_t *ep = SBD_HD2ERR(hp);
2760
2761 SBD_GET_PERR(hdp->h_err, ep);
2762 }
2763
2764 SBD_INJECT_ERR(SBD_POWERON_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2765 ESBD_INTERNAL, NULL);
2766
2767 sbd_release_sbdp_handle(hdp);
2768 }
2769
2770 static void
sbd_poweroff_board(sbd_handle_t * hp)2771 sbd_poweroff_board(sbd_handle_t *hp)
2772 {
2773 sbd_board_t *sbp;
2774 sbdp_handle_t *hdp;
2775
2776 sbp = SBDH2BD(hp->h_sbd);
2777
2778 PR_ALL("sbd_poweroff_board: %d\n", sbp->sb_num);
2779
2780 hdp = sbd_get_sbdp_handle(sbp, hp);
2781
2782 if (sbdp_poweroff_board(hdp) != 0) {
2783 sbderror_t *ep = SBD_HD2ERR(hp);
2784
2785 SBD_GET_PERR(hdp->h_err, ep);
2786 }
2787
2788 SBD_INJECT_ERR(SBD_POWEROFF_BOARD_PSEUDO_ERR, hp->h_err, EIO,
2789 ESBD_INTERNAL, NULL);
2790
2791 sbd_release_sbdp_handle(hdp);
2792 }
2793
2794
/*
 * Return a list of the dip's of devices that are
 * either present and attached, or present only but
 * not yet attached for the given board.
 *
 * The caller owns the returned GETSTRUCT() array of up to
 * max_units entries and must free it; NULL is returned (and
 * *count set to 0) when no matching devices are found.
 */
sbd_devlist_t *
sbd_get_devlist(sbd_handle_t *hp, sbd_board_t *sbp, sbd_comp_type_t nodetype,
	int max_units, uint_t uset, int *count, int present_only)
{
	int i, ix;
	sbd_devlist_t *ret_devlist;
	dev_info_t **devlist;
	sbdp_handle_t *hdp;

	*count = 0;
	ret_devlist = GETSTRUCT(sbd_devlist_t, max_units);
	devlist = sbp->sb_devlist[NIX(nodetype)];
	/*
	 * Turn into binary value since we're going
	 * to be using XOR for a comparison.
	 * if (present_only) then
	 *	dev must be PRESENT, but NOT ATTACHED.
	 * else
	 *	dev must be PRESENT AND ATTACHED.
	 * endif
	 */
	if (present_only)
		present_only = 1;

	hdp = sbd_get_sbdp_handle(sbp, hp);

	/* Stop early once every requested unit (uset) has been seen. */
	for (i = ix = 0; (i < max_units) && uset; i++) {
		int ut, is_present, is_attached;
		dev_info_t *dip;
		sbderror_t *ep = SBD_HD2ERR(hp);
		int nunits, distance, j;

		/*
		 * For CMPs, we would like to perform DR operation on
		 * all the cores before moving onto the next chip.
		 * Therefore, when constructing the devlist, we process
		 * all the cores together.
		 */
		if (nodetype == SBD_COMP_CPU) {
			/*
			 * Number of units to process in the inner loop
			 */
			nunits = MAX_CORES_PER_CMP;
			/*
			 * The distance between the units in the
			 * board's sb_devlist structure.
			 */
			distance = MAX_CMP_UNITS_PER_BOARD;
		} else {
			nunits = 1;
			distance = 0;
		}

		for (j = 0; j < nunits; j++) {
			if ((dip = devlist[i + j * distance]) == NULL)
				continue;

			ut = sbdp_get_unit_num(hdp, dip);

			/*
			 * A bad unit number is logged but processing
			 * continues; the uset test below will skip it.
			 */
			if (ut == -1) {
				SBD_GET_PERR(hdp->h_err, ep);
				PR_ALL("sbd_get_devlist bad unit %d"
				    " code %d errno %d",
				    i, ep->e_code, ep->e_errno);
			}

			if ((uset & (1 << ut)) == 0)
				continue;
			uset &= ~(1 << ut);
			is_present =
			    SBD_DEV_IS_PRESENT(sbp, nodetype, ut) ? 1 : 0;
			is_attached =
			    SBD_DEV_IS_ATTACHED(sbp, nodetype, ut) ? 1 : 0;

			/*
			 * XOR selects: present_only==1 wants !attached,
			 * present_only==0 wants attached.
			 */
			if (is_present && (present_only ^ is_attached)) {
				ret_devlist[ix].dv_dip = dip;
				sbd_init_err(&ret_devlist[ix].dv_error);
				ix++;
			}
		}
	}
	sbd_release_sbdp_handle(hdp);

	/* Nothing matched: free the array and return NULL. */
	if ((*count = ix) == 0) {
		FREESTRUCT(ret_devlist, sbd_devlist_t, max_units);
		ret_devlist = NULL;
	}

	return (ret_devlist);
}
2890
/*
 * Produce, one component type per pass, the list of devices to
 * attach: cpus (pass 1), then memory, then I/O.  Returns NULL when
 * nothing remains.
 *
 * NOTE(review): the static next_pass makes this function
 * non-reentrant; presumably DR operations are serialized by the
 * framework — confirm before relying on concurrent use.
 */
static sbd_devlist_t *
sbd_get_attach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t *sbp;
	uint_t uset;
	sbd_devset_t devset;
	sbd_devlist_t *attach_devlist;
	static int next_pass = 1;
	static fn_t f = "sbd_get_attach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	attach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		/* Pass 1: cpus (present but not yet attached). */
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD, uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 2;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		/* Pass 2: memory units. */
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD, uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 3;
				return (attach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		/* Pass 3: I/O units; after this we are done. */
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			attach_devlist = sbd_get_devlist(hp, sbp, SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD, uset, devnump, 1);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || attach_devlist) {
				next_pass = 4;
				return (attach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
2988
/*
 * Run the per-component pre-attach hook for a devlist produced by
 * sbd_get_attach_devlist().  On failure the devlist (and its error
 * strings) is freed here, since the attach sequence will not reach
 * the post-op that normally frees it.
 */
static int
sbd_pre_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist, int32_t devnum)
{
	int max_units = 0, rv = 0;
	sbd_comp_type_t nodetype;
	static fn_t f = "sbd_pre_attach_devlist";

	/*
	 * In this driver, all entries in a devlist[] are
	 * of the same nodetype.
	 */
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_attach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_pre_attach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		/* No I/O pre-attach work; just note the list size. */
		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int i;
		/*
		 * Need to clean up devlist
		 * if pre-op is going to fail.
		 * Error strings are contiguous from index 0; stop at
		 * the first entry without one.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	/*
	 * Return -1 if the pre-op failed (the devlist has already
	 * been freed above), 0 otherwise.
	 */
	return (rv ? -1 : 0);
}
3049
3050 static int
sbd_post_attach_devlist(sbd_handle_t * hp,sbd_devlist_t * devlist,int32_t devnum)3051 sbd_post_attach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
3052 int32_t devnum)
3053 {
3054 int i, max_units = 0, rv = 0;
3055 sbd_devset_t devs_unattached, devs_present;
3056 sbd_comp_type_t nodetype;
3057 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
3058 sbdp_handle_t *hdp;
3059 static fn_t f = "sbd_post_attach_devlist";
3060
3061 sbp = SBDH2BD(hp->h_sbd);
3062 nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3063
3064 PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3065 f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3066
3067 hdp = sbd_get_sbdp_handle(sbp, hp);
3068
3069 /*
3070 * Need to free up devlist[] created earlier in
3071 * sbd_get_attach_devlist().
3072 */
3073 switch (nodetype) {
3074 case SBD_COMP_CPU:
3075 max_units = MAX_CPU_UNITS_PER_BOARD;
3076 rv = sbd_post_attach_cpu(hp, devlist, devnum);
3077 break;
3078
3079
3080 case SBD_COMP_MEM:
3081 max_units = MAX_MEM_UNITS_PER_BOARD;
3082
3083 rv = sbd_post_attach_mem(hp, devlist, devnum);
3084 break;
3085
3086 case SBD_COMP_IO:
3087 max_units = MAX_IO_UNITS_PER_BOARD;
3088 break;
3089
3090 default:
3091 rv = -1;
3092 break;
3093 }
3094
3095
3096 for (i = 0; i < devnum; i++) {
3097 int unit;
3098 dev_info_t *dip;
3099 sbderror_t *ep;
3100
3101 ep = &devlist[i].dv_error;
3102
3103 if (sbd_set_err_in_hdl(hp, ep) == 0)
3104 continue;
3105
3106 dip = devlist[i].dv_dip;
3107 nodetype = sbd_get_devtype(hp, dip);
3108 unit = sbdp_get_unit_num(hdp, dip);
3109
3110 if (unit == -1) {
3111 SBD_GET_PERR(hdp->h_err, ep);
3112 continue;
3113 }
3114
3115 unit = sbd_check_unit_attached(sbp, dip, unit, nodetype, ep);
3116
3117 if (unit == -1) {
3118 PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not attached\n",
3119 f, sbd_ct_str[(int)nodetype], sbp->sb_num, i);
3120 continue;
3121 }
3122
3123 SBD_DEV_SET_ATTACHED(sbp, nodetype, unit);
3124 SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
3125 SBD_STATE_CONFIGURED);
3126 }
3127 sbd_release_sbdp_handle(hdp);
3128
3129 if (rv) {
3130 PR_ALL("%s: errno %d, ecode %d during attach\n",
3131 f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
3132 SBD_GET_ERR(HD2MACHERR(hp)));
3133 }
3134
3135 devs_present = SBD_DEVS_PRESENT(sbp);
3136 devs_unattached = SBD_DEVS_UNATTACHED(sbp);
3137
3138 switch (SBD_BOARD_STATE(sbp)) {
3139 case SBD_STATE_CONNECTED:
3140 case SBD_STATE_UNCONFIGURED:
3141 ASSERT(devs_present);
3142
3143 if (devs_unattached == 0) {
3144 /*
3145 * All devices finally attached.
3146 */
3147 SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3148 sbp->sb_rstate = SBD_STAT_CONNECTED;
3149 sbp->sb_ostate = SBD_STAT_CONFIGURED;
3150 } else if (devs_present != devs_unattached) {
3151 /*
3152 * Only some devices are fully attached.
3153 */
3154 SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
3155 sbp->sb_rstate = SBD_STAT_CONNECTED;
3156 sbp->sb_ostate = SBD_STAT_UNCONFIGURED;
3157 }
3158 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
3159 break;
3160
3161 case SBD_STATE_PARTIAL:
3162 ASSERT(devs_present);
3163 /*
3164 * All devices finally attached.
3165 */
3166 if (devs_unattached == 0) {
3167 SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
3168 sbp->sb_rstate = SBD_STAT_CONNECTED;
3169 sbp->sb_ostate = SBD_STAT_CONFIGURED;
3170 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
3171 }
3172 break;
3173
3174 default:
3175 break;
3176 }
3177
3178 if (max_units && devlist) {
3179 int i;
3180
3181 for (i = 0; i < max_units; i++) {
3182 if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
3183 SBD_FREE_ERR(&devlist[i].dv_error);
3184 } else {
3185 break;
3186 }
3187 }
3188 FREESTRUCT(devlist, sbd_devlist_t, max_units);
3189 }
3190
3191 /*
3192 * Our policy is to attach all components that are
3193 * possible, thus we always return "success" on the
3194 * pre and post operations.
3195 */
3196 return (0);
3197 }
3198
/*
 * We only need to "release" cpu and memory devices.
 *
 * Produce, one component type per pass, the list of attached
 * devices to release: memory first (pass 1), then cpus, then I/O.
 * Returns NULL when nothing remains.  Uses the same static
 * next_pass scheme as sbd_get_attach_devlist() — non-reentrant.
 */
static sbd_devlist_t *
sbd_get_release_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	sbd_board_t *sbp;
	uint_t uset;
	sbd_devset_t devset;
	sbd_devlist_t *release_devlist;
	static int next_pass = 1;
	static fn_t f = "sbd_get_release_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	release_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		/* Pass 1: memory must be released before cpus. */
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 2;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/


	case 2:
		/* Pass 2: cpus. */
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 3;
				return (release_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/


	case 3:
		/* Pass 3: I/O units; after this we are done. */
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			release_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || release_devlist) {
				next_pass = 4;
				return (release_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3306
/*
 * Run the per-component pre-release hook for a devlist.  For cpus
 * this first verifies no memory will remain configured on the board
 * (memory must be released before cpus).  On failure the devlist is
 * freed here and -1 is returned.
 */
static int
sbd_pre_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int max_units = 0, rv = 0;
	sbd_comp_type_t nodetype;
	static fn_t f = "sbd_pre_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	switch (nodetype) {
	case SBD_COMP_CPU: {
		int i, mem_present = 0;
		sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
		sbd_devset_t devset;
		sbd_priv_handle_t *shp = HD2MACHHD(hp);

		max_units = MAX_CPU_UNITS_PER_BOARD;

		devset = shp->sh_orig_devset;

		for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
			/*
			 * if client also requested to unconfigure memory
			 * the we allow the operation. Therefore
			 * we need to warranty that memory gets unconfig
			 * before cpus
			 */

			if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i)) {
				continue;
			}
			if (SBD_DEV_IS_ATTACHED(sbp, SBD_COMP_MEM, i)) {
				mem_present = 1;
				break;
			}
		}
		if (mem_present) {
			/*
			 * i still indexes the offending mem unit here —
			 * the loop above exits via break when
			 * mem_present is set.
			 */
			sbderror_t *ep = SBD_HD2ERR(hp);
			SBD_SET_ERR(ep, ESBD_MEMONLINE);
			SBD_SET_ERRSTR(ep, sbp->sb_mempath[i]);
			rv = -1;
		} else {
			rv = sbd_pre_release_cpu(hp, devlist, devnum);
		}

		break;

	}
	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_pre_release_mem(hp, devlist, devnum);
		break;


	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_pre_release_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}

	if (rv && max_units) {
		int i;

		/*
		 * the individual pre_release component routines should
		 * have set the error in the handle. No need to set it
		 * here
		 *
		 * Need to clean up dynamically allocated devlist
		 * if pre-op is going to fail.
		 */
		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (rv ? -1 : 0);
}
3398
/*
 * Finish the release of a devlist: propagate per-device errors into
 * the handle, validate unit numbers, and free the devlist allocated
 * by sbd_get_release_devlist().  Returns -1 if an errno has been
 * recorded in the handle, else 0.
 */
static int
sbd_post_release_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int i, max_units = 0;
	sbd_comp_type_t nodetype;
	sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
	sbdp_handle_t *hdp;
	sbd_error_t *spe;
	static fn_t f = "sbd_post_release_devlist";

	nodetype = sbd_get_devtype(hp, devlist->dv_dip);
	ASSERT(nodetype >= SBD_COMP_CPU && nodetype <= SBD_COMP_IO);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_release_devlist().
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		break;

	case SBD_COMP_IO:
		/*
		 * Need to check if specific I/O is referenced and
		 * fail post-op.
		 */

		if (sbd_check_io_refs(hp, devlist, devnum) > 0) {
			PR_IO("%s: error - I/O devices ref'd\n", f);
		}

		max_units = MAX_IO_UNITS_PER_BOARD;
		break;

	default:
	{
		cmn_err(CE_WARN, "%s: invalid nodetype (%d)",
		    f, (int)nodetype);
		SBD_SET_ERR(HD2MACHERR(hp), ESBD_INVAL);
	}
	break;
	}
	hdp = sbd_get_sbdp_handle(sbp, hp);
	/* spe aliases the sbdp error for the diagnostic below. */
	spe = hdp->h_err;

	/*
	 * Propagate per-device errors into the handle and sanity
	 * check each device's unit number; nothing else is done with
	 * the unit here.
	 */
	for (i = 0; i < devnum; i++) {
		int unit;
		sbderror_t *ep;

		ep = &devlist[i].dv_error;

		if (sbd_set_err_in_hdl(hp, ep) == 0) {
			continue;
		}

		unit = sbdp_get_unit_num(hdp, devlist[i].dv_dip);
		if (unit == -1) {
			SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
			PR_ALL("%s bad unit num: %d code %d",
			    f, unit, spe->e_code);
			continue;
		}
	}
	sbd_release_sbdp_handle(hdp);

	if (SBD_GET_ERRNO(SBD_HD2ERR(hp))) {
		PR_ALL("%s: errno %d, ecode %d during release\n",
		    f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
		    SBD_GET_ERR(SBD_HD2ERR(hp)));
	}

	/*
	 * Free the devlist; error strings are contiguous from index
	 * 0, so stop at the first entry without one.
	 */
	if (max_units && devlist) {
		int i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3494
/*
 * Mark a single unit's release complete: flag it unreferenced and
 * transition it to the UNREFERENCED state.
 */
static void
sbd_release_dev_done(sbd_board_t *sbp, sbd_comp_type_t nodetype, int unit)
{
	SBD_DEV_SET_UNREFERENCED(sbp, nodetype, unit);
	SBD_DEVICE_TRANSITION(sbp, nodetype, unit, SBD_STATE_UNREFERENCED);
}
3501
/*
 * Completion callback for a device release: move the device to the
 * UNREFERENCED state (memory has its own completion path) and, if
 * every released device on the board is now unreferenced, move the
 * board to UNREFERENCED too.
 */
static void
sbd_release_done(sbd_handle_t *hp, sbd_comp_type_t nodetype, dev_info_t *dip)
{
	int unit;
	sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
	sbderror_t *ep;
	static fn_t f = "sbd_release_done";
	sbdp_handle_t *hdp;

	PR_ALL("%s...\n", f);

	hdp = sbd_get_sbdp_handle(sbp, hp);
	ep = SBD_HD2ERR(hp);

	/* Without a valid unit number nothing can be transitioned. */
	if ((unit = sbdp_get_unit_num(hdp, dip)) < 0) {
		cmn_err(CE_WARN,
		    "sbd:%s: unable to get unit for dip (0x%p)",
		    f, (void *)dip);
		SBD_GET_PERR(hdp->h_err, ep);
		sbd_release_sbdp_handle(hdp);
		return;
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * Transfer the device which just completed its release
	 * to the UNREFERENCED state.
	 */
	switch (nodetype) {

	case SBD_COMP_MEM:
		sbd_release_mem_done((void *)hp, unit);
		break;

	default:
		sbd_release_dev_done(sbp, nodetype, unit);
		break;
	}

	/*
	 * If the entire board was released and all components
	 * unreferenced then transfer it to the UNREFERENCED state.
	 */
	if (SBD_DEVS_RELEASED(sbp) == SBD_DEVS_UNREFERENCED(sbp)) {
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNREFERENCED);
		(void) drv_getparm(TIME, (void *)&sbp->sb_time);
	}
}
3550
3551 static sbd_devlist_t *
sbd_get_detach_devlist(sbd_handle_t *hp, int32_t *devnump, int32_t pass)
{
	/*
	 * Build the list of devices to detach for this pass, in the
	 * order memory (pass 1), cpus (pass 2), io (pass 3).  Stores
	 * the number of entries in *devnump and returns the devlist,
	 * or NULL when there is nothing left to detach.
	 */
	sbd_board_t	*sbp;
	uint_t		uset;
	sbd_devset_t	devset;
	sbd_devlist_t	*detach_devlist;
	/* resume point carried across passes (DR ops are serialized) */
	static int	next_pass = 1;
	static fn_t	f = "sbd_get_detach_devlist";

	PR_ALL("%s (pass = %d)...\n", f, pass);

	sbp = SBDH2BD(hp->h_sbd);
	devset = HD2MACHHD(hp)->sh_devset;

	*devnump = 0;
	detach_devlist = NULL;

	/*
	 * We switch on next_pass for the cases where a board
	 * does not contain a particular type of component.
	 * In these situations we don't want to return NULL
	 * prematurely.  We need to check other devices and
	 * we don't want to check the same type multiple times.
	 * For example, if there were no cpus, then on pass 1
	 * we would drop through and return the memory nodes.
	 * However, on pass 2 we would switch back to the memory
	 * nodes thereby returning them twice!  Using next_pass
	 * forces us down to the end (or next item).
	 */
	if (pass == 1)
		next_pass = 1;

	switch (next_pass) {
	case 1:
		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_MEM);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_MEM,
			    MAX_MEM_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_MEM, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				next_pass = 2;
				return (detach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there isn't any memory, then
			 * just fall through to next component.
			 */
		}
		/*FALLTHROUGH*/

	case 2:
		if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_CPU);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_CPU,
			    MAX_CPU_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_CPU, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				/*
				 * NOTE(review): this re-arms case 2 rather
				 * than advancing to case 3 (io), unlike the
				 * memory case above.  It appears to depend
				 * on the following pass finding no further
				 * detachable cpus and falling through to
				 * case 3 -- confirm against the
				 * sbd_get_devlist() semantics.
				 */
				next_pass = 2;
				return (detach_devlist);
			}
			/*
			 * If the caller is interested in the entire
			 * board, but there aren't any cpus, then just
			 * fall through to check for the next component.
			 */
		}
		/*FALLTHROUGH*/

	case 3:
		next_pass = -1;
		if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT)) {
			uset = DEVSET_GET_UNITSET(devset, SBD_COMP_IO);

			detach_devlist = sbd_get_devlist(hp, sbp,
			    SBD_COMP_IO,
			    MAX_IO_UNITS_PER_BOARD,
			    uset, devnump, 0);

			DEVSET_DEL(devset, SBD_COMP_IO, DEVSET_ANYUNIT);
			if (!devset || detach_devlist) {
				next_pass = 4;
				return (detach_devlist);
			}
		}
		/*FALLTHROUGH*/

	default:
		*devnump = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}
3653
3654 static int
sbd_pre_detach_devlist(sbd_handle_t * hp,sbd_devlist_t * devlist,int32_t devnum)3655 sbd_pre_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist, int32_t devnum)
3656 {
3657 int rv = 0;
3658 sbd_comp_type_t nodetype;
3659 static fn_t f = "sbd_pre_detach_devlist";
3660
3661 nodetype = sbd_get_devtype(hp, devlist->dv_dip);
3662
3663 PR_ALL("%s (nt = %s(%d), num = %d)...\n",
3664 f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);
3665
3666 switch (nodetype) {
3667 case SBD_COMP_CPU:
3668 rv = sbd_pre_detach_cpu(hp, devlist, devnum);
3669 break;
3670
3671 case SBD_COMP_MEM:
3672 rv = sbd_pre_detach_mem(hp, devlist, devnum);
3673 break;
3674
3675 case SBD_COMP_IO:
3676 rv = sbd_pre_detach_io(hp, devlist, devnum);
3677 break;
3678
3679 default:
3680 rv = -1;
3681 break;
3682 }
3683
3684 /*
3685 * We want to continue attempting to detach
3686 * other components.
3687 */
3688 return (rv);
3689 }
3690
/*
 * Second phase of detach: run the type-specific post-detach handler,
 * update per-unit and board state to reflect which devices actually
 * detached, then free the devlist allocated by sbd_get_detach_devlist().
 * Returns -1 if an errno was recorded in the handle, otherwise 0.
 */
static int
sbd_post_detach_devlist(sbd_handle_t *hp, sbd_devlist_t *devlist,
	int32_t devnum)
{
	int		i, max_units = 0, rv = 0;
	sbd_comp_type_t	nodetype;
	sbd_board_t	*sbp;
	sbd_istate_t	bstate;
	static fn_t	f = "sbd_post_detach_devlist";
	sbdp_handle_t	*hdp;

	sbp = SBDH2BD(hp->h_sbd);
	nodetype = sbd_get_devtype(hp, devlist->dv_dip);

	hdp = sbd_get_sbdp_handle(sbp, hp);

	PR_ALL("%s (nt = %s(%d), num = %d)...\n",
	    f, sbd_ct_str[(int)nodetype], (int)nodetype, devnum);

	/*
	 * Need to free up devlist[] created earlier in
	 * sbd_get_detach_devlist().  max_units records the devlist
	 * allocation size for the FREESTRUCT below.
	 */
	switch (nodetype) {
	case SBD_COMP_CPU:
		max_units = MAX_CPU_UNITS_PER_BOARD;
		rv = sbd_post_detach_cpu(hp, devlist, devnum);
		break;

	case SBD_COMP_MEM:
		max_units = MAX_MEM_UNITS_PER_BOARD;
		rv = sbd_post_detach_mem(hp, devlist, devnum);
		break;

	case SBD_COMP_IO:
		max_units = MAX_IO_UNITS_PER_BOARD;
		rv = sbd_post_detach_io(hp, devlist, devnum);
		break;

	default:
		rv = -1;
		break;
	}


	/*
	 * For every unit that is no longer attached, clear its
	 * attached/released/unreferenced flags and move it to the
	 * UNCONFIGURED state; units still attached keep their state.
	 */
	for (i = 0; i < devnum; i++) {
		int		unit;
		sbderror_t	*ep;
		dev_info_t	*dip;

		ep = &devlist[i].dv_error;

		/* Propagate the per-device error into the handle. */
		if (sbd_set_err_in_hdl(hp, ep) == 0)
			continue;

		dip = devlist[i].dv_dip;
		unit = sbdp_get_unit_num(hdp, dip);
		if (unit == -1) {
			/* unknown unit: skip when forced, else give up */
			if (hp->h_flags & SBD_IOCTL_FLAG_FORCE)
				continue;
			else {
				SBD_GET_PERR(hdp->h_err, ep);
				break;
			}
		}
		nodetype = sbd_get_devtype(hp, dip);

		if (sbd_check_unit_attached(sbp, dip, unit, nodetype,
		    ep) >= 0) {
			/*
			 * Device is still attached probably due
			 * to an error. Need to keep track of it.
			 */
			PR_ALL("%s: ERROR (nt=%s, b=%d, u=%d) not detached\n",
			    f, sbd_ct_str[(int)nodetype], sbp->sb_num,
			    unit);
			continue;
		}

		SBD_DEV_CLR_ATTACHED(sbp, nodetype, unit);
		SBD_DEV_CLR_RELEASED(sbp, nodetype, unit);
		SBD_DEV_CLR_UNREFERENCED(sbp, nodetype, unit);
		SBD_DEVICE_TRANSITION(sbp, nodetype, unit,
		    SBD_STATE_UNCONFIGURED);
	}
	sbd_release_sbdp_handle(hdp);

	/* Recompute the board state from what remains attached. */
	bstate = SBD_BOARD_STATE(sbp);
	if (bstate != SBD_STATE_UNCONFIGURED) {
		if (SBD_DEVS_PRESENT(sbp) == SBD_DEVS_UNATTACHED(sbp)) {
			/*
			 * All devices are finally detached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_UNCONFIGURED);
		} else if ((SBD_BOARD_STATE(sbp) != SBD_STATE_PARTIAL) &&
		    SBD_DEVS_ATTACHED(sbp)) {
			/*
			 * Some devices remain attached.
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		}
	}

	if (rv) {
		PR_ALL("%s: errno %d, ecode %d during detach\n",
		    f, SBD_GET_ERRNO(SBD_HD2ERR(hp)),
		    SBD_GET_ERR(HD2MACHERR(hp)));
	}

	/*
	 * Free any per-device error strings, then the devlist itself.
	 * Note the inner 'i' deliberately shadows the loop index above.
	 */
	if (max_units && devlist) {
		int i;

		for (i = 0; i < max_units; i++) {
			if (SBD_GET_ERRSTR(&devlist[i].dv_error)) {
				SBD_FREE_ERR(&devlist[i].dv_error);
			} else {
				break;
			}
		}
		FREESTRUCT(devlist, sbd_devlist_t, max_units);
	}

	return (SBD_GET_ERRNO(SBD_HD2ERR(hp)) ? -1 : 0);
}
3815
/*
 * Return the unit number of the respective dip if
 * it's found to be attached, otherwise -1.  On failure the error
 * recorded in the sbdp handle, if any, is copied into ep.
 */
static int
sbd_check_unit_attached(sbd_board_t *sbp, dev_info_t *dip, int unit,
	sbd_comp_type_t nodetype, sbderror_t *ep)
{
	int		rv = -1;
	processorid_t	cpuid;
	uint64_t	basepa, endpa;
	struct memlist	*ml;
	extern struct memlist	*phys_install;
	sbdp_handle_t	*hdp;
	sbd_handle_t	*hp = MACHBD2HD(sbp);
	static fn_t	f = "sbd_check_unit_attached";

	hdp = sbd_get_sbdp_handle(sbp, hp);

	switch (nodetype) {

	case SBD_COMP_CPU:
		/* a cpu counts as attached iff cpu_get() knows it */
		cpuid = sbdp_get_cpuid(hdp, dip);
		if (cpuid < 0) {
			break;
		}
		mutex_enter(&cpu_lock);
		if (cpu_get(cpuid) != NULL)
			rv = unit;
		mutex_exit(&cpu_lock);
		break;

	case SBD_COMP_MEM:
		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
			break;
		}
		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
			break;
		}

		/* round basepa down to the alignment boundary */
		basepa &= ~(endpa - 1);
		endpa += basepa;
		/*
		 * Check if base address is in phys_install.
		 * The loop exits with ml != NULL iff the span
		 * [basepa, endpa) overlaps an installed segment.
		 */
		memlist_read_lock();
		for (ml = phys_install; ml; ml = ml->ml_next)
			if ((endpa <= ml->ml_address) ||
			    (basepa >= (ml->ml_address + ml->ml_size)))
				continue;
			else
				break;
		memlist_read_unlock();
		if (ml != NULL)
			rv = unit;
		break;

	case SBD_COMP_IO:
	{
		dev_info_t	*tdip, *pdip;

		tdip = dip;

		/*
		 * ddi_walk_devs() requires that topdip's parent be held.
		 */
		pdip = ddi_get_parent(sbp->sb_topdip);
		if (pdip) {
			ndi_hold_devi(pdip);
			ndi_devi_enter(pdip);
		}
		/*
		 * NOTE(review): sbd_check_io_attached() presumably
		 * NULLs out tdip when it finds the dip attached in
		 * the tree (tdip == NULL is treated as "attached"
		 * below) -- confirm against its definition.
		 */
		ddi_walk_devs(sbp->sb_topdip, sbd_check_io_attached,
		    (void *)&tdip);
		if (pdip) {
			ndi_devi_exit(pdip);
			ndi_rele_devi(pdip);
		}

		if (tdip == NULL)
			rv = unit;
		else
			rv = -1;
		break;
	}

	default:
		PR_ALL("%s: unexpected nodetype(%d) for dip 0x%p\n",
		    f, nodetype, (void *)dip);
		rv = -1;
		break;
	}

	/*
	 * Save the error that sbdp sent us and report it
	 */
	if (rv == -1)
		SBD_GET_PERR(hdp->h_err, ep);

	sbd_release_sbdp_handle(hdp);

	return (rv);
}
3919
3920 /*
3921 * Return memhandle, if in fact, this memunit is the owner of
3922 * a scheduled memory delete.
3923 */
3924 int
sbd_get_memhandle(sbd_handle_t * hp,dev_info_t * dip,memhandle_t * mhp)3925 sbd_get_memhandle(sbd_handle_t *hp, dev_info_t *dip, memhandle_t *mhp)
3926 {
3927 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
3928 sbd_mem_unit_t *mp;
3929 sbdp_handle_t *hdp;
3930 int unit;
3931 static fn_t f = "sbd_get_memhandle";
3932
3933 PR_MEM("%s...\n", f);
3934
3935 hdp = sbd_get_sbdp_handle(sbp, hp);
3936
3937 unit = sbdp_get_unit_num(hdp, dip);
3938 if (unit == -1) {
3939 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
3940 sbd_release_sbdp_handle(hdp);
3941 return (-1);
3942 }
3943 sbd_release_sbdp_handle(hdp);
3944
3945 mp = SBD_GET_BOARD_MEMUNIT(sbp, unit);
3946
3947 if (mp->sbm_flags & SBD_MFLAG_RELOWNER) {
3948 *mhp = mp->sbm_memhandle;
3949 return (0);
3950 } else {
3951 SBD_SET_ERR(SBD_HD2ERR(hp), ESBD_INTERNAL);
3952 SBD_SET_ERRSTR(SBD_HD2ERR(hp), sbp->sb_mempath[unit]);
3953 return (-1);
3954 }
3955 /*NOTREACHED*/
3956 }
3957
3958
3959 static int
sbd_cpu_cnt(sbd_handle_t * hp,sbd_devset_t devset)3960 sbd_cpu_cnt(sbd_handle_t *hp, sbd_devset_t devset)
3961 {
3962 int c, cix;
3963 sbd_board_t *sbp;
3964
3965 sbp = SBDH2BD(hp->h_sbd);
3966
3967 /*
3968 * Only look for requested devices that are actually present.
3969 */
3970 devset &= SBD_DEVS_PRESENT(sbp);
3971
3972 for (c = cix = 0; c < MAX_CMP_UNITS_PER_BOARD; c++) {
3973 /*
3974 * Index for core 1 , if exists.
3975 * With the current implementation it is
3976 * MAX_CMP_UNITS_PER_BOARD off from core 0.
3977 * The calculation will need to change if
3978 * the assumption is no longer true.
3979 */
3980 int c1 = c + MAX_CMP_UNITS_PER_BOARD;
3981
3982 if (DEVSET_IN_SET(devset, SBD_COMP_CMP, c) == 0) {
3983 continue;
3984 }
3985
3986 /*
3987 * Check to see if the dip(s) exist for this chip
3988 */
3989 if ((sbp->sb_devlist[NIX(SBD_COMP_CMP)][c] == NULL) &&
3990 (sbp->sb_devlist[NIX(SBD_COMP_CMP)][c1] == NULL))
3991 continue;
3992
3993 cix++;
3994 }
3995
3996 return (cix);
3997 }
3998
3999 static int
sbd_mem_cnt(sbd_handle_t * hp,sbd_devset_t devset)4000 sbd_mem_cnt(sbd_handle_t *hp, sbd_devset_t devset)
4001 {
4002 int i, ix;
4003 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
4004
4005 /*
4006 * Only look for requested devices that are actually present.
4007 */
4008 devset &= SBD_DEVS_PRESENT(sbp);
4009
4010 for (i = ix = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4011 dev_info_t *dip;
4012
4013 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, i) == 0) {
4014 continue;
4015 }
4016
4017 dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
4018 if (dip == NULL)
4019 continue;
4020
4021 ix++;
4022 }
4023
4024 return (ix);
4025 }
4026
/*
 * Fill one sbd_dev_stat_t per selected, present, initialized memory
 * unit, starting at dsp; returns the number of entries written.
 *
 * NOTE: This routine is only partially smart about multiple
 *	 mem-units. Need to make mem-status structure smart
 *	 about them also.
 */
static int
sbd_mem_status(sbd_handle_t *hp, sbd_devset_t devset, sbd_dev_stat_t *dsp)
{
	int		m, mix, rv;
	memdelstat_t	mdst;
	memquery_t	mq;
	sbd_board_t	*sbp;
	sbd_mem_unit_t	*mp;
	sbd_mem_stat_t	*msp;
	extern int	kcage_on;
	int		i;
	static fn_t	f = "sbd_mem_status";

	sbp = SBDH2BD(hp->h_sbd);

	/*
	 * Check the present devset and access the dip with
	 * status lock held to protect against a concurrent
	 * unconfigure or disconnect thread.
	 */
	mutex_enter(&sbp->sb_slock);

	/*
	 * Only look for requested devices that are actually present.
	 */
	devset &= SBD_DEVS_PRESENT(sbp);

	for (m = mix = 0; m < MAX_MEM_UNITS_PER_BOARD; m++) {
		dev_info_t	*dip;


		if (DEVSET_IN_SET(devset, SBD_COMP_MEM, m) == 0)
			continue;

		/*
		 * Check to make sure the memory unit is in a state
		 * where its fully initialized.
		 */
		if (SBD_DEVICE_STATE(sbp, SBD_COMP_MEM, m) == SBD_STATE_EMPTY)
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][m];
		if (dip == NULL)
			continue;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, m);

		msp = &dsp->d_mem;

		bzero((caddr_t)msp, sizeof (*msp));
		msp->ms_type = SBD_COMP_MEM;

		/*
		 * The plugin expects -1 for the mem unit
		 */
		msp->ms_cm.c_id.c_unit = -1;

		/*
		 * Get the memory name from what sbdp gave us
		 */
		for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
			if (SBD_COMP(i) == SBD_COMP_MEM) {
				(void) strcpy(msp->ms_name, SBD_DEVNAME(i));
			}
		}
		msp->ms_cm.c_cond = mp->sbm_cm.sbdev_cond;
		msp->ms_cm.c_busy = mp->sbm_cm.sbdev_busy;
		msp->ms_cm.c_time = mp->sbm_cm.sbdev_time;

		/* XXX revisit this after memory conversion */
		msp->ms_ostate = ostate_cvt(SBD_DEVICE_STATE(
		    sbp, SBD_COMP_MEM, m));

		msp->ms_basepfn = mp->sbm_basepfn;
		msp->ms_pageslost = mp->sbm_pageslost;
		msp->ms_cage_enabled = kcage_on;
		msp->ms_interleave = mp->sbm_interleave;

		/* only a memory-delete owner has a live memhandle */
		if (mp->sbm_flags & SBD_MFLAG_RELOWNER)
			rv = kphysm_del_status(mp->sbm_memhandle, &mdst);
		else
			rv = KPHYSM_EHANDLE;	/* force 'if' to fail */

		if (rv == KPHYSM_OK) {
			msp->ms_totpages += mdst.phys_pages;

			/*
			 * Any pages above managed is "free",
			 * i.e. it's collected.
			 */
			msp->ms_detpages += (uint_t)(mdst.collected +
			    mdst.phys_pages - mdst.managed);
		} else {
			msp->ms_totpages += (uint_t)mp->sbm_npages;

			/*
			 * If we're UNREFERENCED or UNCONFIGURED,
			 * then the number of detached pages is
			 * however many pages are on the board.
			 * I.e. detached = not in use by OS.
			 *
			 * NOTE(review): this relies on ms_ostate
			 * (set above) aliasing ms_cm.c_ostate --
			 * presumably a macro in sbd_ioctl.h; verify.
			 */
			switch (msp->ms_cm.c_ostate) {
			/*
			 * changed to use cfgadm states
			 *
			 * was:
			 *	case SFDR_STATE_UNREFERENCED:
			 *	case SFDR_STATE_UNCONFIGURED:
			 */
			case SBD_STAT_UNCONFIGURED:
				msp->ms_detpages = msp->ms_totpages;
				break;

			default:
				break;
			}
		}

		rv = kphysm_del_span_query(mp->sbm_basepfn,
		    mp->sbm_npages, &mq);
		if (rv == KPHYSM_OK) {
			msp->ms_managed_pages = mq.managed;
			msp->ms_noreloc_pages = mq.nonrelocatable;
			msp->ms_noreloc_first = mq.first_nonrelocatable;
			msp->ms_noreloc_last = mq.last_nonrelocatable;
			msp->ms_cm.c_sflags = 0;
			/* nonrelocatable pages mean unconfigure suspends */
			if (mq.nonrelocatable) {
				SBD_SET_SUSPEND(SBD_CMD_UNCONFIGURE,
				    dsp->ds_suspend);
			}
		} else {
			PR_MEM("%s: kphysm_del_span_query() = %d\n", f, rv);
		}

		mix++;
		dsp++;
	}

	mutex_exit(&sbp->sb_slock);

	return (mix);
}
4174
4175 static void
sbd_cancel(sbd_handle_t * hp)4176 sbd_cancel(sbd_handle_t *hp)
4177 {
4178 int i;
4179 sbd_devset_t devset;
4180 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
4181 static fn_t f = "sbd_cancel";
4182 int rv;
4183
4184 PR_ALL("%s...\n", f);
4185
4186 /*
4187 * Only devices which have been "released" are
4188 * subject to cancellation.
4189 */
4190 devset = HD2MACHHD(hp)->sh_devset & SBD_DEVS_UNREFERENCED(sbp);
4191
4192 /*
4193 * Nothing to do for CPUs or IO other than change back
4194 * their state.
4195 */
4196 for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
4197 if (!DEVSET_IN_SET(devset, SBD_COMP_CPU, i))
4198 continue;
4199 if (sbd_cancel_cpu(hp, i) != SBD_CPUERR_FATAL) {
4200 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
4201 SBD_STATE_CONFIGURED);
4202 } else {
4203 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU, i,
4204 SBD_STATE_FATAL);
4205 }
4206 }
4207
4208 for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
4209 if (!DEVSET_IN_SET(devset, SBD_COMP_IO, i))
4210 continue;
4211 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO, i,
4212 SBD_STATE_CONFIGURED);
4213 }
4214
4215 for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
4216 if (!DEVSET_IN_SET(devset, SBD_COMP_MEM, i))
4217 continue;
4218 if ((rv = sbd_cancel_mem(hp, i)) == 0) {
4219 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
4220 SBD_STATE_CONFIGURED);
4221 } else if (rv == -1) {
4222 SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM, i,
4223 SBD_STATE_FATAL);
4224 }
4225 }
4226
4227 PR_ALL("%s: unreleasing devset (0x%x)\n", f, (uint_t)devset);
4228
4229 SBD_DEVS_CANCEL(sbp, devset);
4230
4231 if (SBD_DEVS_UNREFERENCED(sbp) == 0) {
4232 sbd_istate_t new_state;
4233 /*
4234 * If the board no longer has any released devices
4235 * than transfer it back to the CONFIG/PARTIAL state.
4236 */
4237 if (SBD_DEVS_ATTACHED(sbp) == SBD_DEVS_PRESENT(sbp))
4238 new_state = SBD_STATE_CONFIGURED;
4239 else
4240 new_state = SBD_STATE_PARTIAL;
4241 if (SBD_BOARD_STATE(sbp) != new_state) {
4242 SBD_BOARD_TRANSITION(sbp, new_state);
4243 }
4244 sbp->sb_ostate = SBD_STAT_CONFIGURED;
4245 (void) drv_getparm(TIME, (void *)&sbp->sb_time);
4246 }
4247 }
4248
4249 static void
sbd_get_ncm(sbd_handle_t * hp)4250 sbd_get_ncm(sbd_handle_t *hp)
4251 {
4252 sbd_devset_t devset;
4253 sbd_priv_handle_t *shp = HD2MACHHD(hp);
4254 sbd_cmd_t *cmdp = (sbd_cmd_t *)hp->h_iap;
4255 int error;
4256
4257 /* pre_op restricted the devices to those selected by the ioctl */
4258 devset = shp->sh_devset;
4259
4260 cmdp->cmd_getncm.g_ncm = sbd_cpu_cnt(hp, devset) +
4261 sbd_io_cnt(hp, devset) + sbd_mem_cnt(hp, devset);
4262
4263 error = sbd_copyout_ioarg(hp->h_mode, hp->h_cmd, cmdp,
4264 (sbd_ioctl_arg_t *)shp->sh_arg);
4265
4266 if (error != 0)
4267 SBD_SET_ERRNO(SBD_HD2ERR(hp), error);
4268 }
4269
4270 static void
sbd_status(sbd_handle_t * hp)4271 sbd_status(sbd_handle_t *hp)
4272 {
4273 int nstat, mode, ncm, sz, cksz;
4274 sbd_priv_handle_t *shp = HD2MACHHD(hp);
4275 sbd_devset_t devset;
4276 sbd_board_t *sbp = SBDH2BD(hp->h_sbd);
4277 sbd_stat_t *dstatp;
4278 sbd_cmd_t *cmdp = (sbd_cmd_t *)hp->h_iap;
4279 sbdp_handle_t *hdp;
4280 sbd_dev_stat_t *devstatp;
4281
4282 #ifdef _MULTI_DATAMODEL
4283 int sz32;
4284 sbd_stat32_t *dstat32p;
4285 #endif /* _MULTI_DATAMODEL */
4286
4287 static fn_t f = "sbd_status";
4288
4289 mode = hp->h_mode;
4290 devset = shp->sh_devset;
4291
4292 devset &= SBD_DEVS_PRESENT(sbp);
4293
4294 if (cmdp->cmd_cm.c_id.c_type == SBD_COMP_NONE) {
4295 if (cmdp->cmd_cm.c_flags & SBD_FLAG_ALLCMP) {
4296 /*
4297 * Get the number of components "ncm" on the board.
4298 * Calculate size of buffer required to store one
4299 * sbd_stat_t structure plus ncm-1 sbd_dev_stat_t
4300 * structures. Note that sbd_stat_t already contains
4301 * one sbd_dev_stat_t, so only an additional ncm-1
4302 * sbd_dev_stat_t structures need to be accounted for
4303 * in the calculation when more than one component
4304 * is present.
4305 */
4306 ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4307 sbd_mem_cnt(hp, devset);
4308
4309 } else {
4310 /*
4311 * In the case of c_type == SBD_COMP_NONE, and
4312 * SBD_FLAG_ALLCMP not specified, only the board
4313 * info is to be returned, no components.
4314 */
4315 ncm = 0;
4316 devset = 0;
4317 }
4318 } else {
4319 /* Confirm that only one component is selected. */
4320 ncm = sbd_cpu_cnt(hp, devset) + sbd_io_cnt(hp, devset) +
4321 sbd_mem_cnt(hp, devset);
4322 if (ncm != 1) {
4323 PR_ALL("%s: expected ncm of 1, got %d, devset 0x%x\n",
4324 f, ncm, devset);
4325 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4326 return;
4327 }
4328 }
4329
4330 sz = sizeof (sbd_stat_t);
4331 if (ncm > 1)
4332 sz += sizeof (sbd_dev_stat_t) * (ncm - 1);
4333
4334 cksz = sz;
4335
4336 /*
4337 * s_nbytes describes the size of the preallocated user
4338 * buffer into which the application is executing to
4339 * receive the sbd_stat_t and sbd_dev_stat_t structures.
4340 * This buffer must be at least the required (sz) size.
4341 */
4342
4343 #ifdef _MULTI_DATAMODEL
4344
4345 /*
4346 * More buffer space is required for the 64bit to 32bit
4347 * conversion of data structures.
4348 */
4349 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4350 sz32 = sizeof (sbd_stat32_t);
4351 if (ncm > 1)
4352 sz32 += sizeof (sbd_dev_stat32_t) * (ncm - 1);
4353 cksz = sz32;
4354 } else
4355 sz32 = 0;
4356 #endif
4357
4358 if ((int)cmdp->cmd_stat.s_nbytes < cksz) {
4359 PR_ALL("%s: ncm=%d s_nbytes = 0x%x\n", f, ncm,
4360 cmdp->cmd_stat.s_nbytes);
4361 PR_ALL("%s: expected size of 0x%x\n", f, cksz);
4362 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4363 return;
4364 }
4365
4366 dstatp = kmem_zalloc(sz, KM_SLEEP);
4367 devstatp = &dstatp->s_stat[0];
4368
4369 #ifdef _MULTI_DATAMODEL
4370 if (sz32 != 0)
4371 dstat32p = kmem_zalloc(sz32, KM_SLEEP);
4372 #endif
4373
4374 /*
4375 * if connected or better, provide cached status if available,
4376 * otherwise call sbdp for status
4377 */
4378 mutex_enter(&sbp->sb_flags_mutex);
4379 switch (sbp->sb_state) {
4380
4381 case SBD_STATE_CONNECTED:
4382 case SBD_STATE_PARTIAL:
4383 case SBD_STATE_CONFIGURED:
4384 if (sbp->sb_flags & SBD_BOARD_STATUS_CACHED) {
4385 bcopy(&sbp->sb_stat, dstatp, sizeof (sbd_stat_t));
4386 dstatp->s_rstate = rstate_cvt(sbp->sb_state);
4387 dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4388 dstatp->s_busy = sbp->sb_busy;
4389 dstatp->s_time = sbp->sb_time;
4390 dstatp->s_cond = sbp->sb_cond;
4391 break;
4392 }
4393 /*FALLTHROUGH*/
4394
4395 default:
4396 sbp->sb_flags &= ~SBD_BOARD_STATUS_CACHED;
4397 dstatp->s_board = sbp->sb_num;
4398 dstatp->s_ostate = ostate_cvt(sbp->sb_state);
4399 dstatp->s_time = sbp->sb_time;
4400
4401 hdp = sbd_get_sbdp_handle(sbp, hp);
4402
4403 if (sbdp_get_board_status(hdp, dstatp) != 0) {
4404 SBD_GET_PERR(hdp->h_err, SBD_HD2ERR(hp));
4405 sbd_release_sbdp_handle(hdp);
4406 #ifdef _MULTI_DATAMODEL
4407 if (sz32 != 0)
4408 kmem_free(dstat32p, sz32);
4409 #endif
4410 kmem_free(dstatp, sz);
4411 mutex_exit(&sbp->sb_flags_mutex);
4412 return;
4413 }
4414 /*
4415 * Do not cache status if the busy flag has
4416 * been set by the call to sbdp_get_board_status().
4417 */
4418 if (!dstatp->s_busy) {
4419 /* Can get board busy flag now */
4420 dstatp->s_busy = sbp->sb_busy;
4421 sbp->sb_cond = (sbd_cond_t)dstatp->s_cond;
4422 bcopy(dstatp, &sbp->sb_stat, sizeof (sbd_stat_t));
4423 sbp->sb_flags |= SBD_BOARD_STATUS_CACHED;
4424 }
4425 sbd_release_sbdp_handle(hdp);
4426 break;
4427 }
4428 mutex_exit(&sbp->sb_flags_mutex);
4429
4430 if (DEVSET_IN_SET(devset, SBD_COMP_CPU, DEVSET_ANYUNIT))
4431 if ((nstat = sbd_cpu_flags(hp, devset, devstatp)) > 0) {
4432 dstatp->s_nstat += nstat;
4433 devstatp += nstat;
4434 }
4435
4436 if (DEVSET_IN_SET(devset, SBD_COMP_MEM, DEVSET_ANYUNIT))
4437 if ((nstat = sbd_mem_status(hp, devset, devstatp)) > 0) {
4438 dstatp->s_nstat += nstat;
4439 devstatp += nstat;
4440 }
4441
4442 if (DEVSET_IN_SET(devset, SBD_COMP_IO, DEVSET_ANYUNIT))
4443 if ((nstat = sbd_io_status(hp, devset, devstatp)) > 0) {
4444 dstatp->s_nstat += nstat;
4445 devstatp += nstat;
4446 }
4447
4448 /* paranoia: detect buffer overrun */
4449 if ((caddr_t)devstatp > ((caddr_t)dstatp) + sz) {
4450 PR_ALL("%s: buffer overrun\n", f);
4451 #ifdef _MULTI_DATAMODEL
4452 if (sz32 != 0)
4453 kmem_free(dstat32p, sz32);
4454 #endif
4455 kmem_free(dstatp, sz);
4456 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4457 return;
4458 }
4459
4460 /* if necessary, move data into intermediate device status buffer */
4461 #ifdef _MULTI_DATAMODEL
4462 if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
4463 int i, j;
4464
4465 ASSERT(sz32 != 0);
4466 /* paranoia: detect buffer overrun */
4467 if ((caddr_t)&dstat32p->s_stat[dstatp->s_nstat] >
4468 ((caddr_t)dstat32p) + sz32) {
4469 cmn_err(CE_WARN, "sbd:%s: buffer32 overrun", f);
4470 #ifdef _MULTI_DATAMODEL
4471 if (sz32 != 0)
4472 kmem_free(dstat32p, sz32);
4473 #endif
4474 kmem_free(dstatp, sz);
4475 SBD_SET_ERRNO(SBD_HD2ERR(hp), EINVAL);
4476 return;
4477 }
4478
4479 /*
4480 * initialize 32 bit sbd board status structure
4481 */
4482 dstat32p->s_board = (int32_t)dstatp->s_board;
4483 dstat32p->s_nstat = (int32_t)dstatp->s_nstat;
4484 dstat32p->s_rstate = dstatp->s_rstate;
4485 dstat32p->s_ostate = dstatp->s_ostate;
4486 dstat32p->s_cond = dstatp->s_cond;
4487 dstat32p->s_busy = dstatp->s_busy;
4488 dstat32p->s_time = dstatp->s_time;
4489 dstat32p->s_assigned = dstatp->s_assigned;
4490 dstat32p->s_power = dstatp->s_power;
4491 dstat32p->s_platopts = (int32_t)dstatp->s_platopts;
4492 (void) strcpy(dstat32p->s_type, dstatp->s_type);
4493
4494 for (i = 0; i < dstatp->s_nstat; i++) {
4495 sbd_dev_stat_t *dsp = &dstatp->s_stat[i];
4496 sbd_dev_stat32_t *ds32p = &dstat32p->s_stat[i];
4497
4498 /*
4499 * copy common data for the device
4500 */
4501 ds32p->d_cm.ci_type = (int32_t)dsp->d_cm.ci_type;
4502 ds32p->d_cm.ci_unit = (int32_t)dsp->d_cm.ci_unit;
4503 ds32p->d_cm.c_ostate = (int32_t)dsp->d_cm.c_ostate;
4504 ds32p->d_cm.c_cond = (int32_t)dsp->d_cm.c_cond;
4505 ds32p->d_cm.c_busy = (int32_t)dsp->d_cm.c_busy;
4506 ds32p->d_cm.c_time = (time32_t)dsp->d_cm.c_time;
4507 ds32p->d_cm.c_sflags = (int32_t)dsp->d_cm.c_sflags;
4508 (void) strcpy(ds32p->d_cm.ci_name, dsp->d_cm.ci_name);
4509
4510 /* copy type specific data for the device */
4511 switch (dsp->d_cm.ci_type) {
4512
4513 case SBD_COMP_CPU:
4514 ds32p->d_cpu.cs_isbootproc =
4515 (int32_t)dsp->d_cpu.cs_isbootproc;
4516 ds32p->d_cpu.cs_cpuid =
4517 (int32_t)dsp->d_cpu.cs_cpuid;
4518 ds32p->d_cpu.cs_speed =
4519 (int32_t)dsp->d_cpu.cs_speed;
4520 ds32p->d_cpu.cs_ecache =
4521 (int32_t)dsp->d_cpu.cs_ecache;
4522 break;
4523
4524 case SBD_COMP_MEM:
4525 ds32p->d_mem.ms_type =
4526 (int32_t)dsp->d_mem.ms_type;
4527 ds32p->d_mem.ms_ostate =
4528 (int32_t)dsp->d_mem.ms_ostate;
4529 ds32p->d_mem.ms_cond =
4530 (int32_t)dsp->d_mem.ms_cond;
4531 ds32p->d_mem.ms_interleave =
4532 (uint32_t)dsp->d_mem.ms_interleave;
4533 ds32p->d_mem.ms_basepfn =
4534 (uint32_t)dsp->d_mem.ms_basepfn;
4535 ds32p->d_mem.ms_totpages =
4536 (uint32_t)dsp->d_mem.ms_totpages;
4537 ds32p->d_mem.ms_detpages =
4538 (uint32_t)dsp->d_mem.ms_detpages;
4539 ds32p->d_mem.ms_pageslost =
4540 (int32_t)dsp->d_mem.ms_pageslost;
4541 ds32p->d_mem.ms_managed_pages =
4542 (int32_t)dsp->d_mem.ms_managed_pages;
4543 ds32p->d_mem.ms_noreloc_pages =
4544 (int32_t)dsp->d_mem.ms_noreloc_pages;
4545 ds32p->d_mem.ms_noreloc_first =
4546 (int32_t)dsp->d_mem.ms_noreloc_first;
4547 ds32p->d_mem.ms_noreloc_last =
4548 (int32_t)dsp->d_mem.ms_noreloc_last;
4549 ds32p->d_mem.ms_cage_enabled =
4550 (int32_t)dsp->d_mem.ms_cage_enabled;
4551 ds32p->d_mem.ms_peer_is_target =
4552 (int32_t)dsp->d_mem.ms_peer_is_target;
4553 (void) strcpy(ds32p->d_mem.ms_peer_ap_id,
4554 dsp->d_mem.ms_peer_ap_id);
4555 break;
4556
4557
4558 case SBD_COMP_IO:
4559
4560 ds32p->d_io.is_type =
4561 (int32_t)dsp->d_io.is_type;
4562 ds32p->d_io.is_unsafe_count =
4563 (int32_t)dsp->d_io.is_unsafe_count;
4564 ds32p->d_io.is_referenced =
4565 (int32_t)dsp->d_io.is_referenced;
4566 for (j = 0; j < SBD_MAX_UNSAFE; j++)
4567 ds32p->d_io.is_unsafe_list[j] =
4568 (int32_t)
4569 ds32p->d_io.is_unsafe_list[j];
4570 bcopy(dsp->d_io.is_pathname,
4571 ds32p->d_io.is_pathname, MAXPATHLEN);
4572 break;
4573
4574 case SBD_COMP_CMP:
4575 /* copy sbd_cmp_stat_t structure members */
4576 bcopy(&dsp->d_cmp.ps_cpuid[0],
4577 &ds32p->d_cmp.ps_cpuid[0],
4578 sizeof (ds32p->d_cmp.ps_cpuid));
4579 ds32p->d_cmp.ps_ncores =
4580 (int32_t)dsp->d_cmp.ps_ncores;
4581 ds32p->d_cmp.ps_speed =
4582 (int32_t)dsp->d_cmp.ps_speed;
4583 ds32p->d_cmp.ps_ecache =
4584 (int32_t)dsp->d_cmp.ps_ecache;
4585 break;
4586
4587 default:
4588 cmn_err(CE_WARN,
4589 "sbd:%s: unknown dev type (%d)", f,
4590 (int)dsp->d_cm.c_id.c_type);
4591 break;
4592 }
4593 }
4594
4595 if (ddi_copyout((void *)dstat32p,
4596 cmdp->cmd_stat.s_statp, sz32, mode) != 0) {
4597 cmn_err(CE_WARN,
4598 "sbd:%s: failed to copyout status for board %d",
4599 f, sbp->sb_num);
4600 SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4601 }
4602 } else
4603 #endif /* _MULTI_DATAMODEL */
4604 if (ddi_copyout((void *)dstatp, cmdp->cmd_stat.s_statp,
4605 sz, mode) != 0) {
4606 cmn_err(CE_WARN,
4607 "sbd:%s: failed to copyout status for board %d",
4608 f, sbp->sb_num);
4609 SBD_SET_ERRNO(SBD_HD2ERR(hp), EFAULT);
4610 }
4611
4612 #ifdef _MULTI_DATAMODEL
4613 if (sz32 != 0)
4614 kmem_free(dstat32p, sz32);
4615 #endif
4616 kmem_free(dstatp, sz);
4617 }
4618
/*
 * Called at driver load time to determine the state and condition
 * of an existing board in the system: marks each present cpu,
 * memory, and io unit as attached or unconfigured, and updates the
 * board devsets accordingly.
 */
static void
sbd_board_discovery(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*dip;
	sbd_devset_t	devs_lost, devs_attached = 0;
	extern kmutex_t	cpu_lock;
	sbdp_handle_t	*hdp;
	static fn_t	f = "sbd_board_discovery";
	sbderror_t	error, *ep;
	sbd_handle_t	*hp = MACHBD2HD(sbp);

	if (SBD_DEVS_PRESENT(sbp) == 0) {
		PR_ALL("%s: board %d has no devices present\n",
		    f, sbp->sb_num);
		return;
	}

	ep = &error;
	bzero(ep, sizeof (sbderror_t));

	/*
	 * Check for existence of cpus.
	 */

	hdp = sbd_get_sbdp_handle(sbp, hp);

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		processorid_t	cpuid;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_CPU, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_CPU)][i];

		if (dip != NULL) {
			cpuid = sbdp_get_cpuid(hdp, dip);

			if (cpuid < 0) {
				/* remember the error but keep scanning */
				SBD_GET_PERR(hdp->h_err, ep);
				continue;
			}

			mutex_enter(&cpu_lock); /* needed to call cpu_get() */
			if (cpu_get(cpuid)) {
				SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_CPU, i);
				DEVSET_ADD(devs_attached, SBD_COMP_CPU, i);
				PR_ALL("%s: board %d, cpuid %d - attached\n",
				    f, sbp->sb_num, cpuid);
			}
			mutex_exit(&cpu_lock);
			sbd_init_cpu_unit(sbp, i);
		}
	}

	/*
	 * Check for existence of memory.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		uint64_t	basepa, endpa;
		struct memlist	*ml;
		extern struct memlist	*phys_install;

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_MEM)][i];
		if (dip == NULL)
			continue;

		if (sbdphw_get_base_physaddr(hdp, dip, &basepa)) {
			/* omit phantom memory controllers on I/O boards */
			if (SBD_DEV_IS_PRESENT(sbp, SBD_COMP_MEM, i)) {
				ASSERT(sbp->sb_ndev != 0);
				SBD_DEV_CLR_PRESENT(sbp, SBD_COMP_MEM, i);
				sbp->sb_ndev--;
			}
			sbp->sb_devlist[NIX(SBD_COMP_MEM)][i] = NULL;
			continue;
		}

		/*
		 * basepa may not be on a alignment boundary, make it so.
		 */
		if (sbdp_get_mem_alignment(hdp, dip, &endpa)) {
			cmn_err(CE_WARN, "%s sbdp_get_mem_alignment fail", f);
			continue;
		}

		basepa &= ~(endpa - 1);
		endpa += basepa;

		/*
		 * Check if base address is in phys_install.
		 * The loop exits with ml != NULL iff [basepa, endpa)
		 * overlaps an installed segment.
		 */
		memlist_read_lock();
		for (ml = phys_install; ml; ml = ml->ml_next)
			if ((endpa <= ml->ml_address) ||
			    (basepa >= (ml->ml_address + ml->ml_size)))
				continue;
			else
				break;
		memlist_read_unlock();

		if (ml) {
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_MEM, i);
			DEVSET_ADD(devs_attached, SBD_COMP_MEM, i);
			PR_ALL("%s: board %d, mem-unit %d - attached\n",
			    f, sbp->sb_num, i);
		}
		sbd_init_mem_unit(sbp, i, ep);
	}
	sbd_release_sbdp_handle(hdp);

	/*
	 * If so far we have found an error, we just log it but continue
	 */
	if (SBD_GET_ERRNO(ep) != 0)
		cmn_err(CE_WARN, "%s errno has occurred: errno %d", f,
		    SBD_GET_ERRNO(ep));

	/*
	 * Check for i/o state.
	 */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {

		if (!SBD_DEV_IS_PRESENT(sbp, SBD_COMP_IO, i))
			continue;

		dip = sbp->sb_devlist[NIX(SBD_COMP_IO)][i];
		if (dip == NULL)
			continue;

		ASSERT(e_ddi_branch_held(dip));

		/*
		 * XXX Is the devstate check needed ?
		 */
		if (i_ddi_devi_attached(dip) ||
		    ddi_get_devstate(dip) == DDI_DEVSTATE_UP) {

			/*
			 * Found it!
			 */
			SBD_DEV_SET_ATTACHED(sbp, SBD_COMP_IO, i);
			DEVSET_ADD(devs_attached, SBD_COMP_IO, i);
			PR_ALL("%s: board %d, io-unit %d - attached\n",
			    f, sbp->sb_num, i);
		}
		sbd_init_io_unit(sbp, i);
	}

	SBD_DEVS_CONFIGURE(sbp, devs_attached);
	if (devs_attached && ((devs_lost = SBD_DEVS_UNATTACHED(sbp)) != 0)) {
		int		ut;
		/*
		 * A prior comment stated that a partially configured
		 * board was not permitted. The Serengeti architecture
		 * makes this possible, so the SB_DEVS_DISCONNECT
		 * at the end of this block has been removed.
		 */

		PR_ALL("%s: some devices not configured (0x%x)...\n",
		    f, devs_lost);

		/* mark every unattached unit UNCONFIGURED */
		for (ut = 0; ut < MAX_CPU_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_CPU, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_CPU,
				    ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_MEM_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_MEM, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_MEM,
				    ut, SBD_STATE_UNCONFIGURED);
			}

		for (ut = 0; ut < MAX_IO_UNITS_PER_BOARD; ut++)
			if (DEVSET_IN_SET(devs_lost, SBD_COMP_IO, ut)) {
				SBD_DEVICE_TRANSITION(sbp, SBD_COMP_IO,
				    ut, SBD_STATE_UNCONFIGURED);
			}
	}
}
4807
4808 static int
hold_rele_branch(dev_info_t * rdip,void * arg)4809 hold_rele_branch(dev_info_t *rdip, void *arg)
4810 {
4811 walk_tree_t *wp = (walk_tree_t *)arg;
4812
4813 ASSERT(wp && (wp->hold == 0 || wp->hold == 1));
4814
4815 switch (get_node_type(wp->sbp, rdip, NULL)) {
4816 case SBD_COMP_CMP:
4817 case SBD_COMP_MEM:
4818 case SBD_COMP_IO:
4819 break;
4820 case SBD_COMP_CPU:
4821
4822 /*
4823 * All CPU nodes under CMP nodes should have
4824 * gotten pruned when the CMP node was first
4825 * encountered.
4826 */
4827 ASSERT(!sbd_is_cmp_child(rdip));
4828
4829 break;
4830
4831 case SBD_COMP_UNKNOWN:
4832 /* Not of interest to us */
4833 return (DDI_WALK_CONTINUE);
4834 default:
4835 ASSERT(0);
4836 return (DDI_WALK_PRUNECHILD);
4837 }
4838
4839 if (wp->hold) {
4840 ASSERT(!e_ddi_branch_held(rdip));
4841 e_ddi_branch_hold(rdip);
4842 } else {
4843 ASSERT(e_ddi_branch_held(rdip));
4844 e_ddi_branch_rele(rdip);
4845 }
4846
4847 return (DDI_WALK_PRUNECHILD);
4848 }
4849
/*
 * One-time initialization of the soft state for board `bd'.
 * Sets up the board locks and identity fields, allocates the
 * per-component device lists, unit structures and pathname buffers,
 * places a hold on every device branch rooted on this board, and
 * finally derives the board's initial DR state from the devices
 * found present/attached by sbd_init_devlists()/sbd_board_discovery().
 */
static void
sbd_board_init(sbd_board_t *sbp, sbd_softstate_t *softsp,
	int bd, dev_info_t *top_dip, int wnode)
{
	int		i;
	dev_info_t	*pdip;
	walk_tree_t	walk = {0};

	mutex_init(&sbp->sb_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_flags_mutex, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&sbp->sb_slock, NULL, MUTEX_DRIVER, NULL);

	/* Board identity and bookkeeping. */
	sbp->sb_ref = 0;
	sbp->sb_num = bd;
	sbp->sb_time = gethrestime_sec();
	/*
	 * For serengeti, top_dip doesn't need to be held because
	 * sbp i.e. sbd_board_t will be destroyed in sbd_teardown_instance()
	 * before top_dip detaches. For Daktari, top_dip is the
	 * root node which never has to be held.
	 */
	sbp->sb_topdip = top_dip;
	sbp->sb_cpuid = -1;
	sbp->sb_softsp = (void *) softsp;
	sbp->sb_cond = SBD_COND_UNKNOWN;
	sbp->sb_wnode = wnode;
	sbp->sb_memaccess_ok = 1;

	/* The fixed-size unit arrays below must be able to hold all units. */
	ASSERT(MAX_IO_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_CPU_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);
	ASSERT(MAX_MEM_UNITS_PER_BOARD <= SBD_MAX_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for cpus.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] =
		GETSTRUCT(dev_info_t *, MAX_CPU_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for mem.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] =
		GETSTRUCT(dev_info_t *, MAX_MEM_UNITS_PER_BOARD);

	/*
	 * Allocate the devlist for io.
	 */
	sbp->sb_devlist[NIX(SBD_COMP_IO)] =
		GETSTRUCT(dev_info_t *, MAX_IO_UNITS_PER_BOARD);

	/* Per-unit DR state structures for each component class. */
	sbp->sb_dev[NIX(SBD_COMP_CPU)] =
		GETSTRUCT(sbd_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_MEM)] =
		GETSTRUCT(sbd_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);

	sbp->sb_dev[NIX(SBD_COMP_IO)] =
		GETSTRUCT(sbd_dev_unit_t, MAX_IO_UNITS_PER_BOARD);

	/* Device pathname buffers, one per possible unit of each class. */
	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		sbp->sb_cpupath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbp->sb_mempath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		sbp->sb_iopath[i] = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	}

	/*
	 * Walk the device tree, find all top dips on this board and
	 * hold the branches rooted at them.  ndi_devi_enter/exit on
	 * the parent brackets the walk; hold_rele_branch() prunes at
	 * each branch root so held branches are not descended into.
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip);
	walk.sbp = sbp;
	walk.hold = 1;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip);

	/*
	 * Initialize the devlists
	 */
	if (sbd_init_devlists(sbp) == 0) {
		/* No devices found on this board. */
		SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);
	} else {
		/*
		 * Couldn't have made it down here without
		 * having found at least one device.
		 */
		ASSERT(SBD_DEVS_PRESENT(sbp) != 0);
		/*
		 * Check the state of any possible devices on the
		 * board.
		 */
		sbd_board_discovery(sbp);

		if (SBD_DEVS_UNATTACHED(sbp) == 0) {
			/*
			 * The board has no unattached devices, therefore
			 * by reason of insanity it must be configured!
			 */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONFIGURED);
			sbp->sb_cond = SBD_COND_OK;
		} else if (SBD_DEVS_ATTACHED(sbp)) {
			/* Some, but not all, devices attached. */
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_PARTIAL);
		} else {
			SBD_BOARD_TRANSITION(sbp, SBD_STATE_CONNECTED);
		}
	}
}
4967
/*
 * Tear down a board's soft state: the inverse of sbd_board_init().
 * Frees the per-unit structures, device lists and pathname buffers,
 * releases the branch holds taken at init time, and destroys the
 * board locks.  The board is transitioned to EMPTY first.
 */
static void
sbd_board_destroy(sbd_board_t *sbp)
{
	int		i;
	dev_info_t	*pdip;
	walk_tree_t	walk = {0};

	SBD_BOARD_TRANSITION(sbp, SBD_STATE_EMPTY);

#ifdef DEBUG
	/* All per-memunit memlists must have been freed by now. */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		sbd_mem_unit_t	*mp;

		mp = SBD_GET_BOARD_MEMUNIT(sbp, i);
		ASSERT(mp->sbm_mlist == NULL);
	}
#endif /* DEBUG */

	/*
	 * Free up MEM unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_MEM)],
		sbd_dev_unit_t, MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * Free up CPU unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_CPU)],
		sbd_dev_unit_t, MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * Free up IO unit structs.
	 */
	FREESTRUCT(sbp->sb_dev[NIX(SBD_COMP_IO)],
		sbd_dev_unit_t, MAX_IO_UNITS_PER_BOARD);
	sbp->sb_dev[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * free up CPU devlists.
	 */

	for (i = 0; i < MAX_CPU_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_cpupath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_CPU)], dev_info_t *,
		MAX_CPU_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_CPU)] = NULL;

	/*
	 * free up MEM devlists.
	 */
	for (i = 0; i < MAX_MEM_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_mempath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_MEM)], dev_info_t *,
		MAX_MEM_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_MEM)] = NULL;

	/*
	 * free up IO devlists.
	 */
	for (i = 0; i < MAX_IO_UNITS_PER_BOARD; i++) {
		kmem_free((caddr_t)sbp->sb_iopath[i], MAXPATHLEN);
	}
	FREESTRUCT(sbp->sb_devlist[NIX(SBD_COMP_IO)], dev_info_t *,
		MAX_IO_UNITS_PER_BOARD);
	sbp->sb_devlist[NIX(SBD_COMP_IO)] = NULL;

	/*
	 * Release all branches held earlier.  walk.hold == 0 makes
	 * hold_rele_branch() do an e_ddi_branch_rele() at each branch
	 * root; the walk is bracketed by ndi_devi_enter/exit on the
	 * parent, matching the hold pass in sbd_board_init().
	 */
	ASSERT(sbp->sb_topdip);
	pdip = ddi_get_parent(sbp->sb_topdip);
	if (pdip)
		ndi_devi_enter(pdip);
	walk.sbp = sbp;
	walk.hold = 0;
	ddi_walk_devs(sbp->sb_topdip, hold_rele_branch, (void *)&walk);
	if (pdip)
		ndi_devi_exit(pdip);

	mutex_destroy(&sbp->sb_slock);
	mutex_destroy(&sbp->sb_flags_mutex);
	mutex_destroy(&sbp->sb_mutex);
}
5055
5056 sbd_comp_type_t
sbd_cm_type(char * name)5057 sbd_cm_type(char *name)
5058 {
5059 sbd_comp_type_t type = SBD_COMP_UNKNOWN;
5060 int i;
5061
5062 /* look up type in table */
5063 for (i = 0; SBD_COMP(i) != SBD_COMP_UNKNOWN; i++) {
5064 if (strcmp(name, SBD_OTYPE(i)) == 0) {
5065 type = SBD_COMP(i);
5066 break;
5067 }
5068 }
5069
5070 return (type);
5071 }
5072
/*
 * There are certain cases where OBP marks components as failed.
 * If the status is ok the node won't have any status property; the
 * property is only present if the status is other than ok.
 *
 * The translation is as follows:
 * If there is no status prop, the cond is SBD_COND_OK.
 * If we find a status prop but can't get to it, the cond is
 * SBD_COND_UNKNOWN.
 * If we find a status prop and it is failed, the cond is SBD_COND_FAILED.
 * If the status is disabled, the cond is SBD_COND_UNUSABLE.
 * Otherwise we return the cond as SBD_COND_OK.
 */
5085 sbd_cond_t
sbd_get_comp_cond(dev_info_t * dip)5086 sbd_get_comp_cond(dev_info_t *dip)
5087 {
5088 int len;
5089 char *status_buf;
5090 static const char *status = "status";
5091 static const char *failed = "fail";
5092 static const char *disabled = "disabled";
5093
5094 if (dip == NULL) {
5095 PR_BYP("dip is NULL\n");
5096 return (SBD_COND_UNKNOWN);
5097 }
5098
5099 /*
5100 * If retired, return FAILED
5101 */
5102 if (DEVI(dip)->devi_flags & DEVI_RETIRED) {
5103 PR_CPU("dip is retired\n");
5104 return (SBD_COND_FAILED);
5105 }
5106
5107 if (ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5108 (char *)status, &len) != DDI_PROP_SUCCESS) {
5109 PR_CPU("status in sbd is ok\n");
5110 return (SBD_COND_OK);
5111 }
5112
5113 status_buf = kmem_zalloc(sizeof (char) * OBP_MAXPROPNAME, KM_SLEEP);
5114 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
5115 (char *)status, status_buf, &len) != DDI_PROP_SUCCESS) {
5116 PR_CPU("status in sbd is unknown\n");
5117 return (SBD_COND_UNKNOWN);
5118 }
5119
5120 if (strncmp(status_buf, failed, strlen(failed)) == 0) {
5121 PR_CPU("status in sbd is failed\n");
5122 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5123 return (SBD_COND_FAILED);
5124 }
5125
5126 if (strcmp(status_buf, disabled) == 0) {
5127 PR_CPU("status in sbd is unusable\n");
5128 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5129 return (SBD_COND_UNUSABLE);
5130 }
5131
5132 kmem_free(status_buf, sizeof (char) * OBP_MAXPROPNAME);
5133 return (SBD_COND_OK);
5134 }
5135
5136 #ifdef SBD_DEBUG_ERRS
5137
5138 /* function to simulate errors throughout the sbd code */
5139 void
sbd_inject_err(int error,sbderror_t * ep,int Errno,int ecode,char * rsc)5140 sbd_inject_err(int error, sbderror_t *ep, int Errno, int ecode,
5141 char *rsc)
5142 {
5143 static fn_t f = "sbd_inject_err";
5144
5145 if (sbd_err_debug == 0)
5146 return;
5147
5148 if (ep == NULL) {
5149 cmn_err(CE_WARN, "%s ep is NULL", f);
5150 return;
5151 }
5152
5153 if (SBD_GET_ERRNO(ep) != 0) {
5154 cmn_err(CE_WARN, "%s errno already set to %d", f,
5155 SBD_GET_ERRNO(ep));
5156 return;
5157 }
5158
5159 if (SBD_GET_ERR(ep) != 0) {
5160 cmn_err(CE_WARN, "%s code already set to %d", f,
5161 SBD_GET_ERR(ep));
5162 return;
5163 }
5164
5165 if ((sbd_err_debug & (1 << error)) != 0) {
5166 ep->e_errno = Errno;
5167 ep->e_code = ecode;
5168
5169 if (rsc != NULL)
5170 bcopy((caddr_t)rsc, (caddr_t)ep->e_rsc,
5171 sizeof (ep->e_rsc));
5172
5173 if (Errno != 0)
5174 PR_ERR_ERRNO("%s set errno to %d", f, ep->e_errno);
5175
5176 if (ecode != 0)
5177 PR_ERR_ECODE("%s set ecode to %d", f, ep->e_code);
5178
5179 if (rsc != NULL)
5180 PR_ERR_RSC("%s set rsc to %s", f, ep->e_rsc);
5181 }
5182 }
5183 #endif
5184