/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 */

#include <sys/debug.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/errno.h>
#include <sys/cred.h>
#include <sys/dditypes.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/ndi_impldefs.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/processor.h>
#include <sys/spitregs.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/mem_config.h>
#include <sys/ddi_impldefs.h>
#include <sys/systm.h>
#include <sys/machsystm.h>
#include <sys/autoconf.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/membar.h>
#include <vm/seg_kmem.h>
#include <sys/mem_cage.h>
#include <sys/stack.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <sys/cpu_module.h>
#include <sys/obpdefs.h>
#include <sys/note.h>

#include <sys/starfire.h>	/* plat_max_... decls */
#include <sys/cvc.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/drmach.h>
#include <sys/dr_util.h>
#include <sys/pda.h>

#include <sys/sysevent.h>
#include <sys/sysevent/dr.h>
#include <sys/sysevent/eventdefs.h>


extern void		bcopy32_il(uint64_t, uint64_t);
extern void		flush_ecache_il(
			    uint64_t physaddr, int size, int linesz);
extern uint_t		ldphysio_il(uint64_t physaddr);
extern void		stphysio_il(uint64_t physaddr, uint_t value);

extern uint64_t		mc_get_mem_alignment(void);
extern uint64_t		mc_get_asr_addr(pnode_t);
extern uint64_t		mc_get_idle_addr(pnode_t);
extern uint64_t		mc_get_alignment_mask(pnode_t);
extern int		mc_read_asr(pnode_t, uint_t *);
extern int		mc_write_asr(pnode_t, uint_t);
extern uint64_t		mc_asr_to_pa(uint_t);
extern uint_t		mc_pa_to_asr(uint_t, uint64_t);

extern int		pc_madr_add(int, int, int, int);

typedef struct {
	struct drmach_node	*node;
	void			*data;
} drmach_node_walk_args_t;

typedef struct drmach_node {
	void		*here;

	pnode_t		(*get_dnode)(struct drmach_node *node);
	int		(*walk)(struct drmach_node *node, void *data,
			    int (*cb)(drmach_node_walk_args_t *args));
} drmach_node_t;

typedef struct {
	int	min_index;
	int	max_index;
	int	arr_sz;
	drmachid_t *arr;
} drmach_array_t;

typedef struct {
	void		*isa;

	sbd_error_t	*(*release)(drmachid_t);
	sbd_error_t	*(*status)(drmachid_t, drmach_status_t *);

	char		name[MAXNAMELEN];
} drmach_common_t;

typedef struct {
	drmach_common_t	cm;
	int		bnum;
	int		assigned;
	int		powered;
	int		connect_cpuid;
	int		cond;
	drmach_node_t	*tree;
	drmach_array_t	*devices;
} drmach_board_t;

typedef struct {
	drmach_common_t	cm;
	drmach_board_t	*bp;
	int		unum;
	int		busy;
	int		powered;
	const char	*type;
	drmach_node_t	*node;
} drmach_device_t;

typedef struct {
	int		flags;
	drmach_device_t	*dp;
	sbd_error_t	*err;
	dev_info_t	*dip;
} drmach_config_args_t;

typedef struct {
	uint64_t	idle_addr;
	drmach_device_t	*mem;
} drmach_mc_idle_script_t;

typedef struct {
	uint64_t	masr_addr;
	uint_t		masr;
	uint_t		_filler;
} drmach_rename_script_t;

typedef struct {
	void		(*run)(void *arg);
	caddr_t		data;
	pda_handle_t	*ph;
	struct memlist	*c_ml;
	uint64_t	s_copybasepa;
	uint64_t	t_copybasepa;
	drmach_device_t	*restless_mc;	/* diagnostic output */
} drmach_copy_rename_program_t;

typedef enum {
	DO_IDLE,
	DO_UNIDLE,
	DO_PAUSE,
	DO_UNPAUSE
} drmach_iopc_op_t;

typedef struct {
	drmach_board_t	*obj;
	int		ndevs;
	void		*a;
	sbd_error_t	*(*found)(void *a, const char *, int, drmachid_t);
	sbd_error_t	*err;
} drmach_board_cb_data_t;

static caddr_t		drmach_shutdown_va;

static int		drmach_initialized;
static drmach_array_t	*drmach_boards;

static int		drmach_cpu_delay = 100;
static int		drmach_cpu_ntries = 50000;

volatile uchar_t	*drmach_xt_mb;

/*
 * Do not change the drmach_shutdown_mbox structure without
 * considering the drmach_shutdown_asm assembly language code.
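 * In particular, the assembly presumably addresses these fields
 * by fixed byte offsets, so reordering or resizing any member
 * requires a matching change on that side.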
 */
struct drmach_shutdown_mbox {
	uint64_t	estack;
	uint64_t	flushaddr;
	int		size;
	int		linesize;
	uint64_t	physaddr;
};
struct drmach_shutdown_mbox	*drmach_shutdown_asm_mbox;

static int		drmach_fini(void);
static sbd_error_t	*drmach_device_new(drmach_node_t *,
			    drmach_board_t *, drmach_device_t **);
static sbd_error_t	*drmach_cpu_new(drmach_device_t *);
static sbd_error_t	*drmach_mem_new(drmach_device_t *);
static sbd_error_t	*drmach_io_new(drmach_device_t *);
static sbd_error_t	*drmach_board_release(drmachid_t);
static sbd_error_t	*drmach_board_status(drmachid_t, drmach_status_t *);
static sbd_error_t	*drmach_cpu_release(drmachid_t);
static sbd_error_t	*drmach_cpu_status(drmachid_t, drmach_status_t *);
static sbd_error_t	*drmach_io_release(drmachid_t);
static sbd_error_t	*drmach_io_status(drmachid_t, drmach_status_t *);
static sbd_error_t	*drmach_mem_release(drmachid_t);
static sbd_error_t	*drmach_mem_status(drmachid_t, drmach_status_t *);

extern struct cpu	*SIGBCPU;

#ifdef DEBUG

#define	DRMACH_PR		if (drmach_debug) printf
int drmach_debug = 0;		/* set to non-zero to enable debug messages */
#else

#define	DRMACH_PR		_NOTE(CONSTANTCONDITION) if (0) printf
#endif /* DEBUG */

#define	DRMACH_OBJ(id)		((drmach_common_t *)id)

#define	DRMACH_IS_BOARD_ID(id)	\
	((id != 0) &&		\
	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new))

#define	DRMACH_IS_CPU_ID(id)	\
	((id != 0) &&		\
	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new))

#define	DRMACH_IS_MEM_ID(id)	\
	((id != 0) &&		\
	(DRMACH_OBJ(id)->isa == (void *)drmach_mem_new))

#define	DRMACH_IS_IO_ID(id)	\
	((id != 0) &&		\
	(DRMACH_OBJ(id)->isa == (void *)drmach_io_new))

#define	DRMACH_IS_DEVICE_ID(id)					\
	((id != 0) &&						\
	(DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))

#define	DRMACH_IS_ID(id)					\
	((id != 0) &&						\
	(DRMACH_OBJ(id)->isa == (void *)drmach_board_new ||	\
	    DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new ||	\
	    DRMACH_OBJ(id)->isa == (void *)drmach_mem_new ||	\
	    DRMACH_OBJ(id)->isa == (void *)drmach_io_new))

#define	DRMACH_CPUID2BNUM(cpuid) \
	((cpuid) / MAX_CPU_UNITS_PER_BOARD)

#define	DRMACH_INTERNAL_ERROR() \
	drerr_new(1, ESTF_INTERNAL, drmach_ie_fmt, __LINE__)
static char		*drmach_ie_fmt = "drmach.c %d";

static struct {
	const char	*name;
	const char	*type;
	sbd_error_t	*(*new)(drmach_device_t *);
} name2type[] = {
	{ "SUNW,UltraSPARC",	DRMACH_DEVTYPE_CPU,  drmach_cpu_new },
	{ "mem-unit",		DRMACH_DEVTYPE_MEM,  drmach_mem_new },
	{ "pci",		DRMACH_DEVTYPE_PCI,  drmach_io_new  },
	{ "sbus",		DRMACH_DEVTYPE_SBUS, drmach_io_new  },
};

/* node types to clean up when a board is unconfigured */
#define	MISC_COUNTER_TIMER_DEVNAME	"counter-timer"
#define	MISC_PERF_COUNTER_DEVNAME	"perf-counter"

/* utility */
#define	MBYTE	(1048576ull)

/*
 * drmach autoconfiguration data structures and interfaces
 */

extern struct mod_ops mod_miscops;

static struct modlmisc modlmisc = {
	&mod_miscops,
	"Sun Enterprise 10000 DR"
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modlmisc,
	NULL
};

static kmutex_t drmach_i_lock;

int
_init(void)
{
	int	err;
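
	/*
	 * Note: drmach_xt_mb and the shutdown mailbox below are carved
	 * out of static_alloc_arena, presumably so that they live in
	 * memory that can never itself be a target of DR removal.
	 */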

	/* check that we have the correct version of obp */
	if (prom_test("SUNW,UE10000,add-brd") != 0) {

		cmn_err(CE_WARN, "!OBP/SSP upgrade is required to enable "
		    "DR Functionality");

		return (-1);
	}

	mutex_init(&drmach_i_lock, NULL, MUTEX_DRIVER, NULL);

	drmach_xt_mb = (uchar_t *)vmem_alloc(static_alloc_arena,
	    NCPU * sizeof (uchar_t), VM_SLEEP);
	drmach_shutdown_asm_mbox = (struct drmach_shutdown_mbox *)
	    vmem_alloc(static_alloc_arena, sizeof (struct drmach_shutdown_mbox),
	    VM_SLEEP);

	if ((err = mod_install(&modlinkage)) != 0) {
		mutex_destroy(&drmach_i_lock);
		vmem_free(static_alloc_arena, (void *)drmach_xt_mb,
		    NCPU * sizeof (uchar_t));
		vmem_free(static_alloc_arena, (void *)drmach_shutdown_asm_mbox,
		    sizeof (struct drmach_shutdown_mbox));
	}

	return (err);
}

int
_fini(void)
{
	if (drmach_fini())
		return (DDI_FAILURE);
	else
		return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static pnode_t
drmach_node_obp_get_dnode(drmach_node_t *np)
{
	return ((pnode_t)(uintptr_t)np->here);
}

static int
drmach_node_obp_walk(drmach_node_t *np, void *data,
    int (*cb)(drmach_node_walk_args_t *args))
{
	pnode_t			nodeid;
	int			rv;
	drmach_node_walk_args_t	args;

	/* initialize args structure for callback */
	args.node = np;
	args.data = data;

	nodeid = prom_childnode(prom_rootnode());

	/* save our new position within the tree */
	np->here = (void *)(uintptr_t)nodeid;

	rv = 0;
	while (nodeid != OBP_NONODE) {
		rv = (*cb)(&args);
		if (rv)
			break;

		nodeid = prom_nextnode(nodeid);

		/* save our new position within the tree */
		np->here = (void *)(uintptr_t)nodeid;
	}

	return (rv);
}

static drmach_node_t *
drmach_node_new(void)
{
	drmach_node_t *np;

	np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP);

	np->get_dnode = drmach_node_obp_get_dnode;
	np->walk = drmach_node_obp_walk;

	return (np);
}

static void
drmach_node_dispose(drmach_node_t *np)
{
	kmem_free(np, sizeof (*np));
}

static dev_info_t *
drmach_node_get_dip(drmach_node_t *np)
{
	pnode_t nodeid;

	nodeid = np->get_dnode(np);
	if (nodeid == OBP_NONODE)
		return (NULL);
	else {
		dev_info_t *dip;

		/* The root node doesn't have to be held */
		dip = e_ddi_nodeid_to_dip(nodeid);
		if (dip) {
			/*
			 * Branch rooted at dip is already held, so release
			 * hold acquired in e_ddi_nodeid_to_dip()
			 */
			ddi_release_devi(dip);
			ASSERT(e_ddi_branch_held(dip));
		}

		return (dip);
	}
	/*NOTREACHED*/
}

static pnode_t
drmach_node_get_dnode(drmach_node_t *np)
{
	return (np->get_dnode(np));
}

static int
drmach_node_walk(drmach_node_t *np, void *param,
    int (*cb)(drmach_node_walk_args_t *args))
{
	return (np->walk(np, param, cb));
}

static int
drmach_node_get_prop(drmach_node_t *np, char *name, void *buf)
{
	pnode_t	nodeid;
	int	rv;

	nodeid = np->get_dnode(np);
	if (nodeid == OBP_NONODE)
		rv = -1;
	else if (prom_getproplen(nodeid, (caddr_t)name) < 0)
		rv = -1;
	else {
		(void) prom_getprop(nodeid, (caddr_t)name, (caddr_t)buf);
		rv = 0;
	}

	return (rv);
}

static int
drmach_node_get_proplen(drmach_node_t *np, char *name, int *len)
{
	pnode_t	nodeid;
	int	rv;

	nodeid = np->get_dnode(np);
	if (nodeid == OBP_NONODE)
		rv = -1;
	else {
		*len = prom_getproplen(nodeid, (caddr_t)name);
		rv = (*len < 0 ? -1 : 0);
	}

	return (rv);
}

static drmachid_t
drmach_node_dup(drmach_node_t *np)
{
	drmach_node_t *dup;

	dup = drmach_node_new();
	dup->here = np->here;

	return (dup);
}

/*
 * drmach_array provides convenient array construction, access,
 * bounds checking and array destruction logic.
 */

static drmach_array_t *
drmach_array_new(int min_index, int max_index)
{
	drmach_array_t *arr;

	arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP);

	arr->arr_sz = (max_index - min_index + 1) * sizeof (void *);
	if (arr->arr_sz > 0) {
		arr->min_index = min_index;
		arr->max_index = max_index;

		arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP);
		return (arr);
	} else {
		kmem_free(arr, sizeof (*arr));
		return (0);
	}
}

static int
drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val)
{
	if (idx < arr->min_index || idx > arr->max_index)
		return (-1);
	else {
		arr->arr[idx - arr->min_index] = val;
		return (0);
	}
	/*NOTREACHED*/
}

static int
drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val)
{
	if (idx < arr->min_index || idx > arr->max_index)
		return (-1);
	else {
		*val = arr->arr[idx - arr->min_index];
		return (0);
	}
	/*NOTREACHED*/
}

static int
drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val)
{
	int rv;

	*idx = arr->min_index;
	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
		*idx += 1;

	return (rv);
}

static int
drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val)
{
	int rv;

	*idx += 1;
	while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL)
		*idx += 1;

	return (rv);
}

static void
drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t))
{
	drmachid_t	val;
	int		idx;
	int		rv;

	rv = drmach_array_first(arr, &idx, &val);
	while (rv == 0) {
		(*disposer)(val);
		rv = drmach_array_next(arr, &idx, &val);
	}

	kmem_free(arr->arr, arr->arr_sz);
	kmem_free(arr, sizeof (*arr));
}

/*ARGSUSED*/
static int
drmach_prom_select(pnode_t nodeid, void *arg, uint_t flags)
{
	int			rprop[64];
	pnode_t			saved;
	drmach_config_args_t	*ap = (drmach_config_args_t *)arg;
	drmach_device_t		*dp = ap->dp;
	sbd_error_t		*err;

	saved = drmach_node_get_dnode(dp->node);

	if (nodeid != saved)
		return (DDI_FAILURE);

	if (saved == OBP_NONODE) {
		err = DRMACH_INTERNAL_ERROR();
		DRERR_SET_C(&ap->err, &err);
		return (DDI_FAILURE);
	}

	if (prom_getprop(nodeid, OBP_REG, (caddr_t)rprop) <= 0) {
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
drmach_branch_callback(dev_info_t *rdip, void *arg, uint_t flags)
{
	drmach_config_args_t	*ap = (drmach_config_args_t *)arg;

	ASSERT(ap->dip == NULL);

	ap->dip = rdip;
}

sbd_error_t *
drmach_configure(drmachid_t id, int flags)
{
	drmach_device_t		*dp;
	sbd_error_t		*err;
	drmach_config_args_t	ca;
	devi_branch_t		b = {0};
	dev_info_t		*fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	ca.dp = dp;
	ca.flags = flags;
	ca.err = NULL;		/* will be set if error detected */
	ca.dip = NULL;

	b.arg = &ca;
	b.type = DEVI_BRANCH_PROM;
	b.create.prom_branch_select = drmach_prom_select;
	b.devi_branch_callback = drmach_branch_callback;

	if (e_ddi_branch_create(ddi_root_node(), &b, &fdip,
	    DEVI_BRANCH_CHILD | DEVI_BRANCH_CONFIGURE) != 0) {
		char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/*
		 * If non-NULL, fdip is returned held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ddi_release_devi(fdip);
		} else if (ca.dip != NULL) {
			/* safe to call ddi_pathname as dip already held */
			(void) ddi_pathname(ca.dip, path);
		} else {
			(void) strcpy(path, "<none>");
		}

		err = drerr_new(1, ESTF_DRVFAIL, path);
		DRERR_SET_C(&ca.err, &err);
		kmem_free(path, MAXPATHLEN);
	}

	return (ca.err);
}

static sbd_error_t *
drmach_device_new(drmach_node_t *node,
    drmach_board_t *bp, drmach_device_t **dpp)
{
	int		i;
	int		rv;
	drmach_device_t	*dp;
	sbd_error_t	*err;
	char		name[OBP_MAXDRVNAME];

	rv = drmach_node_get_prop(node, OBP_NAME, name);
	if (rv) {
		/* every node is expected to have a name */
		err = drerr_new(1, ESTF_GETPROP,
		    "PROM Node 0x%x: property %s",
		    (uint_t)node->get_dnode(node), OBP_NAME);

		return (err);
	}

	/*
	 * Search the name2type[] table to determine whether the
	 * named node is of interest to drmach.
	 */
	for (i = 0; i < sizeof (name2type) / sizeof (name2type[0]); i++)
		if (strcmp(name2type[i].name, name) == 0)
			break;

	if (i < sizeof (name2type) / sizeof (name2type[0])) {
		dp = kmem_zalloc(sizeof (drmach_device_t), KM_SLEEP);

		dp->bp = bp;
		dp->unum = -1;
		dp->node = drmach_node_dup(node);
		dp->type = name2type[i].type;

		err = (name2type[i].new)(dp);
		if (err) {
			/* dispose of the duplicated node, not the caller's */
			drmach_node_dispose(dp->node);
			kmem_free(dp, sizeof (*dp));
			dp = NULL;
		}

		*dpp = dp;
		return (err);
	}

	/*
	 * The node currently being examined is not listed in the
	 * name2type[] array. In this case, the node is of no interest
	 * to drmach. Both the device pointer and the error return are
	 * set to yield nothing (no device or error structure).
	 */
	*dpp = NULL;
	return (NULL);
}

static void
drmach_device_dispose(drmachid_t id)
{
	drmach_device_t *self = id;

	if (self->node)
		drmach_node_dispose(self->node);

	kmem_free(self, sizeof (*self));
}

static sbd_error_t *
drmach_device_get_prop(drmach_device_t *dp, char *name, void *buf)
{
	sbd_error_t	*err = NULL;
	int		rv;

	rv = drmach_node_get_prop(dp->node, name, buf);
	if (rv) {
		err = drerr_new(1, ESTF_GETPROP,
		    "%s::%s: property %s",
		    dp->bp->cm.name, dp->cm.name, name);
	}

	return (err);
}

static sbd_error_t *
drmach_device_get_proplen(drmach_device_t *dp, char *name, int *len)
{
	sbd_error_t	*err = NULL;
	int		rv;

	rv = drmach_node_get_proplen(dp->node, name, len);
	if (rv) {
		err = drerr_new(1, ESTF_GETPROPLEN,
		    "%s::%s: property %s",
		    dp->bp->cm.name, dp->cm.name, name);
	}

	return (err);
}

static drmach_board_t *
drmach_board_new(int bnum)
{
	drmach_board_t	*bp;

	bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP);

	bp->cm.isa = (void *)drmach_board_new;
	bp->cm.release = drmach_board_release;
	bp->cm.status = drmach_board_status;

	(void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name));

	bp->bnum = bnum;
	bp->devices = NULL;
	bp->connect_cpuid = -1;
	bp->tree = drmach_node_new();
	bp->assigned = !drmach_initialized;
	bp->powered = !drmach_initialized;

	(void) drmach_array_set(drmach_boards, bnum, bp);
	return (bp);
}

static void
drmach_board_dispose(drmachid_t id)
{
	drmach_board_t *bp;

	ASSERT(DRMACH_IS_BOARD_ID(id));
	bp = id;

	if (bp->tree)
		drmach_node_dispose(bp->tree);

	if (bp->devices)
		drmach_array_dispose(bp->devices, drmach_device_dispose);

	kmem_free(bp, sizeof (*bp));
}

static sbd_error_t *
drmach_board_status(drmachid_t id, drmach_status_t *stat)
{
	sbd_error_t	*err = NULL;
	drmach_board_t	*bp;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;

	stat->assigned = bp->assigned;
	stat->powered = bp->powered;
	stat->busy = 0;			/* assume not busy */
	stat->configured = 0;		/* assume not configured */
	stat->empty = 0;
	stat->cond = bp->cond = SBD_COND_OK;
	(void) strncpy(stat->type, "System Brd", sizeof (stat->type));
	stat->info[0] = '\0';

	if (bp->devices) {
		int		rv;
		int		d_idx;
		drmachid_t	d_id;

		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			drmach_status_t	d_stat;

			err = drmach_status(d_id, &d_stat);
			if (err)
				break;

			stat->busy |= d_stat.busy;
			stat->configured |= d_stat.configured;

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	return (err);
}

/* a simple routine to reduce redundancy of this common logic */
static pda_handle_t
drmach_pda_open(void)
{
	pda_handle_t ph;

	ph = pda_open();
	if (ph == NULL) {
		/* catch in debug kernels */
		ASSERT(0);
		cmn_err(CE_WARN, "pda_open failed");
	}

	return (ph);
}

#ifdef DEBUG
int drmach_init_break = 0;
#endif

static int
hold_rele_branch(dev_info_t *rdip, void *arg)
{
	int	i;
	int	*holdp = (int *)arg;
	char	*name = ddi_node_name(rdip);

	/*
	 * For Starfire, we must be children of the root devinfo node
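	 *
	 * A branch of interest is held or released here according to
	 * the flag passed in through arg; the walk is always pruned at
	 * this level, since the nodes drmach cares about sit directly
	 * under the root.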
	 */
	ASSERT(ddi_get_parent(rdip) == ddi_root_node());

	for (i = 0; i < sizeof (name2type) / sizeof (name2type[0]); i++)
		if (strcmp(name2type[i].name, name) == 0)
			break;

	if (i == sizeof (name2type) / sizeof (name2type[0])) {
		/* Not of interest to us */
		return (DDI_WALK_PRUNECHILD);
	}

	if (*holdp) {
		ASSERT(!e_ddi_branch_held(rdip));
		e_ddi_branch_hold(rdip);
	} else {
		ASSERT(e_ddi_branch_held(rdip));
		e_ddi_branch_rele(rdip);
	}

	return (DDI_WALK_PRUNECHILD);
}

static int
drmach_init(void)
{
	pnode_t		nodeid;
	dev_info_t	*rdip;
	int		hold, circ;

#ifdef DEBUG
	if (drmach_init_break)
		debug_enter("drmach_init: drmach_init_break set\n");
#endif
	mutex_enter(&drmach_i_lock);
	if (drmach_initialized) {
		mutex_exit(&drmach_i_lock);
		return (0);
	}

	drmach_boards = drmach_array_new(0, MAX_BOARDS - 1);

	nodeid = prom_childnode(prom_rootnode());
	do {
		int		bnum;
		drmachid_t	id;

		bnum = -1;
		(void) prom_getprop(nodeid, OBP_BOARDNUM, (caddr_t)&bnum);
		if (bnum == -1)
			continue;

		if (drmach_array_get(drmach_boards, bnum, &id) == -1) {
			cmn_err(CE_WARN, "OBP node 0x%x has"
			    " invalid property value, %s=%d",
			    nodeid, OBP_BOARDNUM, bnum);

			/* clean up */
			drmach_array_dispose(
			    drmach_boards, drmach_board_dispose);

			mutex_exit(&drmach_i_lock);
			return (-1);
		} else if (id == NULL)
			(void) drmach_board_new(bnum);
	} while ((nodeid = prom_nextnode(nodeid)) != OBP_NONODE);

	drmach_shutdown_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	/*
	 * Walk immediate children of devinfo root node and hold
	 * all devinfo branches of interest.
	 */
	hold = 1;
	rdip = ddi_root_node();

	ndi_devi_enter(rdip, &circ);
	ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold);
	ndi_devi_exit(rdip, circ);

	drmach_initialized = 1;

	mutex_exit(&drmach_i_lock);

	return (0);
}

static int
drmach_fini(void)
{
	dev_info_t	*rdip;
	int		hold, circ;

	if (drmach_initialized) {
		int		busy = 0;
		int		rv;
		int		idx;
		drmachid_t	id;

		ASSERT(drmach_boards != NULL);

		rv = drmach_array_first(drmach_boards, &idx, &id);
		while (rv == 0) {
			sbd_error_t	*err;
			drmach_status_t	stat;

			err = drmach_board_status(id, &stat);
			if (err) {
				/* catch in debug kernels */
				ASSERT(0);
				sbd_err_clear(&err);
				busy = 1;
			} else
				busy |= stat.busy;

			rv = drmach_array_next(drmach_boards, &idx, &id);
		}

		if (busy)
			return (-1);

		drmach_array_dispose(drmach_boards, drmach_board_dispose);
		drmach_boards = NULL;

		vmem_free(heap_arena, drmach_shutdown_va, PAGESIZE);

		/*
		 * Walk immediate children of the root devinfo node
		 * releasing holds acquired on branches in drmach_init()
		 */
		hold = 0;
		rdip = ddi_root_node();

		ndi_devi_enter(rdip, &circ);
		ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold);
		ndi_devi_exit(rdip, circ);

		mutex_destroy(&drmach_i_lock);

		drmach_initialized = 0;
	}
	if (drmach_xt_mb != NULL) {
		vmem_free(static_alloc_arena, (void *)drmach_xt_mb,
		    NCPU * sizeof (uchar_t));
	}
	if (drmach_shutdown_asm_mbox != NULL) {
		vmem_free(static_alloc_arena, (void *)drmach_shutdown_asm_mbox,
		    sizeof (struct drmach_shutdown_mbox));
	}
	return (0);
}

static sbd_error_t *
drmach_get_mc_asr_addr(drmachid_t id, uint64_t *pa)
{
	drmach_device_t	*dp;
	pnode_t		nodeid;
	uint64_t	addr;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	nodeid = drmach_node_get_dnode(dp->node);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
		return (DRMACH_INTERNAL_ERROR());

	addr = mc_get_asr_addr(nodeid);
	if (addr == (uint64_t)-1)
		return (DRMACH_INTERNAL_ERROR());

	*pa = addr;
	return (NULL);
}

static sbd_error_t *
drmach_get_mc_idle_addr(drmachid_t id, uint64_t *pa)
{
	drmach_device_t	*dp;
	pnode_t		nodeid;
	uint64_t	addr;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	nodeid = drmach_node_get_dnode(dp->node);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
		return (DRMACH_INTERNAL_ERROR());

	addr = mc_get_idle_addr(nodeid);
	if (addr == (uint64_t)-1)
		return (DRMACH_INTERNAL_ERROR());

	*pa = addr;
	return (NULL);
}

static sbd_error_t *
drmach_read_mc_asr(drmachid_t id, uint_t *mcregp)
{
	drmach_device_t	*dp;
	pnode_t		nodeid;
	sbd_error_t	*err;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	nodeid = drmach_node_get_dnode(dp->node);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
		err = DRMACH_INTERNAL_ERROR();
	else if (mc_read_asr(nodeid, mcregp) == -1)
		err = DRMACH_INTERNAL_ERROR();
	else
		err = NULL;

	return (err);
}

static sbd_error_t *
drmach_write_mc_asr(drmachid_t id, uint_t mcreg)
{
	drmach_device_t	*dp;
	pnode_t		nodeid;
	sbd_error_t	*err;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	nodeid = drmach_node_get_dnode(dp->node);
	if (nodeid == OBP_NONODE || nodeid == OBP_BADNODE)
		err = DRMACH_INTERNAL_ERROR();
	else if (mc_write_asr(nodeid, mcreg) == -1)
		err = DRMACH_INTERNAL_ERROR();
	else
		err = NULL;

	return (err);
}

static sbd_error_t *
drmach_prep_rename_script(drmach_device_t *s_mem, drmach_device_t *t_mem,
    uint64_t t_slice_offset, caddr_t buf, int buflen)
{
	int			i, b, m;
	drmach_mc_idle_script_t	*isp;
	drmach_rename_script_t	*rsp;
	int			s_bd, t_bd;
	uint_t			s_masr, t_masr;
	uint64_t		s_new_basepa, t_new_basepa;
	int			b_idx, rv;
	sbd_error_t		*err;
	drmachid_t		b_id;
	drmach_board_t		*brd;

#ifdef DEBUG
	/*
	 * Starfire CPU/MEM/IO boards have only one MC per board.
	 * This function has been coded with that fact in mind.
	 */
	ASSERT(MAX_MEM_UNITS_PER_BOARD == 1);

	/*
	 * calculate the maximum space that could be consumed,
	 * then verify the available buffer space is adequate.
	 */
	m = sizeof (drmach_mc_idle_script_t *) * 2;	/* two MCs */
	b = sizeof (drmach_rename_script_t *) * 3 * MAX_CPU_UNITS_PER_BOARD;
	b += sizeof (drmach_rename_script_t *) * 3 * MAX_IO_UNITS_PER_BOARD;
	b *= MAX_BOARDS;
	b += sizeof (drmach_rename_script_t *) * 3;
	b += sizeof (drmach_rename_script_t *) * 1;
	ASSERT(m + b < buflen);
#endif

	/*
	 * construct an array of MC idle register addresses of
	 * both MCs.
	 * The array is zero terminated -- as expected by
	 * drmach_copy_rename_prog__relocatable().
	 */
	isp = (drmach_mc_idle_script_t *)buf;

	/* source mc */
	err = drmach_get_mc_idle_addr(s_mem, &isp->idle_addr);
	if (err)
		return (err);
	isp->mem = s_mem;
	isp += 1;

	/* target mc */
	err = drmach_get_mc_idle_addr(t_mem, &isp->idle_addr);
	if (err)
		return (err);
	isp->mem = t_mem;
	isp += 1;

	/* terminator */
	isp->idle_addr = 0;
	isp->mem = NULL;
	isp += 1;

	/* fetch source mc asr register value */
	err = drmach_read_mc_asr(s_mem, &s_masr);
	if (err)
		return (err);
	else if (s_masr & STARFIRE_MC_INTERLEAVE_MASK) {
		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
		    s_mem->bp->cm.name, s_mem->cm.name));
	}

	/* fetch target mc asr register value */
	err = drmach_read_mc_asr(t_mem, &t_masr);
	if (err)
		return (err);
	else if (t_masr & STARFIRE_MC_INTERLEAVE_MASK) {
		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
		    t_mem->bp->cm.name, t_mem->cm.name));
	}

	/* get new source base pa from target's masr */
	s_new_basepa = mc_asr_to_pa(t_masr);

	/*
	 * remove any existing slice offset to realign
	 * memory with board's slice boundary
	 */
	s_new_basepa &= ~ (mc_get_mem_alignment() - 1);

	/* get new target base pa from source's masr */
	t_new_basepa = mc_asr_to_pa(s_masr);

	/* remove any existing slice offset, then apply new offset */
	t_new_basepa &= ~ (mc_get_mem_alignment() - 1);
	t_new_basepa += t_slice_offset;

	/* encode new base pa into s_masr. turn off mem present bit */
	s_masr = mc_pa_to_asr(s_masr, s_new_basepa);
	s_masr &= ~STARFIRE_MC_MEM_PRESENT_MASK;

	/* encode new base pa into t_masr. turn on mem present bit */
	t_masr = mc_pa_to_asr(t_masr, t_new_basepa);
	t_masr |= STARFIRE_MC_MEM_PRESENT_MASK;

	/*
	 * Step 0:	Mark source memory as not present.
	 */
	m = 0;
	rsp = (drmach_rename_script_t *)isp;
	err = drmach_get_mc_asr_addr(s_mem, &rsp[m].masr_addr);
	if (err)
		return (err);
	rsp[m].masr = s_masr;
	m++;

	/*
	 * Step 1:	Write source base address to target MC
	 *		with present bit off.
	 */
	err = drmach_get_mc_asr_addr(t_mem, &rsp[m].masr_addr);
	if (err)
		return (err);
	rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
	m++;

	/*
	 * Step 2:	Now rewrite target reg with present bit on.
	 */
	rsp[m].masr_addr = rsp[m-1].masr_addr;
	rsp[m].masr = t_masr;
	m++;

	s_bd = s_mem->bp->bnum;
	t_bd = t_mem->bp->bnum;

	DRMACH_PR("preparing script for CPU and IO units:\n");

	rv = drmach_array_first(drmach_boards, &b_idx, &b_id);
	if (rv) {
		/* catch this in debug kernels */
		ASSERT(0);
		return (DRMACH_INTERNAL_ERROR());
	}

	do {
		int		d_idx;
		drmachid_t	d_id;
		drmach_device_t	*device;

		ASSERT(DRMACH_IS_BOARD_ID(b_id));
		brd = b_id;
		b = brd->bnum;

		/*
		 * Step 3:	Update PC MADR tables for CPUs.
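		 *		Each port controller (PC) keeps its own
		 *		copy of the memory decode state, so the
		 *		new source and target MASR values from
		 *		steps 0-2 are mirrored into the PC MADR
		 *		slot of every CPU (and, in step 4, IO
		 *		unit) on every board.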
		 */
		if (brd->devices == NULL) {
			/* devices not initialized */
			continue;
		}

		rv = drmach_array_first(brd->devices, &d_idx, &d_id);
		if (rv) {
			/* must mean no devices on this board */
			break;
		}

		DRMACH_PR("\t%s\n", brd->cm.name);

		do {
			ASSERT(DRMACH_IS_DEVICE_ID(d_id));

			if (!DRMACH_IS_CPU_ID(d_id))
				continue;

			device = d_id;
			i = device->unum;

			DRMACH_PR("\t\t%s\n", device->cm.name);

			/*
			 * Disabled detaching mem node.
			 */
			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, s_bd, i);
			rsp[m].masr = s_masr;
			m++;
			/*
			 * Always write masr with present bit
			 * off and then again with it on.
			 */
			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, t_bd, i);
			rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
			m++;
			rsp[m].masr_addr = rsp[m-1].masr_addr;
			rsp[m].masr = t_masr;
			m++;

		} while (drmach_array_next(brd->devices, &d_idx, &d_id) == 0);

		/*
		 * Step 4:	Update PC MADR tables for IOs.
		 */
		rv = drmach_array_first(brd->devices, &d_idx, &d_id);
		/* this worked for previous loop, must work here too */
		ASSERT(rv == 0);

		do {
			ASSERT(DRMACH_IS_DEVICE_ID(d_id));

			if (!DRMACH_IS_IO_ID(d_id))
				continue;

			device = d_id;
			i = device->unum;

			DRMACH_PR("\t\t%s\n", device->cm.name);

			/*
			 * Disabled detaching mem node.
			 */
			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, s_bd, i+4);
			rsp[m].masr = s_masr;
			m++;
			/*
			 * Always write masr with present bit
			 * off and then again with it on.
			 */
			rsp[m].masr_addr = STARFIRE_PC_MADR_ADDR(b, t_bd, i+4);
			rsp[m].masr = t_masr & ~STARFIRE_MC_MEM_PRESENT_MASK;
			m++;
			rsp[m].masr_addr = rsp[m-1].masr_addr;
			rsp[m].masr = t_masr;
			m++;

		} while (drmach_array_next(brd->devices, &d_idx, &d_id) == 0);
	} while (drmach_array_next(drmach_boards, &b_idx, &b_id) == 0);

	/*
	 * Zero masr_addr value indicates the END.
	 */
	rsp[m].masr_addr = 0ull;
	rsp[m].masr = 0;
	DRMACH_PR("number of steps in rename script = %d\n", m);
	m++;

	/* paranoia */
	ASSERT((caddr_t)&rsp[m] <= buf + buflen);

#ifdef DEBUG
	{
		int	j;

		DRMACH_PR("mc idle register address list:");
		isp = (drmach_mc_idle_script_t *)buf;
		DRMACH_PR("source mc idle addr 0x%lx, mem id %p",
		    isp[0].idle_addr, (void *)isp[0].mem);
		DRMACH_PR("target mc idle addr 0x%lx, mem id %p",
		    isp[1].idle_addr, (void *)isp[1].mem);
		ASSERT(isp[2].idle_addr == 0);

		DRMACH_PR("copy-rename script:");
		for (j = 0; j < m; j++) {
			DRMACH_PR("0x%lx = 0x%08x",
			    rsp[j].masr_addr, rsp[j].masr);
		}

		DELAY(1000000);
	}
#endif

	/* return number of bytes consumed */
	b = (caddr_t)&rsp[m] - buf;
	DRMACH_PR("total number of bytes consumed is %d\n", b);
	ASSERT(b <= buflen);

#ifdef lint
	buflen = buflen;
#endif

	return (NULL);
}

/*
 * The routine performs the necessary memory COPY and MC adr SWITCH.
 * Both operations MUST be at the same "level" so that the stack is
 * maintained correctly between the copy and switch.  The switch
 * portion implements a caching mechanism to guarantee the code text
 * is cached prior to execution.
This is to guard against possible 1429 * memory access while the MC adr's are being modified. 1430 * 1431 * IMPORTANT: The _drmach_copy_rename_end() function must immediately 1432 * follow drmach_copy_rename_prog__relocatable() so that the correct 1433 * "length" of the drmach_copy_rename_prog__relocatable can be 1434 * calculated. This routine MUST be a LEAF function, i.e. it can 1435 * make NO function calls, primarily for two reasons: 1436 * 1437 * 1. We must keep the stack consistent across the "switch". 1438 * 2. Function calls are compiled to relative offsets, and 1439 * we execute this function we'll be executing it from 1440 * a copied version in a different area of memory, thus 1441 * the relative offsets will be bogus. 1442 * 1443 * Moreover, it must have the "__relocatable" suffix to inform DTrace 1444 * providers (and anything else, for that matter) that this 1445 * function's text is manually relocated elsewhere before it is 1446 * executed. That is, it cannot be safely instrumented with any 1447 * methodology that is PC-relative. 1448 */ 1449 static void 1450 drmach_copy_rename_prog__relocatable(drmach_copy_rename_program_t *prog) 1451 { 1452 extern void drmach_exec_script_il(drmach_rename_script_t *rsp); 1453 1454 drmach_mc_idle_script_t *isp; 1455 struct memlist *ml; 1456 int csize; 1457 int lnsize; 1458 uint64_t caddr; 1459 1460 isp = (drmach_mc_idle_script_t *)prog->data; 1461 1462 caddr = ecache_flushaddr; 1463 csize = (cpunodes[CPU->cpu_id].ecache_size << 1); 1464 lnsize = cpunodes[CPU->cpu_id].ecache_linesize; 1465 1466 /* 1467 * DO COPY. 1468 */ 1469 for (ml = prog->c_ml; ml; ml = ml->ml_next) { 1470 uint64_t s_pa, t_pa; 1471 uint64_t nbytes; 1472 1473 s_pa = prog->s_copybasepa + ml->ml_address; 1474 t_pa = prog->t_copybasepa + ml->ml_address; 1475 nbytes = ml->ml_size; 1476 1477 while (nbytes != 0ull) { 1478 /* 1479 * This copy does NOT use an ASI 1480 * that avoids the Ecache, therefore 1481 * the dst_pa addresses may remain 1482 * in our Ecache after the dst_pa 1483 * has been removed from the system. 1484 * A subsequent write-back to memory 1485 * will cause an ARB-stop because the 1486 * physical address no longer exists 1487 * in the system. Therefore we must 1488 * flush out local Ecache after we 1489 * finish the copy. 1490 */ 1491 1492 /* copy 32 bytes at src_pa to dst_pa */ 1493 bcopy32_il(s_pa, t_pa); 1494 1495 /* increment by 32 bytes */ 1496 s_pa += (4 * sizeof (uint64_t)); 1497 t_pa += (4 * sizeof (uint64_t)); 1498 1499 /* decrement by 32 bytes */ 1500 nbytes -= (4 * sizeof (uint64_t)); 1501 } 1502 } 1503 1504 /* 1505 * Since bcopy32_il() does NOT use an ASI to bypass 1506 * the Ecache, we need to flush our Ecache after 1507 * the copy is complete. 1508 */ 1509 flush_ecache_il(caddr, csize, lnsize); /* inline version */ 1510 1511 /* 1512 * Wait for MCs to go idle. 
	 */
	do {
		register int	t = 10;
		register uint_t	v;

		/* loop t cycles waiting for each mc to indicate it's idle */
		do {
			v = ldphysio_il(isp->idle_addr)
			    & STARFIRE_MC_IDLE_MASK;

		} while (v != STARFIRE_MC_IDLE_MASK && t-- > 0);

		/* bail out if we timed out */
		if (t <= 0) {
			prog->restless_mc = isp->mem;
			return;
		}

		isp += 1;

		/* stop if terminating zero has been reached */
	} while (isp->idle_addr != 0);

	/* advance past the terminating zero */
	isp += 1;

	/*
	 * The following inline assembly routine caches
	 * the rename script and then caches the code that
	 * will do the rename. This is necessary
	 * so that we don't have any memory references during
	 * the reprogramming. We accomplish this by first
	 * jumping through the code to guarantee it's cached
	 * before we actually execute it.
	 */
	drmach_exec_script_il((drmach_rename_script_t *)isp);
}

static void
drmach_copy_rename_end(void)
{
	/*
	 * IMPORTANT:	This function's location MUST be located immediately
	 *		following drmach_copy_rename_prog__relocatable to
	 *		accurately estimate its size.  Note that this assumes
	 *		the compiler keeps these functions in the order in
	 *		which they appear :-o
	 */
}

sbd_error_t *
drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset,
    drmachid_t s_id, struct memlist *c_ml, drmachid_t *pgm_id)
{
	drmach_device_t	*s_mem;
	drmach_device_t	*t_mem;
	struct memlist	*x_ml;
	uint64_t	off_mask, s_copybasepa, t_copybasepa, t_basepa;
	int		len;
	caddr_t		bp, wp;
	pda_handle_t	ph;
	sbd_error_t	*err;
	drmach_copy_rename_program_t *prog;

	if (!DRMACH_IS_MEM_ID(s_id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	if (!DRMACH_IS_MEM_ID(t_id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	s_mem = s_id;
	t_mem = t_id;

	/* get starting physical address of target memory */
	err = drmach_mem_get_base_physaddr(t_id, &t_basepa);
	if (err)
		return (err);

	/* calculate slice offset mask from slice size */
	off_mask = mc_get_mem_alignment() - 1;

	/* calculate source and target base pa */
	s_copybasepa = c_ml->ml_address;
	t_copybasepa =
	    t_basepa + ((c_ml->ml_address & off_mask) - t_slice_offset);

	/* paranoia */
	ASSERT((c_ml->ml_address & off_mask) >= t_slice_offset);

	/* adjust copy memlist addresses to be relative to copy base pa */
	x_ml = c_ml;
	while (x_ml != NULL) {
		x_ml->ml_address -= s_copybasepa;
		x_ml = x_ml->ml_next;
	}

#ifdef DEBUG
	{
		uint64_t s_basepa, s_size, t_size;

		x_ml = c_ml;
		while (x_ml->ml_next != NULL)
			x_ml = x_ml->ml_next;

		DRMACH_PR("source copy span: base pa 0x%lx, end pa 0x%lx\n",
		    s_copybasepa,
		    s_copybasepa + x_ml->ml_address + x_ml->ml_size);

		DRMACH_PR("target copy span: base pa 0x%lx, end pa 0x%lx\n",
		    t_copybasepa,
		    t_copybasepa + x_ml->ml_address + x_ml->ml_size);

		DRMACH_PR("copy memlist (relative to copy base pa):\n");
		MEMLIST_DUMP(c_ml);

		err = drmach_mem_get_base_physaddr(s_id, &s_basepa);
		ASSERT(err == NULL);

		err = drmach_mem_get_size(s_id, &s_size);
		ASSERT(err == NULL);

		err = drmach_mem_get_size(t_id, &t_size);
		ASSERT(err == NULL);

		DRMACH_PR("current source base pa 0x%lx, size 0x%lx\n",
		    s_basepa, s_size);
		DRMACH_PR("current target base pa 0x%lx, size 0x%lx\n",
		    t_basepa, t_size);

		ASSERT(s_copybasepa + x_ml->ml_address + x_ml->ml_size <=
		    s_basepa + s_size);
		ASSERT(t_copybasepa + x_ml->ml_address + x_ml->ml_size <=
		    t_basepa + t_size);
	}
#endif

	ph = drmach_pda_open();
	if (ph == NULL)
		return (DRMACH_INTERNAL_ERROR());

	/*
	 * bp will be page aligned, since we're calling
	 * kmem_zalloc() with an exact multiple of PAGESIZE.
	 */
	wp = bp = kmem_zalloc(PAGESIZE, KM_SLEEP);

	/* allocate space for copy rename struct */
	len = sizeof (drmach_copy_rename_program_t);
	DRMACH_PR("prog = 0x%p, header len %d\n", (void *)wp, len);
	prog = (drmach_copy_rename_program_t *)wp;
	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);

	/*
	 * Copy the code for the copy-rename routine into
	 * a page aligned piece of memory.  We do this to guarantee
	 * that we're executing within the same page and thus reduce
	 * the possibility of cache collisions between different
	 * pages.
	 */
	len = (int)((ulong_t)drmach_copy_rename_end -
	    (ulong_t)drmach_copy_rename_prog__relocatable);
	ASSERT(wp + len < bp + PAGESIZE);
	bcopy((caddr_t)drmach_copy_rename_prog__relocatable, wp, len);

	DRMACH_PR("copy-rename function 0x%p, len %d\n", (void *)wp, len);
	prog->run = (void (*)())wp;
	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);

	/*
	 * Prepare data page that will contain script of
	 * operations to perform during copy-rename.
	 * Allocate temporary buffer to hold script.
	 */
	err = drmach_prep_rename_script(s_mem, t_mem, t_slice_offset,
	    wp, PAGESIZE - (wp - bp));
	if (err) {
		(void) drmach_copy_rename_fini(prog);
		return (err);
	}

	DRMACH_PR("copy-rename script 0x%p, len %d\n", (void *)wp, len);
	prog->data = wp;
	wp += (len + ecache_alignsize - 1) & ~ (ecache_alignsize - 1);

	prog->ph = ph;
	prog->s_copybasepa = s_copybasepa;
	prog->t_copybasepa = t_copybasepa;
	prog->c_ml = c_ml;
	*pgm_id = prog;

	return (NULL);
}

sbd_error_t *
drmach_copy_rename_fini(drmachid_t id)
{
	drmach_copy_rename_program_t	*prog = id;
	sbd_error_t			*err = NULL;

	if (prog->c_ml != NULL)
		memlist_delete(prog->c_ml);

	if (prog->ph != NULL)
		pda_close(prog->ph);

	if (prog->restless_mc != 0) {
		cmn_err(CE_WARN, "MC did not idle; OBP Node 0x%x",
		    (uint_t)drmach_node_get_dnode(prog->restless_mc->node));

		err = DRMACH_INTERNAL_ERROR();
	}

	kmem_free(prog, PAGESIZE);

	return (err);
}

static sbd_error_t *
drmach_io_new(drmach_device_t *dp)
{
	sbd_error_t	*err;
	int		portid;

	err = drmach_device_get_prop(dp, "upa-portid", &portid);
	if (err == NULL) {
		ASSERT(portid & 0x40);
		dp->unum = portid & 1;
	}

	dp->cm.isa = (void *)drmach_io_new;
	dp->cm.release = drmach_io_release;
	dp->cm.status = drmach_io_status;

	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s%d", dp->type,
	    dp->unum);

	return (err);
}

static void
drmach_iopc_op(pda_handle_t ph, drmach_iopc_op_t op)
{
	register int	b;

	for (b = 0; b < MAX_BOARDS; b++) {
		int		p;
		ushort_t	bda_ioc;
		board_desc_t	*bdesc;

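		/* skip board slots the PDA says are unpopulated */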
		if (pda_board_present(ph, b) == 0)
			continue;

		bdesc = (board_desc_t *)pda_get_board_info(ph, b);
		/*
		 * Update PCs for IOCs.
		 */
		bda_ioc = bdesc->bda_ioc;
		for (p = 0; p < MAX_IOCS; p++) {
			u_longlong_t	idle_addr;
			uchar_t		value;

			if (BDA_NBL(bda_ioc, p) != BDAN_GOOD)
				continue;

			idle_addr = STARFIRE_BB_PC_ADDR(b, p, 1);

			switch (op) {
			case DO_PAUSE:
				value = STARFIRE_BB_PC_PAUSE(p);
				break;

			case DO_IDLE:
				value = STARFIRE_BB_PC_IDLE(p);
				break;

			case DO_UNPAUSE:
				value = ldbphysio(idle_addr);
				value &= ~STARFIRE_BB_PC_PAUSE(p);
				break;

			case DO_UNIDLE:
				value = ldbphysio(idle_addr);
				value &= ~STARFIRE_BB_PC_IDLE(p);
				break;

			default:
				cmn_err(CE_PANIC,
				    "drmach_iopc_op: unknown op (%d)",
				    (int)op);
				/*NOTREACHED*/
			}
			stbphysio(idle_addr, value);
		}
	}
}

void
drmach_copy_rename(drmachid_t id)
{
	drmach_copy_rename_program_t	*prog = id;
	uint64_t			neer;

	/*
	 * UPA IDLE
	 * Protocol = PAUSE -> IDLE -> UNPAUSE
	 * In reality since we only "idle" the IOPCs it's sufficient
	 * to just issue the IDLE operation since (in theory) all IOPCs
	 * in the field are PC6.  However, we'll be robust and do the
	 * proper workaround protocol so that we never have to worry!
	 */
	drmach_iopc_op(prog->ph, DO_PAUSE);
	drmach_iopc_op(prog->ph, DO_IDLE);
	DELAY(100);
	drmach_iopc_op(prog->ph, DO_UNPAUSE);
	DELAY(100);

	/* disable CE reporting */
	neer = get_error_enable();
	set_error_enable(neer & ~EER_CEEN);

	/* run the copy/rename program */
	prog->run(prog);

	/* enable CE reporting */
	set_error_enable(neer);

	/*
	 * UPA UNIDLE
	 * Protocol = UNIDLE
	 */
	drmach_iopc_op(prog->ph, DO_UNIDLE);
	DELAY(100);
}

/*
 * The counter-timer and perf-counter nodes are not being cleaned
 * up after a board that was present at start of day is detached.
 * If the board has become unconfigured with this operation, walk
 * the prom tree and find all counter-timer and perf-counter nodes
 * that have the same board number as the board that was just
 * unconfigured and remove them.
 */
static sbd_error_t *
drmach_remove_counter_nodes(drmachid_t id)
{
	int		num;
	char		name[OBP_MAXDRVNAME];
	pnode_t		child;
	dev_info_t	*dip;
	sbd_error_t	*err;
	drmach_status_t	stat;
	drmach_board_t	*bp;

	if (!DRMACH_IS_BOARD_ID(id)) {
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	}

	if ((err = drmach_board_status(id, &stat)) != NULL) {
		return (err);
	}

	/*
	 * Only clean up the counter-timer and perf-counter
	 * nodes when the entire board is unconfigured.
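	 * If any device on the board is still configured, the nodes
	 * are left alone; a later unconfigure pass will come back
	 * through here.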
	 */
	if (stat.configured) {
		return (NULL);
	}

	bp = (drmach_board_t *)id;

	err = NULL;

	for (child = prom_childnode(prom_rootnode()); child != OBP_NONODE;
	    child = prom_nextnode(child)) {

		if (prom_getprop(child, OBP_BOARDNUM, (caddr_t)&num) == -1) {
			continue;
		}

		if (bp->bnum != num) {
			continue;
		}

		if (prom_getprop(child, OBP_NAME, (caddr_t)name) == -1) {
			continue;
		}

		if (strncmp(name, MISC_COUNTER_TIMER_DEVNAME, OBP_MAXDRVNAME) &&
		    strncmp(name, MISC_PERF_COUNTER_DEVNAME, OBP_MAXDRVNAME)) {
			continue;
		}

		/* Root node doesn't have to be held */
		dip = e_ddi_nodeid_to_dip(child);

		/*
		 * If the node is only in the OBP tree, then
		 * we don't have to remove it.
		 */
		if (dip) {
			dev_info_t *fdip = NULL;

			DRMACH_PR("removing %s devinfo node\n", name);

			e_ddi_branch_hold(dip);
			ddi_release_devi(dip);	/* held in e_ddi_nodeid_to_dip */

			if (e_ddi_branch_destroy(dip, &fdip, 0)) {
				char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

				/*
				 * If non-NULL, fdip is held and must be
				 * released.
				 */
				if (fdip != NULL) {
					(void) ddi_pathname(fdip, path);
					ddi_release_devi(fdip);
				} else {
					(void) ddi_pathname(dip, path);
				}

				err = drerr_new(1, ESTF_DRVFAIL, path);
				kmem_free(path, MAXPATHLEN);
				e_ddi_branch_rele(dip);
				break;
			}
		}
	}

	return (err);
}

/*ARGSUSED*/
sbd_error_t *
drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts)
{
	/* allow status and ncm operations to always succeed */
	if ((cmd == SBD_CMD_STATUS) || (cmd == SBD_CMD_GETNCM)) {
		return (NULL);
	}

	/* check all other commands for the required option string */
	if ((opts->size > 0) && (opts->copts != NULL)) {

		DRMACH_PR("platform options: %s\n", opts->copts);

		if (strstr(opts->copts, "xfdr") != NULL) {
			return (NULL);
		}
	}

	return (drerr_new(0, ESTF_SUPPORT, NULL));
}

/*ARGSUSED*/
sbd_error_t *
drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts)
{
	sbd_error_t	*err = NULL;

	switch (cmd) {
	case SBD_CMD_UNCONFIGURE:

		err = drmach_remove_counter_nodes(id);
		break;

	case SBD_CMD_CONFIGURE:
	case SBD_CMD_DISCONNECT:
	case SBD_CMD_CONNECT:
	case SBD_CMD_GETNCM:
	case SBD_CMD_STATUS:
		break;

	default:
		break;
	}

	return (err);
}

sbd_error_t *
drmach_board_assign(int bnum, drmachid_t *id)
{
	sbd_error_t	*err;

	if (!drmach_initialized && drmach_init() == -1) {
		err = DRMACH_INTERNAL_ERROR();
	} else if (drmach_array_get(drmach_boards, bnum, id) == -1) {
		err = drerr_new(1, ESTF_BNUM, "%d", bnum);
	} else if (*id != NULL) {
		err = NULL;
	} else {
		drmach_board_t	*bp;

		*id = (drmachid_t)drmach_board_new(bnum);
		bp = *id;
		bp->assigned = 1;
		err = NULL;
	}

	return (err);
}

static int
drmach_attach_board(void *arg)
{
	drmach_board_t	*obj = (drmach_board_t *)arg;
	cpuset_t	cset;
	int		retval;

	/*
	 * OBP disables traps during the board probe.
	 * So, in order to prevent cross-call/cross-trap timeouts,
	 * and thus panics, we effectively block anybody from
	 * issuing xc's/xt's by doing a promsafe_xc_attention.
	 * In the previous version of Starfire DR (2.6), a timeout
	 * suspension mechanism was implemented in the send-mondo
	 * assembly.  That mechanism is unnecessary with the
	 * existence of xc_attention/xc_dismissed.
	 */
	cset = cpu_ready_set;
	promsafe_xc_attention(cset);

	retval = prom_starfire_add_brd(obj->connect_cpuid);

	xc_dismissed(cset);

	return (retval);
}

sbd_error_t *
drmach_board_connect(drmachid_t id, drmach_opts_t *opts)
{
	drmach_board_t	*obj = (drmach_board_t *)id;
	int		retval;
	sbd_error_t	*err;
	char		*cptr, *copts = NULL;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	if (opts->size > 0)
		copts = opts->copts;

	if (copts != NULL && (cptr = strstr(copts, "cpuid=")) != NULL) {
		int cpuid;

		cptr += strlen("cpuid=");
		cpuid = stoi(&cptr);

		if (DRMACH_CPUID2BNUM(cpuid) == obj->bnum) {
			obj->connect_cpuid = cpuid;
			obj->assigned = 1;
		} else
			return (drerr_new(1, ESTF_SETCPUVAL, "%d", cpuid));
	} else {
		/* cpuid was not specified */
		obj->connect_cpuid = -1;
	}

	if (obj->connect_cpuid == -1) {
		err = drerr_new(1, ESTF_NOCPUID, obj->cm.name);
		return (err);
	}

	cmn_err(CE_CONT, "DRMACH: PROM attach %s CPU %d\n",
	    obj->cm.name, obj->connect_cpuid);

	retval = prom_tree_update(drmach_attach_board, obj);

	if (retval == 0)
		err = NULL;
	else {
		cmn_err(CE_WARN, "prom error: prom_starfire_add_brd(%d) "
		    "returned %d", obj->connect_cpuid, retval);

		err = drerr_new(1, ESTF_PROBE, obj->cm.name);
	}

	obj->connect_cpuid = -1;

	return (err);
}

/*ARGSUSED*/
sbd_error_t *
drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts)
{
	drmach_board_t	*bp;
	int		rv;
	int		d_idx;	/* device index */
	drmachid_t	d_id;	/* device ID */
	sbd_error_t	*err;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	bp = id;

	/*
	 * We need to make sure all of the board's device nodes
	 * have been removed from the Solaris device tree before
	 * continuing with the disconnect. Otherwise, we could
	 * disconnect the board and remove the OBP device tree
	 * nodes with Solaris device tree nodes remaining.
	 *
	 * On Starfire, Solaris device tree nodes are deleted
	 * during unconfigure by drmach_unconfigure(). It's
	 * necessary to do this here because drmach_unconfigure()
	 * failures are not handled during unconfigure.
	 */
	if (bp->devices) {
		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			err = drmach_unconfigure(d_id, DRMACH_DEVI_REMOVE);
			if (err)
				return (err);

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	/*
	 * Starfire board Solaris device tree counter nodes,
	 * which are only present on start-of-day boards, are
	 * removed in the dr_post_op() code flow after the
	 * board is unconfigured.
	 * We call the counter node removal function here because
	 * unconfigure errors can cause the dr_post_op() function
	 * to be skipped after an unconfigure operation even though
	 * all of the board's devices have been transitioned to the
	 * unconfigured state.
	 */
	err = drmach_remove_counter_nodes(id);
	if (err)
		return (err);

	return (NULL);
}

static int
drmach_board_find_devices_cb(drmach_node_walk_args_t *args)
{
	drmach_node_t		*node = args->node;
	drmach_board_cb_data_t	*data = args->data;
	drmach_board_t		*obj = data->obj;

	int		rv;
	int		bnum;
	drmach_device_t	*device;

	rv = drmach_node_get_prop(node, OBP_BOARDNUM, &bnum);
	if (rv) {
		/*
		 * if the node does not have a board# property, then
		 * by that information alone it is known that drmach
		 * is not interested in it.
		 */
		return (0);
	} else if (bnum != obj->bnum)
		return (0);

	/*
	 * Create a device data structure from this node data.
	 * The call may yield nothing if the node is not of interest
	 * to drmach.
	 */
	data->err = drmach_device_new(node, obj, &device);
	if (data->err)
		return (-1);
	else if (device == NULL) {
		/*
		 * drmach_device_new examined the node we passed in
		 * and determined that it was one not of interest to
		 * drmach.  So, it is skipped.
		 */
		return (0);
	}

	rv = drmach_array_set(obj->devices, data->ndevs++, device);
	if (rv) {
		drmach_device_dispose(device);
		data->err = DRMACH_INTERNAL_ERROR();
		return (-1);
	}

	data->err = (*data->found)(data->a, device->type, device->unum, device);
	return (data->err == NULL ? 0 : -1);
}

sbd_error_t *
drmach_board_find_devices(drmachid_t id, void *a,
    sbd_error_t *(*found)(void *a, const char *, int, drmachid_t))
{
	extern int	plat_max_cpu_units_per_board();
	extern int	plat_max_mem_units_per_board();
	extern int	plat_max_io_units_per_board();

	drmach_board_t		*obj = (drmach_board_t *)id;
	sbd_error_t		*err;
	int			max_devices;
	int			rv;
	drmach_board_cb_data_t	data;

	max_devices = plat_max_cpu_units_per_board();
	max_devices += plat_max_mem_units_per_board();
	max_devices += plat_max_io_units_per_board();

	obj->devices = drmach_array_new(0, max_devices);

	data.obj = obj;
	data.ndevs = 0;
	data.found = found;
	data.a = a;
	data.err = NULL;

	rv = drmach_node_walk(obj->tree, &data, drmach_board_find_devices_cb);
	if (rv == 0)
		err = NULL;
	else {
		drmach_array_dispose(obj->devices, drmach_device_dispose);
		obj->devices = NULL;

		if (data.err)
			err = data.err;
		else
			err = DRMACH_INTERNAL_ERROR();
	}

	return (err);
}

int
drmach_board_lookup(int bnum, drmachid_t *id)
{
	int	rv = 0;

	if (!drmach_initialized && drmach_init() == -1) {
		*id = 0;
		rv = -1;
	} else if (drmach_array_get(drmach_boards, bnum, id)) {
		*id = 0;
		rv = -1;
	}
	return (rv);
}

sbd_error_t *
drmach_board_name(int bnum, char *buf, int buflen)
{
	(void) snprintf(buf, buflen, "SB%d", bnum);
	return (NULL);
}

sbd_error_t *
drmach_board_poweroff(drmachid_t id)
{
	drmach_board_t	*bp;
	sbd_error_t	*err;
	drmach_status_t	stat;

	if (!DRMACH_IS_BOARD_ID(id))
int
drmach_board_lookup(int bnum, drmachid_t *id)
{
	int	rv = 0;

	if (!drmach_initialized && drmach_init() == -1) {
		*id = 0;
		rv = -1;
	} else if (drmach_array_get(drmach_boards, bnum, id)) {
		*id = 0;
		rv = -1;
	}
	return (rv);
}

sbd_error_t *
drmach_board_name(int bnum, char *buf, int buflen)
{
	(void) snprintf(buf, buflen, "SB%d", bnum);
	return (NULL);
}

sbd_error_t *
drmach_board_poweroff(drmachid_t id)
{
	drmach_board_t	*bp;
	sbd_error_t	*err;
	drmach_status_t	 stat;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;

	err = drmach_board_status(id, &stat);
	if (err)
		return (err);
	else if (stat.configured || stat.busy)
		return (drerr_new(0, ESTF_CONFIGBUSY, bp->cm.name));
	else {
		/* board power off is essentially a noop for Starfire */
		bp->powered = 0;
		return (NULL);
	}
	/*NOTREACHED*/
}

sbd_error_t *
drmach_board_poweron(drmachid_t id)
{
	drmach_board_t	*bp;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;

	/* board power on is essentially a noop for Starfire */
	bp->powered = 1;

	return (NULL);
}

static sbd_error_t *
drmach_board_release(drmachid_t id)
{
	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	return (NULL);
}

/*ARGSUSED*/
sbd_error_t *
drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force)
{
	return (NULL);
}

sbd_error_t *
drmach_board_unassign(drmachid_t id)
{
	drmach_board_t	*bp;
	sbd_error_t	*err;
	drmach_status_t	 stat;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;

	err = drmach_board_status(id, &stat);
	if (err)
		return (err);
	else if (stat.configured || stat.busy)
		return (drerr_new(0, ESTF_CONFIGBUSY, bp->cm.name));
	else if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0)
		return (DRMACH_INTERNAL_ERROR());
	else {
		drmach_board_dispose(bp);
		return (NULL);
	}
	/*NOTREACHED*/
}

static sbd_error_t *
drmach_cpu_new(drmach_device_t *dp)
{
	sbd_error_t	*err;
	int		 portid;

	err = drmach_device_get_prop(dp, "upa-portid", &portid);
	if (err == NULL)
		dp->unum = portid & 3;

	dp->cm.isa = (void *)drmach_cpu_new;
	dp->cm.release = drmach_cpu_release;
	dp->cm.status = drmach_cpu_status;

	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s%d", dp->type,
	    dp->unum);

	return (err);
}
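/*
 * For reference, a sketch of the unit/cpuid arithmetic used by
 * drmach_cpu_new() above and drmach_cpu_calc_id() below. The low two
 * bits of the UPA port id select the processor slot on its board:
 *
 *	unum  = portid & 3;				portid 0x12 -> unum 2
 *	cpuid = bnum * MAX_CPU_UNITS_PER_BOARD + unum;
 *
 * so cpu ids are dense per board, which is what DRMACH_CPUID2BNUM
 * relies on when it recovers a board number from a cpuid.
 */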
/*
 * drmach_cpu_obp_detach()
 *  This requires two steps: first, we must put the cpuid into the OBP
 *  idle loop (Idle in Program) state.  Then we call OBP to place the CPU
 *  into the "Detached" state, which does any special processing to
 *  actually detach the cpu, such as flushing ecache, and also ensures
 *  that a subsequent breakpoint won't restart the cpu (if it was just in
 *  Idle in Program state).
 */
static void
drmach_cpu_obp_detach(int cpuid)
{
	/*
	 * Cpu may not be under OBP's control. Eg, if cpu exited to download
	 * helper on a prior attach.
	 */
	if (CPU_SGN_EXISTS(cpuid) &&
	    !SGN_CPU_IS_OS(cpuid) &&
	    !SGN_CPU_IS_OBP(cpuid)) {
		cmn_err(CE_WARN,
		    "unexpected signature (0x%x) for cpu %d",
		    get_cpu_sgn(cpuid), cpuid);
	}

	/*
	 * Now we place the CPU into the "Detached" idle loop in OBP.
	 * This is so that the CPU won't be restarted if we break into
	 * OBP with a breakpoint or BREAK key from the console, and also
	 * if we need to do any special processing, such as flushing the
	 * cpu's ecache, disabling interrupts (by turning off the ET bit in
	 * the PSR) and/or spinning in BBSRAM rather than global memory.
	 */
	DRMACH_PR("prom_starfire_rm_cpu(%d)\n", cpuid);
	prom_starfire_rm_cpu(cpuid);
}

/*
 * drmach_cpu_obp_is_detached() returns TRUE if the cpu sigblock signature
 * state is SIGBST_DETACHED; otherwise it returns FALSE. This routine should
 * only be called after we have asked OBP to detach the CPU. It should NOT
 * be called as a check during any other flow.
 */
static int
drmach_cpu_obp_is_detached(int cpuid)
{
	if (!CPU_SGN_EXISTS(cpuid) ||
	    (SGN_CPU_IS_OS(cpuid) && SGN_CPU_STATE_IS_DETACHED(cpuid)))
		return (1);
	else
		return (0);
}

static int
drmach_cpu_start(struct cpu *cp)
{
	int		cpuid = cp->cpu_id;
	int		ntries = drmach_cpu_ntries;
	extern void	restart_other_cpu(int);

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0);

	cp->cpu_flags &= ~CPU_POWEROFF;

	/*
	 * NOTE: restart_other_cpu pauses cpus during the
	 *	slave cpu start.  This helps to quiesce the
	 *	bus traffic a bit which makes the tick sync
	 *	routine in the prom more robust.
	 */
	DRMACH_PR("COLD START for cpu (%d)\n", cpuid);

	prom_starfire_add_cpu(cpuid);

	restart_other_cpu(cpuid);

	/*
	 * Wait for the cpu to reach its idle thread before
	 * we zap it with a request to blow away the mappings
	 * it (might) have for the drmach_shutdown_asm code
	 * it may have executed on unconfigure.
	 */
	while ((cp->cpu_thread != cp->cpu_idle_thread) && (ntries > 0)) {
		DELAY(drmach_cpu_delay);
		ntries--;
	}

	DRMACH_PR("waited %d out of %d loops for cpu %d\n",
	    drmach_cpu_ntries - ntries, drmach_cpu_ntries, cpuid);

	xt_one(cpuid, vtag_flushpage_tl1,
	    (uint64_t)drmach_shutdown_va, (uint64_t)ksfmmup);

	return (0);
}
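/*
 * Timing note for the wait loop above: the worst case is
 * drmach_cpu_ntries iterations of DELAY(drmach_cpu_delay), i.e.
 * roughly ntries * delay microseconds (on the order of seconds with
 * the defaults declared near the top of this file). Both tunables
 * are plain ints, so they can be patched at runtime if a platform
 * needs a longer grace period.
 */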
/*
 * A detaching CPU is xcalled with an xtrap to drmach_cpu_stop_self() after
 * it has been offlined. The function of this routine is to get the cpu
 * spinning in a safe place. The requirement is that the system will not
 * reference anything on the detaching board (memory and i/o is detached
 * elsewhere) and that the CPU not reference anything on any other board
 * in the system. This isolation is required during and after the writes
 * to the domain masks to remove the board from the domain.
 *
 * To accomplish this isolation the following is done:
 *	1) Create a locked mapping to a location in BBSRAM where
 *	   the cpu will execute.
 *	2) Copy the target function (drmach_shutdown_asm) in which
 *	   the cpu will execute into BBSRAM.
 *	3) Jump into function with BBSRAM.
 *	   Function will:
 *	   3.1) Flush its Ecache (displacement).
 *	   3.2) Flush its Dcache with HW mechanism.
 *	   3.3) Flush its Icache with HW mechanism.
 *	   3.4) Flush all valid and _unlocked_ D-TLB entries.
 *	   3.5) Flush all valid and _unlocked_ I-TLB entries.
 *	   3.6) Clear xt_mb to signal completion. Note: cache line is
 *	        recovered by drmach_cpu_poweroff().
 *	4) Jump into a tight loop.
 */
#define	DRMACH_BBSRAM_OFFSET	0x1000

static void
drmach_cpu_stop_self(void)
{
	int		cpuid = (int)CPU->cpu_id;
	tte_t		tte;
	volatile uint_t	*src, *dst;
	size_t		funclen;
	uint64_t	bbsram_pa, bbsram_offset;
	uint_t		bbsram_pfn;
	uint64_t	bbsram_addr;
	void		(*bbsram_func)(uint64_t);
	extern void	drmach_shutdown_asm(uint64_t);
	extern void	drmach_shutdown_asm_end(void);

	funclen = (uintptr_t)drmach_shutdown_asm_end -
	    (uintptr_t)drmach_shutdown_asm;
	ASSERT(funclen <= MMU_PAGESIZE);
	/*
	 * We'll start from the 0th's base.
	 */
	bbsram_pa = STARFIRE_UPAID2UPS(cpuid) | STARFIRE_PSI_BASE;
	bbsram_offset = bbsram_pa | 0xfe0ULL;
	bbsram_pa += ldphysio(bbsram_offset) + DRMACH_BBSRAM_OFFSET;

	bbsram_pfn = (uint_t)(bbsram_pa >> MMU_PAGESHIFT);

	bbsram_addr = (uint64_t)drmach_shutdown_va;
	drmach_shutdown_asm_mbox->estack = bbsram_addr + funclen;

	tte.tte_inthi = TTE_VALID_INT | TTE_SZ_INT(TTE8K) |
	    TTE_PFN_INTHI(bbsram_pfn);
	tte.tte_intlo = TTE_PFN_INTLO(bbsram_pfn) |
	    TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT;
	sfmmu_dtlb_ld_kva(drmach_shutdown_va, &tte);	/* load dtlb */
	sfmmu_itlb_ld_kva(drmach_shutdown_va, &tte);	/* load itlb */

	for (src = (uint_t *)drmach_shutdown_asm, dst = (uint_t *)bbsram_addr;
	    src < (uint_t *)drmach_shutdown_asm_end; src++, dst++)
		*dst = *src;

	bbsram_func = (void (*)())bbsram_addr;
	drmach_shutdown_asm_mbox->flushaddr = ecache_flushaddr;
	drmach_shutdown_asm_mbox->size = (cpunodes[cpuid].ecache_size << 1);
	drmach_shutdown_asm_mbox->linesize = cpunodes[cpuid].ecache_linesize;
	drmach_shutdown_asm_mbox->physaddr =
	    va_to_pa((void *)&drmach_xt_mb[cpuid]);

	/*
	 * Signal to drmach_cpu_poweroff() is via drmach_xt_mb cleared
	 * by asm code
	 */

	(*bbsram_func)(va_to_pa((void *)drmach_shutdown_asm_mbox));
}
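/*
 * Summary of the mailbox contract set up above (a sketch inferred from
 * the assignments in drmach_cpu_stop_self(); see drmach_shutdown_asm
 * for the authoritative consumer):
 *
 *	estack		top of a scratch stack in BBSRAM, placed just
 *			past the copied text
 *	flushaddr	base address used to displacement-flush the E$
 *	size		twice ecache_size, so the flush walks enough
 *			lines to displace the whole cache
 *	linesize	E$ line size used as the flush stride
 *	physaddr	PA of this cpu's drmach_xt_mb byte, which the
 *			asm clears to signal drmach_cpu_poweroff()
 */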
static void
drmach_cpu_shutdown_self(void)
{
	cpu_t		*cp = CPU;
	int		 cpuid = cp->cpu_id;
	extern void	 flush_windows(void);

	flush_windows();

	(void) spl8();

	ASSERT(cp->cpu_intr_actv == 0);
	ASSERT(cp->cpu_thread == cp->cpu_idle_thread ||
	    cp->cpu_thread == cp->cpu_startup_thread);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;

	drmach_cpu_stop_self();

	cmn_err(CE_PANIC, "CPU %d FAILED TO SHUTDOWN", cpuid);
}

/* a helper routine to keep the math in one place */
static processorid_t
drmach_cpu_calc_id(drmach_device_t *dp)
{
	return (dp->bp->bnum * MAX_CPU_UNITS_PER_BOARD + dp->unum);
}

/*
 * Move bootproc (SIGBCPU) to another cpu. If dst_cpu is NULL, a
 * destination cpu is chosen from the set of cpus not located on the
 * same board as the current bootproc cpu.
 */
static sbd_error_t *
drmach_cpu_juggle_bootproc(drmach_device_t *dst_cpu)
{
	processorid_t	 cpuid;
	struct cpu	*cp;
	sbd_error_t	*err;
	int		 rv;

	ASSERT(MUTEX_HELD(&cpu_lock));

	/* dst_cpu is NULL when target cpu is unspecified. So, pick one. */
	if (dst_cpu == NULL) {
		int avoid_board = DRMACH_CPUID2BNUM(SIGBCPU->cpu_id);
		int max_cpuid = MAX_BOARDS * MAX_CPU_UNITS_PER_BOARD;

		for (cpuid = 0; cpuid < max_cpuid; cpuid++)
			if (DRMACH_CPUID2BNUM(cpuid) != avoid_board) {
				cp = cpu_get(cpuid);
				if (cp != NULL && cpu_is_online(cp))
					break;
			}

		if (cpuid == max_cpuid) {
			err = drerr_new(1, ESTF_JUGGLE, NULL);
			return (err);
		}

		/* else, cp points to the selected target cpu */
	} else {
		cpuid = drmach_cpu_calc_id(dst_cpu);

		if ((cp = cpu_get(cpuid)) == NULL) {
			err = drerr_new(1, ESTF_NODEV, "%s::%s",
			    dst_cpu->bp->cm.name, dst_cpu->cm.name);
			return (err);
		}

		if (cpuid == SIGBCPU->cpu_id) {
			cmn_err(CE_WARN,
			    "SIGBCPU(%d) same as new selection(%d)",
			    SIGBCPU->cpu_id, cpuid);

			/* technically not an error, but a no-op */
			return (NULL);
		}
	}

	cmn_err(CE_NOTE, "?relocating SIGBCPU from %d to %d",
	    SIGBCPU->cpu_id, cpuid);

	DRMACH_PR("moving SIGBCPU to CPU %d\n", cpuid);

	/*
	 * Tell OBP to initialize cvc-offset field of new CPU0
	 * so that it's in sync with OBP and cvc_server
	 */
	prom_starfire_init_console(cpuid);

	/*
	 * Assign cvc to new cpu0's bbsram for I/O.  This has to be
	 * done BEFORE cpu0 is moved via obp, since this logic
	 * will cause obp_helper to switch to a different bbsram for
	 * cvc I/O.  We don't want cvc writing to a buffer from which
	 * nobody will pick up the data!
	 */
	cvc_assign_iocpu(cpuid);

	rv = prom_starfire_move_cpu0(cpuid);

	if (rv == 0) {
		SIGBCPU = cp;

		DRMACH_PR("successfully juggled to CPU %d\n", cpuid);
		return (NULL);
	} else {
		DRMACH_PR("prom error: prom_starfire_move_cpu0(%d) "
		    "returned %d\n", cpuid, rv);

		/*
		 * The move failed, hopefully obp_helper is still back
		 * at the old bootproc. Move cvc back there.
		 */
		cvc_assign_iocpu(SIGBCPU->cpu_id);

		err = drerr_new(1, ESTF_MOVESIGB, "CPU %d", cpuid);
		return (err);
	}
	/*NOTREACHED*/
}
static sbd_error_t *
drmach_cpu_release(drmachid_t id)
{
	drmach_device_t	*dp;
	processorid_t	 cpuid;
	struct cpu	*cp;
	sbd_error_t	*err;

	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;
	cpuid = drmach_cpu_calc_id(dp);

	ASSERT(MUTEX_HELD(&cpu_lock));

	cp = cpu_get(cpuid);
	if (cp == NULL)
		err = DRMACH_INTERNAL_ERROR();
	else if (SIGBCPU->cpu_id == cp->cpu_id)
		err = drmach_cpu_juggle_bootproc(NULL);
	else
		err = NULL;

	return (err);
}

static sbd_error_t *
drmach_cpu_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_device_t	*dp;

	ASSERT(DRMACH_IS_CPU_ID(id));
	dp = id;

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	mutex_enter(&cpu_lock);
	stat->configured = (cpu_get(drmach_cpu_calc_id(dp)) != NULL);
	mutex_exit(&cpu_lock);
	stat->busy = dp->busy;
	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
2757 */ 2758 p = cpu->unum & 1; 2759 pc_addr = STARFIRE_BB_PC_ADDR(cpu->bp->bnum, cpu->unum, 0); 2760 2761 DRMACH_PR("PC idle cpu %d (addr = 0x%llx, port = %d, p = %d)", 2762 drmach_cpu_calc_id(cpu), pc_addr, cpu->unum, p); 2763 2764 rvalue = ldbphysio(pc_addr); 2765 rvalue |= STARFIRE_BB_PC_IDLE(p); 2766 stbphysio(pc_addr, rvalue); 2767 DELAY(50000); 2768 2769 return (NULL); 2770 } 2771 2772 sbd_error_t * 2773 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid) 2774 { 2775 drmach_device_t *cpu; 2776 2777 if (!DRMACH_IS_CPU_ID(id)) 2778 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2779 cpu = id; 2780 2781 *cpuid = drmach_cpu_calc_id(cpu); 2782 return (NULL); 2783 } 2784 2785 sbd_error_t * 2786 drmach_cpu_get_impl(drmachid_t id, int *ip) 2787 { 2788 drmach_device_t *cpu; 2789 int impl; 2790 2791 if (!DRMACH_IS_CPU_ID(id)) 2792 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2793 2794 cpu = id; 2795 2796 if (drmach_node_get_prop(cpu->node, "implementation#", &impl) == -1) { 2797 return (DRMACH_INTERNAL_ERROR()); 2798 } 2799 2800 *ip = impl; 2801 2802 return (NULL); 2803 } 2804 2805 void 2806 drmach_cpu_flush_ecache_sync(void) 2807 { 2808 ASSERT(curthread->t_bound_cpu == CPU); 2809 2810 /* 2811 * Now let's flush our ecache thereby removing all references 2812 * to the target (detaching) memory from all ecache's in 2813 * system. 2814 */ 2815 cpu_flush_ecache(); 2816 2817 /* 2818 * Delay 100 usec out of paranoia to insure everything 2819 * (hardware queues) has drained before we start reprogramming 2820 * the hardware. 2821 */ 2822 DELAY(100); 2823 } 2824 2825 sbd_error_t * 2826 drmach_get_dip(drmachid_t id, dev_info_t **dip) 2827 { 2828 drmach_device_t *dp; 2829 2830 if (!DRMACH_IS_DEVICE_ID(id)) 2831 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2832 dp = id; 2833 2834 *dip = drmach_node_get_dip(dp->node); 2835 return (NULL); 2836 } 2837 2838 sbd_error_t * 2839 drmach_io_is_attached(drmachid_t id, int *yes) 2840 { 2841 drmach_device_t *dp; 2842 dev_info_t *dip; 2843 int state; 2844 2845 if (!DRMACH_IS_IO_ID(id)) 2846 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2847 dp = id; 2848 2849 dip = drmach_node_get_dip(dp->node); 2850 if (dip == NULL) { 2851 *yes = 0; 2852 return (NULL); 2853 } 2854 2855 state = ddi_get_devstate(dip); 2856 *yes = (i_ddi_devi_attached(dip) || (state == DDI_DEVSTATE_UP)); 2857 2858 return (NULL); 2859 } 2860 2861 sbd_error_t * 2862 drmach_io_pre_release(drmachid_t id) 2863 { 2864 if (!DRMACH_IS_IO_ID(id)) 2865 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2866 return (NULL); 2867 } 2868 2869 static sbd_error_t * 2870 drmach_io_release(drmachid_t id) 2871 { 2872 if (!DRMACH_IS_IO_ID(id)) 2873 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2874 return (NULL); 2875 } 2876 2877 sbd_error_t * 2878 drmach_io_unrelease(drmachid_t id) 2879 { 2880 if (!DRMACH_IS_IO_ID(id)) 2881 return (drerr_new(0, ESTF_INAPPROP, NULL)); 2882 return (NULL); 2883 } 2884 2885 /*ARGSUSED*/ 2886 sbd_error_t * 2887 drmach_io_post_release(drmachid_t id) 2888 { 2889 return (NULL); 2890 } 2891 2892 /*ARGSUSED*/ 2893 sbd_error_t * 2894 drmach_io_post_attach(drmachid_t id) 2895 { 2896 return (NULL); 2897 } 2898 2899 static sbd_error_t * 2900 drmach_io_status(drmachid_t id, drmach_status_t *stat) 2901 { 2902 drmach_device_t *dp; 2903 sbd_error_t *err; 2904 int configured; 2905 2906 ASSERT(DRMACH_IS_IO_ID(id)); 2907 dp = id; 2908 2909 err = drmach_io_is_attached(id, &configured); 2910 if (err) 2911 return (err); 2912 2913 stat->assigned = dp->bp->assigned; 2914 stat->powered = dp->bp->powered; 2915 stat->configured 
sbd_error_t *
drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid)
{
	drmach_device_t	*cpu;

	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	cpu = id;

	*cpuid = drmach_cpu_calc_id(cpu);
	return (NULL);
}

sbd_error_t *
drmach_cpu_get_impl(drmachid_t id, int *ip)
{
	drmach_device_t	*cpu;
	int		 impl;

	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	cpu = id;

	if (drmach_node_get_prop(cpu->node, "implementation#", &impl) == -1) {
		return (DRMACH_INTERNAL_ERROR());
	}

	*ip = impl;

	return (NULL);
}

void
drmach_cpu_flush_ecache_sync(void)
{
	ASSERT(curthread->t_bound_cpu == CPU);

	/*
	 * Now let's flush our ecache thereby removing all references
	 * to the target (detaching) memory from all ecache's in
	 * system.
	 */
	cpu_flush_ecache();

	/*
	 * Delay 100 usec out of paranoia to ensure everything
	 * (hardware queues) has drained before we start reprogramming
	 * the hardware.
	 */
	DELAY(100);
}

sbd_error_t *
drmach_get_dip(drmachid_t id, dev_info_t **dip)
{
	drmach_device_t	*dp;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	*dip = drmach_node_get_dip(dp->node);
	return (NULL);
}

sbd_error_t *
drmach_io_is_attached(drmachid_t id, int *yes)
{
	drmach_device_t	*dp;
	dev_info_t	*dip;
	int		 state;

	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	dip = drmach_node_get_dip(dp->node);
	if (dip == NULL) {
		*yes = 0;
		return (NULL);
	}

	state = ddi_get_devstate(dip);
	*yes = (i_ddi_devi_attached(dip) || (state == DDI_DEVSTATE_UP));

	return (NULL);
}

sbd_error_t *
drmach_io_pre_release(drmachid_t id)
{
	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	return (NULL);
}

static sbd_error_t *
drmach_io_release(drmachid_t id)
{
	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	return (NULL);
}

sbd_error_t *
drmach_io_unrelease(drmachid_t id)
{
	if (!DRMACH_IS_IO_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	return (NULL);
}

/*ARGSUSED*/
sbd_error_t *
drmach_io_post_release(drmachid_t id)
{
	return (NULL);
}

/*ARGSUSED*/
sbd_error_t *
drmach_io_post_attach(drmachid_t id)
{
	return (NULL);
}

static sbd_error_t *
drmach_io_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_device_t	*dp;
	sbd_error_t	*err;
	int		 configured;

	ASSERT(DRMACH_IS_IO_ID(id));
	dp = id;

	err = drmach_io_is_attached(id, &configured);
	if (err)
		return (err);

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	stat->configured = (configured != 0);
	stat->busy = dp->busy;
	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}

static sbd_error_t *
drmach_mem_new(drmach_device_t *dp)
{
	dp->unum = 0;
	dp->cm.isa = (void *)drmach_mem_new;
	dp->cm.release = drmach_mem_release;
	dp->cm.status = drmach_mem_status;

	(void) snprintf(dp->cm.name, sizeof (dp->cm.name), "%s", dp->type);

	return (NULL);
}

sbd_error_t *
drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size)
{
	pfn_t		basepfn = (pfn_t)(basepa >> PAGESHIFT);
	pgcnt_t		npages = (pgcnt_t)(size >> PAGESHIFT);
	pda_handle_t	ph;
	int		rv;

	ASSERT(size != 0);

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	rv = kcage_range_add(basepfn, npages, KCAGE_DOWN);
	if (rv == ENOMEM) {
		cmn_err(CE_WARN, "%lu megabytes not available to kernel cage",
		    (ulong_t)(size == 0 ? 0 : size / MBYTE));
	} else if (rv != 0) {
		/* catch this in debug kernels */
		ASSERT(0);

		cmn_err(CE_WARN, "unexpected kcage_range_add"
		    " return value %d", rv);
	}

	/*
	 * Update the PDA (post2obp) structure with the
	 * range of the newly added memory.
	 */
	ph = drmach_pda_open();
	if (ph != NULL) {
		pda_mem_add_span(ph, basepa, size);
		pda_close(ph);
	}

	return (NULL);
}
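/*
 * Units note for the span routines above and below: callers pass byte
 * quantities, while kcage and the page layer work in pages, hence the
 * PAGESHIFT conversions. A minimal worked example, assuming the 8K
 * pages (PAGESHIFT == 13) used on this platform:
 *
 *	basepa = 0x100000000 (4GB)   ->  basepfn = 0x80000
 *	size   = 0x10000000  (256MB) ->  npages  = 0x8000
 */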
sbd_error_t *
drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size)
{
	drmach_device_t	*mem = id;
	pfn_t		 basepfn = (pfn_t)(basepa >> PAGESHIFT);
	pgcnt_t		 npages = (pgcnt_t)(size >> PAGESHIFT);
	uint_t		 mcreg;
	sbd_error_t	*err;
	pda_handle_t	 ph;
	int		 rv;

	err = drmach_read_mc_asr(id, &mcreg);
	if (err)
		return (err);
	else if (mcreg & STARFIRE_MC_INTERLEAVE_MASK) {
		return (drerr_new(1, ESTF_INTERBOARD, "%s::%s",
		    mem->bp->cm.name, mem->cm.name));
	}

	if (size > 0) {
		rv = kcage_range_delete_post_mem_del(basepfn, npages);
		if (rv != 0) {
			cmn_err(CE_WARN,
			    "unexpected kcage_range_delete_post_mem_del"
			    " return value %d", rv);
			return (DRMACH_INTERNAL_ERROR());
		}
	}

	/*
	 * Update the PDA (post2obp) structure with the
	 * range of removed memory.
	 */
	ph = drmach_pda_open();
	if (ph != NULL) {
		if (size > 0)
			pda_mem_del_span(ph, basepa, size);

		/* update PDA to board's new mc register settings */
		pda_mem_sync(ph, mem->bp->bnum, 0);

		pda_close(ph);
	}

	return (NULL);
}

/* support routine for enable and disable */
static sbd_error_t *
drmach_mem_update_interconnect(drmachid_t id, uint_t mcreg)
{
	drmach_device_t	*dp;
	pda_handle_t	 ph;
	int		 b;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	dp = id;

	ph = drmach_pda_open();
	if (ph == NULL)
		return (DRMACH_INTERNAL_ERROR());

	for (b = 0; b < MAX_BOARDS; b++) {
		int		 p;
		int		 rv;
		ushort_t	 bda_proc, bda_ioc;
		board_desc_t	*bdesc;

		if (pda_board_present(ph, b) == 0)
			continue;

		bdesc = (board_desc_t *)pda_get_board_info(ph, b);

		/*
		 * Update PCs for CPUs.
		 */

		/* make sure definition in platmod is in sync with pda */
		ASSERT(MAX_PROCMODS == MAX_CPU_UNITS_PER_BOARD);

		bda_proc = bdesc->bda_proc;
		for (p = 0; p < MAX_PROCMODS; p++) {
			if (BDA_NBL(bda_proc, p) != BDAN_GOOD)
				continue;

			rv = pc_madr_add(b, dp->bp->bnum, p, mcreg);
			if (rv) {
				pda_close(ph);
				return (DRMACH_INTERNAL_ERROR());
			}
		}

		/*
		 * Update PCs for IOCs.
		 */

		/* make sure definition in platmod is in sync with pda */
		ASSERT(MAX_IOCS == MAX_IO_UNITS_PER_BOARD);

		bda_ioc = bdesc->bda_ioc;
		for (p = 0; p < MAX_IOCS; p++) {
			if (BDA_NBL(bda_ioc, p) != BDAN_GOOD)
				continue;

			rv = pc_madr_add(b, dp->bp->bnum, p + 4, mcreg);
			if (rv) {
				pda_close(ph);
				return (DRMACH_INTERNAL_ERROR());
			}
		}
	}

	pda_close(ph);
	return (NULL);
}

sbd_error_t *
drmach_mem_disable(drmachid_t id)
{
	sbd_error_t	*err;
	uint_t		 mcreg;

	err = drmach_read_mc_asr(id, &mcreg);
	if (err == NULL) {
		ASSERT(mcreg & STARFIRE_MC_MEM_PRESENT_MASK);

		/* Turn off presence bit. */
		mcreg &= ~STARFIRE_MC_MEM_PRESENT_MASK;

		err = drmach_mem_update_interconnect(id, mcreg);
		if (err == NULL)
			err = drmach_write_mc_asr(id, mcreg);
	}

	return (err);
}

sbd_error_t *
drmach_mem_enable(drmachid_t id)
{
	sbd_error_t	*err;
	uint_t		 mcreg;

	err = drmach_read_mc_asr(id, &mcreg);
	if (err == NULL) {
		mcreg |= STARFIRE_MC_MEM_PRESENT_MASK;

		err = drmach_write_mc_asr(id, mcreg);
		if (err == NULL)
			err = drmach_mem_update_interconnect(id, mcreg);
	}

	return (err);
}
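/*
 * Note the ordering asymmetry in the two routines above:
 * drmach_mem_disable() reprograms the remote port controllers before
 * clearing the presence bit in this board's MC ASR, while
 * drmach_mem_enable() sets the presence bit before reprogramming the
 * PCs. The apparent invariant (our reading, not stated in the
 * original) is that a PC is never left routing to a memory whose MC
 * presence bit is clear.
 */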
3176 */ 3177 sbd_error_t * 3178 drmach_mem_get_memlist(drmachid_t id, struct memlist **ml) 3179 { 3180 drmach_device_t *mem; 3181 int rv, i, rlen, rblks; 3182 sbd_error_t *err; 3183 struct memlist *mlist; 3184 struct sf_memunit_regspec *rlist; 3185 3186 if (!DRMACH_IS_MEM_ID(id)) 3187 return (drerr_new(0, ESTF_INAPPROP, NULL)); 3188 mem = id; 3189 3190 err = drmach_device_get_proplen(mem, "dr-available", &rlen); 3191 if (err) 3192 return (err); 3193 3194 rlist = kmem_zalloc(rlen, KM_SLEEP); 3195 3196 err = drmach_device_get_prop(mem, "dr-available", rlist); 3197 if (err) { 3198 kmem_free(rlist, rlen); 3199 return (err); 3200 } 3201 3202 mlist = NULL; 3203 rblks = rlen / sizeof (struct sf_memunit_regspec); 3204 for (i = 0; i < rblks; i++) { 3205 uint64_t addr, size; 3206 3207 addr = (uint64_t)rlist[i].regspec_addr_hi << 32; 3208 addr |= (uint64_t)rlist[i].regspec_addr_lo; 3209 size = (uint64_t)rlist[i].regspec_size_hi << 32; 3210 size |= (uint64_t)rlist[i].regspec_size_lo; 3211 3212 mlist = memlist_add_span(mlist, addr, size); 3213 } 3214 3215 kmem_free(rlist, rlen); 3216 3217 /* 3218 * Make sure the incoming memlist doesn't already 3219 * intersect with what's present in the system (phys_install). 3220 */ 3221 memlist_read_lock(); 3222 rv = memlist_intersect(phys_install, mlist); 3223 memlist_read_unlock(); 3224 if (rv) { 3225 #ifdef DEBUG 3226 DRMACH_PR("OBP derived memlist intersects" 3227 " with phys_install\n"); 3228 memlist_dump(mlist); 3229 3230 DRMACH_PR("phys_install memlist:\n"); 3231 memlist_dump(phys_install); 3232 #endif 3233 3234 memlist_delete(mlist); 3235 return (DRMACH_INTERNAL_ERROR()); 3236 } 3237 3238 #ifdef DEBUG 3239 DRMACH_PR("OBP derived memlist:"); 3240 memlist_dump(mlist); 3241 #endif 3242 3243 *ml = mlist; 3244 return (NULL); 3245 } 3246 3247 sbd_error_t * 3248 drmach_mem_get_size(drmachid_t id, uint64_t *bytes) 3249 { 3250 drmach_device_t *mem; 3251 pda_handle_t ph; 3252 pgcnt_t npages; 3253 3254 if (!DRMACH_IS_MEM_ID(id)) 3255 return (drerr_new(0, ESTF_INAPPROP, NULL)); 3256 mem = id; 3257 3258 ph = drmach_pda_open(); 3259 if (ph == NULL) 3260 return (DRMACH_INTERNAL_ERROR()); 3261 3262 npages = pda_get_mem_size(ph, mem->bp->bnum); 3263 *bytes = (uint64_t)npages << PAGESHIFT; 3264 3265 pda_close(ph); 3266 return (NULL); 3267 } 3268 3269 sbd_error_t * 3270 drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes) 3271 { 3272 if (!DRMACH_IS_MEM_ID(id)) 3273 return (drerr_new(0, ESTF_INAPPROP, NULL)); 3274 3275 *bytes = mc_get_mem_alignment(); 3276 return (NULL); 3277 } 3278 3279 /* field debugging tool */ 3280 processorid_t drmach_mem_cpu_affinity_nail = 0; 3281 3282 processorid_t 3283 drmach_mem_cpu_affinity(drmachid_t id) 3284 { 3285 drmach_device_t *mp; 3286 drmach_board_t *bp; 3287 processorid_t cpuid; 3288 3289 if (!DRMACH_IS_MEM_ID(id)) 3290 return (CPU_CURRENT); 3291 3292 if (drmach_mem_cpu_affinity_nail) { 3293 cpuid = drmach_mem_cpu_affinity_nail; 3294 3295 if (cpuid < 0 || cpuid > NCPU) 3296 return (CPU_CURRENT); 3297 3298 mutex_enter(&cpu_lock); 3299 if (cpu[cpuid] == NULL || !CPU_ACTIVE(cpu[cpuid])) 3300 cpuid = CPU_CURRENT; 3301 mutex_exit(&cpu_lock); 3302 3303 return (cpuid); 3304 } 3305 3306 /* try to choose a proc on the target board */ 3307 mp = id; 3308 bp = mp->bp; 3309 if (bp->devices) { 3310 int rv; 3311 int d_idx; 3312 drmachid_t d_id; 3313 3314 rv = drmach_array_first(bp->devices, &d_idx, &d_id); 3315 while (rv == 0) { 3316 if (DRMACH_IS_CPU_ID(d_id)) { 3317 cpuid = drmach_cpu_calc_id(d_id); 3318 3319 mutex_enter(&cpu_lock); 3320 if 
sbd_error_t *
drmach_mem_get_size(drmachid_t id, uint64_t *bytes)
{
	drmach_device_t	*mem;
	pda_handle_t	 ph;
	pgcnt_t		 npages;

	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	mem = id;

	ph = drmach_pda_open();
	if (ph == NULL)
		return (DRMACH_INTERNAL_ERROR());

	npages = pda_get_mem_size(ph, mem->bp->bnum);
	*bytes = (uint64_t)npages << PAGESHIFT;

	pda_close(ph);
	return (NULL);
}

sbd_error_t *
drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes)
{
	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	*bytes = mc_get_mem_alignment();
	return (NULL);
}

/* field debugging tool */
processorid_t drmach_mem_cpu_affinity_nail = 0;

processorid_t
drmach_mem_cpu_affinity(drmachid_t id)
{
	drmach_device_t	*mp;
	drmach_board_t	*bp;
	processorid_t	 cpuid;

	if (!DRMACH_IS_MEM_ID(id))
		return (CPU_CURRENT);

	if (drmach_mem_cpu_affinity_nail) {
		cpuid = drmach_mem_cpu_affinity_nail;

		if (cpuid < 0 || cpuid >= NCPU)
			return (CPU_CURRENT);

		mutex_enter(&cpu_lock);
		if (cpu[cpuid] == NULL || !CPU_ACTIVE(cpu[cpuid]))
			cpuid = CPU_CURRENT;
		mutex_exit(&cpu_lock);

		return (cpuid);
	}

	/* try to choose a proc on the target board */
	mp = id;
	bp = mp->bp;
	if (bp->devices) {
		int		rv;
		int		d_idx;
		drmachid_t	d_id;

		rv = drmach_array_first(bp->devices, &d_idx, &d_id);
		while (rv == 0) {
			if (DRMACH_IS_CPU_ID(d_id)) {
				cpuid = drmach_cpu_calc_id(d_id);

				mutex_enter(&cpu_lock);
				if (cpu[cpuid] && CPU_ACTIVE(cpu[cpuid])) {
					mutex_exit(&cpu_lock);
					DRMACH_PR("drmach_mem_cpu_affinity: "
					    "selected cpuid=%d\n", cpuid);
					return (cpuid);
				} else {
					mutex_exit(&cpu_lock);
				}
			}

			rv = drmach_array_next(bp->devices, &d_idx, &d_id);
		}
	}

	/* otherwise, this proc, wherever it is */
	DRMACH_PR("drmach_mem_cpu_affinity: using default CPU_CURRENT\n");

	return (CPU_CURRENT);
}

static sbd_error_t *
drmach_mem_release(drmachid_t id)
{
	if (!DRMACH_IS_MEM_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	return (NULL);
}

static sbd_error_t *
drmach_mem_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_device_t	*dp;
	sbd_error_t	*err;
	uint64_t	 pa, slice_size;
	struct memlist	*ml;

	ASSERT(DRMACH_IS_MEM_ID(id));
	dp = id;

	/* get starting physical address of target memory */
	err = drmach_mem_get_base_physaddr(id, &pa);
	if (err)
		return (err);

	/* round down to slice boundary */
	slice_size = mc_get_mem_alignment();
	pa &= ~(slice_size - 1);

	/* stop at first span that is in slice */
	memlist_read_lock();
	for (ml = phys_install; ml; ml = ml->ml_next)
		if (ml->ml_address >= pa && ml->ml_address < pa + slice_size)
			break;
	memlist_read_unlock();

	stat->assigned = dp->bp->assigned;
	stat->powered = dp->bp->powered;
	stat->configured = (ml != NULL);
	stat->busy = dp->busy;
	(void) strncpy(stat->type, dp->type, sizeof (stat->type));
	stat->info[0] = '\0';

	return (NULL);
}
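/*
 * Slice-rounding example for drmach_mem_status() above: with a
 * hypothetical 1GB slice (mc_get_mem_alignment() == 0x40000000), a
 * base PA of 0x8c000000 is rounded down by `pa &= ~(slice_size - 1)'
 * to 0x80000000, and the memlist scan then looks for any phys_install
 * span falling in [0x80000000, 0xc0000000).
 */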
static int
drmach_detach_board(void *arg)
{
	cpuset_t	 cset;
	int		 retval;
	drmach_board_t	*bp = (drmach_board_t *)arg;

	cset = cpu_ready_set;
	promsafe_xc_attention(cset);

	retval = prom_starfire_rm_brd(bp->bnum);

	xc_dismissed(cset);

	return (retval);
}

sbd_error_t *
drmach_board_deprobe(drmachid_t id)
{
	drmach_board_t	*bp;
	int		 retval;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;

	cmn_err(CE_CONT, "DR: PROM detach board %d\n", bp->bnum);

	retval = prom_tree_update(drmach_detach_board, bp);

	if (retval == 0)
		return (NULL);
	else {
		cmn_err(CE_WARN, "prom error: prom_starfire_rm_brd(%d) "
		    "returned %d", bp->bnum, retval);
		return (drerr_new(1, ESTF_DEPROBE, "%s", bp->cm.name));
	}
}

/*ARGSUSED*/
static sbd_error_t *
drmach_pt_juggle_bootproc(drmachid_t id, drmach_opts_t *opts)
{
	drmach_device_t	*cpu;
	sbd_error_t	*err;

	if (!DRMACH_IS_CPU_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	cpu = id;

	mutex_enter(&cpu_lock);

	err = drmach_cpu_juggle_bootproc(cpu);

	mutex_exit(&cpu_lock);

	return (err);
}

/*ARGSUSED*/
static sbd_error_t *
drmach_pt_dump_pdainfo(drmachid_t id, drmach_opts_t *opts)
{
	drmach_board_t	*bp;
	int		 board;
	int		 i;
	pda_handle_t	 ph;
	board_desc_t	*bdesc;

	if (!DRMACH_IS_BOARD_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	bp = id;
	board = bp->bnum;

	ph = drmach_pda_open();
	if (ph == NULL)
		return (DRMACH_INTERNAL_ERROR());

	if (pda_board_present(ph, board) == 0) {
		cmn_err(CE_CONT, "board %d is MISSING\n", board);
		pda_close(ph);
		return (DRMACH_INTERNAL_ERROR());
	}

	cmn_err(CE_CONT, "board %d is PRESENT\n", board);

	bdesc = (board_desc_t *)pda_get_board_info(ph, board);
	if (bdesc == NULL) {
		cmn_err(CE_CONT,
		    "no board descriptor found for board %d\n",
		    board);
		pda_close(ph);
		return (DRMACH_INTERNAL_ERROR());
	}

	/* make sure definition in platmod is in sync with pda */
	ASSERT(MAX_PROCMODS == MAX_CPU_UNITS_PER_BOARD);

	for (i = 0; i < MAX_PROCMODS; i++) {
		if (BDA_NBL(bdesc->bda_proc, i) == BDAN_GOOD)
			cmn_err(CE_CONT,
			    "proc %d.%d PRESENT\n", board, i);
		else
			cmn_err(CE_CONT,
			    "proc %d.%d MISSING\n", board, i);
	}

	for (i = 0; i < MAX_MGROUPS; i++) {
		if (BDA_NBL(bdesc->bda_mgroup, i) == BDAN_GOOD)
			cmn_err(CE_CONT,
			    "mgroup %d.%d PRESENT\n", board, i);
		else
			cmn_err(CE_CONT,
			    "mgroup %d.%d MISSING\n", board, i);
	}

	/* make sure definition in platmod is in sync with pda */
	ASSERT(MAX_IOCS == MAX_IO_UNITS_PER_BOARD);

	for (i = 0; i < MAX_IOCS; i++) {
		int	s;

		if (BDA_NBL(bdesc->bda_ioc, i) == BDAN_GOOD) {
			cmn_err(CE_CONT,
			    "ioc %d.%d PRESENT\n", board, i);
			for (s = 0; s < MAX_SLOTS_PER_IOC; s++) {
				if (BDA_NBL(bdesc->bda_ios[i], s) != BDAN_GOOD)
					continue;
				cmn_err(CE_CONT,
				    "..scard %d.%d.%d PRESENT\n",
				    board, i, s);
			}
		} else {
			cmn_err(CE_CONT,
			    "ioc %d.%d MISSING\n",
			    board, i);
		}
	}

	cmn_err(CE_CONT,
	    "board %d memsize = %d pages\n",
	    board, pda_get_mem_size(ph, board));

	pda_close(ph);

	return (NULL);
}

/*ARGSUSED*/
sbd_error_t *
drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts)
{
	struct memlist	*ml;
	uint64_t	 src_pa;
	uint64_t	 dst_pa;
	uint64_t	 dst;

	dst_pa = va_to_pa(&dst);

	memlist_read_lock();
	for (ml = phys_install; ml; ml = ml->ml_next) {
		uint64_t	nbytes;

		src_pa = ml->ml_address;
		nbytes = ml->ml_size;

		while (nbytes != 0ull) {

			/* copy 32 bytes at src_pa to dst_pa */
			bcopy32_il(src_pa, dst_pa);

			/* increment src_pa by 32 bytes */
			src_pa += (4 * sizeof (uint64_t));

			/* decrement nbytes by 32 bytes */
			nbytes -= (4 * sizeof (uint64_t));
		}
	}
	memlist_read_unlock();

	return (NULL);
}

static struct {
	const char	*name;
	sbd_error_t	*(*handler)(drmachid_t id, drmach_opts_t *opts);
} drmach_pt_arr[] = {
	{ "juggle",	drmach_pt_juggle_bootproc	},
	{ "pda",	drmach_pt_dump_pdainfo		},
	{ "readmem",	drmach_pt_readmem		},

	/* the following line must always be last */
	{ NULL,		NULL				}
};

/*ARGSUSED*/
sbd_error_t *
drmach_passthru(drmachid_t id, drmach_opts_t *opts)
{
	int		i;
	sbd_error_t	*err;

	i = 0;
	while (drmach_pt_arr[i].name != NULL) {
		int len = strlen(drmach_pt_arr[i].name);

		if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0)
			break;

		i += 1;
	}

	if (drmach_pt_arr[i].name == NULL)
		err = drerr_new(0, ESTF_UNKPTCMD, opts->copts);
	else
		err = (*drmach_pt_arr[i].handler)(id, opts);

	return (err);
}
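/*
 * Dispatch example for drmach_passthru() above: opts->copts == "pda"
 * selects drmach_pt_dump_pdainfo(), and an unrecognized string walks
 * off the table to the NULL sentinel and yields ESTF_UNKPTCMD. Note
 * that the strncmp() compares only strlen(name) characters, so the
 * match is on prefix: a copts of "pdainfo" would also select the
 * "pda" handler.
 */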
sbd_error_t *
drmach_release(drmachid_t id)
{
	drmach_common_t	*cp;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));
	cp = id;

	return (cp->release(id));
}

sbd_error_t *
drmach_status(drmachid_t id, drmach_status_t *stat)
{
	drmach_common_t	*cp;

	if (!DRMACH_IS_ID(id))
		return (drerr_new(0, ESTF_NOTID, NULL));
	cp = id;

	return (cp->status(id, stat));
}

sbd_error_t *
drmach_unconfigure(drmachid_t id, int flags)
{
	drmach_device_t	*dp;
	pnode_t		 nodeid;
	dev_info_t	*dip, *fdip = NULL;

	if (!DRMACH_IS_DEVICE_ID(id))
		return (drerr_new(0, ESTF_INAPPROP, NULL));

	dp = id;

	nodeid = drmach_node_get_dnode(dp->node);
	if (nodeid == OBP_NONODE)
		return (DRMACH_INTERNAL_ERROR());

	dip = e_ddi_nodeid_to_dip(nodeid);
	if (dip == NULL)
		return (NULL);

	/*
	 * Branch already held, so hold acquired in
	 * e_ddi_nodeid_to_dip() can be released
	 */
	ddi_release_devi(dip);

	if (flags & DEVI_BRANCH_DESTROY)
		flags |= DEVI_BRANCH_EVENT;

	/*
	 * Force flag is no longer necessary. See starcat/io/drmach.c
	 * for details.
	 */
	ASSERT(e_ddi_branch_held(dip));
	if (e_ddi_branch_unconfigure(dip, &fdip, flags)) {
		sbd_error_t	*err;
		char		*path = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		/*
		 * If non-NULL, fdip is returned held and must be released.
		 */
		if (fdip != NULL) {
			(void) ddi_pathname(fdip, path);
			ndi_rele_devi(fdip);
		} else {
			(void) ddi_pathname(dip, path);
		}

		err = drerr_new(1, ESTF_DRVFAIL, path);

		kmem_free(path, MAXPATHLEN);

		return (err);
	}

	return (NULL);
}

/*
 * drmach interfaces to legacy Starfire platmod logic
 * linkage via runtime symbol look up, called from plat_cpu_power*
 */

/*
 * Start up a cpu.  It is possible that we're attempting to restart
 * the cpu after an UNCONFIGURE in which case the cpu will be
 * spinning in its cache.  So, all we have to do is wake it up.
 * Under normal circumstances the cpu will be coming from a previous
 * CONNECT and thus will be spinning in OBP.  In both cases, the
 * startup sequence is the same.
 */
int
drmach_cpu_poweron(struct cpu *cp)
{
	DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (drmach_cpu_start(cp) != 0)
		return (EBUSY);
	else
		return (0);
}

int
drmach_cpu_poweroff(struct cpu *cp)
{
	int		ntries, cnt;
	processorid_t	cpuid = cp->cpu_id;
	void		drmach_cpu_shutdown_self(void);

	DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id);

	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Capture all CPUs (except for detaching proc) to prevent
	 * crosscalls to the detaching proc until it has cleared its
	 * bit in cpu_ready_set.
	 *
	 * The CPUs remain paused and the prom_mutex is known to be free.
	 * This prevents the x-trap victim from blocking when doing prom
	 * IEEE-1275 calls at a high PIL level.
	 */
	promsafe_pause_cpus();

	/*
	 * Quiesce interrupts on the target CPU. We do this by setting
	 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to
	 * prevent it from receiving cross calls and cross traps.
	 * This prevents the processor from receiving any new soft interrupts.
	 */
	mp_cpu_quiesce(cp);

	/* setup xt_mb, will be cleared by drmach_shutdown_asm when ready */
	drmach_xt_mb[cpuid] = 0x80;

	xt_one_unchecked(cpuid, (xcfunc_t *)idle_stop_xcall,
	    (uint64_t)drmach_cpu_shutdown_self, NULL);

	ntries = drmach_cpu_ntries;
	cnt = 0;
	while (drmach_xt_mb[cpuid] && ntries) {
		DELAY(drmach_cpu_delay);
		ntries--;
		cnt++;
	}

	drmach_xt_mb[cpuid] = 0;	/* steal the cache line back */

	start_cpus();

	DRMACH_PR("waited %d out of %d tries for "
	    "drmach_cpu_shutdown_self on cpu%d",
	    drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id);

	drmach_cpu_obp_detach(cpuid);

	CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid);

	return (0);
}
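/*
 * Handshake recap for the poweroff path above, tying it back to
 * drmach_cpu_stop_self(): the 0x80 written to drmach_xt_mb[cpuid]
 * arms the mailbox, idle_stop_xcall() redirects the target cpu into
 * drmach_cpu_shutdown_self(), and drmach_shutdown_asm, running from
 * BBSRAM, clears the byte (via the physaddr recorded in the mailbox)
 * once its flushes are complete. The wait loop here simply polls for
 * that clear, then reclaims the cache line.
 */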
3740 */ 3741 mp_cpu_quiesce(cp); 3742 3743 /* setup xt_mb, will be cleared by drmach_shutdown_asm when ready */ 3744 drmach_xt_mb[cpuid] = 0x80; 3745 3746 xt_one_unchecked(cpuid, (xcfunc_t *)idle_stop_xcall, 3747 (uint64_t)drmach_cpu_shutdown_self, NULL); 3748 3749 ntries = drmach_cpu_ntries; 3750 cnt = 0; 3751 while (drmach_xt_mb[cpuid] && ntries) { 3752 DELAY(drmach_cpu_delay); 3753 ntries--; 3754 cnt++; 3755 } 3756 3757 drmach_xt_mb[cpuid] = 0; /* steal the cache line back */ 3758 3759 start_cpus(); 3760 3761 DRMACH_PR("waited %d out of %d tries for " 3762 "drmach_cpu_shutdown_self on cpu%d", 3763 drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id); 3764 3765 drmach_cpu_obp_detach(cpuid); 3766 3767 CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid); 3768 3769 return (0); 3770 } 3771 3772 /*ARGSUSED*/ 3773 int 3774 drmach_verify_sr(dev_info_t *dip, int sflag) 3775 { 3776 return (0); 3777 } 3778 3779 void 3780 drmach_suspend_last(void) 3781 { 3782 } 3783 3784 void 3785 drmach_resume_first(void) 3786 { 3787 } 3788 3789 /* 3790 * Log a DR sysevent. 3791 * Return value: 0 success, non-zero failure. 3792 */ 3793 int 3794 drmach_log_sysevent(int board, char *hint, int flag, int verbose) 3795 { 3796 sysevent_t *ev; 3797 sysevent_id_t eid; 3798 int rv, km_flag; 3799 sysevent_value_t evnt_val; 3800 sysevent_attr_list_t *evnt_attr_list = NULL; 3801 char attach_pnt[MAXNAMELEN]; 3802 3803 km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 3804 attach_pnt[0] = '\0'; 3805 if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) { 3806 rv = -1; 3807 goto logexit; 3808 } 3809 if (verbose) 3810 DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n", 3811 attach_pnt, hint, flag, verbose); 3812 3813 if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE, 3814 SUNW_KERN_PUB"dr", km_flag)) == NULL) { 3815 rv = -2; 3816 goto logexit; 3817 } 3818 evnt_val.value_type = SE_DATA_TYPE_STRING; 3819 evnt_val.value.sv_string = attach_pnt; 3820 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, 3821 &evnt_val, km_flag)) != 0) 3822 goto logexit; 3823 3824 evnt_val.value_type = SE_DATA_TYPE_STRING; 3825 evnt_val.value.sv_string = hint; 3826 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, 3827 &evnt_val, km_flag)) != 0) { 3828 sysevent_free_attr(evnt_attr_list); 3829 goto logexit; 3830 } 3831 3832 (void) sysevent_attach_attributes(ev, evnt_attr_list); 3833 3834 /* 3835 * Log the event but do not sleep waiting for its 3836 * delivery. This provides insulation from syseventd. 3837 */ 3838 rv = log_sysevent(ev, SE_NOSLEEP, &eid); 3839 3840 logexit: 3841 if (ev) 3842 sysevent_free(ev); 3843 if ((rv != 0) && verbose) 3844 cmn_err(CE_WARN, 3845 "drmach_log_sysevent failed (rv %d) for %s %s\n", 3846 rv, attach_pnt, hint); 3847 3848 return (rv); 3849 } 3850 3851 /*ARGSUSED*/ 3852 int 3853 drmach_allow_memrange_modify(drmachid_t id) 3854 { 3855 return (1); /* TRUE */ 3856 } 3857