1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #include <sys/note.h> 28 #include <sys/debug.h> 29 #include <sys/types.h> 30 #include <sys/varargs.h> 31 #include <sys/errno.h> 32 #include <sys/cred.h> 33 #include <sys/dditypes.h> 34 #include <sys/devops.h> 35 #include <sys/modctl.h> 36 #include <sys/poll.h> 37 #include <sys/conf.h> 38 #include <sys/ddi.h> 39 #include <sys/sunddi.h> 40 #include <sys/sunndi.h> 41 #include <sys/ndi_impldefs.h> 42 #include <sys/stat.h> 43 #include <sys/kmem.h> 44 #include <sys/vmem.h> 45 #include <sys/disp.h> 46 #include <sys/processor.h> 47 #include <sys/cheetahregs.h> 48 #include <sys/cpuvar.h> 49 #include <sys/mem_config.h> 50 #include <sys/ddi_impldefs.h> 51 #include <sys/systm.h> 52 #include <sys/machsystm.h> 53 #include <sys/autoconf.h> 54 #include <sys/cmn_err.h> 55 #include <sys/sysmacros.h> 56 #include <sys/x_call.h> 57 #include <sys/promif.h> 58 #include <sys/prom_plat.h> 59 #include <sys/membar.h> 60 #include <vm/seg_kmem.h> 61 #include <sys/mem_cage.h> 62 #include <sys/stack.h> 63 #include <sys/archsystm.h> 64 #include <vm/hat_sfmmu.h> 65 #include <sys/pte.h> 66 #include <sys/mmu.h> 67 #include <sys/cpu_module.h> 68 #include <sys/obpdefs.h> 69 #include <sys/mboxsc.h> 70 #include <sys/plat_ecc_dimm.h> 71 72 #include <sys/hotplug/hpctrl.h> /* XXX should be included by schpc.h */ 73 #include <sys/schpc.h> 74 #include <sys/pci.h> 75 76 #include <sys/starcat.h> 77 #include <sys/cpu_sgnblk_defs.h> 78 #include <sys/drmach.h> 79 #include <sys/dr_util.h> 80 #include <sys/dr_mbx.h> 81 #include <sys/sc_gptwocfg.h> 82 #include <sys/iosramreg.h> 83 #include <sys/iosramio.h> 84 #include <sys/iosramvar.h> 85 #include <sys/axq.h> 86 #include <sys/post/scat_dcd.h> 87 #include <sys/kobj.h> 88 #include <sys/taskq.h> 89 #include <sys/cmp.h> 90 #include <sys/sbd_ioctl.h> 91 92 #include <sys/sysevent.h> 93 #include <sys/sysevent/dr.h> 94 #include <sys/sysevent/eventdefs.h> 95 96 #include <sys/pci/pcisch.h> 97 #include <sys/pci/pci_regs.h> 98 99 #include <sys/ontrap.h> 100 101 /* defined in ../ml/drmach.il.cpp */ 102 extern void bcopy32_il(uint64_t, uint64_t); 103 extern void flush_ecache_il(int64_t physaddr, int size, int linesz); 104 extern void flush_dcache_il(void); 105 extern void flush_icache_il(void); 106 extern void flush_pcache_il(void); 107 108 /* defined in ../ml/drmach_asm.s */ 109 extern uint64_t lddmcdecode(uint64_t physaddr); 110 extern uint64_t lddsafconfig(void); 111 112 /* XXX here until provided by sys/dman.h */ 113 extern int man_dr_attach(dev_info_t *); 114 extern int man_dr_detach(dev_info_t *); 115 116 #define DRMACH_BNUM2EXP(bnum) ((bnum) >> 1) 117 #define DRMACH_BNUM2SLOT(bnum) ((bnum) & 1) 118 #define DRMACH_EXPSLOT2BNUM(exp, slot) (((exp) << 1) + (slot)) 119 120 #define DRMACH_SLICE_MASK 0x1Full 121 #define DRMACH_SLICE_TO_PA(s) (((s) & DRMACH_SLICE_MASK) << 37) 122 #define DRMACH_PA_TO_SLICE(a) (((a) >> 37) & DRMACH_SLICE_MASK) 123 124 /* 125 * DRMACH_MEM_SLICE_SIZE and DRMACH_MEM_USABLE_SLICE_SIZE define the 126 * available address space and the usable address space for every slice. 127 * There must be a distinction between the available and usable do to a 128 * restriction imposed by CDC memory size. 129 */ 130 131 #define DRMACH_MEM_SLICE_SIZE (1ull << 37) /* 128GB */ 132 #define DRMACH_MEM_USABLE_SLICE_SIZE (1ull << 36) /* 64GB */ 133 134 #define DRMACH_MC_NBANKS 4 135 136 #define DRMACH_MC_ADDR(mp, bank) ((mp)->madr_pa + 16 + 8 * (bank)) 137 #define DRMACH_MC_ASI_ADDR(mp, bank) (DRMACH_MC_ADDR(mp, bank) & 0xFF) 138 139 #define DRMACH_EMU_ACT_STATUS_OFFSET 0x50 140 #define DRMACH_EMU_ACT_STATUS_ADDR(mp) \ 141 ((mp)->madr_pa + DRMACH_EMU_ACT_STATUS_OFFSET) 142 143 /* 144 * The Cheetah's Safari Configuration Register and the Schizo's 145 * Safari Control/Status Register place the LPA base and bound fields in 146 * same bit locations with in their register word. This source code takes 147 * advantage of this by defining only one set of LPA encoding/decoding macros 148 * which are shared by various Cheetah and Schizo drmach routines. 149 */ 150 #define DRMACH_LPA_BASE_MASK (0x3Full << 3) 151 #define DRMACH_LPA_BND_MASK (0x3Full << 9) 152 153 #define DRMACH_LPA_BASE_TO_PA(scr) (((scr) & DRMACH_LPA_BASE_MASK) << 34) 154 #define DRMACH_LPA_BND_TO_PA(scr) (((scr) & DRMACH_LPA_BND_MASK) << 28) 155 #define DRMACH_PA_TO_LPA_BASE(pa) (((pa) >> 34) & DRMACH_LPA_BASE_MASK) 156 #define DRMACH_PA_TO_LPA_BND(pa) (((pa) >> 28) & DRMACH_LPA_BND_MASK) 157 158 #define DRMACH_L1_SET_LPA(b) \ 159 (((b)->flags & DRMACH_NULL_PROC_LPA) == 0) 160 161 #define DRMACH_CPU_SRAM_ADDR 0x7fff0900000ull 162 #define DRMACH_CPU_SRAM_SIZE 0x20000ull 163 164 /* 165 * Name properties for frequently accessed device nodes. 166 */ 167 #define DRMACH_CPU_NAMEPROP "cpu" 168 #define DRMACH_CMP_NAMEPROP "cmp" 169 #define DRMACH_AXQ_NAMEPROP "address-extender-queue" 170 #define DRMACH_PCI_NAMEPROP "pci" 171 172 /* 173 * Maximum value of processor Safari Timeout Log (TOL) field of 174 * Safari Config reg (7 secs). 175 */ 176 #define DRMACH_SAF_TOL_MAX 7 * 1000000 177 178 /* 179 * drmach_board_t flag definitions 180 */ 181 #define DRMACH_NULL_PROC_LPA 0x1 182 183 typedef struct { 184 uint32_t reg_addr_hi; 185 uint32_t reg_addr_lo; 186 uint32_t reg_size_hi; 187 uint32_t reg_size_lo; 188 } drmach_reg_t; 189 190 typedef struct { 191 struct drmach_node *node; 192 void *data; 193 } drmach_node_walk_args_t; 194 195 typedef struct drmach_node { 196 void *here; 197 198 pnode_t (*get_dnode)(struct drmach_node *node); 199 int (*walk)(struct drmach_node *node, void *data, 200 int (*cb)(drmach_node_walk_args_t *args)); 201 dev_info_t *(*n_getdip)(struct drmach_node *node); 202 int (*n_getproplen)(struct drmach_node *node, char *name, 203 int *len); 204 int (*n_getprop)(struct drmach_node *node, char *name, 205 void *buf, int len); 206 int (*get_parent)(struct drmach_node *node, 207 struct drmach_node *pnode); 208 } drmach_node_t; 209 210 typedef struct { 211 int min_index; 212 int max_index; 213 int arr_sz; 214 drmachid_t *arr; 215 } drmach_array_t; 216 217 typedef struct { 218 void *isa; 219 220 void (*dispose)(drmachid_t); 221 sbd_error_t *(*release)(drmachid_t); 222 sbd_error_t *(*status)(drmachid_t, drmach_status_t *); 223 224 char name[MAXNAMELEN]; 225 } drmach_common_t; 226 227 struct drmach_board; 228 typedef struct drmach_board drmach_board_t; 229 230 typedef struct { 231 drmach_common_t cm; 232 const char *type; 233 drmach_board_t *bp; 234 drmach_node_t *node; 235 int portid; 236 int unum; 237 int busy; 238 int powered; 239 } drmach_device_t; 240 241 typedef struct drmach_cpu { 242 drmach_device_t dev; 243 uint64_t scr_pa; 244 processorid_t cpuid; 245 int coreid; 246 } drmach_cpu_t; 247 248 typedef struct drmach_mem { 249 drmach_device_t dev; 250 struct drmach_mem *next; 251 uint64_t nbytes; 252 uint64_t madr_pa; 253 } drmach_mem_t; 254 255 typedef struct drmach_io { 256 drmach_device_t dev; 257 uint64_t scsr_pa; /* PA of Schizo Control/Status Register */ 258 } drmach_io_t; 259 260 struct drmach_board { 261 drmach_common_t cm; 262 int bnum; 263 int assigned; 264 int powered; 265 int connected; 266 int empty; 267 int cond; 268 uint_t cpu_impl; 269 uint_t flags; 270 drmach_node_t *tree; 271 drmach_array_t *devices; 272 drmach_mem_t *mem; 273 uint64_t stardrb_offset; 274 char type[BD_TYPELEN]; 275 }; 276 277 typedef struct { 278 int flags; 279 drmach_device_t *dp; 280 sbd_error_t *err; 281 dev_info_t *fdip; 282 } drmach_config_args_t; 283 284 typedef struct { 285 drmach_board_t *obj; 286 int ndevs; 287 void *a; 288 sbd_error_t *(*found)(void *a, const char *, int, drmachid_t); 289 sbd_error_t *err; 290 } drmach_board_cb_data_t; 291 292 typedef struct drmach_casmslot { 293 int valid; 294 int slice; 295 } drmach_casmslot_t; 296 297 typedef enum { 298 DRMACH_CR_OK, 299 DRMACH_CR_MC_IDLE_ERR, 300 DRMACH_CR_IOPAUSE_ERR, 301 DRMACH_CR_ONTRAP_ERR 302 } drmach_cr_err_t; 303 304 typedef struct { 305 void *isa; 306 caddr_t data; 307 drmach_mem_t *s_mp; 308 drmach_mem_t *t_mp; 309 struct memlist *c_ml; 310 uint64_t s_copybasepa; 311 uint64_t t_copybasepa; 312 drmach_cr_err_t ecode; 313 void *earg; 314 } drmach_copy_rename_t; 315 316 /* 317 * The following global is read as a boolean value, non-zero is true. 318 * If zero, DR copy-rename and cpu poweron will not set the processor 319 * LPA settings (CBASE, CBND of Safari config register) to correspond 320 * to the current memory slice map. LPAs of processors present at boot 321 * will remain as programmed by POST. LPAs of processors on boards added 322 * by DR will remain NULL, as programmed by POST. This can be used to 323 * to override the per-board L1SSFLG_THIS_L1_NULL_PROC_LPA flag set by 324 * POST in the LDCD (and copied to the GDCD by SMS). 325 * 326 * drmach_reprogram_lpa and L1SSFLG_THIS_L1_NULL_PROC_LPA do not apply 327 * to Schizo device LPAs. These are always set by DR. 328 */ 329 static int drmach_reprogram_lpa = 1; 330 331 /* 332 * There is a known HW bug where a Jaguar CPU in Safari port 0 (SBX/P0) 333 * can fail to receive an XIR. To workaround this issue until a hardware 334 * fix is implemented, we will exclude the selection of these CPUs. 335 * Setting this to 0 will allow their selection again. 336 */ 337 static int drmach_iocage_exclude_jaguar_port_zero = 1; 338 339 static int drmach_initialized; 340 static drmach_array_t *drmach_boards; 341 342 static int drmach_cpu_delay = 1000; 343 static int drmach_cpu_ntries = 50000; 344 345 static uint32_t drmach_slice_table[AXQ_MAX_EXP]; 346 static kmutex_t drmach_slice_table_lock; 347 348 tte_t drmach_cpu_sram_tte[NCPU]; 349 caddr_t drmach_cpu_sram_va; 350 351 /* 352 * Setting to non-zero will enable delay before all disconnect ops. 353 */ 354 static int drmach_unclaim_delay_all; 355 /* 356 * Default delay is slightly greater than the max processor Safari timeout. 357 * This delay is intended to ensure the outstanding Safari activity has 358 * retired on this board prior to a board disconnect. 359 */ 360 static clock_t drmach_unclaim_usec_delay = DRMACH_SAF_TOL_MAX + 10; 361 362 /* 363 * By default, DR of non-Panther procs is not allowed into a Panther 364 * domain with large page sizes enabled. Setting this to 0 will remove 365 * the restriction. 366 */ 367 static int drmach_large_page_restriction = 1; 368 369 /* 370 * Used to pass updated LPA values to procs. 371 * Protocol is to clear the array before use. 372 */ 373 volatile uchar_t *drmach_xt_mb; 374 volatile uint64_t drmach_xt_ready; 375 static kmutex_t drmach_xt_mb_lock; 376 static int drmach_xt_mb_size; 377 378 uint64_t drmach_bus_sync_list[18 * 4 * 4 + 1]; 379 static kmutex_t drmach_bus_sync_lock; 380 381 static sbd_error_t *drmach_device_new(drmach_node_t *, 382 drmach_board_t *, int, drmachid_t *); 383 static sbd_error_t *drmach_cpu_new(drmach_device_t *, drmachid_t *); 384 static sbd_error_t *drmach_mem_new(drmach_device_t *, drmachid_t *); 385 static sbd_error_t *drmach_pci_new(drmach_device_t *, drmachid_t *); 386 static sbd_error_t *drmach_io_new(drmach_device_t *, drmachid_t *); 387 388 static dev_info_t *drmach_node_ddi_get_dip(drmach_node_t *np); 389 static int drmach_node_ddi_get_prop(drmach_node_t *np, 390 char *name, void *buf, int len); 391 static int drmach_node_ddi_get_proplen(drmach_node_t *np, 392 char *name, int *len); 393 394 static dev_info_t *drmach_node_obp_get_dip(drmach_node_t *np); 395 static int drmach_node_obp_get_prop(drmach_node_t *np, 396 char *name, void *buf, int len); 397 static int drmach_node_obp_get_proplen(drmach_node_t *np, 398 char *name, int *len); 399 400 static sbd_error_t *drmach_mbox_trans(uint8_t msgtype, int bnum, 401 caddr_t obufp, int olen, 402 caddr_t ibufp, int ilen); 403 404 sbd_error_t *drmach_io_post_attach(drmachid_t id); 405 sbd_error_t *drmach_io_post_release(drmachid_t id); 406 407 static sbd_error_t *drmach_iocage_setup(dr_testboard_req_t *, 408 drmach_device_t **dpp, cpu_flag_t *oflags); 409 static int drmach_iocage_cpu_return(drmach_device_t *dp, 410 cpu_flag_t oflags); 411 static sbd_error_t *drmach_iocage_mem_return(dr_testboard_reply_t *tbr); 412 void drmach_iocage_mem_scrub(uint64_t nbytes); 413 414 static sbd_error_t *drmach_i_status(drmachid_t id, drmach_status_t *stat); 415 416 static void drmach_slot1_lpa_set(drmach_board_t *bp); 417 418 static void drmach_cpu_read(uint64_t arg1, uint64_t arg2); 419 static int drmach_cpu_read_scr(drmach_cpu_t *cp, uint64_t *scr); 420 421 static void drmach_bus_sync_list_update(void); 422 static void drmach_slice_table_update(drmach_board_t *, int); 423 static int drmach_portid2bnum(int); 424 425 static void drmach_msg_memslice_init(dr_memslice_t slice_arr[]); 426 static void drmach_msg_memregs_init(dr_memregs_t regs_arr[]); 427 428 static int drmach_panther_boards(void); 429 430 static int drmach_name2type_idx(char *); 431 432 #ifdef DEBUG 433 434 #define DRMACH_PR if (drmach_debug) printf 435 #define DRMACH_MEMLIST_DUMP if (drmach_debug) MEMLIST_DUMP 436 int drmach_debug = 0; /* set to non-zero to enable debug messages */ 437 #else 438 439 #define DRMACH_PR _NOTE(CONSTANTCONDITION) if (0) printf 440 #define DRMACH_MEMLIST_DUMP _NOTE(CONSTANTCONDITION) if (0) MEMLIST_DUMP 441 #endif /* DEBUG */ 442 443 #define DRMACH_OBJ(id) ((drmach_common_t *)id) 444 445 #define DRMACH_IS_BOARD_ID(id) \ 446 ((id != 0) && \ 447 (DRMACH_OBJ(id)->isa == (void *)drmach_board_new)) 448 449 #define DRMACH_IS_CPU_ID(id) \ 450 ((id != 0) && \ 451 (DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new)) 452 453 #define DRMACH_IS_MEM_ID(id) \ 454 ((id != 0) && \ 455 (DRMACH_OBJ(id)->isa == (void *)drmach_mem_new)) 456 457 #define DRMACH_IS_IO_ID(id) \ 458 ((id != 0) && \ 459 (DRMACH_OBJ(id)->isa == (void *)drmach_io_new)) 460 461 #define DRMACH_IS_DEVICE_ID(id) \ 462 ((id != 0) && \ 463 (DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new || \ 464 DRMACH_OBJ(id)->isa == (void *)drmach_mem_new || \ 465 DRMACH_OBJ(id)->isa == (void *)drmach_io_new)) 466 467 #define DRMACH_IS_ID(id) \ 468 ((id != 0) && \ 469 (DRMACH_OBJ(id)->isa == (void *)drmach_board_new || \ 470 DRMACH_OBJ(id)->isa == (void *)drmach_cpu_new || \ 471 DRMACH_OBJ(id)->isa == (void *)drmach_mem_new || \ 472 DRMACH_OBJ(id)->isa == (void *)drmach_io_new)) 473 474 #define DRMACH_INTERNAL_ERROR() \ 475 drerr_new(1, ESTC_INTERNAL, drmach_ie_fmt, __LINE__) 476 static char *drmach_ie_fmt = "drmach.c %d"; 477 478 static struct { 479 const char *name; 480 const char *type; 481 sbd_error_t *(*new)(drmach_device_t *, drmachid_t *); 482 } drmach_name2type[] = { 483 {"cmp", DRMACH_DEVTYPE_CMP, NULL }, 484 {"cpu", DRMACH_DEVTYPE_CPU, drmach_cpu_new }, 485 {"SUNW,UltraSPARC-III", DRMACH_DEVTYPE_CPU, drmach_cpu_new }, 486 {"SUNW,UltraSPARC-III+", DRMACH_DEVTYPE_CPU, drmach_cpu_new }, 487 {"memory-controller", DRMACH_DEVTYPE_MEM, drmach_mem_new }, 488 {"pci", DRMACH_DEVTYPE_PCI, drmach_pci_new }, 489 {"SUNW,wci", DRMACH_DEVTYPE_WCI, drmach_io_new }, 490 }; 491 492 /* 493 * drmach autoconfiguration data structures and interfaces 494 */ 495 496 extern struct mod_ops mod_miscops; 497 498 static struct modlmisc modlmisc = { 499 &mod_miscops, 500 "Sun Fire 15000 DR" 501 }; 502 503 static struct modlinkage modlinkage = { 504 MODREV_1, 505 (void *)&modlmisc, 506 NULL 507 }; 508 509 /* 510 * drmach_boards_rwlock is used to synchronize read/write 511 * access to drmach_boards array between status and board lookup 512 * as READERS, and assign, and unassign threads as WRITERS. 513 */ 514 static krwlock_t drmach_boards_rwlock; 515 516 static kmutex_t drmach_i_lock; 517 static kmutex_t drmach_iocage_lock; 518 static kcondvar_t drmach_iocage_cv; 519 static int drmach_iocage_is_busy = 0; 520 uint64_t drmach_iocage_paddr; 521 static caddr_t drmach_iocage_vaddr; 522 static int drmach_iocage_size = 0; 523 static int drmach_is_cheetah = -1; 524 525 int 526 _init(void) 527 { 528 int err; 529 530 mutex_init(&drmach_i_lock, NULL, MUTEX_DRIVER, NULL); 531 rw_init(&drmach_boards_rwlock, NULL, RW_DEFAULT, NULL); 532 drmach_xt_mb_size = NCPU * sizeof (uchar_t); 533 drmach_xt_mb = (uchar_t *)vmem_alloc(static_alloc_arena, 534 drmach_xt_mb_size, VM_SLEEP); 535 bzero((void *)drmach_xt_mb, drmach_xt_mb_size); 536 if ((err = mod_install(&modlinkage)) != 0) { 537 mutex_destroy(&drmach_i_lock); 538 rw_destroy(&drmach_boards_rwlock); 539 vmem_free(static_alloc_arena, (void *)drmach_xt_mb, 540 drmach_xt_mb_size); 541 } 542 543 return (err); 544 } 545 546 int 547 _fini(void) 548 { 549 static void drmach_fini(void); 550 int err; 551 552 if ((err = mod_remove(&modlinkage)) == 0) 553 drmach_fini(); 554 555 return (err); 556 } 557 558 int 559 _info(struct modinfo *modinfop) 560 { 561 return (mod_info(&modlinkage, modinfop)); 562 } 563 564 /* 565 * drmach_node_* routines serve the purpose of separating the 566 * rest of the code from the device tree and OBP. This is necessary 567 * because of In-Kernel-Probing. Devices probed after stod, are probed 568 * by the in-kernel-prober, not OBP. These devices, therefore, do not 569 * have dnode ids. 570 */ 571 572 static int 573 drmach_node_obp_get_parent(drmach_node_t *np, drmach_node_t *pp) 574 { 575 pnode_t nodeid; 576 static char *fn = "drmach_node_obp_get_parent"; 577 578 nodeid = np->get_dnode(np); 579 if (nodeid == OBP_NONODE) { 580 cmn_err(CE_WARN, "%s: invalid dnode", fn); 581 return (-1); 582 } 583 584 bcopy(np, pp, sizeof (drmach_node_t)); 585 586 pp->here = (void *)(uintptr_t)prom_parentnode(nodeid); 587 if (pp->here == OBP_NONODE) { 588 cmn_err(CE_WARN, "%s: invalid parent dnode", fn); 589 return (-1); 590 } 591 592 return (0); 593 } 594 595 static pnode_t 596 drmach_node_obp_get_dnode(drmach_node_t *np) 597 { 598 return ((pnode_t)(uintptr_t)np->here); 599 } 600 601 typedef struct { 602 drmach_node_walk_args_t *nwargs; 603 int (*cb)(drmach_node_walk_args_t *args); 604 int err; 605 } drmach_node_ddi_walk_args_t; 606 607 int 608 drmach_node_ddi_walk_cb(dev_info_t *dip, void *arg) 609 { 610 drmach_node_ddi_walk_args_t *nargs; 611 612 nargs = (drmach_node_ddi_walk_args_t *)arg; 613 614 /* 615 * dip doesn't have to be held here as we are called 616 * from ddi_walk_devs() which holds the dip. 617 */ 618 nargs->nwargs->node->here = (void *)dip; 619 620 nargs->err = nargs->cb(nargs->nwargs); 621 622 /* 623 * Set "here" to NULL so that unheld dip is not accessible 624 * outside ddi_walk_devs() 625 */ 626 nargs->nwargs->node->here = NULL; 627 628 if (nargs->err) 629 return (DDI_WALK_TERMINATE); 630 else 631 return (DDI_WALK_CONTINUE); 632 } 633 634 static int 635 drmach_node_ddi_walk(drmach_node_t *np, void *data, 636 int (*cb)(drmach_node_walk_args_t *args)) 637 { 638 drmach_node_walk_args_t args; 639 drmach_node_ddi_walk_args_t nargs; 640 641 /* initialized args structure for callback */ 642 args.node = np; 643 args.data = data; 644 645 nargs.nwargs = &args; 646 nargs.cb = cb; 647 nargs.err = 0; 648 649 /* 650 * Root node doesn't have to be held in any way. 651 */ 652 ddi_walk_devs(ddi_root_node(), drmach_node_ddi_walk_cb, (void *)&nargs); 653 654 return (nargs.err); 655 } 656 657 static int 658 drmach_node_obp_walk(drmach_node_t *np, void *data, 659 int (*cb)(drmach_node_walk_args_t *args)) 660 { 661 pnode_t nodeid; 662 int rv; 663 drmach_node_walk_args_t args; 664 665 /* initialized args structure for callback */ 666 args.node = np; 667 args.data = data; 668 669 nodeid = prom_childnode(prom_rootnode()); 670 671 /* save our new position within the tree */ 672 np->here = (void *)(uintptr_t)nodeid; 673 674 rv = 0; 675 while (nodeid != OBP_NONODE) { 676 677 pnode_t child; 678 679 rv = (*cb)(&args); 680 if (rv) 681 break; 682 683 child = prom_childnode(nodeid); 684 np->here = (void *)(uintptr_t)child; 685 686 while (child != OBP_NONODE) { 687 rv = (*cb)(&args); 688 if (rv) 689 break; 690 691 child = prom_nextnode(child); 692 np->here = (void *)(uintptr_t)child; 693 } 694 695 nodeid = prom_nextnode(nodeid); 696 697 /* save our new position within the tree */ 698 np->here = (void *)(uintptr_t)nodeid; 699 } 700 701 return (rv); 702 } 703 704 static int 705 drmach_node_ddi_get_parent(drmach_node_t *np, drmach_node_t *pp) 706 { 707 dev_info_t *ndip; 708 static char *fn = "drmach_node_ddi_get_parent"; 709 710 ndip = np->n_getdip(np); 711 if (ndip == NULL) { 712 cmn_err(CE_WARN, "%s: NULL dip", fn); 713 return (-1); 714 } 715 716 bcopy(np, pp, sizeof (drmach_node_t)); 717 718 pp->here = (void *)ddi_get_parent(ndip); 719 if (pp->here == NULL) { 720 cmn_err(CE_WARN, "%s: NULL parent dip", fn); 721 return (-1); 722 } 723 724 return (0); 725 } 726 727 /*ARGSUSED*/ 728 static pnode_t 729 drmach_node_ddi_get_dnode(drmach_node_t *np) 730 { 731 return ((pnode_t)NULL); 732 } 733 734 static drmach_node_t * 735 drmach_node_new(void) 736 { 737 drmach_node_t *np; 738 739 np = kmem_zalloc(sizeof (drmach_node_t), KM_SLEEP); 740 741 if (drmach_initialized) { 742 np->get_dnode = drmach_node_ddi_get_dnode; 743 np->walk = drmach_node_ddi_walk; 744 np->n_getdip = drmach_node_ddi_get_dip; 745 np->n_getproplen = drmach_node_ddi_get_proplen; 746 np->n_getprop = drmach_node_ddi_get_prop; 747 np->get_parent = drmach_node_ddi_get_parent; 748 } else { 749 np->get_dnode = drmach_node_obp_get_dnode; 750 np->walk = drmach_node_obp_walk; 751 np->n_getdip = drmach_node_obp_get_dip; 752 np->n_getproplen = drmach_node_obp_get_proplen; 753 np->n_getprop = drmach_node_obp_get_prop; 754 np->get_parent = drmach_node_obp_get_parent; 755 } 756 757 return (np); 758 } 759 760 static void 761 drmach_node_dispose(drmach_node_t *np) 762 { 763 kmem_free(np, sizeof (*np)); 764 } 765 766 /* 767 * Check if a CPU node is part of a CMP. 768 */ 769 static int 770 drmach_is_cmp_child(dev_info_t *dip) 771 { 772 dev_info_t *pdip; 773 774 if (strcmp(ddi_node_name(dip), DRMACH_CPU_NAMEPROP) != 0) { 775 return (0); 776 } 777 778 pdip = ddi_get_parent(dip); 779 780 ASSERT(pdip); 781 782 if (strcmp(ddi_node_name(pdip), DRMACH_CMP_NAMEPROP) == 0) { 783 return (1); 784 } 785 786 return (0); 787 } 788 789 static dev_info_t * 790 drmach_node_obp_get_dip(drmach_node_t *np) 791 { 792 pnode_t nodeid; 793 dev_info_t *dip; 794 795 nodeid = np->get_dnode(np); 796 if (nodeid == OBP_NONODE) 797 return (NULL); 798 799 dip = e_ddi_nodeid_to_dip(nodeid); 800 if (dip) { 801 /* 802 * The branch rooted at dip will have been previously 803 * held, or it will be the child of a CMP. In either 804 * case, the hold acquired in e_ddi_nodeid_to_dip() 805 * is not needed. 806 */ 807 ddi_release_devi(dip); 808 ASSERT(drmach_is_cmp_child(dip) || e_ddi_branch_held(dip)); 809 } 810 811 return (dip); 812 } 813 814 static dev_info_t * 815 drmach_node_ddi_get_dip(drmach_node_t *np) 816 { 817 return ((dev_info_t *)np->here); 818 } 819 820 static int 821 drmach_node_walk(drmach_node_t *np, void *param, 822 int (*cb)(drmach_node_walk_args_t *args)) 823 { 824 return (np->walk(np, param, cb)); 825 } 826 827 static int 828 drmach_node_ddi_get_prop(drmach_node_t *np, char *name, void *buf, int len) 829 { 830 int rv = 0; 831 dev_info_t *ndip; 832 static char *fn = "drmach_node_ddi_get_prop"; 833 834 ndip = np->n_getdip(np); 835 if (ndip == NULL) { 836 cmn_err(CE_WARN, "%s: NULL dip", fn); 837 rv = -1; 838 } else if (ddi_getlongprop_buf(DDI_DEV_T_ANY, ndip, 839 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, name, 840 (caddr_t)buf, &len) != DDI_PROP_SUCCESS) { 841 rv = -1; 842 } 843 844 return (rv); 845 } 846 847 /* ARGSUSED */ 848 static int 849 drmach_node_obp_get_prop(drmach_node_t *np, char *name, void *buf, int len) 850 { 851 int rv = 0; 852 pnode_t nodeid; 853 static char *fn = "drmach_node_obp_get_prop"; 854 855 nodeid = np->get_dnode(np); 856 if (nodeid == OBP_NONODE) { 857 cmn_err(CE_WARN, "%s: invalid dnode", fn); 858 rv = -1; 859 } else if (prom_getproplen(nodeid, (caddr_t)name) < 0) { 860 rv = -1; 861 } else { 862 (void) prom_getprop(nodeid, (caddr_t)name, (caddr_t)buf); 863 } 864 865 return (rv); 866 } 867 868 static int 869 drmach_node_ddi_get_proplen(drmach_node_t *np, char *name, int *len) 870 { 871 int rv = 0; 872 dev_info_t *ndip; 873 874 ndip = np->n_getdip(np); 875 if (ndip == NULL) { 876 rv = -1; 877 } else if (ddi_getproplen(DDI_DEV_T_ANY, ndip, DDI_PROP_DONTPASS, 878 name, len) != DDI_PROP_SUCCESS) { 879 rv = -1; 880 } 881 882 return (rv); 883 } 884 885 static int 886 drmach_node_obp_get_proplen(drmach_node_t *np, char *name, int *len) 887 { 888 pnode_t nodeid; 889 int rv; 890 891 nodeid = np->get_dnode(np); 892 if (nodeid == OBP_NONODE) 893 rv = -1; 894 else { 895 *len = prom_getproplen(nodeid, (caddr_t)name); 896 rv = (*len < 0 ? -1 : 0); 897 } 898 899 return (rv); 900 } 901 902 static drmachid_t 903 drmach_node_dup(drmach_node_t *np) 904 { 905 drmach_node_t *dup; 906 907 dup = drmach_node_new(); 908 dup->here = np->here; 909 dup->get_dnode = np->get_dnode; 910 dup->walk = np->walk; 911 dup->n_getdip = np->n_getdip; 912 dup->n_getproplen = np->n_getproplen; 913 dup->n_getprop = np->n_getprop; 914 dup->get_parent = np->get_parent; 915 916 return (dup); 917 } 918 919 /* 920 * drmach_array provides convenient array construction, access, 921 * bounds checking and array destruction logic. 922 */ 923 924 static drmach_array_t * 925 drmach_array_new(int min_index, int max_index) 926 { 927 drmach_array_t *arr; 928 929 arr = kmem_zalloc(sizeof (drmach_array_t), KM_SLEEP); 930 931 arr->arr_sz = (max_index - min_index + 1) * sizeof (void *); 932 if (arr->arr_sz > 0) { 933 arr->min_index = min_index; 934 arr->max_index = max_index; 935 936 arr->arr = kmem_zalloc(arr->arr_sz, KM_SLEEP); 937 return (arr); 938 } else { 939 kmem_free(arr, sizeof (*arr)); 940 return (0); 941 } 942 } 943 944 static int 945 drmach_array_set(drmach_array_t *arr, int idx, drmachid_t val) 946 { 947 if (idx < arr->min_index || idx > arr->max_index) 948 return (-1); 949 else { 950 arr->arr[idx - arr->min_index] = val; 951 return (0); 952 } 953 /*NOTREACHED*/ 954 } 955 956 static int 957 drmach_array_get(drmach_array_t *arr, int idx, drmachid_t *val) 958 { 959 if (idx < arr->min_index || idx > arr->max_index) 960 return (-1); 961 else { 962 *val = arr->arr[idx - arr->min_index]; 963 return (0); 964 } 965 /*NOTREACHED*/ 966 } 967 968 static int 969 drmach_array_first(drmach_array_t *arr, int *idx, drmachid_t *val) 970 { 971 int rv; 972 973 *idx = arr->min_index; 974 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL) 975 *idx += 1; 976 977 return (rv); 978 } 979 980 static int 981 drmach_array_next(drmach_array_t *arr, int *idx, drmachid_t *val) 982 { 983 int rv; 984 985 *idx += 1; 986 while ((rv = drmach_array_get(arr, *idx, val)) == 0 && *val == NULL) 987 *idx += 1; 988 989 return (rv); 990 } 991 992 static void 993 drmach_array_dispose(drmach_array_t *arr, void (*disposer)(drmachid_t)) 994 { 995 drmachid_t val; 996 int idx; 997 int rv; 998 999 rv = drmach_array_first(arr, &idx, &val); 1000 while (rv == 0) { 1001 (*disposer)(val); 1002 1003 /* clear the array entry */ 1004 rv = drmach_array_set(arr, idx, NULL); 1005 ASSERT(rv == 0); 1006 1007 rv = drmach_array_next(arr, &idx, &val); 1008 } 1009 1010 kmem_free(arr->arr, arr->arr_sz); 1011 kmem_free(arr, sizeof (*arr)); 1012 } 1013 1014 1015 static gdcd_t * 1016 drmach_gdcd_new() 1017 { 1018 gdcd_t *gdcd; 1019 1020 gdcd = kmem_zalloc(sizeof (gdcd_t), KM_SLEEP); 1021 1022 /* read the gdcd, bail if magic or ver #s are not what is expected */ 1023 if (iosram_rd(GDCD_MAGIC, 0, sizeof (gdcd_t), (caddr_t)gdcd)) { 1024 bail: 1025 kmem_free(gdcd, sizeof (gdcd_t)); 1026 return (NULL); 1027 } else if (gdcd->h.dcd_magic != GDCD_MAGIC) { 1028 goto bail; 1029 } else if (gdcd->h.dcd_version != DCD_VERSION) { 1030 goto bail; 1031 } 1032 1033 return (gdcd); 1034 } 1035 1036 static void 1037 drmach_gdcd_dispose(gdcd_t *gdcd) 1038 { 1039 kmem_free(gdcd, sizeof (gdcd_t)); 1040 } 1041 1042 /*ARGSUSED*/ 1043 sbd_error_t * 1044 drmach_configure(drmachid_t id, int flags) 1045 { 1046 drmach_device_t *dp; 1047 dev_info_t *rdip; 1048 sbd_error_t *err = NULL; 1049 1050 /* 1051 * On Starcat, there is no CPU driver, so it is 1052 * not necessary to configure any CPU nodes. 1053 */ 1054 if (DRMACH_IS_CPU_ID(id)) { 1055 return (NULL); 1056 } 1057 1058 for (; id; ) { 1059 dev_info_t *fdip = NULL; 1060 1061 if (!DRMACH_IS_DEVICE_ID(id)) 1062 return (drerr_new(0, ESTC_INAPPROP, NULL)); 1063 dp = id; 1064 1065 rdip = dp->node->n_getdip(dp->node); 1066 1067 /* 1068 * We held this branch earlier, so at a minimum its 1069 * root should still be present in the device tree. 1070 */ 1071 ASSERT(rdip); 1072 1073 DRMACH_PR("drmach_configure: configuring DDI branch"); 1074 1075 ASSERT(e_ddi_branch_held(rdip)); 1076 if (e_ddi_branch_configure(rdip, &fdip, 0) != 0) { 1077 if (err == NULL) { 1078 /* 1079 * Record first failure but don't stop 1080 */ 1081 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 1082 dev_info_t *dip = (fdip != NULL) ? fdip : rdip; 1083 1084 (void) ddi_pathname(dip, path); 1085 err = drerr_new(1, ESTC_DRVFAIL, path); 1086 1087 kmem_free(path, MAXPATHLEN); 1088 } 1089 1090 /* 1091 * If non-NULL, fdip is returned held and must be 1092 * released. 1093 */ 1094 if (fdip != NULL) { 1095 ddi_release_devi(fdip); 1096 } 1097 } 1098 1099 if (DRMACH_IS_MEM_ID(id)) { 1100 drmach_mem_t *mp = id; 1101 id = mp->next; 1102 } else { 1103 id = NULL; 1104 } 1105 } 1106 1107 return (err); 1108 } 1109 1110 static sbd_error_t * 1111 drmach_device_new(drmach_node_t *node, 1112 drmach_board_t *bp, int portid, drmachid_t *idp) 1113 { 1114 int i, rv, device_id, unum; 1115 char name[OBP_MAXDRVNAME]; 1116 drmach_device_t proto; 1117 1118 rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME); 1119 if (rv) { 1120 sbd_error_t *err; 1121 1122 /* every node is expected to have a name */ 1123 err = drerr_new(1, ESTC_GETPROP, 1124 "dip: 0x%p: property %s", 1125 node->n_getdip(node), OBP_NAME); 1126 1127 return (err); 1128 } 1129 1130 i = drmach_name2type_idx(name); 1131 1132 if (i < 0 || strcmp(name, "cmp") == 0) { 1133 /* 1134 * Not a node of interest to dr - including "cmp", 1135 * but it is in drmach_name2type[], which lets gptwocfg 1136 * driver to check if node is OBP created. 1137 */ 1138 *idp = (drmachid_t)0; 1139 return (NULL); 1140 } 1141 1142 /* 1143 * Derive a best-guess unit number from the portid value. 1144 * Some drmach_*_new constructors (drmach_pci_new, for example) 1145 * will overwrite the prototype unum value with one that is more 1146 * appropriate for the device. 1147 */ 1148 device_id = portid & 0x1f; 1149 if (device_id < 4) 1150 unum = device_id; 1151 else if (device_id == 8) { 1152 unum = 0; 1153 } else if (device_id == 9) { 1154 unum = 1; 1155 } else if (device_id == 0x1c) { 1156 unum = 0; 1157 } else if (device_id == 0x1d) { 1158 unum = 1; 1159 } else { 1160 return (DRMACH_INTERNAL_ERROR()); 1161 } 1162 1163 bzero(&proto, sizeof (proto)); 1164 proto.type = drmach_name2type[i].type; 1165 proto.bp = bp; 1166 proto.node = node; 1167 proto.portid = portid; 1168 proto.unum = unum; 1169 1170 return (drmach_name2type[i].new(&proto, idp)); 1171 } 1172 1173 static void 1174 drmach_device_dispose(drmachid_t id) 1175 { 1176 drmach_device_t *self = id; 1177 1178 self->cm.dispose(id); 1179 } 1180 1181 static drmach_board_t * 1182 drmach_board_new(int bnum) 1183 { 1184 static sbd_error_t *drmach_board_release(drmachid_t); 1185 static sbd_error_t *drmach_board_status(drmachid_t, drmach_status_t *); 1186 1187 drmach_board_t *bp; 1188 1189 bp = kmem_zalloc(sizeof (drmach_board_t), KM_SLEEP); 1190 1191 bp->cm.isa = (void *)drmach_board_new; 1192 bp->cm.release = drmach_board_release; 1193 bp->cm.status = drmach_board_status; 1194 1195 (void) drmach_board_name(bnum, bp->cm.name, sizeof (bp->cm.name)); 1196 1197 bp->bnum = bnum; 1198 bp->devices = NULL; 1199 bp->tree = drmach_node_new(); 1200 1201 (void) drmach_array_set(drmach_boards, bnum, bp); 1202 return (bp); 1203 } 1204 1205 static void 1206 drmach_board_dispose(drmachid_t id) 1207 { 1208 drmach_board_t *bp; 1209 1210 ASSERT(DRMACH_IS_BOARD_ID(id)); 1211 bp = id; 1212 1213 if (bp->tree) 1214 drmach_node_dispose(bp->tree); 1215 1216 if (bp->devices) 1217 drmach_array_dispose(bp->devices, drmach_device_dispose); 1218 1219 kmem_free(bp, sizeof (*bp)); 1220 } 1221 1222 static sbd_error_t * 1223 drmach_board_status(drmachid_t id, drmach_status_t *stat) 1224 { 1225 sbd_error_t *err = NULL; 1226 drmach_board_t *bp; 1227 caddr_t obufp; 1228 dr_showboard_t shb; 1229 1230 if (!DRMACH_IS_BOARD_ID(id)) 1231 return (drerr_new(0, ESTC_INAPPROP, NULL)); 1232 1233 bp = id; 1234 1235 /* 1236 * we need to know if the board's connected before 1237 * issuing a showboard message. If it's connected, we just 1238 * reply with status composed of cached info 1239 */ 1240 1241 if (!bp->connected) { 1242 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 1243 err = drmach_mbox_trans(DRMSG_SHOWBOARD, bp->bnum, obufp, 1244 sizeof (dr_proto_hdr_t), (caddr_t)&shb, 1245 sizeof (dr_showboard_t)); 1246 1247 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 1248 if (err) 1249 return (err); 1250 1251 bp->connected = (shb.bd_assigned && shb.bd_active); 1252 (void) strncpy(bp->type, shb.board_type, sizeof (bp->type)); 1253 stat->assigned = bp->assigned = shb.bd_assigned; 1254 stat->powered = bp->powered = shb.power_on; 1255 stat->empty = bp->empty = shb.slot_empty; 1256 1257 switch (shb.test_status) { 1258 case DR_TEST_STATUS_UNKNOWN: 1259 case DR_TEST_STATUS_IPOST: 1260 case DR_TEST_STATUS_ABORTED: 1261 stat->cond = bp->cond = SBD_COND_UNKNOWN; 1262 break; 1263 case DR_TEST_STATUS_PASSED: 1264 stat->cond = bp->cond = SBD_COND_OK; 1265 break; 1266 case DR_TEST_STATUS_FAILED: 1267 stat->cond = bp->cond = SBD_COND_FAILED; 1268 break; 1269 default: 1270 stat->cond = bp->cond = SBD_COND_UNKNOWN; 1271 DRMACH_PR("Unknown test status=0x%x from SC\n", 1272 shb.test_status); 1273 break; 1274 1275 } 1276 1277 (void) strncpy(stat->type, shb.board_type, sizeof (stat->type)); 1278 (void) snprintf(stat->info, sizeof (stat->info), 1279 "Test Level=%d", shb.test_level); 1280 } else { 1281 stat->assigned = bp->assigned; 1282 stat->powered = bp->powered; 1283 stat->empty = bp->empty; 1284 stat->cond = bp->cond; 1285 (void) strncpy(stat->type, bp->type, sizeof (stat->type)); 1286 } 1287 1288 stat->busy = 0; /* assume not busy */ 1289 stat->configured = 0; /* assume not configured */ 1290 if (bp->devices) { 1291 int rv; 1292 int d_idx; 1293 drmachid_t d_id; 1294 1295 rv = drmach_array_first(bp->devices, &d_idx, &d_id); 1296 while (rv == 0) { 1297 drmach_status_t d_stat; 1298 1299 err = drmach_i_status(d_id, &d_stat); 1300 if (err) 1301 break; 1302 1303 stat->busy |= d_stat.busy; 1304 stat->configured |= d_stat.configured; 1305 1306 rv = drmach_array_next(bp->devices, &d_idx, &d_id); 1307 } 1308 } 1309 1310 return (err); 1311 } 1312 1313 typedef struct drmach_msglist { 1314 kcondvar_t s_cv; /* condvar for sending msg */ 1315 kmutex_t s_lock; /* mutex for sending */ 1316 kcondvar_t g_cv; /* condvar for getting reply */ 1317 kmutex_t g_lock; /* mutex for getting reply */ 1318 struct drmach_msglist *prev; /* link to previous entry */ 1319 struct drmach_msglist *next; /* link to next entry */ 1320 struct drmach_msglist *link; /* link to related entry */ 1321 caddr_t o_buf; /* address of output buffer */ 1322 caddr_t i_buf; /* address of input buffer */ 1323 uint32_t o_buflen; /* output buffer length */ 1324 uint32_t i_buflen; /* input buffer length */ 1325 uint32_t msgid; /* message identifier */ 1326 int o_nretry; /* number of sending retries */ 1327 int f_error; /* mailbox framework error */ 1328 uint8_t e_code; /* error code returned by SC */ 1329 uint8_t p_flag :1, /* successfully putmsg */ 1330 m_reply :1, /* msg reply received */ 1331 unused :6; 1332 } drmach_msglist_t; 1333 1334 kmutex_t drmach_g_mbox_mutex; /* mutex for mailbox globals */ 1335 kmutex_t drmach_ri_mbox_mutex; /* mutex for mailbox reinit */ 1336 kmutex_t drmach_msglist_mutex; /* mutex for message list */ 1337 drmach_msglist_t *drmach_msglist_first; /* first entry in msg list */ 1338 drmach_msglist_t *drmach_msglist_last; /* last entry in msg list */ 1339 uint32_t drmach_msgid; /* current message id */ 1340 kthread_t *drmach_getmsg_thread; /* ptr to getmsg thread */ 1341 volatile int drmach_getmsg_thread_run; /* run flag for getmsg thr */ 1342 kmutex_t drmach_sendmsg_mutex; /* mutex for sendmsg cv */ 1343 kcondvar_t drmach_sendmsg_cv; /* signaled to send new msg */ 1344 kthread_t *drmach_sendmsg_thread; /* ptr to sendmsg thread */ 1345 volatile int drmach_sendmsg_thread_run; /* run flag for sendmsg */ 1346 int drmach_mbox_istate; /* mailbox init state */ 1347 int drmach_mbox_iflag; /* set if init'd with SC */ 1348 int drmach_mbox_ipending; /* set if reinit scheduled */ 1349 1350 /* 1351 * Timeout values (in seconds) used when waiting for replies (from the SC) to 1352 * requests that we sent. Since we only receive boardevent messages, and they 1353 * are events rather than replies, there is no boardevent timeout. 1354 */ 1355 int drmach_to_mbxinit = 60; /* 1 minute */ 1356 int drmach_to_assign = 60; /* 1 minute */ 1357 int drmach_to_unassign = 60; /* 1 minute */ 1358 int drmach_to_claim = 3600; /* 1 hour */ 1359 int drmach_to_unclaim = 3600; /* 1 hour */ 1360 int drmach_to_poweron = 480; /* 8 minutes */ 1361 int drmach_to_poweroff = 480; /* 8 minutes */ 1362 int drmach_to_testboard = 43200; /* 12 hours */ 1363 int drmach_to_aborttest = 180; /* 3 minutes */ 1364 int drmach_to_showboard = 180; /* 3 minutes */ 1365 int drmach_to_unconfig = 180; /* 3 minutes */ 1366 1367 /* 1368 * Delay (in seconds) used after receiving a non-transient error indication from 1369 * an mboxsc_getmsg call in the thread that loops waiting for incoming messages. 1370 */ 1371 int drmach_mbxerr_delay = 15; /* 15 seconds */ 1372 1373 /* 1374 * Timeout values (in milliseconds) for mboxsc_putmsg and mboxsc_getmsg calls. 1375 */ 1376 clock_t drmach_to_putmsg; /* set in drmach_mbox_init */ 1377 clock_t drmach_to_getmsg = 31000; /* 31 seconds */ 1378 1379 /* 1380 * Normally, drmach_to_putmsg is set dynamically during initialization in 1381 * drmach_mbox_init. This has the potentially undesirable side effect of 1382 * clobbering any value that might have been set in /etc/system. To prevent 1383 * dynamic setting of drmach_to_putmsg (thereby allowing it to be tuned in 1384 * /etc/system), set drmach_use_tuned_putmsg_to to 1. 1385 */ 1386 int drmach_use_tuned_putmsg_to = 0; 1387 1388 1389 /* maximum conceivable message size for future mailbox protocol versions */ 1390 #define DRMACH_MAX_MBOX_MSG_SIZE 4096 1391 1392 /*ARGSUSED*/ 1393 void 1394 drmach_mbox_prmsg(dr_mbox_msg_t *mbp, int dir) 1395 { 1396 int i, j; 1397 dr_memregs_t *memregs; 1398 dr_proto_hdr_t *php = &mbp->p_hdr; 1399 dr_msg_t *mp = &mbp->msgdata; 1400 1401 #ifdef DEBUG 1402 switch (php->command) { 1403 case DRMSG_BOARDEVENT: 1404 if (dir) { 1405 DRMACH_PR("ERROR!! outgoing BOARDEVENT\n"); 1406 } else { 1407 DRMACH_PR("BOARDEVENT received:\n"); 1408 DRMACH_PR("init=%d ins=%d rem=%d asgn=%d\n", 1409 mp->dm_be.initialized, 1410 mp->dm_be.board_insertion, 1411 mp->dm_be.board_removal, 1412 mp->dm_be.slot_assign); 1413 DRMACH_PR("unasgn=%d avail=%d unavail=%d\n", 1414 mp->dm_be.slot_unassign, 1415 mp->dm_be.slot_avail, 1416 mp->dm_be.slot_unavail); 1417 } 1418 break; 1419 case DRMSG_MBOX_INIT: 1420 if (dir) { 1421 DRMACH_PR("MBOX_INIT Request:\n"); 1422 } else { 1423 DRMACH_PR("MBOX_INIT Reply:\n"); 1424 } 1425 break; 1426 case DRMSG_ASSIGN: 1427 if (dir) { 1428 DRMACH_PR("ASSIGN Request:\n"); 1429 } else { 1430 DRMACH_PR("ASSIGN Reply:\n"); 1431 } 1432 break; 1433 case DRMSG_UNASSIGN: 1434 if (dir) { 1435 DRMACH_PR("UNASSIGN Request:\n"); 1436 } else { 1437 DRMACH_PR("UNASSIGN Reply:\n"); 1438 } 1439 break; 1440 case DRMSG_CLAIM: 1441 if (!dir) { 1442 DRMACH_PR("CLAIM Reply:\n"); 1443 break; 1444 } 1445 1446 DRMACH_PR("CLAIM Request:\n"); 1447 for (i = 0; i < 18; ++i) { 1448 DRMACH_PR("exp%d: val=%d slice=0x%x\n", i, 1449 mp->dm_cr.mem_slice[i].valid, 1450 mp->dm_cr.mem_slice[i].slice); 1451 memregs = &(mp->dm_cr.mem_regs[i]); 1452 for (j = 0; j < S0_LPORT_COUNT; j++) { 1453 DRMACH_PR(" MC %2d: " 1454 "MADR[%d] = 0x%lx, " 1455 "MADR[%d] = 0x%lx\n", j, 1456 0, DRMACH_MCREG_TO_U64( 1457 memregs->madr[j][0]), 1458 1, DRMACH_MCREG_TO_U64( 1459 memregs->madr[j][1])); 1460 DRMACH_PR(" : " 1461 "MADR[%d] = 0x%lx, " 1462 "MADR[%d] = 0x%lx\n", 1463 2, DRMACH_MCREG_TO_U64( 1464 memregs->madr[j][2]), 1465 3, DRMACH_MCREG_TO_U64( 1466 memregs->madr[j][3])); 1467 } 1468 } 1469 break; 1470 case DRMSG_UNCLAIM: 1471 if (!dir) { 1472 DRMACH_PR("UNCLAIM Reply:\n"); 1473 break; 1474 } 1475 1476 DRMACH_PR("UNCLAIM Request:\n"); 1477 for (i = 0; i < 18; ++i) { 1478 DRMACH_PR("exp%d: val=%d slice=0x%x\n", i, 1479 mp->dm_ur.mem_slice[i].valid, 1480 mp->dm_ur.mem_slice[i].slice); 1481 memregs = &(mp->dm_ur.mem_regs[i]); 1482 for (j = 0; j < S0_LPORT_COUNT; j++) { 1483 DRMACH_PR(" MC %2d: " 1484 "MADR[%d] = 0x%lx, " 1485 "MADR[%d] = 0x%lx\n", j, 1486 0, DRMACH_MCREG_TO_U64( 1487 memregs->madr[j][0]), 1488 1, DRMACH_MCREG_TO_U64( 1489 memregs->madr[j][1])); 1490 DRMACH_PR(" : " 1491 "MADR[%d] = 0x%lx, " 1492 "MADR[%d] = 0x%lx\n", 1493 2, DRMACH_MCREG_TO_U64( 1494 memregs->madr[j][2]), 1495 3, DRMACH_MCREG_TO_U64( 1496 memregs->madr[j][3])); 1497 } 1498 } 1499 DRMACH_PR(" mem_clear=%d\n", mp->dm_ur.mem_clear); 1500 break; 1501 case DRMSG_UNCONFIG: 1502 if (!dir) { 1503 DRMACH_PR("UNCONFIG Reply:\n"); 1504 break; 1505 } 1506 1507 DRMACH_PR("UNCONFIG Request:\n"); 1508 for (i = 0; i < 18; ++i) { 1509 DRMACH_PR("exp%d: val=%d slice=0x%x\n", i, 1510 mp->dm_uc.mem_slice[i].valid, 1511 mp->dm_uc.mem_slice[i].slice); 1512 memregs = &(mp->dm_uc.mem_regs[i]); 1513 for (j = 0; j < S0_LPORT_COUNT; j++) { 1514 DRMACH_PR(" MC %2d: " 1515 "MADR[%d] = 0x%lx, " 1516 "MADR[%d] = 0x%lx\n", j, 1517 0, DRMACH_MCREG_TO_U64( 1518 memregs->madr[j][0]), 1519 1, DRMACH_MCREG_TO_U64( 1520 memregs->madr[j][1])); 1521 DRMACH_PR(" : " 1522 "MADR[%d] = 0x%lx, " 1523 "MADR[%d] = 0x%lx\n", 1524 2, DRMACH_MCREG_TO_U64( 1525 memregs->madr[j][2]), 1526 3, DRMACH_MCREG_TO_U64( 1527 memregs->madr[j][3])); 1528 } 1529 } 1530 break; 1531 case DRMSG_POWERON: 1532 if (dir) { 1533 DRMACH_PR("POWERON Request:\n"); 1534 } else { 1535 DRMACH_PR("POWERON Reply:\n"); 1536 } 1537 break; 1538 case DRMSG_POWEROFF: 1539 if (dir) { 1540 DRMACH_PR("POWEROFF Request:\n"); 1541 } else { 1542 DRMACH_PR("POWEROFF Reply:\n"); 1543 } 1544 break; 1545 case DRMSG_TESTBOARD: 1546 if (dir) { 1547 DRMACH_PR("TESTBOARD Request:\n"); 1548 DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ", 1549 mp->dm_tb.memaddrhi, 1550 mp->dm_tb.memaddrlo); 1551 DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n", 1552 mp->dm_tb.memlen, mp->dm_tb.cpu_portid); 1553 DRMACH_PR("\tforce=0x%x imm=0x%x\n", 1554 mp->dm_tb.force, mp->dm_tb.immediate); 1555 } else { 1556 DRMACH_PR("TESTBOARD Reply:\n"); 1557 DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ", 1558 mp->dm_tr.memaddrhi, 1559 mp->dm_tr.memaddrlo); 1560 DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n", 1561 mp->dm_tr.memlen, mp->dm_tr.cpu_portid); 1562 DRMACH_PR("\trecovered=0x%x test status=0x%x\n", 1563 mp->dm_tr.cpu_recovered, 1564 mp->dm_tr.test_status); 1565 1566 } 1567 break; 1568 case DRMSG_ABORT_TEST: 1569 if (dir) { 1570 DRMACH_PR("ABORT_TEST Request:\n"); 1571 } else { 1572 DRMACH_PR("ABORT_TEST Reply:\n"); 1573 } 1574 1575 DRMACH_PR("\tmemaddrhi=0x%x memaddrlo=0x%x ", 1576 mp->dm_ta.memaddrhi, 1577 mp->dm_ta.memaddrlo); 1578 DRMACH_PR("memlen=0x%x cpu_portid=0x%x\n", 1579 mp->dm_ta.memlen, mp->dm_ta.cpu_portid); 1580 break; 1581 case DRMSG_SHOWBOARD: 1582 if (dir) { 1583 DRMACH_PR("SHOWBOARD Request:\n"); 1584 } else { 1585 DRMACH_PR("SHOWBOARD Reply:\n"); 1586 1587 DRMACH_PR(": empty=%d power=%d assigned=%d", 1588 mp->dm_sb.slot_empty, 1589 mp->dm_sb.power_on, 1590 mp->dm_sb.bd_assigned); 1591 DRMACH_PR(": active=%d t_status=%d t_level=%d ", 1592 mp->dm_sb.bd_active, 1593 mp->dm_sb.test_status, 1594 mp->dm_sb.test_level); 1595 DRMACH_PR(": type=%s ", mp->dm_sb.board_type); 1596 } 1597 break; 1598 default: 1599 DRMACH_PR("Unknown message type\n"); 1600 break; 1601 } 1602 1603 DRMACH_PR("dr hdr:\n\tid=0x%x vers=0x%x cmd=0x%x exp=0x%x slot=0x%x\n", 1604 php->message_id, php->drproto_version, php->command, 1605 php->expbrd, php->slot); 1606 #endif 1607 DRMACH_PR("\treply_status=0x%x error_code=0x%x\n", php->reply_status, 1608 php->error_code); 1609 } 1610 1611 /* 1612 * Callback function passed to taskq_dispatch when a mailbox reinitialization 1613 * handshake needs to be scheduled. The handshake can't be performed by the 1614 * thread that determines it is needed, in most cases, so this function is 1615 * dispatched on the system-wide taskq pool of threads. Failure is reported but 1616 * otherwise ignored, since any situation that requires a mailbox initialization 1617 * handshake will continue to request the handshake until it succeeds. 1618 */ 1619 static void 1620 drmach_mbox_reinit(void *unused) 1621 { 1622 _NOTE(ARGUNUSED(unused)) 1623 1624 caddr_t obufp = NULL; 1625 sbd_error_t *serr = NULL; 1626 1627 DRMACH_PR("scheduled mailbox reinit running\n"); 1628 1629 mutex_enter(&drmach_ri_mbox_mutex); 1630 mutex_enter(&drmach_g_mbox_mutex); 1631 if (drmach_mbox_iflag == 0) { 1632 /* need to initialize the mailbox */ 1633 mutex_exit(&drmach_g_mbox_mutex); 1634 1635 cmn_err(CE_NOTE, "!reinitializing DR mailbox"); 1636 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 1637 serr = drmach_mbox_trans(DRMSG_MBOX_INIT, 0, obufp, 1638 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 1639 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 1640 1641 if (serr) { 1642 cmn_err(CE_WARN, 1643 "mbox_init: MBOX_INIT failed ecode=0x%x", 1644 serr->e_code); 1645 sbd_err_clear(&serr); 1646 } 1647 mutex_enter(&drmach_g_mbox_mutex); 1648 if (!serr) { 1649 drmach_mbox_iflag = 1; 1650 } 1651 } 1652 drmach_mbox_ipending = 0; 1653 mutex_exit(&drmach_g_mbox_mutex); 1654 mutex_exit(&drmach_ri_mbox_mutex); 1655 } 1656 1657 /* 1658 * To ensure sufficient compatibility with future versions of the DR mailbox 1659 * protocol, we use a buffer that is large enough to receive the largest message 1660 * that could possibly be sent to us. However, since that ends up being fairly 1661 * large, allocating it on the stack is a bad idea. Fortunately, this function 1662 * does not need to be MT-safe since it is only invoked by the mailbox 1663 * framework, which will never invoke it multiple times concurrently. Since 1664 * that is the case, we can use a static buffer. 1665 */ 1666 void 1667 drmach_mbox_event(void) 1668 { 1669 static uint8_t buf[DRMACH_MAX_MBOX_MSG_SIZE]; 1670 dr_mbox_msg_t *msg = (dr_mbox_msg_t *)buf; 1671 int err; 1672 uint32_t type = MBOXSC_MSG_EVENT; 1673 uint32_t command = DRMSG_BOARDEVENT; 1674 uint64_t transid = 0; 1675 uint32_t length = DRMACH_MAX_MBOX_MSG_SIZE; 1676 char *hint = ""; 1677 int logsys = 0; 1678 1679 do { 1680 err = mboxsc_getmsg(KEY_SCDR, &type, &command, &transid, 1681 &length, (void *)msg, 0); 1682 } while (err == EAGAIN); 1683 1684 /* don't try to interpret anything with the wrong version number */ 1685 if ((err == 0) && (msg->p_hdr.drproto_version != DRMBX_VERSION)) { 1686 cmn_err(CE_WARN, "mailbox version mismatch 0x%x vs 0x%x", 1687 msg->p_hdr.drproto_version, DRMBX_VERSION); 1688 mutex_enter(&drmach_g_mbox_mutex); 1689 drmach_mbox_iflag = 0; 1690 /* schedule a reinit handshake if one isn't pending */ 1691 if (!drmach_mbox_ipending) { 1692 if (taskq_dispatch(system_taskq, drmach_mbox_reinit, 1693 NULL, TQ_NOSLEEP) != NULL) { 1694 drmach_mbox_ipending = 1; 1695 } else { 1696 cmn_err(CE_WARN, 1697 "failed to schedule mailbox reinit"); 1698 } 1699 } 1700 mutex_exit(&drmach_g_mbox_mutex); 1701 return; 1702 } 1703 1704 if ((err != 0) || (msg->p_hdr.reply_status != DRMSG_REPLY_OK)) { 1705 cmn_err(CE_WARN, 1706 "Unsolicited mboxsc_getmsg failed: err=0x%x code=0x%x", 1707 err, msg->p_hdr.error_code); 1708 } else { 1709 dr_boardevent_t *be; 1710 be = (dr_boardevent_t *)&msg->msgdata; 1711 1712 /* check for initialization event */ 1713 if (be->initialized) { 1714 mutex_enter(&drmach_g_mbox_mutex); 1715 drmach_mbox_iflag = 0; 1716 /* schedule a reinit handshake if one isn't pending */ 1717 if (!drmach_mbox_ipending) { 1718 if (taskq_dispatch(system_taskq, 1719 drmach_mbox_reinit, NULL, TQ_NOSLEEP) 1720 != NULL) { 1721 drmach_mbox_ipending = 1; 1722 } else { 1723 cmn_err(CE_WARN, "failed to schedule " 1724 "mailbox reinit"); 1725 } 1726 } 1727 mutex_exit(&drmach_g_mbox_mutex); 1728 cmn_err(CE_NOTE, "!Mailbox Init event received"); 1729 } 1730 1731 /* anything else will be a log_sysevent call */ 1732 1733 if (be->board_insertion) { 1734 DRMACH_PR("Board Insertion event received"); 1735 hint = DR_HINT_INSERT; 1736 logsys++; 1737 } 1738 if (be->board_removal) { 1739 DRMACH_PR("Board Removal event received"); 1740 hint = DR_HINT_REMOVE; 1741 logsys++; 1742 } 1743 if (be->slot_assign) { 1744 DRMACH_PR("Slot Assign event received"); 1745 logsys++; 1746 } 1747 if (be->slot_unassign) { 1748 DRMACH_PR("Slot Unassign event received"); 1749 logsys++; 1750 } 1751 if (be->slot_avail) { 1752 DRMACH_PR("Slot Available event received"); 1753 logsys++; 1754 } 1755 if (be->slot_unavail) { 1756 DRMACH_PR("Slot Unavailable event received"); 1757 logsys++; 1758 } 1759 if (be->power_on) { 1760 DRMACH_PR("Power ON event received"); 1761 logsys++; 1762 } 1763 if (be->power_off) { 1764 DRMACH_PR("Power OFF event received"); 1765 logsys++; 1766 } 1767 1768 if (logsys) 1769 (void) drmach_log_sysevent( 1770 DRMACH_EXPSLOT2BNUM(msg->p_hdr.expbrd, 1771 msg->p_hdr.slot), hint, SE_NOSLEEP, 1); 1772 } 1773 } 1774 1775 static uint32_t 1776 drmach_get_msgid() 1777 { 1778 uint32_t rv; 1779 mutex_enter(&drmach_msglist_mutex); 1780 if (!(++drmach_msgid)) 1781 ++drmach_msgid; 1782 rv = drmach_msgid; 1783 mutex_exit(&drmach_msglist_mutex); 1784 return (rv); 1785 } 1786 1787 /* 1788 * unlink an entry from the message transaction list 1789 * 1790 * caller must hold drmach_msglist_mutex 1791 */ 1792 void 1793 drmach_msglist_unlink(drmach_msglist_t *entry) 1794 { 1795 ASSERT(mutex_owned(&drmach_msglist_mutex)); 1796 if (entry->prev) { 1797 entry->prev->next = entry->next; 1798 if (entry->next) 1799 entry->next->prev = entry->prev; 1800 } else { 1801 drmach_msglist_first = entry->next; 1802 if (entry->next) 1803 entry->next->prev = NULL; 1804 } 1805 if (entry == drmach_msglist_last) { 1806 drmach_msglist_last = entry->prev; 1807 } 1808 } 1809 1810 void 1811 drmach_msglist_link(drmach_msglist_t *entry) 1812 { 1813 mutex_enter(&drmach_msglist_mutex); 1814 if (drmach_msglist_last) { 1815 entry->prev = drmach_msglist_last; 1816 drmach_msglist_last->next = entry; 1817 drmach_msglist_last = entry; 1818 } else { 1819 drmach_msglist_last = drmach_msglist_first = entry; 1820 } 1821 mutex_exit(&drmach_msglist_mutex); 1822 } 1823 1824 void 1825 drmach_mbox_getmsg() 1826 { 1827 int err; 1828 register int msgid; 1829 static uint8_t buf[DRMACH_MAX_MBOX_MSG_SIZE]; 1830 dr_mbox_msg_t *msg = (dr_mbox_msg_t *)buf; 1831 dr_proto_hdr_t *php; 1832 drmach_msglist_t *found, *entry; 1833 uint32_t type = MBOXSC_MSG_REPLY; 1834 uint32_t command; 1835 uint64_t transid; 1836 uint32_t length; 1837 1838 php = &msg->p_hdr; 1839 1840 while (drmach_getmsg_thread_run != 0) { 1841 /* get a reply message */ 1842 command = 0; 1843 transid = 0; 1844 length = DRMACH_MAX_MBOX_MSG_SIZE; 1845 err = mboxsc_getmsg(KEY_SCDR, &type, &command, &transid, 1846 &length, (void *)msg, drmach_to_getmsg); 1847 1848 if (err) { 1849 /* 1850 * If mboxsc_getmsg returns ETIMEDOUT or EAGAIN, then 1851 * the "error" is really just a normal, transient 1852 * condition and we can retry the operation right away. 1853 * Any other error suggests a more serious problem, 1854 * ranging from a message being too big for our buffer 1855 * (EMSGSIZE) to total failure of the mailbox layer. 1856 * This second class of errors is much less "transient", 1857 * so rather than retrying over and over (and getting 1858 * the same error over and over) as fast as we can, 1859 * we'll sleep for a while before retrying. 1860 */ 1861 if ((err != ETIMEDOUT) && (err != EAGAIN)) { 1862 cmn_err(CE_WARN, 1863 "mboxsc_getmsg failed, err=0x%x", err); 1864 delay(drmach_mbxerr_delay * hz); 1865 } 1866 continue; 1867 } 1868 1869 drmach_mbox_prmsg(msg, 0); 1870 1871 if (php->drproto_version != DRMBX_VERSION) { 1872 cmn_err(CE_WARN, 1873 "mailbox version mismatch 0x%x vs 0x%x", 1874 php->drproto_version, DRMBX_VERSION); 1875 1876 mutex_enter(&drmach_g_mbox_mutex); 1877 drmach_mbox_iflag = 0; 1878 /* schedule a reinit handshake if one isn't pending */ 1879 if (!drmach_mbox_ipending) { 1880 if (taskq_dispatch(system_taskq, 1881 drmach_mbox_reinit, NULL, TQ_NOSLEEP) 1882 != NULL) { 1883 drmach_mbox_ipending = 1; 1884 } else { 1885 cmn_err(CE_WARN, "failed to schedule " 1886 "mailbox reinit"); 1887 } 1888 } 1889 mutex_exit(&drmach_g_mbox_mutex); 1890 1891 continue; 1892 } 1893 1894 msgid = php->message_id; 1895 found = NULL; 1896 mutex_enter(&drmach_msglist_mutex); 1897 entry = drmach_msglist_first; 1898 while (entry != NULL) { 1899 if (entry->msgid == msgid) { 1900 found = entry; 1901 drmach_msglist_unlink(entry); 1902 entry = NULL; 1903 } else 1904 entry = entry->next; 1905 } 1906 1907 if (found) { 1908 mutex_enter(&found->g_lock); 1909 1910 found->e_code = php->error_code; 1911 if (found->i_buflen > 0) 1912 bcopy((caddr_t)&msg->msgdata, found->i_buf, 1913 found->i_buflen); 1914 found->m_reply = 1; 1915 1916 cv_signal(&found->g_cv); 1917 mutex_exit(&found->g_lock); 1918 } else { 1919 cmn_err(CE_WARN, "!mbox_getmsg: no match for id 0x%x", 1920 msgid); 1921 cmn_err(CE_WARN, "! cmd = 0x%x, exb = %d, slot = %d", 1922 php->command, php->expbrd, php->slot); 1923 } 1924 1925 mutex_exit(&drmach_msglist_mutex); 1926 } 1927 cmn_err(CE_WARN, "mbox_getmsg: exiting"); 1928 mutex_enter(&drmach_msglist_mutex); 1929 entry = drmach_msglist_first; 1930 while (entry != NULL) { 1931 if (entry->p_flag == 1) { 1932 entry->f_error = -1; 1933 mutex_enter(&entry->g_lock); 1934 cv_signal(&entry->g_cv); 1935 mutex_exit(&entry->g_lock); 1936 drmach_msglist_unlink(entry); 1937 } 1938 entry = entry->next; 1939 } 1940 mutex_exit(&drmach_msglist_mutex); 1941 drmach_getmsg_thread_run = -1; 1942 thread_exit(); 1943 } 1944 1945 void 1946 drmach_mbox_sendmsg() 1947 { 1948 int err, retry; 1949 drmach_msglist_t *entry; 1950 dr_mbox_msg_t *mp; 1951 dr_proto_hdr_t *php; 1952 1953 while (drmach_sendmsg_thread_run != 0) { 1954 /* 1955 * Search through the list to find entries awaiting 1956 * transmission to the SC 1957 */ 1958 mutex_enter(&drmach_msglist_mutex); 1959 entry = drmach_msglist_first; 1960 retry = 0; 1961 while (entry != NULL) { 1962 if (entry->p_flag == 1) { 1963 entry = entry->next; 1964 continue; 1965 } 1966 1967 mutex_exit(&drmach_msglist_mutex); 1968 1969 if (!retry) 1970 mutex_enter(&entry->s_lock); 1971 mp = (dr_mbox_msg_t *)entry->o_buf; 1972 php = &mp->p_hdr; 1973 1974 drmach_mbox_prmsg(mp, 1); 1975 1976 err = mboxsc_putmsg(KEY_DRSC, MBOXSC_MSG_REQUEST, 1977 php->command, NULL, entry->o_buflen, (void *)mp, 1978 drmach_to_putmsg); 1979 1980 if (err) { 1981 switch (err) { 1982 1983 case EAGAIN: 1984 case EBUSY: 1985 ++retry; 1986 mutex_enter(&drmach_msglist_mutex); 1987 continue; 1988 1989 case ETIMEDOUT: 1990 if (--entry->o_nretry <= 0) { 1991 mutex_enter( 1992 &drmach_msglist_mutex); 1993 drmach_msglist_unlink(entry); 1994 mutex_exit( 1995 &drmach_msglist_mutex); 1996 entry->f_error = err; 1997 entry->p_flag = 1; 1998 cv_signal(&entry->s_cv); 1999 } else { 2000 ++retry; 2001 mutex_enter( 2002 &drmach_msglist_mutex); 2003 continue; 2004 } 2005 break; 2006 default: 2007 mutex_enter(&drmach_msglist_mutex); 2008 drmach_msglist_unlink(entry); 2009 mutex_exit(&drmach_msglist_mutex); 2010 entry->f_error = err; 2011 entry->p_flag = 1; 2012 cv_signal(&entry->s_cv); 2013 break; 2014 } 2015 } else { 2016 entry->p_flag = 1; 2017 cv_signal(&entry->s_cv); 2018 } 2019 2020 mutex_exit(&entry->s_lock); 2021 retry = 0; 2022 mutex_enter(&drmach_msglist_mutex); 2023 entry = drmach_msglist_first; 2024 } 2025 mutex_exit(&drmach_msglist_mutex); 2026 2027 mutex_enter(&drmach_sendmsg_mutex); 2028 (void) cv_reltimedwait(&drmach_sendmsg_cv, 2029 &drmach_sendmsg_mutex, (5 * hz), TR_CLOCK_TICK); 2030 mutex_exit(&drmach_sendmsg_mutex); 2031 } 2032 cmn_err(CE_WARN, "mbox_sendmsg: exiting"); 2033 mutex_enter(&drmach_msglist_mutex); 2034 entry = drmach_msglist_first; 2035 while (entry != NULL) { 2036 if (entry->p_flag == 0) { 2037 entry->f_error = -1; 2038 mutex_enter(&entry->s_lock); 2039 cv_signal(&entry->s_cv); 2040 mutex_exit(&entry->s_lock); 2041 drmach_msglist_unlink(entry); 2042 } 2043 entry = entry->next; 2044 } 2045 mutex_exit(&drmach_msglist_mutex); 2046 cv_destroy(&drmach_sendmsg_cv); 2047 mutex_destroy(&drmach_sendmsg_mutex); 2048 2049 drmach_sendmsg_thread_run = -1; 2050 thread_exit(); 2051 } 2052 2053 void 2054 drmach_msglist_destroy(drmach_msglist_t *listp) 2055 { 2056 if (listp != NULL) { 2057 drmach_msglist_t *entry; 2058 2059 mutex_enter(&drmach_msglist_mutex); 2060 entry = drmach_msglist_first; 2061 while (entry) { 2062 if (listp == entry) { 2063 drmach_msglist_unlink(listp); 2064 entry = NULL; 2065 } else 2066 entry = entry->next; 2067 } 2068 2069 mutex_destroy(&listp->s_lock); 2070 cv_destroy(&listp->s_cv); 2071 mutex_destroy(&listp->g_lock); 2072 cv_destroy(&listp->g_cv); 2073 kmem_free(listp, sizeof (drmach_msglist_t)); 2074 2075 mutex_exit(&drmach_msglist_mutex); 2076 } 2077 } 2078 2079 static drmach_msglist_t * 2080 drmach_msglist_new(caddr_t ibufp, uint32_t ilen, dr_proto_hdr_t *hdrp, 2081 uint32_t olen, int nrtry) 2082 { 2083 drmach_msglist_t *listp; 2084 2085 listp = kmem_zalloc(sizeof (drmach_msglist_t), KM_SLEEP); 2086 mutex_init(&listp->s_lock, NULL, MUTEX_DRIVER, NULL); 2087 cv_init(&listp->s_cv, NULL, CV_DRIVER, NULL); 2088 mutex_init(&listp->g_lock, NULL, MUTEX_DRIVER, NULL); 2089 cv_init(&listp->g_cv, NULL, CV_DRIVER, NULL); 2090 listp->o_buf = (caddr_t)hdrp; 2091 listp->o_buflen = olen; 2092 listp->i_buf = ibufp; 2093 listp->i_buflen = ilen; 2094 listp->o_nretry = nrtry; 2095 listp->msgid = hdrp->message_id; 2096 2097 return (listp); 2098 } 2099 2100 static drmach_msglist_t * 2101 drmach_mbox_req_rply(dr_proto_hdr_t *hdrp, uint32_t olen, caddr_t ibufp, 2102 uint32_t ilen, int timeout, int nrtry, int nosig, 2103 drmach_msglist_t *link) 2104 { 2105 int crv; 2106 drmach_msglist_t *listp; 2107 clock_t to_val; 2108 dr_proto_hdr_t *php; 2109 2110 /* setup transaction list entry */ 2111 listp = drmach_msglist_new(ibufp, ilen, hdrp, olen, nrtry); 2112 2113 /* send mailbox message, await reply */ 2114 mutex_enter(&listp->s_lock); 2115 mutex_enter(&listp->g_lock); 2116 2117 listp->link = link; 2118 drmach_msglist_link(listp); 2119 2120 mutex_enter(&drmach_sendmsg_mutex); 2121 cv_signal(&drmach_sendmsg_cv); 2122 mutex_exit(&drmach_sendmsg_mutex); 2123 2124 while (listp->p_flag == 0) { 2125 cv_wait(&listp->s_cv, &listp->s_lock); 2126 } 2127 2128 to_val = ddi_get_lbolt() + (timeout * hz); 2129 2130 if (listp->f_error) { 2131 listp->p_flag = 0; 2132 cmn_err(CE_WARN, "!mboxsc_putmsg failed: 0x%x", listp->f_error); 2133 php = (dr_proto_hdr_t *)listp->o_buf; 2134 cmn_err(CE_WARN, "! cmd = 0x%x, exb = %d, slot = %d", 2135 php->command, php->expbrd, php->slot); 2136 } else { 2137 while (listp->m_reply == 0 && listp->f_error == 0) { 2138 if (nosig) 2139 crv = cv_timedwait(&listp->g_cv, &listp->g_lock, 2140 to_val); 2141 else 2142 crv = cv_timedwait_sig(&listp->g_cv, 2143 &listp->g_lock, to_val); 2144 switch (crv) { 2145 case -1: /* timed out */ 2146 cmn_err(CE_WARN, 2147 "!msgid=0x%x reply timed out", 2148 hdrp->message_id); 2149 php = (dr_proto_hdr_t *)listp->o_buf; 2150 cmn_err(CE_WARN, "! cmd = 0x%x, " 2151 "exb = %d, slot = %d", php->command, 2152 php->expbrd, php->slot); 2153 listp->f_error = ETIMEDOUT; 2154 break; 2155 case 0: /* signal received */ 2156 cmn_err(CE_WARN, 2157 "operation interrupted by signal"); 2158 listp->f_error = EINTR; 2159 break; 2160 default: 2161 break; 2162 } 2163 } 2164 2165 /* 2166 * If link is set for this entry, check to see if 2167 * the linked entry has been replied to. If not, 2168 * wait for the response. 2169 * Currently, this is only used for ABORT_TEST functionality, 2170 * wherein a check is made for the TESTBOARD reply when 2171 * the ABORT_TEST reply is received. 2172 */ 2173 2174 if (link) { 2175 mutex_enter(&link->g_lock); 2176 /* 2177 * If the reply to the linked entry hasn't been 2178 * received, clear the existing link->f_error, 2179 * and await the reply. 2180 */ 2181 if (link->m_reply == 0) { 2182 link->f_error = 0; 2183 } 2184 to_val = ddi_get_lbolt() + (timeout * hz); 2185 while (link->m_reply == 0 && link->f_error == 0) { 2186 crv = cv_timedwait(&link->g_cv, &link->g_lock, 2187 to_val); 2188 switch (crv) { 2189 case -1: /* timed out */ 2190 cmn_err(CE_NOTE, 2191 "!link msgid=0x%x reply timed out", 2192 link->msgid); 2193 link->f_error = ETIMEDOUT; 2194 break; 2195 default: 2196 break; 2197 } 2198 } 2199 mutex_exit(&link->g_lock); 2200 } 2201 } 2202 mutex_exit(&listp->g_lock); 2203 mutex_exit(&listp->s_lock); 2204 return (listp); 2205 } 2206 2207 static sbd_error_t * 2208 drmach_mbx2sbderr(drmach_msglist_t *mlp) 2209 { 2210 char a_pnt[MAXNAMELEN]; 2211 dr_proto_hdr_t *php; 2212 int bnum; 2213 2214 if (mlp->f_error) { 2215 /* 2216 * If framework failure is due to signal, return "no error" 2217 * error. 2218 */ 2219 if (mlp->f_error == EINTR) 2220 return (drerr_new(0, ESTC_NONE, NULL)); 2221 2222 mutex_enter(&drmach_g_mbox_mutex); 2223 drmach_mbox_iflag = 0; 2224 mutex_exit(&drmach_g_mbox_mutex); 2225 if (!mlp->p_flag) 2226 return (drerr_new(1, ESTC_MBXRQST, NULL)); 2227 else 2228 return (drerr_new(1, ESTC_MBXRPLY, NULL)); 2229 } 2230 php = (dr_proto_hdr_t *)mlp->o_buf; 2231 bnum = 2 * php->expbrd + php->slot; 2232 a_pnt[0] = '\0'; 2233 (void) drmach_board_name(bnum, a_pnt, MAXNAMELEN); 2234 2235 switch (mlp->e_code) { 2236 case 0: 2237 return (NULL); 2238 case DRERR_NOACL: 2239 return (drerr_new(0, ESTC_NOACL, "%s", a_pnt)); 2240 case DRERR_NOT_ASSIGNED: 2241 return (drerr_new(0, ESTC_NOT_ASSIGNED, "%s", a_pnt)); 2242 case DRERR_NOT_ACTIVE: 2243 return (drerr_new(0, ESTC_NOT_ACTIVE, "%s", a_pnt)); 2244 case DRERR_EMPTY_SLOT: 2245 return (drerr_new(0, ESTC_EMPTY_SLOT, "%s", a_pnt)); 2246 case DRERR_POWER_OFF: 2247 return (drerr_new(0, ESTC_POWER_OFF, "%s", a_pnt)); 2248 case DRERR_TEST_IN_PROGRESS: 2249 return (drerr_new(0, ESTC_TEST_IN_PROGRESS, "%s", 2250 a_pnt)); 2251 case DRERR_TESTING_BUSY: 2252 return (drerr_new(0, ESTC_TESTING_BUSY, "%s", a_pnt)); 2253 case DRERR_TEST_REQUIRED: 2254 return (drerr_new(0, ESTC_TEST_REQUIRED, "%s", a_pnt)); 2255 case DRERR_UNAVAILABLE: 2256 return (drerr_new(0, ESTC_UNAVAILABLE, "%s", a_pnt)); 2257 case DRERR_RECOVERABLE: 2258 return (drerr_new(0, ESTC_SMS_ERR_RECOVERABLE, "%s", 2259 a_pnt)); 2260 case DRERR_UNRECOVERABLE: 2261 return (drerr_new(1, ESTC_SMS_ERR_UNRECOVERABLE, "%s", 2262 a_pnt)); 2263 default: 2264 return (drerr_new(1, ESTC_MBOX_UNKNOWN, NULL)); 2265 } 2266 } 2267 2268 static sbd_error_t * 2269 drmach_mbox_trans(uint8_t msgtype, int bnum, caddr_t obufp, int olen, 2270 caddr_t ibufp, int ilen) 2271 { 2272 int timeout = 0; 2273 int ntries = 0; 2274 int nosignals = 0; 2275 dr_proto_hdr_t *hdrp; 2276 drmach_msglist_t *mlp; 2277 sbd_error_t *err = NULL; 2278 2279 if (msgtype != DRMSG_MBOX_INIT) { 2280 mutex_enter(&drmach_ri_mbox_mutex); 2281 mutex_enter(&drmach_g_mbox_mutex); 2282 if (drmach_mbox_iflag == 0) { 2283 /* need to initialize the mailbox */ 2284 dr_proto_hdr_t imsg; 2285 2286 mutex_exit(&drmach_g_mbox_mutex); 2287 2288 imsg.command = DRMSG_MBOX_INIT; 2289 2290 imsg.message_id = drmach_get_msgid(); 2291 imsg.drproto_version = DRMBX_VERSION; 2292 imsg.expbrd = 0; 2293 imsg.slot = 0; 2294 2295 cmn_err(CE_WARN, "!reinitializing DR mailbox"); 2296 mlp = drmach_mbox_req_rply(&imsg, sizeof (imsg), 0, 0, 2297 10, 5, 0, NULL); 2298 err = drmach_mbx2sbderr(mlp); 2299 /* 2300 * If framework failure incoming is encountered on 2301 * the MBOX_INIT [timeout on SMS reply], the error 2302 * type must be changed before returning to caller. 2303 * This is to prevent drmach_board_connect() and 2304 * drmach_board_disconnect() from marking boards 2305 * UNUSABLE based on MBOX_INIT failures. 2306 */ 2307 if ((err != NULL) && (err->e_code == ESTC_MBXRPLY)) { 2308 cmn_err(CE_WARN, 2309 "!Changed mbox incoming to outgoing" 2310 " failure on reinit"); 2311 sbd_err_clear(&err); 2312 err = drerr_new(0, ESTC_MBXRQST, NULL); 2313 } 2314 drmach_msglist_destroy(mlp); 2315 if (err) { 2316 mutex_exit(&drmach_ri_mbox_mutex); 2317 return (err); 2318 } 2319 mutex_enter(&drmach_g_mbox_mutex); 2320 drmach_mbox_iflag = 1; 2321 } 2322 mutex_exit(&drmach_g_mbox_mutex); 2323 mutex_exit(&drmach_ri_mbox_mutex); 2324 } 2325 2326 hdrp = (dr_proto_hdr_t *)obufp; 2327 2328 /* setup outgoing mailbox header */ 2329 hdrp->command = msgtype; 2330 hdrp->message_id = drmach_get_msgid(); 2331 hdrp->drproto_version = DRMBX_VERSION; 2332 hdrp->expbrd = DRMACH_BNUM2EXP(bnum); 2333 hdrp->slot = DRMACH_BNUM2SLOT(bnum); 2334 2335 switch (msgtype) { 2336 2337 case DRMSG_MBOX_INIT: 2338 timeout = drmach_to_mbxinit; 2339 ntries = 1; 2340 nosignals = 0; 2341 break; 2342 2343 case DRMSG_ASSIGN: 2344 timeout = drmach_to_assign; 2345 ntries = 1; 2346 nosignals = 0; 2347 break; 2348 2349 case DRMSG_UNASSIGN: 2350 timeout = drmach_to_unassign; 2351 ntries = 1; 2352 nosignals = 0; 2353 break; 2354 2355 case DRMSG_POWERON: 2356 timeout = drmach_to_poweron; 2357 ntries = 1; 2358 nosignals = 0; 2359 break; 2360 2361 case DRMSG_POWEROFF: 2362 timeout = drmach_to_poweroff; 2363 ntries = 1; 2364 nosignals = 0; 2365 break; 2366 2367 case DRMSG_SHOWBOARD: 2368 timeout = drmach_to_showboard; 2369 ntries = 1; 2370 nosignals = 0; 2371 break; 2372 2373 case DRMSG_CLAIM: 2374 timeout = drmach_to_claim; 2375 ntries = 1; 2376 nosignals = 1; 2377 break; 2378 2379 case DRMSG_UNCLAIM: 2380 timeout = drmach_to_unclaim; 2381 ntries = 1; 2382 nosignals = 1; 2383 break; 2384 2385 case DRMSG_UNCONFIG: 2386 timeout = drmach_to_unconfig; 2387 ntries = 1; 2388 nosignals = 0; 2389 break; 2390 2391 case DRMSG_TESTBOARD: 2392 timeout = drmach_to_testboard; 2393 ntries = 1; 2394 nosignals = 0; 2395 break; 2396 2397 default: 2398 cmn_err(CE_WARN, "Unknown outgoing message type 0x%x", 2399 msgtype); 2400 err = DRMACH_INTERNAL_ERROR(); 2401 break; 2402 } 2403 2404 if (err == NULL) { 2405 mlp = drmach_mbox_req_rply(hdrp, olen, ibufp, ilen, timeout, 2406 ntries, nosignals, NULL); 2407 err = drmach_mbx2sbderr(mlp); 2408 2409 /* 2410 * For DRMSG_TESTBOARD attempts which have timed out, or 2411 * been aborted due to a signal received after mboxsc_putmsg() 2412 * has succeeded in sending the message, a DRMSG_ABORT_TEST 2413 * must be sent. 2414 */ 2415 if ((msgtype == DRMSG_TESTBOARD) && (err != NULL) && 2416 ((mlp->f_error == EINTR) || ((mlp->f_error == ETIMEDOUT) && 2417 (mlp->p_flag != 0)))) { 2418 drmach_msglist_t *abmlp; 2419 dr_abort_test_t abibuf; 2420 2421 hdrp->command = DRMSG_ABORT_TEST; 2422 hdrp->message_id = drmach_get_msgid(); 2423 abmlp = drmach_mbox_req_rply(hdrp, 2424 sizeof (dr_abort_test_t), (caddr_t)&abibuf, 2425 sizeof (abibuf), drmach_to_aborttest, 5, 1, mlp); 2426 cmn_err(CE_WARN, "test aborted"); 2427 drmach_msglist_destroy(abmlp); 2428 } 2429 2430 drmach_msglist_destroy(mlp); 2431 } 2432 2433 return (err); 2434 } 2435 2436 static int 2437 drmach_mbox_init() 2438 { 2439 int err; 2440 caddr_t obufp; 2441 sbd_error_t *serr = NULL; 2442 mboxsc_timeout_range_t mbxtoz; 2443 2444 drmach_mbox_istate = 0; 2445 /* register the outgoing mailbox */ 2446 if ((err = mboxsc_init(KEY_DRSC, MBOXSC_MBOX_OUT, 2447 NULL)) != 0) { 2448 cmn_err(CE_WARN, "DR - SC mboxsc_init failed: 0x%x", err); 2449 return (-1); 2450 } 2451 drmach_mbox_istate = 1; 2452 2453 /* setup the mboxsc_putmsg timeout value */ 2454 if (drmach_use_tuned_putmsg_to) { 2455 cmn_err(CE_NOTE, "!using tuned drmach_to_putmsg = 0x%lx\n", 2456 drmach_to_putmsg); 2457 } else { 2458 if ((err = mboxsc_ctrl(KEY_DRSC, 2459 MBOXSC_CMD_PUTMSG_TIMEOUT_RANGE, &mbxtoz)) != 0) { 2460 cmn_err(CE_WARN, "mboxsc_ctrl failed: 0x%x", err); 2461 drmach_to_putmsg = 60000; 2462 } else { 2463 drmach_to_putmsg = mboxsc_putmsg_def_timeout() * 6; 2464 DRMACH_PR("putmsg range is 0x%lx - 0x%lx value" 2465 " is 0x%lx\n", mbxtoz.min_timeout, 2466 mbxtoz.max_timeout, drmach_to_putmsg); 2467 } 2468 } 2469 2470 /* register the incoming mailbox */ 2471 if ((err = mboxsc_init(KEY_SCDR, MBOXSC_MBOX_IN, 2472 drmach_mbox_event)) != 0) { 2473 cmn_err(CE_WARN, "SC - DR mboxsc_init failed: 0x%x", err); 2474 return (-1); 2475 } 2476 drmach_mbox_istate = 2; 2477 2478 /* initialize mutex for mailbox globals */ 2479 mutex_init(&drmach_g_mbox_mutex, NULL, MUTEX_DRIVER, NULL); 2480 2481 /* initialize mutex for mailbox re-init */ 2482 mutex_init(&drmach_ri_mbox_mutex, NULL, MUTEX_DRIVER, NULL); 2483 2484 /* initialize mailbox message list elements */ 2485 drmach_msglist_first = drmach_msglist_last = NULL; 2486 mutex_init(&drmach_msglist_mutex, NULL, MUTEX_DRIVER, NULL); 2487 2488 mutex_init(&drmach_sendmsg_mutex, NULL, MUTEX_DRIVER, NULL); 2489 cv_init(&drmach_sendmsg_cv, NULL, CV_DRIVER, NULL); 2490 2491 drmach_mbox_istate = 3; 2492 2493 /* start mailbox sendmsg thread */ 2494 drmach_sendmsg_thread_run = 1; 2495 if (drmach_sendmsg_thread == NULL) 2496 drmach_sendmsg_thread = thread_create(NULL, 0, 2497 (void (*)())drmach_mbox_sendmsg, NULL, 0, &p0, 2498 TS_RUN, minclsyspri); 2499 2500 /* start mailbox getmsg thread */ 2501 drmach_getmsg_thread_run = 1; 2502 if (drmach_getmsg_thread == NULL) 2503 drmach_getmsg_thread = thread_create(NULL, 0, 2504 (void (*)())drmach_mbox_getmsg, NULL, 0, &p0, 2505 TS_RUN, minclsyspri); 2506 2507 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 2508 serr = drmach_mbox_trans(DRMSG_MBOX_INIT, 0, obufp, 2509 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 2510 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 2511 if (serr) { 2512 cmn_err(CE_WARN, "mbox_init: MBOX_INIT failed ecode=0x%x", 2513 serr->e_code); 2514 sbd_err_clear(&serr); 2515 return (-1); 2516 } 2517 mutex_enter(&drmach_g_mbox_mutex); 2518 drmach_mbox_iflag = 1; 2519 drmach_mbox_ipending = 0; 2520 mutex_exit(&drmach_g_mbox_mutex); 2521 2522 return (0); 2523 } 2524 2525 static int 2526 drmach_mbox_fini() 2527 { 2528 int err, rv = 0; 2529 2530 if (drmach_mbox_istate > 2) { 2531 drmach_getmsg_thread_run = 0; 2532 drmach_sendmsg_thread_run = 0; 2533 cmn_err(CE_WARN, 2534 "drmach_mbox_fini: waiting for mbox threads..."); 2535 while ((drmach_getmsg_thread_run == 0) || 2536 (drmach_sendmsg_thread_run == 0)) { 2537 continue; 2538 } 2539 cmn_err(CE_WARN, "drmach_mbox_fini: mbox threads done."); 2540 mutex_destroy(&drmach_msglist_mutex); 2541 2542 } 2543 if (drmach_mbox_istate) { 2544 /* de-register the outgoing mailbox */ 2545 if ((err = mboxsc_fini(KEY_DRSC)) != 0) { 2546 cmn_err(CE_WARN, "DR - SC mboxsc_fini failed: 0x%x", 2547 err); 2548 rv = -1; 2549 } 2550 } 2551 if (drmach_mbox_istate > 1) { 2552 /* de-register the incoming mailbox */ 2553 if ((err = mboxsc_fini(KEY_SCDR)) != 0) { 2554 cmn_err(CE_WARN, "SC - DR mboxsc_fini failed: 0x%x", 2555 err); 2556 rv = -1; 2557 } 2558 } 2559 mutex_destroy(&drmach_g_mbox_mutex); 2560 mutex_destroy(&drmach_ri_mbox_mutex); 2561 return (rv); 2562 } 2563 2564 static int 2565 drmach_portid2bnum(int portid) 2566 { 2567 int slot; 2568 2569 switch (portid & 0x1f) { 2570 case 0: case 1: case 2: case 3: /* cpu/wci devices */ 2571 case 0x1e: /* slot 0 axq registers */ 2572 slot = 0; 2573 break; 2574 2575 case 8: case 9: /* cpu devices */ 2576 case 0x1c: case 0x1d: /* schizo/wci devices */ 2577 case 0x1f: /* slot 1 axq registers */ 2578 slot = 1; 2579 break; 2580 2581 default: 2582 ASSERT(0); /* catch in debug kernels */ 2583 } 2584 2585 return (((portid >> 4) & 0x7e) | slot); 2586 } 2587 2588 extern int axq_suspend_iopause; 2589 2590 static int 2591 hold_rele_branch(dev_info_t *rdip, void *arg) 2592 { 2593 int i; 2594 int *holdp = (int *)arg; 2595 char *name = ddi_node_name(rdip); 2596 2597 /* 2598 * For Starcat, we must be children of the root devinfo node 2599 */ 2600 ASSERT(ddi_get_parent(rdip) == ddi_root_node()); 2601 2602 i = drmach_name2type_idx(name); 2603 2604 /* 2605 * Only children of the root devinfo node need to be 2606 * held/released since they are the only valid targets 2607 * of tree operations. This corresponds to the node types 2608 * listed in the drmach_name2type array. 2609 */ 2610 if (i < 0) { 2611 /* Not of interest to us */ 2612 return (DDI_WALK_PRUNECHILD); 2613 } 2614 2615 if (*holdp) { 2616 ASSERT(!e_ddi_branch_held(rdip)); 2617 e_ddi_branch_hold(rdip); 2618 } else { 2619 ASSERT(e_ddi_branch_held(rdip)); 2620 e_ddi_branch_rele(rdip); 2621 } 2622 2623 return (DDI_WALK_PRUNECHILD); 2624 } 2625 2626 static int 2627 drmach_init(void) 2628 { 2629 pnode_t nodeid; 2630 gdcd_t *gdcd; 2631 int bnum; 2632 dev_info_t *rdip; 2633 int hold, circ; 2634 2635 mutex_enter(&drmach_i_lock); 2636 if (drmach_initialized) { 2637 mutex_exit(&drmach_i_lock); 2638 return (0); 2639 } 2640 2641 gdcd = drmach_gdcd_new(); 2642 if (gdcd == NULL) { 2643 mutex_exit(&drmach_i_lock); 2644 cmn_err(CE_WARN, "drmach_init: failed to access GDCD\n"); 2645 return (-1); 2646 } 2647 2648 drmach_boards = drmach_array_new(0, MAX_BOARDS - 1); 2649 2650 nodeid = prom_childnode(prom_rootnode()); 2651 do { 2652 int len; 2653 int portid; 2654 drmachid_t id; 2655 2656 len = prom_getproplen(nodeid, "portid"); 2657 if (len != sizeof (portid)) 2658 continue; 2659 2660 portid = -1; 2661 (void) prom_getprop(nodeid, "portid", (caddr_t)&portid); 2662 if (portid == -1) 2663 continue; 2664 2665 bnum = drmach_portid2bnum(portid); 2666 2667 if (drmach_array_get(drmach_boards, bnum, &id) == -1) { 2668 /* portid translated to an invalid board number */ 2669 cmn_err(CE_WARN, "OBP node 0x%x has" 2670 " invalid property value, %s=%u", 2671 nodeid, "portid", portid); 2672 2673 /* clean up */ 2674 drmach_array_dispose(drmach_boards, 2675 drmach_board_dispose); 2676 drmach_gdcd_dispose(gdcd); 2677 mutex_exit(&drmach_i_lock); 2678 return (-1); 2679 } else if (id == NULL) { 2680 drmach_board_t *bp; 2681 l1_slot_stat_t *dcd; 2682 int exp, slot; 2683 2684 bp = drmach_board_new(bnum); 2685 bp->assigned = !drmach_initialized; 2686 bp->powered = !drmach_initialized; 2687 2688 exp = DRMACH_BNUM2EXP(bnum); 2689 slot = DRMACH_BNUM2SLOT(bnum); 2690 dcd = &gdcd->dcd_slot[exp][slot]; 2691 bp->stardrb_offset = 2692 dcd->l1ss_cpu_drblock_xwd_offset << 3; 2693 DRMACH_PR("%s: stardrb_offset=0x%lx\n", bp->cm.name, 2694 bp->stardrb_offset); 2695 2696 if (gdcd->dcd_slot[exp][slot].l1ss_flags & 2697 L1SSFLG_THIS_L1_NULL_PROC_LPA) { 2698 bp->flags |= DRMACH_NULL_PROC_LPA; 2699 DRMACH_PR("%s: NULL proc LPA\n", bp->cm.name); 2700 } 2701 } 2702 } while ((nodeid = prom_nextnode(nodeid)) != OBP_NONODE); 2703 2704 drmach_cpu_sram_va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP); 2705 2706 if (gdcd->dcd_testcage_log2_mbytes_size != DCD_DR_TESTCAGE_DISABLED) { 2707 ASSERT(gdcd->dcd_testcage_log2_mbytes_size == 2708 gdcd->dcd_testcage_log2_mbytes_align); 2709 drmach_iocage_paddr = 2710 (uint64_t)gdcd->dcd_testcage_mbyte_PA << 20; 2711 drmach_iocage_size = 2712 1 << (gdcd->dcd_testcage_log2_mbytes_size + 20); 2713 2714 drmach_iocage_vaddr = (caddr_t)vmem_alloc(heap_arena, 2715 drmach_iocage_size, VM_SLEEP); 2716 hat_devload(kas.a_hat, drmach_iocage_vaddr, drmach_iocage_size, 2717 mmu_btop(drmach_iocage_paddr), 2718 PROT_READ | PROT_WRITE, 2719 HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST); 2720 2721 DRMACH_PR("gdcd size=0x%x align=0x%x PA=0x%x\n", 2722 gdcd->dcd_testcage_log2_mbytes_size, 2723 gdcd->dcd_testcage_log2_mbytes_align, 2724 gdcd->dcd_testcage_mbyte_PA); 2725 DRMACH_PR("drmach size=0x%x PA=0x%lx VA=0x%p\n", 2726 drmach_iocage_size, drmach_iocage_paddr, 2727 (void *)drmach_iocage_vaddr); 2728 } 2729 2730 if (drmach_iocage_size == 0) { 2731 drmach_array_dispose(drmach_boards, drmach_board_dispose); 2732 drmach_boards = NULL; 2733 vmem_free(heap_arena, drmach_cpu_sram_va, PAGESIZE); 2734 drmach_gdcd_dispose(gdcd); 2735 mutex_exit(&drmach_i_lock); 2736 cmn_err(CE_WARN, "drmach_init: iocage not available\n"); 2737 return (-1); 2738 } 2739 2740 drmach_gdcd_dispose(gdcd); 2741 2742 mutex_init(&drmach_iocage_lock, NULL, MUTEX_DRIVER, NULL); 2743 cv_init(&drmach_iocage_cv, NULL, CV_DRIVER, NULL); 2744 mutex_init(&drmach_xt_mb_lock, NULL, MUTEX_DRIVER, NULL); 2745 mutex_init(&drmach_bus_sync_lock, NULL, MUTEX_DRIVER, NULL); 2746 mutex_init(&drmach_slice_table_lock, NULL, MUTEX_DRIVER, NULL); 2747 2748 mutex_enter(&cpu_lock); 2749 mutex_enter(&drmach_iocage_lock); 2750 ASSERT(drmach_iocage_is_busy == 0); 2751 drmach_iocage_is_busy = 1; 2752 drmach_iocage_mem_scrub(drmach_iocage_size); 2753 drmach_iocage_is_busy = 0; 2754 cv_signal(&drmach_iocage_cv); 2755 mutex_exit(&drmach_iocage_lock); 2756 mutex_exit(&cpu_lock); 2757 2758 2759 if (drmach_mbox_init() == -1) { 2760 cmn_err(CE_WARN, "DR - SC mailbox initialization Failed"); 2761 } 2762 2763 /* 2764 * Walk immediate children of devinfo root node and hold 2765 * all devinfo branches of interest. 2766 */ 2767 hold = 1; 2768 rdip = ddi_root_node(); 2769 2770 ndi_devi_enter(rdip, &circ); 2771 ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold); 2772 ndi_devi_exit(rdip, circ); 2773 2774 drmach_initialized = 1; 2775 2776 /* 2777 * To avoid a circular patch dependency between DR and AXQ, the AXQ 2778 * rev introducing the axq_iopause_*_all interfaces should not regress 2779 * when installed without the DR rev using those interfaces. The default 2780 * is for iopause to be enabled/disabled during axq suspend/resume. By 2781 * setting the following axq flag to zero, axq will not enable iopause 2782 * during suspend/resume, instead DR will call the axq_iopause_*_all 2783 * interfaces during drmach_copy_rename. 2784 */ 2785 axq_suspend_iopause = 0; 2786 2787 mutex_exit(&drmach_i_lock); 2788 2789 return (0); 2790 } 2791 2792 static void 2793 drmach_fini(void) 2794 { 2795 dev_info_t *rdip; 2796 int hold, circ; 2797 2798 if (drmach_initialized) { 2799 rw_enter(&drmach_boards_rwlock, RW_WRITER); 2800 drmach_array_dispose(drmach_boards, drmach_board_dispose); 2801 drmach_boards = NULL; 2802 rw_exit(&drmach_boards_rwlock); 2803 2804 mutex_destroy(&drmach_slice_table_lock); 2805 mutex_destroy(&drmach_xt_mb_lock); 2806 mutex_destroy(&drmach_bus_sync_lock); 2807 cv_destroy(&drmach_iocage_cv); 2808 mutex_destroy(&drmach_iocage_lock); 2809 2810 vmem_free(heap_arena, drmach_cpu_sram_va, PAGESIZE); 2811 2812 /* 2813 * Walk immediate children of the root devinfo node 2814 * releasing holds acquired on branches in drmach_init() 2815 */ 2816 hold = 0; 2817 rdip = ddi_root_node(); 2818 2819 ndi_devi_enter(rdip, &circ); 2820 ddi_walk_devs(ddi_get_child(rdip), hold_rele_branch, &hold); 2821 ndi_devi_exit(rdip, circ); 2822 2823 drmach_initialized = 0; 2824 } 2825 2826 (void) drmach_mbox_fini(); 2827 if (drmach_xt_mb != NULL) { 2828 vmem_free(static_alloc_arena, (void *)drmach_xt_mb, 2829 drmach_xt_mb_size); 2830 } 2831 rw_destroy(&drmach_boards_rwlock); 2832 mutex_destroy(&drmach_i_lock); 2833 } 2834 2835 static void 2836 drmach_mem_read_madr(drmach_mem_t *mp, int bank, uint64_t *madr) 2837 { 2838 kpreempt_disable(); 2839 2840 /* get register address, read madr value */ 2841 if (STARCAT_CPUID_TO_PORTID(CPU->cpu_id) == mp->dev.portid) { 2842 *madr = lddmcdecode(DRMACH_MC_ASI_ADDR(mp, bank)); 2843 } else { 2844 *madr = lddphysio(DRMACH_MC_ADDR(mp, bank)); 2845 } 2846 2847 kpreempt_enable(); 2848 } 2849 2850 2851 static uint64_t * 2852 drmach_prep_mc_rename(uint64_t *p, int local, 2853 drmach_mem_t *mp, uint64_t current_basepa, uint64_t new_basepa) 2854 { 2855 int bank; 2856 2857 for (bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 2858 uint64_t madr, bank_offset; 2859 2860 /* fetch mc's bank madr register value */ 2861 drmach_mem_read_madr(mp, bank, &madr); 2862 if (madr & DRMACH_MC_VALID_MASK) { 2863 uint64_t bankpa; 2864 2865 bank_offset = (DRMACH_MC_UM_TO_PA(madr) | 2866 DRMACH_MC_LM_TO_PA(madr)) - current_basepa; 2867 bankpa = new_basepa + bank_offset; 2868 2869 /* encode new base pa into madr */ 2870 madr &= ~DRMACH_MC_UM_MASK; 2871 madr |= DRMACH_MC_PA_TO_UM(bankpa); 2872 madr &= ~DRMACH_MC_LM_MASK; 2873 madr |= DRMACH_MC_PA_TO_LM(bankpa); 2874 2875 if (local) 2876 *p++ = DRMACH_MC_ASI_ADDR(mp, bank); 2877 else 2878 *p++ = DRMACH_MC_ADDR(mp, bank); 2879 2880 *p++ = madr; 2881 } 2882 } 2883 2884 return (p); 2885 } 2886 2887 static uint64_t * 2888 drmach_prep_schizo_script(uint64_t *p, drmach_mem_t *mp, uint64_t new_basepa) 2889 { 2890 drmach_board_t *bp; 2891 int rv; 2892 int idx; 2893 drmachid_t id; 2894 uint64_t last_scsr_pa = 0; 2895 2896 /* memory is always in slot 0 */ 2897 ASSERT(DRMACH_BNUM2SLOT(mp->dev.bp->bnum) == 0); 2898 2899 /* look up slot 1 board on same expander */ 2900 idx = DRMACH_EXPSLOT2BNUM(DRMACH_BNUM2EXP(mp->dev.bp->bnum), 1); 2901 rv = drmach_array_get(drmach_boards, idx, &id); 2902 bp = id; /* bp will be NULL if board not found */ 2903 2904 /* look up should never be out of bounds */ 2905 ASSERT(rv == 0); 2906 2907 /* nothing to do when board is not found or has no devices */ 2908 if (rv == -1 || bp == NULL || bp->devices == NULL) 2909 return (p); 2910 2911 rv = drmach_array_first(bp->devices, &idx, &id); 2912 while (rv == 0) { 2913 if (DRMACH_IS_IO_ID(id)) { 2914 drmach_io_t *io = id; 2915 2916 /* 2917 * Skip all non-Schizo IO devices (only IO nodes 2918 * that are Schizo devices have non-zero scsr_pa). 2919 * Filter out "other" leaf to avoid writing to the 2920 * same Schizo Control/Status Register twice. 2921 */ 2922 if (io->scsr_pa && io->scsr_pa != last_scsr_pa) { 2923 uint64_t scsr; 2924 2925 scsr = lddphysio(io->scsr_pa); 2926 scsr &= ~(DRMACH_LPA_BASE_MASK | 2927 DRMACH_LPA_BND_MASK); 2928 scsr |= DRMACH_PA_TO_LPA_BASE(new_basepa); 2929 scsr |= DRMACH_PA_TO_LPA_BND( 2930 new_basepa + DRMACH_MEM_SLICE_SIZE); 2931 2932 *p++ = io->scsr_pa; 2933 *p++ = scsr; 2934 2935 last_scsr_pa = io->scsr_pa; 2936 } 2937 } 2938 rv = drmach_array_next(bp->devices, &idx, &id); 2939 } 2940 2941 return (p); 2942 } 2943 2944 /* 2945 * For Panther MCs, append the MC idle reg address and drmach_mem_t pointer. 2946 * The latter is returned when drmach_rename fails to idle a Panther MC and 2947 * is used to identify the MC for error reporting. 2948 */ 2949 static uint64_t * 2950 drmach_prep_pn_mc_idle(uint64_t *p, drmach_mem_t *mp, int local) 2951 { 2952 /* only slot 0 has memory */ 2953 ASSERT(DRMACH_BNUM2SLOT(mp->dev.bp->bnum) == 0); 2954 ASSERT(IS_PANTHER(mp->dev.bp->cpu_impl)); 2955 2956 for (mp = mp->dev.bp->mem; mp != NULL; mp = mp->next) { 2957 ASSERT(DRMACH_IS_MEM_ID(mp)); 2958 2959 if (mp->dev.portid == STARCAT_CPUID_TO_PORTID(CPU->cpu_id)) { 2960 if (local) { 2961 *p++ = ASI_EMU_ACT_STATUS_VA; /* local ASI */ 2962 *p++ = (uintptr_t)mp; 2963 } 2964 } else if (!local) { 2965 *p++ = DRMACH_EMU_ACT_STATUS_ADDR(mp); /* PIO */ 2966 *p++ = (uintptr_t)mp; 2967 } 2968 } 2969 2970 return (p); 2971 } 2972 2973 static sbd_error_t * 2974 drmach_prep_rename_script(drmach_mem_t *s_mp, drmach_mem_t *t_mp, 2975 uint64_t t_slice_offset, caddr_t buf, int buflen) 2976 { 2977 _NOTE(ARGUNUSED(buflen)) 2978 2979 uint64_t *p = (uint64_t *)buf, *q; 2980 sbd_error_t *err; 2981 int rv; 2982 drmach_mem_t *mp, *skip_mp; 2983 uint64_t s_basepa, t_basepa; 2984 uint64_t s_new_basepa, t_new_basepa; 2985 2986 /* verify supplied buffer space is adequate */ 2987 ASSERT(buflen >= 2988 /* addr for all possible MC banks */ 2989 (sizeof (uint64_t) * 4 * 4 * 18) + 2990 /* list section terminator */ 2991 (sizeof (uint64_t) * 1) + 2992 /* addr/id tuple for local Panther MC idle reg */ 2993 (sizeof (uint64_t) * 2) + 2994 /* list section terminator */ 2995 (sizeof (uint64_t) * 1) + 2996 /* addr/id tuple for 2 boards with 4 Panther MC idle regs */ 2997 (sizeof (uint64_t) * 2 * 2 * 4) + 2998 /* list section terminator */ 2999 (sizeof (uint64_t) * 1) + 3000 /* addr/val tuple for 1 proc with 4 MC banks */ 3001 (sizeof (uint64_t) * 2 * 4) + 3002 /* list section terminator */ 3003 (sizeof (uint64_t) * 1) + 3004 /* addr/val tuple for 2 boards w/ 2 schizos each */ 3005 (sizeof (uint64_t) * 2 * 2 * 2) + 3006 /* addr/val tuple for 2 boards w/ 16 MC banks each */ 3007 (sizeof (uint64_t) * 2 * 2 * 16) + 3008 /* list section terminator */ 3009 (sizeof (uint64_t) * 1) + 3010 /* addr/val tuple for 18 AXQs w/ two slots each */ 3011 (sizeof (uint64_t) * 2 * 2 * 18) + 3012 /* list section terminator */ 3013 (sizeof (uint64_t) * 1) + 3014 /* list terminator */ 3015 (sizeof (uint64_t) * 1)); 3016 3017 /* copy bank list to rename script */ 3018 mutex_enter(&drmach_bus_sync_lock); 3019 for (q = drmach_bus_sync_list; *q; q++, p++) 3020 *p = *q; 3021 mutex_exit(&drmach_bus_sync_lock); 3022 3023 /* list section terminator */ 3024 *p++ = 0; 3025 3026 /* 3027 * Write idle script for MC on this processor. A script will be 3028 * produced only if this is a Panther processor on the source or 3029 * target board. 3030 */ 3031 if (IS_PANTHER(s_mp->dev.bp->cpu_impl)) 3032 p = drmach_prep_pn_mc_idle(p, s_mp, 1); 3033 3034 if (IS_PANTHER(t_mp->dev.bp->cpu_impl)) 3035 p = drmach_prep_pn_mc_idle(p, t_mp, 1); 3036 3037 /* list section terminator */ 3038 *p++ = 0; 3039 3040 /* 3041 * Write idle script for all other MCs on source and target 3042 * Panther boards. 3043 */ 3044 if (IS_PANTHER(s_mp->dev.bp->cpu_impl)) 3045 p = drmach_prep_pn_mc_idle(p, s_mp, 0); 3046 3047 if (IS_PANTHER(t_mp->dev.bp->cpu_impl)) 3048 p = drmach_prep_pn_mc_idle(p, t_mp, 0); 3049 3050 /* list section terminator */ 3051 *p++ = 0; 3052 3053 /* 3054 * Step 1: Write source base address to target MC 3055 * with present bit off. 3056 * Step 2: Now rewrite target reg with present bit on. 3057 */ 3058 err = drmach_mem_get_base_physaddr(s_mp, &s_basepa); 3059 ASSERT(err == NULL); 3060 err = drmach_mem_get_base_physaddr(t_mp, &t_basepa); 3061 ASSERT(err == NULL); 3062 3063 /* exchange base pa. include slice offset in new target base pa */ 3064 s_new_basepa = t_basepa & ~ (DRMACH_MEM_SLICE_SIZE - 1); 3065 t_new_basepa = (s_basepa & ~ (DRMACH_MEM_SLICE_SIZE - 1)) + 3066 t_slice_offset; 3067 3068 DRMACH_PR("s_new_basepa 0x%lx\n", s_new_basepa); 3069 DRMACH_PR("t_new_basepa 0x%lx\n", t_new_basepa); 3070 3071 DRMACH_PR("preparing MC MADR rename script (master is CPU%d):\n", 3072 CPU->cpu_id); 3073 3074 /* 3075 * Write rename script for MC on this processor. A script will 3076 * be produced only if this processor is on the source or target 3077 * board. 3078 */ 3079 3080 skip_mp = NULL; 3081 mp = s_mp->dev.bp->mem; 3082 while (mp != NULL && skip_mp == NULL) { 3083 if (mp->dev.portid == STARCAT_CPUID_TO_PORTID(CPU->cpu_id)) { 3084 skip_mp = mp; 3085 p = drmach_prep_mc_rename(p, 1, mp, s_basepa, 3086 s_new_basepa); 3087 } 3088 3089 mp = mp->next; 3090 } 3091 3092 mp = t_mp->dev.bp->mem; 3093 while (mp != NULL && skip_mp == NULL) { 3094 if (mp->dev.portid == STARCAT_CPUID_TO_PORTID(CPU->cpu_id)) { 3095 skip_mp = mp; 3096 p = drmach_prep_mc_rename(p, 1, mp, t_basepa, 3097 t_new_basepa); 3098 } 3099 3100 mp = mp->next; 3101 } 3102 3103 /* list section terminator */ 3104 *p++ = 0; 3105 3106 /* 3107 * Write rename script for all other MCs on source and target 3108 * boards. 3109 */ 3110 3111 for (mp = s_mp->dev.bp->mem; mp; mp = mp->next) { 3112 if (mp == skip_mp) 3113 continue; 3114 p = drmach_prep_mc_rename(p, 0, mp, s_basepa, s_new_basepa); 3115 } 3116 3117 for (mp = t_mp->dev.bp->mem; mp; mp = mp->next) { 3118 if (mp == skip_mp) 3119 continue; 3120 p = drmach_prep_mc_rename(p, 0, mp, t_basepa, t_new_basepa); 3121 } 3122 3123 /* Write rename script for Schizo LPA_BASE/LPA_BND */ 3124 p = drmach_prep_schizo_script(p, s_mp, s_new_basepa); 3125 p = drmach_prep_schizo_script(p, t_mp, t_new_basepa); 3126 3127 /* list section terminator */ 3128 *p++ = 0; 3129 3130 DRMACH_PR("preparing AXQ CASM rename script (EXP%d <> EXP%d):\n", 3131 DRMACH_BNUM2EXP(s_mp->dev.bp->bnum), 3132 DRMACH_BNUM2EXP(t_mp->dev.bp->bnum)); 3133 3134 rv = axq_do_casm_rename_script(&p, 3135 DRMACH_PA_TO_SLICE(s_new_basepa), 3136 DRMACH_PA_TO_SLICE(t_new_basepa)); 3137 if (rv == DDI_FAILURE) 3138 return (DRMACH_INTERNAL_ERROR()); 3139 3140 /* list section & final terminator */ 3141 *p++ = 0; 3142 *p++ = 0; 3143 3144 #ifdef DEBUG 3145 { 3146 uint64_t *q = (uint64_t *)buf; 3147 3148 /* paranoia */ 3149 ASSERT((caddr_t)p <= buf + buflen); 3150 3151 DRMACH_PR("MC bank base pa list:\n"); 3152 while (*q) { 3153 uint64_t a = *q++; 3154 3155 DRMACH_PR("0x%lx\n", a); 3156 } 3157 3158 /* skip terminator */ 3159 q += 1; 3160 3161 DRMACH_PR("local Panther MC idle reg (via ASI 0x4a):\n"); 3162 while (*q) { 3163 DRMACH_PR("addr=0x%lx, mp=0x%lx\n", *q, *(q + 1)); 3164 q += 2; 3165 } 3166 3167 /* skip terminator */ 3168 q += 1; 3169 3170 DRMACH_PR("non-local Panther MC idle reg (via ASI 0x15):\n"); 3171 while (*q) { 3172 DRMACH_PR("addr=0x%lx, mp=0x%lx\n", *q, *(q + 1)); 3173 q += 2; 3174 } 3175 3176 /* skip terminator */ 3177 q += 1; 3178 3179 DRMACH_PR("MC reprogramming script (via ASI 0x72):\n"); 3180 while (*q) { 3181 uint64_t r = *q++; /* register address */ 3182 uint64_t v = *q++; /* new register value */ 3183 3184 DRMACH_PR("0x%lx = 0x%lx, basepa 0x%lx\n", 3185 r, v, (long)(DRMACH_MC_UM_TO_PA(v)| 3186 DRMACH_MC_LM_TO_PA(v))); 3187 } 3188 3189 /* skip terminator */ 3190 q += 1; 3191 3192 DRMACH_PR("MC/SCHIZO reprogramming script:\n"); 3193 while (*q) { 3194 DRMACH_PR("0x%lx = 0x%lx\n", *q, *(q + 1)); 3195 q += 2; 3196 } 3197 3198 /* skip terminator */ 3199 q += 1; 3200 3201 DRMACH_PR("AXQ reprogramming script:\n"); 3202 while (*q) { 3203 DRMACH_PR("0x%lx = 0x%lx\n", *q, *(q + 1)); 3204 q += 2; 3205 } 3206 3207 /* verify final terminator is present */ 3208 ASSERT(*(q + 1) == 0); 3209 3210 DRMACH_PR("copy-rename script 0x%p, len %d\n", 3211 (void *)buf, (int)((intptr_t)p - (intptr_t)buf)); 3212 3213 if (drmach_debug) 3214 DELAY(10000000); 3215 } 3216 #endif 3217 3218 return (NULL); 3219 } 3220 3221 static void 3222 drmach_prep_xt_mb_for_slice_update(drmach_board_t *bp, uchar_t slice) 3223 { 3224 int rv; 3225 3226 ASSERT(MUTEX_HELD(&drmach_xt_mb_lock)); 3227 3228 if (bp->devices) { 3229 int d_idx; 3230 drmachid_t d_id; 3231 3232 rv = drmach_array_first(bp->devices, &d_idx, &d_id); 3233 while (rv == 0) { 3234 if (DRMACH_IS_CPU_ID(d_id)) { 3235 drmach_cpu_t *cp = d_id; 3236 processorid_t cpuid = cp->cpuid; 3237 3238 mutex_enter(&cpu_lock); 3239 if (cpu[cpuid] && cpu[cpuid]->cpu_flags) 3240 drmach_xt_mb[cpuid] = 0x80 | slice; 3241 mutex_exit(&cpu_lock); 3242 } 3243 rv = drmach_array_next(bp->devices, &d_idx, &d_id); 3244 } 3245 } 3246 if (DRMACH_BNUM2SLOT(bp->bnum) == 0) { 3247 drmach_board_t *s1bp = NULL; 3248 3249 rv = drmach_array_get(drmach_boards, bp->bnum + 1, 3250 (void *) &s1bp); 3251 if (rv == 0 && s1bp != NULL) { 3252 ASSERT(DRMACH_IS_BOARD_ID(s1bp)); 3253 ASSERT(DRMACH_BNUM2SLOT(s1bp->bnum) == 1); 3254 drmach_prep_xt_mb_for_slice_update(s1bp, slice); 3255 } 3256 } 3257 } 3258 3259 sbd_error_t * 3260 drmach_copy_rename_init(drmachid_t t_id, uint64_t t_slice_offset, 3261 drmachid_t s_id, struct memlist *c_ml, drmachid_t *cr_id) 3262 { 3263 extern void drmach_rename(uint64_t *, uint_t *, uint64_t *); 3264 extern void drmach_rename_end(void); 3265 3266 drmach_mem_t *s_mp, *t_mp; 3267 struct memlist *x_ml; 3268 uint64_t off_mask, s_copybasepa, t_copybasepa, t_basepa; 3269 int len; 3270 caddr_t bp, wp; 3271 uint_t *p, *q; 3272 sbd_error_t *err; 3273 tte_t *tte; 3274 drmach_copy_rename_t *cr; 3275 3276 if (!DRMACH_IS_MEM_ID(s_id)) 3277 return (drerr_new(0, ESTC_INAPPROP, NULL)); 3278 if (!DRMACH_IS_MEM_ID(t_id)) 3279 return (drerr_new(0, ESTC_INAPPROP, NULL)); 3280 s_mp = s_id; 3281 t_mp = t_id; 3282 3283 /* get starting physical address of target memory */ 3284 err = drmach_mem_get_base_physaddr(t_id, &t_basepa); 3285 if (err) 3286 return (err); 3287 3288 /* calculate slice offset mask from slice size */ 3289 off_mask = DRMACH_MEM_SLICE_SIZE - 1; 3290 3291 /* calculate source and target base pa */ 3292 s_copybasepa = c_ml->address; 3293 t_copybasepa = t_basepa + ((c_ml->address & off_mask) - t_slice_offset); 3294 3295 /* paranoia */ 3296 ASSERT((c_ml->address & off_mask) >= t_slice_offset); 3297 3298 /* adjust copy memlist addresses to be relative to copy base pa */ 3299 x_ml = c_ml; 3300 while (x_ml != NULL) { 3301 x_ml->address -= s_copybasepa; 3302 x_ml = x_ml->next; 3303 } 3304 3305 #ifdef DEBUG 3306 { 3307 uint64_t s_basepa, s_size, t_size; 3308 3309 x_ml = c_ml; 3310 while (x_ml->next != NULL) 3311 x_ml = x_ml->next; 3312 3313 DRMACH_PR("source copy span: base pa 0x%lx, end pa 0x%lx\n", 3314 s_copybasepa, 3315 s_copybasepa + x_ml->address + x_ml->size); 3316 3317 DRMACH_PR("target copy span: base pa 0x%lx, end pa 0x%lx\n", 3318 t_copybasepa, 3319 t_copybasepa + x_ml->address + x_ml->size); 3320 3321 DRMACH_PR("copy memlist (relative to copy base pa):\n"); 3322 DRMACH_MEMLIST_DUMP(c_ml); 3323 3324 err = drmach_mem_get_base_physaddr(s_id, &s_basepa); 3325 ASSERT(err == NULL); 3326 3327 err = drmach_mem_get_size(s_id, &s_size); 3328 ASSERT(err == NULL); 3329 3330 err = drmach_mem_get_size(t_id, &t_size); 3331 ASSERT(err == NULL); 3332 3333 DRMACH_PR("current source base pa 0x%lx, size 0x%lx\n", 3334 s_basepa, s_size); 3335 DRMACH_PR("current target base pa 0x%lx, size 0x%lx\n", 3336 t_basepa, t_size); 3337 } 3338 #endif /* DEBUG */ 3339 3340 /* Map in appropriate cpu sram page */ 3341 tte = &drmach_cpu_sram_tte[CPU->cpu_id]; 3342 ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) && 3343 TTE_IS_PRIVILEGED(tte) && TTE_IS_LOCKED(tte)); 3344 sfmmu_dtlb_ld_kva(drmach_cpu_sram_va, tte); 3345 sfmmu_itlb_ld_kva(drmach_cpu_sram_va, tte); 3346 3347 bp = wp = drmach_cpu_sram_va; 3348 3349 /* Make sure the rename routine will fit */ 3350 len = (ptrdiff_t)drmach_rename_end - (ptrdiff_t)drmach_rename; 3351 ASSERT(wp + len < bp + PAGESIZE); 3352 3353 /* copy text. standard bcopy not designed to work in nc space */ 3354 p = (uint_t *)wp; 3355 q = (uint_t *)drmach_rename; 3356 while (q < (uint_t *)drmach_rename_end) 3357 *p++ = *q++; 3358 3359 /* zero remainder. standard bzero not designed to work in nc space */ 3360 while (p < (uint_t *)(bp + PAGESIZE)) 3361 *p++ = 0; 3362 3363 DRMACH_PR("drmach_rename function 0x%p, len %d\n", (void *)wp, len); 3364 wp += (len + 15) & ~15; 3365 3366 err = drmach_prep_rename_script(s_mp, t_mp, t_slice_offset, wp, 3367 PAGESIZE - (wp - bp)); 3368 if (err) { 3369 cleanup: 3370 xt_one(CPU->cpu_id, vtag_flushpage_tl1, 3371 (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup); 3372 return (err); 3373 } 3374 3375 /* disable and flush CDC */ 3376 if (axq_cdc_disable_flush_all() != DDI_SUCCESS) { 3377 axq_cdc_enable_all(); /* paranoia */ 3378 err = DRMACH_INTERNAL_ERROR(); 3379 goto cleanup; 3380 } 3381 3382 /* mark both memory units busy */ 3383 t_mp->dev.busy++; 3384 s_mp->dev.busy++; 3385 3386 cr = vmem_alloc(static_alloc_arena, sizeof (drmach_copy_rename_t), 3387 VM_SLEEP); 3388 cr->isa = (void *)drmach_copy_rename_init; 3389 cr->data = wp; 3390 cr->c_ml = c_ml; 3391 cr->s_mp = s_mp; 3392 cr->t_mp = t_mp; 3393 cr->s_copybasepa = s_copybasepa; 3394 cr->t_copybasepa = t_copybasepa; 3395 cr->ecode = DRMACH_CR_OK; 3396 3397 mutex_enter(&drmach_slice_table_lock); 3398 3399 mutex_enter(&drmach_xt_mb_lock); 3400 bzero((void *)drmach_xt_mb, drmach_xt_mb_size); 3401 3402 if (DRMACH_L1_SET_LPA(s_mp->dev.bp) && drmach_reprogram_lpa) { 3403 drmach_prep_xt_mb_for_slice_update(s_mp->dev.bp, 3404 DRMACH_PA_TO_SLICE(t_copybasepa)); 3405 } 3406 if (DRMACH_L1_SET_LPA(t_mp->dev.bp) && drmach_reprogram_lpa) { 3407 drmach_prep_xt_mb_for_slice_update(t_mp->dev.bp, 3408 DRMACH_PA_TO_SLICE(s_copybasepa)); 3409 } 3410 3411 *cr_id = cr; 3412 return (NULL); 3413 } 3414 3415 int drmach_rename_count; 3416 int drmach_rename_ntries; 3417 3418 sbd_error_t * 3419 drmach_copy_rename_fini(drmachid_t id) 3420 { 3421 drmach_copy_rename_t *cr = id; 3422 sbd_error_t *err = NULL; 3423 dr_mbox_msg_t *obufp; 3424 3425 ASSERT(cr->isa == (void *)drmach_copy_rename_init); 3426 3427 axq_cdc_enable_all(); 3428 3429 xt_one(CPU->cpu_id, vtag_flushpage_tl1, 3430 (uint64_t)drmach_cpu_sram_va, (uint64_t)ksfmmup); 3431 3432 switch (cr->ecode) { 3433 case DRMACH_CR_OK: 3434 break; 3435 case DRMACH_CR_MC_IDLE_ERR: { 3436 dev_info_t *dip = NULL; 3437 drmach_mem_t *mp = (drmach_mem_t *)cr->earg; 3438 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 3439 3440 ASSERT(DRMACH_IS_MEM_ID(mp)); 3441 3442 err = drmach_get_dip(mp, &dip); 3443 3444 ASSERT(err == NULL); 3445 ASSERT(dip != NULL); 3446 3447 err = drerr_new(0, ESBD_MEMFAIL, NULL); 3448 (void) ddi_pathname(dip, path); 3449 cmn_err(CE_WARN, "failed to idle memory controller %s on %s: " 3450 "copy-rename aborted", path, mp->dev.bp->cm.name); 3451 kmem_free(path, MAXPATHLEN); 3452 break; 3453 } 3454 case DRMACH_CR_IOPAUSE_ERR: 3455 ASSERT((uintptr_t)cr->earg >= 0 && 3456 (uintptr_t)cr->earg < AXQ_MAX_EXP); 3457 3458 err = drerr_new(0, ESBD_SUSPEND, "EX%d", (uintptr_t)cr->earg); 3459 cmn_err(CE_WARN, "failed to idle EX%ld AXQ slot1 activity prior" 3460 " to copy-rename", (uintptr_t)cr->earg); 3461 break; 3462 case DRMACH_CR_ONTRAP_ERR: 3463 err = drerr_new(0, ESBD_MEMFAIL, NULL); 3464 cmn_err(CE_WARN, "copy-rename aborted due to uncorrectable " 3465 "memory error"); 3466 break; 3467 default: 3468 err = DRMACH_INTERNAL_ERROR(); 3469 cmn_err(CE_WARN, "unknown copy-rename error code (%d)\n", 3470 cr->ecode); 3471 break; 3472 } 3473 3474 #ifdef DEBUG 3475 if ((DRMACH_L1_SET_LPA(cr->s_mp->dev.bp) || 3476 DRMACH_L1_SET_LPA(cr->t_mp->dev.bp)) && drmach_reprogram_lpa) { 3477 int i; 3478 for (i = 0; i < NCPU; i++) { 3479 if (drmach_xt_mb[i]) 3480 DRMACH_PR("cpu%d ignored drmach_xt_mb", i); 3481 } 3482 } 3483 #endif 3484 mutex_exit(&drmach_xt_mb_lock); 3485 3486 if (cr->c_ml != NULL) 3487 memlist_delete(cr->c_ml); 3488 3489 cr->t_mp->dev.busy--; 3490 cr->s_mp->dev.busy--; 3491 3492 if (err) { 3493 mutex_exit(&drmach_slice_table_lock); 3494 goto done; 3495 } 3496 3497 /* update casm shadow for target and source board */ 3498 drmach_slice_table_update(cr->t_mp->dev.bp, 0); 3499 drmach_slice_table_update(cr->s_mp->dev.bp, 0); 3500 mutex_exit(&drmach_slice_table_lock); 3501 3502 mutex_enter(&drmach_bus_sync_lock); 3503 drmach_bus_sync_list_update(); 3504 mutex_exit(&drmach_bus_sync_lock); 3505 3506 /* 3507 * Make a good-faith effort to notify the SC about the copy-rename, but 3508 * don't worry if it fails, since a subsequent claim/unconfig/unclaim 3509 * will duplicate the update. 3510 */ 3511 obufp = kmem_zalloc(sizeof (dr_mbox_msg_t), KM_SLEEP); 3512 mutex_enter(&drmach_slice_table_lock); 3513 drmach_msg_memslice_init(obufp->msgdata.dm_uc.mem_slice); 3514 drmach_msg_memregs_init(obufp->msgdata.dm_uc.mem_regs); 3515 mutex_exit(&drmach_slice_table_lock); 3516 (void) drmach_mbox_trans(DRMSG_UNCONFIG, cr->s_mp->dev.bp->bnum, 3517 (caddr_t)obufp, sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0); 3518 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 3519 3520 done: 3521 vmem_free(static_alloc_arena, cr, sizeof (drmach_copy_rename_t)); 3522 3523 DRMACH_PR("waited %d out of %d tries for drmach_rename_wait on %d cpus", 3524 drmach_rename_ntries, drmach_cpu_ntries, drmach_rename_count); 3525 3526 return (err); 3527 } 3528 3529 int drmach_slow_copy = 0; 3530 3531 void 3532 drmach_copy_rename(drmachid_t id) 3533 { 3534 extern uint_t getpstate(void); 3535 extern void setpstate(uint_t); 3536 3537 extern xcfunc_t drmach_rename_wait; 3538 extern xcfunc_t drmach_rename_done; 3539 extern xcfunc_t drmach_rename_abort; 3540 3541 drmach_copy_rename_t *cr = id; 3542 uint64_t neer; 3543 struct memlist *ml; 3544 int i, count; 3545 int csize, lnsize; 3546 uint64_t caddr; 3547 cpuset_t cpuset; 3548 uint_t pstate; 3549 uint32_t exp = 0; 3550 on_trap_data_t otd; 3551 xcfunc_t *drmach_end_wait_xcall = drmach_rename_done; 3552 3553 ASSERT(cr->isa == (void *)drmach_copy_rename_init); 3554 ASSERT(MUTEX_HELD(&cpu_lock)); 3555 ASSERT(cr->ecode == DRMACH_CR_OK); 3556 3557 /* 3558 * Prevent slot1 IO from accessing Safari memory bus. 3559 */ 3560 if (axq_iopause_enable_all(&exp) != DDI_SUCCESS) { 3561 ASSERT(exp >= 0 && exp < AXQ_MAX_EXP); 3562 cr->ecode = DRMACH_CR_IOPAUSE_ERR; 3563 cr->earg = (void *)(uintptr_t)exp; 3564 return; 3565 } 3566 3567 cpuset = cpu_ready_set; 3568 CPUSET_DEL(cpuset, CPU->cpu_id); 3569 count = ncpus - 1; 3570 drmach_rename_count = count; /* for debug */ 3571 3572 drmach_xt_ready = 0; 3573 xt_some(cpuset, drmach_rename_wait, NULL, NULL); 3574 3575 for (i = 0; i < drmach_cpu_ntries; i++) { 3576 if (drmach_xt_ready == count) 3577 break; 3578 DELAY(drmach_cpu_delay); 3579 } 3580 3581 drmach_rename_ntries = i; /* for debug */ 3582 3583 drmach_xt_ready = 0; /* steal the line back */ 3584 for (i = 0; i < NCPU; i++) /* steal the line back, preserve data */ 3585 drmach_xt_mb[i] = drmach_xt_mb[i]; 3586 3587 caddr = drmach_iocage_paddr; 3588 csize = cpunodes[CPU->cpu_id].ecache_size; 3589 lnsize = cpunodes[CPU->cpu_id].ecache_linesize; 3590 3591 /* disable CE reporting */ 3592 neer = get_error_enable(); 3593 set_error_enable(neer & ~EN_REG_CEEN); 3594 3595 /* disable interrupts (paranoia) */ 3596 pstate = getpstate(); 3597 setpstate(pstate & ~PSTATE_IE); 3598 3599 /* 3600 * Execute copy-rename under on_trap to protect against a panic due 3601 * to an uncorrectable error. Instead, DR will abort the copy-rename 3602 * operation and rely on the OS to do the error reporting. 3603 * 3604 * In general, trap handling on any cpu once the copy begins 3605 * can result in an inconsistent memory image on the target. 3606 */ 3607 if (on_trap(&otd, OT_DATA_EC)) { 3608 cr->ecode = DRMACH_CR_ONTRAP_ERR; 3609 goto copy_rename_end; 3610 } 3611 3612 /* 3613 * DO COPY. 3614 */ 3615 for (ml = cr->c_ml; ml; ml = ml->next) { 3616 uint64_t s_pa, t_pa; 3617 uint64_t nbytes; 3618 3619 s_pa = cr->s_copybasepa + ml->address; 3620 t_pa = cr->t_copybasepa + ml->address; 3621 nbytes = ml->size; 3622 3623 while (nbytes != 0ull) { 3624 /* copy 32 bytes at src_pa to dst_pa */ 3625 bcopy32_il(s_pa, t_pa); 3626 3627 /* increment by 32 bytes */ 3628 s_pa += (4 * sizeof (uint64_t)); 3629 t_pa += (4 * sizeof (uint64_t)); 3630 3631 /* decrement by 32 bytes */ 3632 nbytes -= (4 * sizeof (uint64_t)); 3633 3634 if (drmach_slow_copy) { /* for debug */ 3635 uint64_t i = 13 * 50; 3636 while (i--) 3637 ; 3638 } 3639 } 3640 } 3641 3642 /* 3643 * XXX CHEETAH SUPPORT 3644 * For cheetah, we need to grab the iocage lock since iocage 3645 * memory is used for e$ flush. 3646 * 3647 * NOTE: This code block is dangerous at this point in the 3648 * copy-rename operation. It modifies memory after the copy 3649 * has taken place which means that any persistent state will 3650 * be abandoned after the rename operation. The code is also 3651 * performing thread synchronization at a time when all but 3652 * one processors are paused. This is a potential deadlock 3653 * situation. 3654 * 3655 * This code block must be moved to drmach_copy_rename_init. 3656 */ 3657 if (drmach_is_cheetah) { 3658 mutex_enter(&drmach_iocage_lock); 3659 while (drmach_iocage_is_busy) 3660 cv_wait(&drmach_iocage_cv, &drmach_iocage_lock); 3661 drmach_iocage_is_busy = 1; 3662 drmach_iocage_mem_scrub(ecache_size * 2); 3663 mutex_exit(&drmach_iocage_lock); 3664 } 3665 3666 /* 3667 * bcopy32_il is implemented as a series of ldxa/stxa via 3668 * ASI_MEM instructions. Following the copy loop, the E$ 3669 * of the master (this) processor will have lines in state 3670 * O that correspond to lines of home memory in state gI. 3671 * An E$ flush is necessary to commit these lines before 3672 * proceeding with the rename operation. 3673 * 3674 * Flushing the E$ will automatically flush the W$, but 3675 * the D$ and I$ must be flushed separately and explicitly. 3676 */ 3677 flush_ecache_il(caddr, csize, lnsize); /* inline version */ 3678 3679 /* 3680 * Each line of home memory is now in state gM, except in 3681 * the case of a cheetah processor when the E$ flush area 3682 * is included within the copied region. In such a case, 3683 * the lines of home memory for the upper half of the 3684 * flush area are in state gS. 3685 * 3686 * Each line of target memory is in state gM. 3687 * 3688 * Each line of this processor's E$ is in state I, except 3689 * those of a cheetah processor. All lines of a cheetah 3690 * processor's E$ are in state S and correspond to the lines 3691 * in upper half of the E$ flush area. 3692 * 3693 * It is vital at this point that none of the lines in the 3694 * home or target memories are in state gI and that none 3695 * of the lines in this processor's E$ are in state O or Os. 3696 * A single instance of such a condition will cause loss of 3697 * coherency following the rename operation. 3698 */ 3699 3700 /* 3701 * Rename 3702 */ 3703 (*(void(*)())drmach_cpu_sram_va)(cr->data, &cr->ecode, &cr->earg); 3704 3705 /* 3706 * Rename operation complete. The physical address space 3707 * of the home and target memories have been swapped, the 3708 * routing data in the respective CASM entries have been 3709 * swapped, and LPA settings in the processor and schizo 3710 * devices have been reprogrammed accordingly. 3711 * 3712 * In the case of a cheetah processor, the E$ remains 3713 * populated with lines in state S that correspond to the 3714 * lines in the former home memory. Now that the physical 3715 * addresses have been swapped, these E$ lines correspond 3716 * to lines in the new home memory which are in state gM. 3717 * This combination is invalid. An additional E$ flush is 3718 * necessary to restore coherency. The E$ flush will cause 3719 * the lines of the new home memory for the flush region 3720 * to transition from state gM to gS. The former home memory 3721 * remains unmodified. This additional E$ flush has no effect 3722 * on a cheetah+ processor. 3723 */ 3724 flush_ecache_il(caddr, csize, lnsize); /* inline version */ 3725 3726 /* 3727 * The D$ and I$ must be flushed to ensure that coherency is 3728 * maintained. Any line in a cache that is in the valid 3729 * state has its corresponding line of the new home memory 3730 * in the gM state. This is an invalid condition. When the 3731 * flushes are complete the cache line states will be 3732 * resynchronized with those in the new home memory. 3733 */ 3734 flush_icache_il(); /* inline version */ 3735 flush_dcache_il(); /* inline version */ 3736 flush_pcache_il(); /* inline version */ 3737 3738 copy_rename_end: 3739 3740 no_trap(); 3741 3742 /* enable interrupts */ 3743 setpstate(pstate); 3744 3745 /* enable CE reporting */ 3746 set_error_enable(neer); 3747 3748 if (cr->ecode != DRMACH_CR_OK) 3749 drmach_end_wait_xcall = drmach_rename_abort; 3750 3751 /* 3752 * XXX CHEETAH SUPPORT 3753 */ 3754 if (drmach_is_cheetah) { 3755 mutex_enter(&drmach_iocage_lock); 3756 drmach_iocage_mem_scrub(ecache_size * 2); 3757 drmach_iocage_is_busy = 0; 3758 cv_signal(&drmach_iocage_cv); 3759 mutex_exit(&drmach_iocage_lock); 3760 } 3761 3762 axq_iopause_disable_all(); 3763 3764 xt_some(cpuset, drmach_end_wait_xcall, NULL, NULL); 3765 } 3766 3767 static void drmach_io_dispose(drmachid_t); 3768 static sbd_error_t *drmach_io_release(drmachid_t); 3769 static sbd_error_t *drmach_io_status(drmachid_t, drmach_status_t *); 3770 3771 static sbd_error_t * 3772 drmach_pci_new(drmach_device_t *proto, drmachid_t *idp) 3773 { 3774 drmach_node_t *node = proto->node; 3775 sbd_error_t *err; 3776 drmach_reg_t regs[3]; 3777 int rv; 3778 int len = 0; 3779 3780 rv = node->n_getproplen(node, "reg", &len); 3781 if (rv != 0 || len != sizeof (regs)) { 3782 sbd_error_t *err; 3783 3784 /* pci nodes are expected to have regs */ 3785 err = drerr_new(1, ESTC_GETPROP, 3786 "Device Node 0x%x: property %s", 3787 (uint_t)node->get_dnode(node), "reg"); 3788 return (err); 3789 } 3790 3791 rv = node->n_getprop(node, "reg", (void *)regs, sizeof (regs)); 3792 if (rv) { 3793 sbd_error_t *err; 3794 3795 err = drerr_new(1, ESTC_GETPROP, 3796 "Device Node 0x%x: property %s", 3797 (uint_t)node->get_dnode(node), "reg"); 3798 3799 return (err); 3800 } 3801 3802 /* 3803 * Fix up unit number so that Leaf A has a lower unit number 3804 * than Leaf B. 3805 */ 3806 if ((proto->portid % 2) != 0) { 3807 if ((regs[0].reg_addr_lo & 0x700000) == 0x700000) 3808 proto->unum = 0; 3809 else 3810 proto->unum = 1; 3811 } else { 3812 if ((regs[0].reg_addr_lo & 0x700000) == 0x700000) 3813 proto->unum = 2; 3814 else 3815 proto->unum = 3; 3816 } 3817 3818 err = drmach_io_new(proto, idp); 3819 if (err == NULL) { 3820 drmach_io_t *self = *idp; 3821 3822 /* reassemble 64-bit base address */ 3823 self->scsr_pa = (uint64_t)regs[1].reg_addr_hi << 32; 3824 self->scsr_pa |= (uint64_t)regs[1].reg_addr_lo; 3825 } 3826 3827 return (err); 3828 } 3829 3830 static sbd_error_t * 3831 drmach_io_new(drmach_device_t *proto, drmachid_t *idp) 3832 { 3833 drmach_io_t *ip; 3834 3835 ip = kmem_zalloc(sizeof (drmach_io_t), KM_SLEEP); 3836 bcopy(proto, &ip->dev, sizeof (ip->dev)); 3837 ip->dev.node = drmach_node_dup(proto->node); 3838 ip->dev.cm.isa = (void *)drmach_io_new; 3839 ip->dev.cm.dispose = drmach_io_dispose; 3840 ip->dev.cm.release = drmach_io_release; 3841 ip->dev.cm.status = drmach_io_status; 3842 3843 (void) snprintf(ip->dev.cm.name, sizeof (ip->dev.cm.name), "%s%d", 3844 ip->dev.type, ip->dev.unum); 3845 3846 *idp = (drmachid_t)ip; 3847 return (NULL); 3848 } 3849 3850 static void 3851 drmach_io_dispose(drmachid_t id) 3852 { 3853 drmach_io_t *self; 3854 3855 ASSERT(DRMACH_IS_IO_ID(id)); 3856 3857 self = id; 3858 if (self->dev.node) 3859 drmach_node_dispose(self->dev.node); 3860 3861 kmem_free(self, sizeof (*self)); 3862 } 3863 3864 /*ARGSUSED*/ 3865 sbd_error_t * 3866 drmach_pre_op(int cmd, drmachid_t id, drmach_opts_t *opts) 3867 { 3868 drmach_board_t *bp = (drmach_board_t *)id; 3869 sbd_error_t *err = NULL; 3870 3871 if (id && DRMACH_IS_BOARD_ID(id)) { 3872 switch (cmd) { 3873 case SBD_CMD_TEST: 3874 case SBD_CMD_STATUS: 3875 case SBD_CMD_GETNCM: 3876 break; 3877 case SBD_CMD_CONNECT: 3878 if (bp->connected) 3879 err = drerr_new(0, ESBD_STATE, NULL); 3880 3881 if (bp->cond == SBD_COND_UNUSABLE) 3882 err = drerr_new(0, 3883 ESBD_FATAL_STATE, NULL); 3884 break; 3885 case SBD_CMD_DISCONNECT: 3886 if (!bp->connected) 3887 err = drerr_new(0, ESBD_STATE, NULL); 3888 3889 if (bp->cond == SBD_COND_UNUSABLE) 3890 err = drerr_new(0, 3891 ESBD_FATAL_STATE, NULL); 3892 break; 3893 default: 3894 if (bp->cond == SBD_COND_UNUSABLE) 3895 err = drerr_new(0, 3896 ESBD_FATAL_STATE, NULL); 3897 break; 3898 3899 } 3900 } 3901 3902 return (err); 3903 } 3904 3905 /*ARGSUSED*/ 3906 sbd_error_t * 3907 drmach_post_op(int cmd, drmachid_t id, drmach_opts_t *opts) 3908 { 3909 return (NULL); 3910 } 3911 3912 sbd_error_t * 3913 drmach_board_assign(int bnum, drmachid_t *id) 3914 { 3915 sbd_error_t *err = NULL; 3916 caddr_t obufp; 3917 3918 if (!drmach_initialized && drmach_init() == -1) { 3919 err = DRMACH_INTERNAL_ERROR(); 3920 } 3921 3922 rw_enter(&drmach_boards_rwlock, RW_WRITER); 3923 3924 if (!err) { 3925 if (drmach_array_get(drmach_boards, bnum, id) == -1) { 3926 err = drerr_new(0, ESTC_BNUM, "%d", bnum); 3927 } else { 3928 drmach_board_t *bp; 3929 3930 if (*id) 3931 rw_downgrade(&drmach_boards_rwlock); 3932 3933 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 3934 err = drmach_mbox_trans(DRMSG_ASSIGN, bnum, obufp, 3935 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 3936 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 3937 3938 if (!err) { 3939 bp = *id; 3940 if (!*id) 3941 bp = *id = 3942 (drmachid_t)drmach_board_new(bnum); 3943 bp->assigned = 1; 3944 } 3945 } 3946 } 3947 rw_exit(&drmach_boards_rwlock); 3948 return (err); 3949 } 3950 3951 static uint_t 3952 drmach_board_non_panther_cpus(gdcd_t *gdcd, uint_t exp, uint_t slot) 3953 { 3954 uint_t port, port_start, port_end; 3955 uint_t non_panther_cpus = 0; 3956 uint_t impl; 3957 3958 ASSERT(gdcd != NULL); 3959 3960 /* 3961 * Determine PRD port indices based on slot location. 3962 */ 3963 switch (slot) { 3964 case 0: 3965 port_start = 0; 3966 port_end = 3; 3967 break; 3968 case 1: 3969 port_start = 4; 3970 port_end = 5; 3971 break; 3972 default: 3973 ASSERT(0); 3974 /* check all */ 3975 port_start = 0; 3976 port_end = 5; 3977 break; 3978 } 3979 3980 for (port = port_start; port <= port_end; port++) { 3981 if (gdcd->dcd_prd[exp][port].prd_ptype == SAFPTYPE_CPU && 3982 RSV_GOOD(gdcd->dcd_prd[exp][port].prd_prsv)) { 3983 /* 3984 * This Safari port passed POST and represents a 3985 * cpu, so check the implementation. 3986 */ 3987 impl = (gdcd->dcd_prd[exp][port].prd_ver_reg >> 32) 3988 & 0xffff; 3989 3990 switch (impl) { 3991 case CHEETAH_IMPL: 3992 case CHEETAH_PLUS_IMPL: 3993 case JAGUAR_IMPL: 3994 non_panther_cpus++; 3995 break; 3996 case PANTHER_IMPL: 3997 break; 3998 default: 3999 ASSERT(0); 4000 non_panther_cpus++; 4001 break; 4002 } 4003 } 4004 } 4005 4006 DRMACH_PR("drmach_board_non_panther_cpus: exp=%d, slot=%d, " 4007 "non_panther_cpus=%d", exp, slot, non_panther_cpus); 4008 4009 return (non_panther_cpus); 4010 } 4011 4012 sbd_error_t * 4013 drmach_board_connect(drmachid_t id, drmach_opts_t *opts) 4014 { 4015 _NOTE(ARGUNUSED(opts)) 4016 4017 drmach_board_t *bp = (drmach_board_t *)id; 4018 sbd_error_t *err; 4019 dr_mbox_msg_t *obufp; 4020 gdcd_t *gdcd = NULL; 4021 uint_t exp, slot; 4022 sc_gptwocfg_cookie_t scc; 4023 int panther_pages_enabled; 4024 4025 if (!DRMACH_IS_BOARD_ID(id)) 4026 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4027 4028 /* 4029 * Build the casm info portion of the CLAIM message. 4030 */ 4031 obufp = kmem_zalloc(sizeof (dr_mbox_msg_t), KM_SLEEP); 4032 mutex_enter(&drmach_slice_table_lock); 4033 drmach_msg_memslice_init(obufp->msgdata.dm_cr.mem_slice); 4034 drmach_msg_memregs_init(obufp->msgdata.dm_cr.mem_regs); 4035 mutex_exit(&drmach_slice_table_lock); 4036 err = drmach_mbox_trans(DRMSG_CLAIM, bp->bnum, (caddr_t)obufp, 4037 sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0); 4038 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 4039 4040 if (err) { 4041 /* 4042 * if mailbox timeout or unrecoverable error from SC, 4043 * board cannot be touched. Mark the status as 4044 * unusable. 4045 */ 4046 if ((err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) || 4047 (err->e_code == ESTC_MBXRPLY)) 4048 bp->cond = SBD_COND_UNUSABLE; 4049 return (err); 4050 } 4051 4052 gdcd = drmach_gdcd_new(); 4053 if (gdcd == NULL) { 4054 cmn_err(CE_WARN, "failed to read GDCD info for %s\n", 4055 bp->cm.name); 4056 return (DRMACH_INTERNAL_ERROR()); 4057 } 4058 4059 /* 4060 * Read CPU SRAM DR buffer offset from GDCD. 4061 */ 4062 exp = DRMACH_BNUM2EXP(bp->bnum); 4063 slot = DRMACH_BNUM2SLOT(bp->bnum); 4064 bp->stardrb_offset = 4065 gdcd->dcd_slot[exp][slot].l1ss_cpu_drblock_xwd_offset << 3; 4066 DRMACH_PR("%s: stardrb_offset=0x%lx\n", bp->cm.name, 4067 bp->stardrb_offset); 4068 4069 /* 4070 * Read board LPA setting from GDCD. 4071 */ 4072 bp->flags &= ~DRMACH_NULL_PROC_LPA; 4073 if (gdcd->dcd_slot[exp][slot].l1ss_flags & 4074 L1SSFLG_THIS_L1_NULL_PROC_LPA) { 4075 bp->flags |= DRMACH_NULL_PROC_LPA; 4076 DRMACH_PR("%s: NULL proc LPA\n", bp->cm.name); 4077 } 4078 4079 /* 4080 * XXX Until the Solaris large pages support heterogeneous cpu 4081 * domains, DR needs to prevent the addition of non-Panther cpus 4082 * to an all-Panther domain with large pages enabled. 4083 */ 4084 panther_pages_enabled = (page_num_pagesizes() > DEFAULT_MMU_PAGE_SIZES); 4085 if (drmach_board_non_panther_cpus(gdcd, exp, slot) > 0 && 4086 panther_pages_enabled && drmach_large_page_restriction) { 4087 cmn_err(CE_WARN, "Domain shutdown is required to add a non-" 4088 "UltraSPARC-IV+ board into an all UltraSPARC-IV+ domain"); 4089 err = drerr_new(0, ESTC_SUPPORT, NULL); 4090 } 4091 4092 if (err == NULL) { 4093 /* do saf configurator stuff */ 4094 DRMACH_PR("calling sc_probe_board for bnum=%d\n", bp->bnum); 4095 scc = sc_probe_board(bp->bnum); 4096 if (scc == NULL) 4097 err = drerr_new(0, ESTC_PROBE, bp->cm.name); 4098 } 4099 4100 if (err) { 4101 /* flush CDC srams */ 4102 if (axq_cdc_flush_all() != DDI_SUCCESS) { 4103 goto out; 4104 } 4105 4106 /* 4107 * Build the casm info portion of the UNCLAIM message. 4108 */ 4109 obufp = kmem_zalloc(sizeof (dr_mbox_msg_t), KM_SLEEP); 4110 mutex_enter(&drmach_slice_table_lock); 4111 drmach_msg_memslice_init(obufp->msgdata.dm_ur.mem_slice); 4112 drmach_msg_memregs_init(obufp->msgdata.dm_ur.mem_regs); 4113 mutex_exit(&drmach_slice_table_lock); 4114 (void) drmach_mbox_trans(DRMSG_UNCLAIM, bp->bnum, 4115 (caddr_t)obufp, sizeof (dr_mbox_msg_t), 4116 (caddr_t)NULL, 0); 4117 4118 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 4119 4120 /* 4121 * we clear the connected flag just in case it would have 4122 * been set by a concurrent drmach_board_status() thread 4123 * before the UNCLAIM completed. 4124 */ 4125 bp->connected = 0; 4126 goto out; 4127 } 4128 4129 /* 4130 * Now that the board has been successfully attached, obtain 4131 * platform-specific DIMM serial id information for the board. 4132 */ 4133 if ((DRMACH_BNUM2SLOT(bp->bnum) == 0) && 4134 plat_ecc_capability_sc_get(PLAT_ECC_DIMM_SID_MESSAGE)) { 4135 (void) plat_request_mem_sids(DRMACH_BNUM2EXP(bp->bnum)); 4136 } 4137 4138 out: 4139 if (gdcd != NULL) 4140 drmach_gdcd_dispose(gdcd); 4141 4142 return (err); 4143 } 4144 4145 static void 4146 drmach_slice_table_update(drmach_board_t *bp, int invalidate) 4147 { 4148 static char *axq_name = "address-extender-queue"; 4149 static dev_info_t *axq_dip = NULL; 4150 static int axq_exp = -1; 4151 static int axq_slot; 4152 int e, s, slice; 4153 4154 ASSERT(MUTEX_HELD(&drmach_slice_table_lock)); 4155 4156 e = DRMACH_BNUM2EXP(bp->bnum); 4157 if (invalidate) { 4158 ASSERT(DRMACH_BNUM2SLOT(bp->bnum) == 0); 4159 4160 /* invalidate cached casm value */ 4161 drmach_slice_table[e] = 0; 4162 4163 /* invalidate cached axq info if for same exp */ 4164 if (e == axq_exp && axq_dip) { 4165 ndi_rele_devi(axq_dip); 4166 axq_dip = NULL; 4167 } 4168 } 4169 4170 if (axq_dip == NULL || !i_ddi_devi_attached(axq_dip)) { 4171 int i, portid; 4172 4173 /* search for an attached slot0 axq instance */ 4174 for (i = 0; i < AXQ_MAX_EXP * AXQ_MAX_SLOT_PER_EXP; i++) { 4175 if (axq_dip) 4176 ndi_rele_devi(axq_dip); 4177 axq_dip = ddi_find_devinfo(axq_name, i, 0); 4178 if (axq_dip && DDI_CF2(axq_dip)) { 4179 portid = ddi_getprop(DDI_DEV_T_ANY, axq_dip, 4180 DDI_PROP_DONTPASS, "portid", -1); 4181 if (portid == -1) { 4182 DRMACH_PR("cant get portid of axq " 4183 "instance %d\n", i); 4184 continue; 4185 } 4186 4187 axq_exp = (portid >> 5) & 0x1f; 4188 axq_slot = portid & 1; 4189 4190 if (invalidate && axq_exp == e) 4191 continue; 4192 4193 if (axq_slot == 0) 4194 break; /* found */ 4195 } 4196 } 4197 4198 if (i == AXQ_MAX_EXP * AXQ_MAX_SLOT_PER_EXP) { 4199 if (axq_dip) { 4200 ndi_rele_devi(axq_dip); 4201 axq_dip = NULL; 4202 } 4203 DRMACH_PR("drmach_slice_table_update: failed to " 4204 "update axq dip\n"); 4205 return; 4206 } 4207 4208 } 4209 4210 ASSERT(axq_dip); 4211 ASSERT(axq_slot == 0); 4212 4213 if (invalidate) 4214 return; 4215 4216 s = DRMACH_BNUM2SLOT(bp->bnum); 4217 DRMACH_PR("using AXQ casm %d.%d for slot%d.%d\n", axq_exp, axq_slot, 4218 e, s); 4219 4220 /* invalidate entry */ 4221 drmach_slice_table[e] &= ~0x20; 4222 4223 /* 4224 * find a slice that routes to expander e. If no match 4225 * is found, drmach_slice_table[e] will remain invalid. 4226 * 4227 * The CASM is a routing table indexed by slice number. 4228 * Each element in the table contains permission bits, 4229 * a destination expander number and a valid bit. The 4230 * valid bit must true for the element to be meaningful. 4231 * 4232 * CASM entry structure 4233 * Bits 15..6 ignored 4234 * Bit 5 valid 4235 * Bits 0..4 expander number 4236 * 4237 * NOTE: the for loop is really enumerating the range of slices, 4238 * which is ALWAYS equal to the range of expanders. Hence, 4239 * AXQ_MAX_EXP is okay to use in this loop. 4240 */ 4241 for (slice = 0; slice < AXQ_MAX_EXP; slice++) { 4242 uint32_t casm = axq_casm_read(axq_exp, axq_slot, slice); 4243 4244 if ((casm & 0x20) && (casm & 0x1f) == e) 4245 drmach_slice_table[e] = 0x20 | slice; 4246 } 4247 } 4248 4249 /* 4250 * Get base and bound PAs for slot 1 board lpa programming 4251 * If a cpu/mem board is present in the same expander, use slice 4252 * information corresponding to the CASM. Otherwise, set base and 4253 * bound PAs to 0. 4254 */ 4255 static void 4256 drmach_lpa_bb_get(drmach_board_t *s1bp, uint64_t *basep, uint64_t *boundp) 4257 { 4258 drmachid_t s0id; 4259 4260 ASSERT(mutex_owned(&drmach_slice_table_lock)); 4261 ASSERT(DRMACH_BNUM2SLOT(s1bp->bnum) == 1); 4262 4263 *basep = *boundp = 0; 4264 if (drmach_array_get(drmach_boards, s1bp->bnum - 1, &s0id) == 0 && 4265 s0id != 0) { 4266 4267 uint32_t slice; 4268 if ((slice = drmach_slice_table[DRMACH_BNUM2EXP(s1bp->bnum)]) 4269 & 0x20) { 4270 *basep = DRMACH_SLICE_TO_PA(slice & DRMACH_SLICE_MASK); 4271 *boundp = *basep + DRMACH_MEM_SLICE_SIZE; 4272 } 4273 } 4274 } 4275 4276 4277 /* 4278 * Reprogram slot 1 lpa's as required. 4279 * The purpose of this routine is maintain the LPA settings of the devices 4280 * in slot 1. To date we know Schizo and Cheetah are the only devices that 4281 * require this attention. The LPA setting must match the slice field in the 4282 * CASM element for the local expander. This field is guaranteed to be 4283 * programmed in accordance with the cacheable address space on the slot 0 4284 * board of the local expander. If no memory is present on the slot 0 board, 4285 * there is no cacheable address space and, hence, the CASM slice field will 4286 * be zero or its valid bit will be false (or both). 4287 */ 4288 4289 static void 4290 drmach_slot1_lpa_set(drmach_board_t *bp) 4291 { 4292 drmachid_t id; 4293 drmach_board_t *s1bp = NULL; 4294 int rv, idx, is_maxcat = 1; 4295 uint64_t last_scsr_pa = 0; 4296 uint64_t new_basepa, new_boundpa; 4297 4298 if (DRMACH_BNUM2SLOT(bp->bnum)) { 4299 s1bp = bp; 4300 if (s1bp->devices == NULL) { 4301 DRMACH_PR("drmach...lpa_set: slot1=%d not present", 4302 bp->bnum); 4303 return; 4304 } 4305 } else { 4306 rv = drmach_array_get(drmach_boards, bp->bnum + 1, &id); 4307 /* nothing to do when board is not found or has no devices */ 4308 s1bp = id; 4309 if (rv == -1 || s1bp == NULL || s1bp->devices == NULL) { 4310 DRMACH_PR("drmach...lpa_set: slot1=%d not present", 4311 bp->bnum + 1); 4312 return; 4313 } 4314 ASSERT(DRMACH_IS_BOARD_ID(id)); 4315 } 4316 mutex_enter(&drmach_slice_table_lock); 4317 drmach_lpa_bb_get(s1bp, &new_basepa, &new_boundpa); 4318 DRMACH_PR("drmach_...lpa_set: bnum=%d base=0x%lx bound=0x%lx\n", 4319 s1bp->bnum, new_basepa, new_boundpa); 4320 4321 rv = drmach_array_first(s1bp->devices, &idx, &id); 4322 while (rv == 0) { 4323 if (DRMACH_IS_IO_ID(id)) { 4324 drmach_io_t *io = id; 4325 4326 is_maxcat = 0; 4327 4328 /* 4329 * Skip all non-Schizo IO devices (only IO nodes 4330 * that are Schizo devices have non-zero scsr_pa). 4331 * Filter out "other" leaf to avoid writing to the 4332 * same Schizo Control/Status Register twice. 4333 */ 4334 if (io->scsr_pa && io->scsr_pa != last_scsr_pa) { 4335 uint64_t scsr; 4336 4337 scsr = lddphysio(io->scsr_pa); 4338 DRMACH_PR("drmach...lpa_set: old scsr=0x%lx\n", 4339 scsr); 4340 scsr &= ~(DRMACH_LPA_BASE_MASK | 4341 DRMACH_LPA_BND_MASK); 4342 scsr |= DRMACH_PA_TO_LPA_BASE(new_basepa); 4343 scsr |= DRMACH_PA_TO_LPA_BND(new_boundpa); 4344 4345 stdphysio(io->scsr_pa, scsr); 4346 DRMACH_PR("drmach...lpa_set: new scsr=0x%lx\n", 4347 scsr); 4348 4349 last_scsr_pa = io->scsr_pa; 4350 } 4351 } 4352 rv = drmach_array_next(s1bp->devices, &idx, &id); 4353 } 4354 4355 if (is_maxcat && DRMACH_L1_SET_LPA(s1bp) && drmach_reprogram_lpa) { 4356 extern xcfunc_t drmach_set_lpa; 4357 4358 DRMACH_PR("reprogramming maxcat lpa's"); 4359 4360 mutex_enter(&cpu_lock); 4361 rv = drmach_array_first(s1bp->devices, &idx, &id); 4362 while (rv == 0 && id != NULL) { 4363 if (DRMACH_IS_CPU_ID(id)) { 4364 int ntries; 4365 processorid_t cpuid; 4366 4367 cpuid = ((drmach_cpu_t *)id)->cpuid; 4368 4369 /* 4370 * Check for unconfigured or powered-off 4371 * MCPUs. If CPU_READY flag is clear, the 4372 * MCPU cannot be xcalled. 4373 */ 4374 if ((cpu[cpuid] == NULL) || 4375 (cpu[cpuid]->cpu_flags & 4376 CPU_READY) == 0) { 4377 4378 rv = drmach_array_next(s1bp->devices, 4379 &idx, &id); 4380 continue; 4381 } 4382 4383 /* 4384 * XXX CHEETAH SUPPORT 4385 * for cheetah, we need to clear iocage 4386 * memory since it will be used for e$ flush 4387 * in drmach_set_lpa. 4388 */ 4389 if (drmach_is_cheetah) { 4390 mutex_enter(&drmach_iocage_lock); 4391 while (drmach_iocage_is_busy) 4392 cv_wait(&drmach_iocage_cv, 4393 &drmach_iocage_lock); 4394 drmach_iocage_is_busy = 1; 4395 drmach_iocage_mem_scrub(ecache_size * 4396 2); 4397 mutex_exit(&drmach_iocage_lock); 4398 } 4399 4400 /* 4401 * drmach_slice_table[*] 4402 * bit 5 valid 4403 * bit 0:4 slice number 4404 * 4405 * drmach_xt_mb[*] format for drmach_set_lpa 4406 * bit 7 valid 4407 * bit 6 set null LPA 4408 * (overrides bits 0:4) 4409 * bit 0:4 slice number 4410 * 4411 * drmach_set_lpa derives processor CBASE and 4412 * CBND from bits 6 and 0:4 of drmach_xt_mb. 4413 * If bit 6 is set, then CBASE = CBND = 0. 4414 * Otherwise, CBASE = slice number; 4415 * CBND = slice number + 1. 4416 * No action is taken if bit 7 is zero. 4417 */ 4418 4419 mutex_enter(&drmach_xt_mb_lock); 4420 bzero((void *)drmach_xt_mb, 4421 drmach_xt_mb_size); 4422 4423 if (new_basepa == 0 && new_boundpa == 0) 4424 drmach_xt_mb[cpuid] = 0x80 | 0x40; 4425 else 4426 drmach_xt_mb[cpuid] = 0x80 | 4427 DRMACH_PA_TO_SLICE(new_basepa); 4428 4429 drmach_xt_ready = 0; 4430 4431 xt_one(cpuid, drmach_set_lpa, NULL, NULL); 4432 4433 ntries = drmach_cpu_ntries; 4434 while (!drmach_xt_ready && ntries) { 4435 DELAY(drmach_cpu_delay); 4436 ntries--; 4437 } 4438 mutex_exit(&drmach_xt_mb_lock); 4439 drmach_xt_ready = 0; 4440 4441 /* 4442 * XXX CHEETAH SUPPORT 4443 * for cheetah, we need to clear iocage 4444 * memory since it was used for e$ flush 4445 * in performed drmach_set_lpa. 4446 */ 4447 if (drmach_is_cheetah) { 4448 mutex_enter(&drmach_iocage_lock); 4449 drmach_iocage_mem_scrub(ecache_size * 4450 2); 4451 drmach_iocage_is_busy = 0; 4452 cv_signal(&drmach_iocage_cv); 4453 mutex_exit(&drmach_iocage_lock); 4454 } 4455 } 4456 rv = drmach_array_next(s1bp->devices, &idx, &id); 4457 } 4458 mutex_exit(&cpu_lock); 4459 } 4460 mutex_exit(&drmach_slice_table_lock); 4461 } 4462 4463 /* 4464 * Return the number of connected Panther boards in the domain. 4465 */ 4466 static int 4467 drmach_panther_boards(void) 4468 { 4469 int rv; 4470 int b_idx; 4471 drmachid_t b_id; 4472 drmach_board_t *bp; 4473 int npanther = 0; 4474 4475 rv = drmach_array_first(drmach_boards, &b_idx, &b_id); 4476 while (rv == 0) { 4477 ASSERT(DRMACH_IS_BOARD_ID(b_id)); 4478 bp = b_id; 4479 4480 if (IS_PANTHER(bp->cpu_impl)) 4481 npanther++; 4482 4483 rv = drmach_array_next(drmach_boards, &b_idx, &b_id); 4484 } 4485 4486 return (npanther); 4487 } 4488 4489 /*ARGSUSED*/ 4490 sbd_error_t * 4491 drmach_board_disconnect(drmachid_t id, drmach_opts_t *opts) 4492 { 4493 drmach_board_t *bp; 4494 dr_mbox_msg_t *obufp; 4495 sbd_error_t *err = NULL; 4496 4497 sc_gptwocfg_cookie_t scc; 4498 4499 if (!DRMACH_IS_BOARD_ID(id)) 4500 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4501 bp = id; 4502 4503 /* 4504 * Build the casm info portion of the UNCLAIM message. 4505 * This must be done prior to calling for saf configurator 4506 * deprobe, to ensure that the associated axq instance 4507 * is not detached. 4508 */ 4509 obufp = kmem_zalloc(sizeof (dr_mbox_msg_t), KM_SLEEP); 4510 mutex_enter(&drmach_slice_table_lock); 4511 drmach_msg_memslice_init(obufp->msgdata.dm_ur.mem_slice); 4512 4513 /* 4514 * If disconnecting slot 0 board, update the casm slice table 4515 * info now, for use by drmach_slot1_lpa_set() 4516 */ 4517 if (DRMACH_BNUM2SLOT(bp->bnum) == 0) 4518 drmach_slice_table_update(bp, 1); 4519 4520 drmach_msg_memregs_init(obufp->msgdata.dm_ur.mem_regs); 4521 mutex_exit(&drmach_slice_table_lock); 4522 4523 /* 4524 * Update LPA information for slot1 board 4525 */ 4526 drmach_slot1_lpa_set(bp); 4527 4528 /* disable and flush CDC */ 4529 if (axq_cdc_disable_flush_all() != DDI_SUCCESS) { 4530 axq_cdc_enable_all(); /* paranoia */ 4531 err = DRMACH_INTERNAL_ERROR(); 4532 } 4533 4534 /* 4535 * call saf configurator for deprobe 4536 * It's done now before sending an UNCLAIM message because 4537 * IKP will probe boards it doesn't know about <present at boot> 4538 * prior to unprobing them. If this happens after sending the 4539 * UNCLAIM, it will cause a dstop for domain transgression error. 4540 */ 4541 4542 if (!err) { 4543 scc = sc_unprobe_board(bp->bnum); 4544 axq_cdc_enable_all(); 4545 if (scc != NULL) { 4546 err = drerr_new(0, ESTC_DEPROBE, bp->cm.name); 4547 } 4548 } 4549 4550 /* 4551 * If disconnecting a board from a Panther domain, wait a fixed- 4552 * time delay for pending Safari transactions to complete on the 4553 * disconnecting board's processors. The bus sync list read used 4554 * in drmach_shutdown_asm to synchronize with outstanding Safari 4555 * transactions assumes no read-bypass-write mode for all memory 4556 * controllers. Since Panther supports read-bypass-write, a 4557 * delay is used that is slightly larger than the maximum Safari 4558 * timeout value in the Safari/Fireplane Config Reg. 4559 */ 4560 if (drmach_panther_boards() > 0 || drmach_unclaim_delay_all) { 4561 clock_t stime = ddi_get_lbolt(); 4562 4563 delay(drv_usectohz(drmach_unclaim_usec_delay)); 4564 4565 stime = ddi_get_lbolt() - stime; 4566 DRMACH_PR("delayed %ld ticks (%ld secs) before disconnecting " 4567 "board %s from domain\n", stime, stime / hz, bp->cm.name); 4568 } 4569 4570 if (!err) { 4571 obufp->msgdata.dm_ur.mem_clear = 0; 4572 4573 err = drmach_mbox_trans(DRMSG_UNCLAIM, bp->bnum, (caddr_t)obufp, 4574 sizeof (dr_mbox_msg_t), (caddr_t)NULL, 0); 4575 4576 if (err) { 4577 /* 4578 * if mailbox timeout or unrecoverable error from SC, 4579 * board cannot be touched. Mark the status as 4580 * unusable. 4581 */ 4582 if ((err->e_code == ESTC_SMS_ERR_UNRECOVERABLE) || 4583 (err->e_code == ESTC_MBXRPLY)) 4584 bp->cond = SBD_COND_UNUSABLE; 4585 else { 4586 DRMACH_PR("UNCLAIM failed for bnum=%d\n", 4587 bp->bnum); 4588 DRMACH_PR("calling sc_probe_board: bnum=%d\n", 4589 bp->bnum); 4590 scc = sc_probe_board(bp->bnum); 4591 if (scc == NULL) { 4592 cmn_err(CE_WARN, 4593 "sc_probe_board failed for bnum=%d", 4594 bp->bnum); 4595 } else { 4596 if (DRMACH_BNUM2SLOT(bp->bnum) == 0) { 4597 mutex_enter( 4598 &drmach_slice_table_lock); 4599 drmach_slice_table_update(bp, 4600 0); 4601 mutex_exit( 4602 &drmach_slice_table_lock); 4603 } 4604 drmach_slot1_lpa_set(bp); 4605 } 4606 } 4607 } else { 4608 bp->connected = 0; 4609 /* 4610 * Now that the board has been successfully detached, 4611 * discard platform-specific DIMM serial id information 4612 * for the board. 4613 */ 4614 if ((DRMACH_BNUM2SLOT(bp->bnum) == 0) && 4615 plat_ecc_capability_sc_get( 4616 PLAT_ECC_DIMM_SID_MESSAGE)) { 4617 (void) plat_discard_mem_sids( 4618 DRMACH_BNUM2EXP(bp->bnum)); 4619 } 4620 } 4621 } 4622 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 4623 4624 return (err); 4625 } 4626 4627 static int 4628 drmach_get_portid(drmach_node_t *np) 4629 { 4630 drmach_node_t pp; 4631 int portid; 4632 char type[OBP_MAXPROPNAME]; 4633 4634 if (np->n_getprop(np, "portid", &portid, sizeof (portid)) == 0) 4635 return (portid); 4636 4637 /* 4638 * Get the device_type property to see if we should 4639 * continue processing this node. 4640 */ 4641 if (np->n_getprop(np, "device_type", &type, sizeof (type)) != 0) 4642 return (-1); 4643 4644 /* 4645 * If the device is a CPU without a 'portid' property, 4646 * it is a CMP core. For such cases, the parent node 4647 * has the portid. 4648 */ 4649 if (strcmp(type, DRMACH_CPU_NAMEPROP) == 0) { 4650 if (np->get_parent(np, &pp) != 0) 4651 return (-1); 4652 4653 if (pp.n_getprop(&pp, "portid", &portid, sizeof (portid)) == 0) 4654 return (portid); 4655 } 4656 4657 return (-1); 4658 } 4659 4660 /* 4661 * This is a helper function to determine if a given 4662 * node should be considered for a dr operation according 4663 * to predefined dr type nodes and the node's name. 4664 * Formal Parameter : The name of a device node. 4665 * Return Value: -1, name does not map to a valid dr type. 4666 * A value greater or equal to 0, name is a valid dr type. 4667 */ 4668 static int 4669 drmach_name2type_idx(char *name) 4670 { 4671 int index, ntypes; 4672 4673 if (name == NULL) 4674 return (-1); 4675 4676 /* 4677 * Determine how many possible types are currently supported 4678 * for dr. 4679 */ 4680 ntypes = sizeof (drmach_name2type) / sizeof (drmach_name2type[0]); 4681 4682 /* Determine if the node's name correspond to a predefined type. */ 4683 for (index = 0; index < ntypes; index++) { 4684 if (strcmp(drmach_name2type[index].name, name) == 0) 4685 /* The node is an allowed type for dr. */ 4686 return (index); 4687 } 4688 4689 /* 4690 * If the name of the node does not map to any of the 4691 * types in the array drmach_name2type then the node is not of 4692 * interest to dr. 4693 */ 4694 return (-1); 4695 } 4696 4697 static int 4698 drmach_board_find_devices_cb(drmach_node_walk_args_t *args) 4699 { 4700 drmach_node_t *node = args->node; 4701 drmach_board_cb_data_t *data = args->data; 4702 drmach_board_t *obj = data->obj; 4703 4704 int rv, portid; 4705 drmachid_t id; 4706 drmach_device_t *device; 4707 char name[OBP_MAXDRVNAME]; 4708 4709 portid = drmach_get_portid(node); 4710 if (portid == -1) { 4711 /* 4712 * if the node does not have a portid property, then 4713 * by that information alone it is known that drmach 4714 * is not interested in it. 4715 */ 4716 return (0); 4717 } 4718 rv = node->n_getprop(node, "name", name, OBP_MAXDRVNAME); 4719 4720 /* The node must have a name */ 4721 if (rv) 4722 return (0); 4723 4724 /* 4725 * Ignore devices whose portid do not map to this board, 4726 * or that their name property is not mapped to a valid 4727 * dr device name. 4728 */ 4729 if ((drmach_portid2bnum(portid) != obj->bnum) || 4730 (drmach_name2type_idx(name) < 0)) 4731 return (0); 4732 4733 /* 4734 * Create a device data structure from this node data. 4735 * The call may yield nothing if the node is not of interest 4736 * to drmach. 4737 */ 4738 data->err = drmach_device_new(node, obj, portid, &id); 4739 if (data->err) 4740 return (-1); 4741 else if (!id) { 4742 /* 4743 * drmach_device_new examined the node we passed in 4744 * and determined that it was either one not of 4745 * interest to drmach or the PIM dr layer. 4746 * So, it is skipped. 4747 */ 4748 return (0); 4749 } 4750 4751 rv = drmach_array_set(obj->devices, data->ndevs++, id); 4752 if (rv) { 4753 data->err = DRMACH_INTERNAL_ERROR(); 4754 return (-1); 4755 } 4756 4757 device = id; 4758 4759 #ifdef DEBUG 4760 DRMACH_PR("%d %s %d %p\n", portid, device->type, device->unum, id); 4761 if (DRMACH_IS_IO_ID(id)) 4762 DRMACH_PR("ndevs = %d dip/node = %p", data->ndevs, node->here); 4763 #endif 4764 4765 data->err = (*data->found)(data->a, device->type, device->unum, id); 4766 return (data->err == NULL ? 0 : -1); 4767 } 4768 4769 sbd_error_t * 4770 drmach_board_find_devices(drmachid_t id, void *a, 4771 sbd_error_t *(*found)(void *a, const char *, int, drmachid_t)) 4772 { 4773 drmach_board_t *bp = (drmach_board_t *)id; 4774 sbd_error_t *err; 4775 int max_devices; 4776 int rv; 4777 drmach_board_cb_data_t data; 4778 4779 if (!DRMACH_IS_BOARD_ID(id)) 4780 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4781 4782 max_devices = plat_max_cpu_units_per_board(); 4783 max_devices += plat_max_mem_units_per_board(); 4784 max_devices += plat_max_io_units_per_board(); 4785 4786 bp->devices = drmach_array_new(0, max_devices); 4787 4788 if (bp->tree == NULL) 4789 bp->tree = drmach_node_new(); 4790 4791 data.obj = bp; 4792 data.ndevs = 0; 4793 data.found = found; 4794 data.a = a; 4795 data.err = NULL; 4796 4797 mutex_enter(&drmach_slice_table_lock); 4798 mutex_enter(&drmach_bus_sync_lock); 4799 4800 rv = drmach_node_walk(bp->tree, &data, drmach_board_find_devices_cb); 4801 4802 drmach_slice_table_update(bp, 0); 4803 drmach_bus_sync_list_update(); 4804 4805 mutex_exit(&drmach_bus_sync_lock); 4806 mutex_exit(&drmach_slice_table_lock); 4807 4808 if (rv == 0) { 4809 err = NULL; 4810 drmach_slot1_lpa_set(bp); 4811 } else { 4812 drmach_array_dispose(bp->devices, drmach_device_dispose); 4813 bp->devices = NULL; 4814 4815 if (data.err) 4816 err = data.err; 4817 else 4818 err = DRMACH_INTERNAL_ERROR(); 4819 } 4820 4821 return (err); 4822 } 4823 4824 int 4825 drmach_board_lookup(int bnum, drmachid_t *id) 4826 { 4827 int rv = 0; 4828 4829 if (!drmach_initialized && drmach_init() == -1) { 4830 *id = 0; 4831 return (-1); 4832 } 4833 rw_enter(&drmach_boards_rwlock, RW_WRITER); 4834 if (drmach_array_get(drmach_boards, bnum, id)) { 4835 *id = 0; 4836 rv = -1; 4837 } else { 4838 caddr_t obufp; 4839 dr_showboard_t shb; 4840 sbd_error_t *err = NULL; 4841 drmach_board_t *bp; 4842 4843 bp = *id; 4844 4845 if (bp) 4846 rw_downgrade(&drmach_boards_rwlock); 4847 4848 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 4849 err = drmach_mbox_trans(DRMSG_SHOWBOARD, bnum, obufp, 4850 sizeof (dr_proto_hdr_t), (caddr_t)&shb, 4851 sizeof (dr_showboard_t)); 4852 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 4853 4854 if (err) { 4855 if (err->e_code == ESTC_UNAVAILABLE) { 4856 *id = 0; 4857 rv = -1; 4858 } 4859 sbd_err_clear(&err); 4860 } else { 4861 if (!bp) 4862 bp = *id = (drmachid_t)drmach_board_new(bnum); 4863 bp->connected = (shb.bd_assigned && shb.bd_active); 4864 bp->empty = shb.slot_empty; 4865 4866 switch (shb.test_status) { 4867 case DR_TEST_STATUS_UNKNOWN: 4868 case DR_TEST_STATUS_IPOST: 4869 case DR_TEST_STATUS_ABORTED: 4870 bp->cond = SBD_COND_UNKNOWN; 4871 break; 4872 case DR_TEST_STATUS_PASSED: 4873 bp->cond = SBD_COND_OK; 4874 break; 4875 case DR_TEST_STATUS_FAILED: 4876 bp->cond = SBD_COND_FAILED; 4877 break; 4878 default: 4879 bp->cond = SBD_COND_UNKNOWN; 4880 DRMACH_PR("Unknown test status=0x%x from SC\n", 4881 shb.test_status); 4882 break; 4883 } 4884 (void) strncpy(bp->type, shb.board_type, 4885 sizeof (bp->type)); 4886 bp->assigned = shb.bd_assigned; 4887 bp->powered = shb.power_on; 4888 } 4889 } 4890 rw_exit(&drmach_boards_rwlock); 4891 return (rv); 4892 } 4893 4894 sbd_error_t * 4895 drmach_board_name(int bnum, char *buf, int buflen) 4896 { 4897 (void) snprintf(buf, buflen, "%s%d", DRMACH_BNUM2SLOT(bnum) ? 4898 "IO" : "SB", DRMACH_BNUM2EXP(bnum)); 4899 4900 return (NULL); 4901 } 4902 4903 sbd_error_t * 4904 drmach_board_poweroff(drmachid_t id) 4905 { 4906 drmach_board_t *bp; 4907 sbd_error_t *err; 4908 drmach_status_t stat; 4909 4910 if (!DRMACH_IS_BOARD_ID(id)) 4911 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4912 bp = id; 4913 4914 err = drmach_board_status(id, &stat); 4915 if (!err) { 4916 if (stat.configured || stat.busy) 4917 err = drerr_new(0, ESTC_CONFIGBUSY, bp->cm.name); 4918 else { 4919 caddr_t obufp; 4920 4921 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 4922 err = drmach_mbox_trans(DRMSG_POWEROFF, bp->bnum, obufp, 4923 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 4924 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 4925 if (!err) 4926 bp->powered = 0; 4927 } 4928 } 4929 return (err); 4930 } 4931 4932 sbd_error_t * 4933 drmach_board_poweron(drmachid_t id) 4934 { 4935 drmach_board_t *bp; 4936 caddr_t obufp; 4937 sbd_error_t *err; 4938 4939 if (!DRMACH_IS_BOARD_ID(id)) 4940 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4941 bp = id; 4942 4943 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 4944 err = drmach_mbox_trans(DRMSG_POWERON, bp->bnum, obufp, 4945 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 4946 if (!err) 4947 bp->powered = 1; 4948 4949 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 4950 4951 return (err); 4952 } 4953 4954 static sbd_error_t * 4955 drmach_board_release(drmachid_t id) 4956 { 4957 if (!DRMACH_IS_BOARD_ID(id)) 4958 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4959 return (NULL); 4960 } 4961 4962 sbd_error_t * 4963 drmach_board_test(drmachid_t id, drmach_opts_t *opts, int force) 4964 { 4965 drmach_board_t *bp; 4966 drmach_device_t *dp[MAX_CORES_PER_CMP]; 4967 dr_mbox_msg_t *obufp; 4968 sbd_error_t *err; 4969 dr_testboard_reply_t tbr; 4970 int cpylen; 4971 char *copts; 4972 int is_io; 4973 cpu_flag_t oflags[MAX_CORES_PER_CMP]; 4974 4975 if (!DRMACH_IS_BOARD_ID(id)) 4976 return (drerr_new(0, ESTC_INAPPROP, NULL)); 4977 bp = id; 4978 4979 /* 4980 * If the board is an I/O or MAXCAT board, setup I/O cage for 4981 * testing. Slot 1 indicates I/O or MAXCAT board. 4982 */ 4983 4984 is_io = DRMACH_BNUM2SLOT(bp->bnum); 4985 4986 obufp = kmem_zalloc(sizeof (dr_mbox_msg_t), KM_SLEEP); 4987 4988 if (force) 4989 obufp->msgdata.dm_tb.force = 1; 4990 4991 obufp->msgdata.dm_tb.immediate = 1; 4992 4993 if ((opts->size > 0) && ((copts = opts->copts) != NULL)) { 4994 cpylen = (opts->size > DR_HPOPTLEN ? DR_HPOPTLEN : opts->size); 4995 bcopy(copts, obufp->msgdata.dm_tb.hpost_opts, cpylen); 4996 } 4997 4998 if (is_io) { 4999 err = drmach_iocage_setup(&obufp->msgdata.dm_tb, dp, oflags); 5000 5001 if (err) { 5002 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 5003 return (err); 5004 } 5005 } 5006 5007 err = drmach_mbox_trans(DRMSG_TESTBOARD, bp->bnum, (caddr_t)obufp, 5008 sizeof (dr_mbox_msg_t), (caddr_t)&tbr, sizeof (tbr)); 5009 5010 if (!err) 5011 bp->cond = SBD_COND_OK; 5012 else 5013 bp->cond = SBD_COND_UNKNOWN; 5014 5015 if ((!err) && (tbr.test_status != DR_TEST_STATUS_PASSED)) { 5016 /* examine test status */ 5017 switch (tbr.test_status) { 5018 case DR_TEST_STATUS_IPOST: 5019 bp->cond = SBD_COND_UNKNOWN; 5020 err = drerr_new(0, ESTC_TEST_IN_PROGRESS, NULL); 5021 break; 5022 case DR_TEST_STATUS_UNKNOWN: 5023 bp->cond = SBD_COND_UNKNOWN; 5024 err = drerr_new(1, 5025 ESTC_TEST_STATUS_UNKNOWN, NULL); 5026 break; 5027 case DR_TEST_STATUS_FAILED: 5028 bp->cond = SBD_COND_FAILED; 5029 err = drerr_new(1, ESTC_TEST_FAILED, NULL); 5030 break; 5031 case DR_TEST_STATUS_ABORTED: 5032 bp->cond = SBD_COND_UNKNOWN; 5033 err = drerr_new(1, ESTC_TEST_ABORTED, NULL); 5034 break; 5035 default: 5036 bp->cond = SBD_COND_UNKNOWN; 5037 err = drerr_new(1, ESTC_TEST_RESULT_UNKNOWN, 5038 NULL); 5039 break; 5040 } 5041 } 5042 5043 /* 5044 * If I/O cage test was performed, check for availability of the 5045 * cpu used. If cpu has been returned, it's OK to proceed with 5046 * reconfiguring it for use. 5047 */ 5048 if (is_io) { 5049 DRMACH_PR("drmach_board_test: tbr.cpu_recovered: %d", 5050 tbr.cpu_recovered); 5051 DRMACH_PR("drmach_board_test: port id: %d", 5052 tbr.cpu_portid); 5053 5054 /* 5055 * Check the cpu_recovered flag in the testboard reply, or 5056 * if the testboard request message was not sent to SMS due 5057 * to an mboxsc_putmsg() failure, it's OK to recover the 5058 * cpu since hpost hasn't touched it. 5059 */ 5060 if ((tbr.cpu_recovered && tbr.cpu_portid == 5061 obufp->msgdata.dm_tb.cpu_portid) || 5062 ((err) && (err->e_code == ESTC_MBXRQST))) { 5063 5064 int i; 5065 5066 mutex_enter(&cpu_lock); 5067 for (i = 0; i < MAX_CORES_PER_CMP; i++) { 5068 if (dp[i] != NULL) { 5069 (void) drmach_iocage_cpu_return(dp[i], 5070 oflags[i]); 5071 } 5072 } 5073 mutex_exit(&cpu_lock); 5074 } else { 5075 cmn_err(CE_WARN, "Unable to recover port id %d " 5076 "after I/O cage test: cpu_recovered=%d, " 5077 "returned portid=%d", 5078 obufp->msgdata.dm_tb.cpu_portid, 5079 tbr.cpu_recovered, tbr.cpu_portid); 5080 } 5081 (void) drmach_iocage_mem_return(&tbr); 5082 } 5083 kmem_free(obufp, sizeof (dr_mbox_msg_t)); 5084 5085 return (err); 5086 } 5087 5088 sbd_error_t * 5089 drmach_board_unassign(drmachid_t id) 5090 { 5091 drmach_board_t *bp; 5092 sbd_error_t *err; 5093 drmach_status_t stat; 5094 caddr_t obufp; 5095 5096 rw_enter(&drmach_boards_rwlock, RW_WRITER); 5097 5098 if (!DRMACH_IS_BOARD_ID(id)) { 5099 rw_exit(&drmach_boards_rwlock); 5100 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5101 } 5102 bp = id; 5103 5104 err = drmach_board_status(id, &stat); 5105 if (err) { 5106 rw_exit(&drmach_boards_rwlock); 5107 return (err); 5108 } 5109 5110 if (stat.configured || stat.busy) { 5111 err = drerr_new(0, ESTC_CONFIGBUSY, bp->cm.name); 5112 } else { 5113 5114 obufp = kmem_zalloc(sizeof (dr_proto_hdr_t), KM_SLEEP); 5115 err = drmach_mbox_trans(DRMSG_UNASSIGN, bp->bnum, obufp, 5116 sizeof (dr_proto_hdr_t), (caddr_t)NULL, 0); 5117 kmem_free(obufp, sizeof (dr_proto_hdr_t)); 5118 if (!err) { 5119 if (drmach_array_set(drmach_boards, bp->bnum, 0) != 0) 5120 err = DRMACH_INTERNAL_ERROR(); 5121 else 5122 drmach_board_dispose(bp); 5123 } 5124 } 5125 rw_exit(&drmach_boards_rwlock); 5126 return (err); 5127 } 5128 5129 static sbd_error_t * 5130 drmach_read_reg_addr(drmach_device_t *dp, uint64_t *p) 5131 { 5132 int len; 5133 drmach_reg_t reg; 5134 drmach_node_t pp; 5135 drmach_node_t *np = dp->node; 5136 5137 /* 5138 * If the node does not have a portid property, 5139 * it represents a CMP device. For a CMP, the reg 5140 * property of the parent holds the information of 5141 * interest. 5142 */ 5143 if (dp->node->n_getproplen(dp->node, "portid", &len) != 0) { 5144 5145 if (dp->node->get_parent(dp->node, &pp) != 0) { 5146 return (DRMACH_INTERNAL_ERROR()); 5147 } 5148 np = &pp; 5149 } 5150 5151 if (np->n_getproplen(np, "reg", &len) != 0) 5152 return (DRMACH_INTERNAL_ERROR()); 5153 5154 if (len != sizeof (reg)) 5155 return (DRMACH_INTERNAL_ERROR()); 5156 5157 if (np->n_getprop(np, "reg", ®, sizeof (reg)) != 0) 5158 return (DRMACH_INTERNAL_ERROR()); 5159 5160 /* reassemble 64-bit base address */ 5161 *p = ((uint64_t)reg.reg_addr_hi << 32) | reg.reg_addr_lo; 5162 5163 return (NULL); 5164 } 5165 5166 static void 5167 drmach_cpu_read(uint64_t arg1, uint64_t arg2) 5168 { 5169 uint64_t *saf_config_reg = (uint64_t *)arg1; 5170 uint_t *reg_read = (uint_t *)arg2; 5171 5172 *saf_config_reg = lddsafconfig(); 5173 *reg_read = 0x1; 5174 } 5175 5176 /* 5177 * A return value of 1 indicates success and 0 indicates a failure 5178 */ 5179 static int 5180 drmach_cpu_read_scr(drmach_cpu_t *cp, uint64_t *scr) 5181 { 5182 5183 int rv = 0x0; 5184 5185 *scr = 0x0; 5186 5187 /* 5188 * Confirm cpu was in ready set when xc was issued. 5189 * This is done by verifying rv which is 5190 * set to 0x1 when xc_one is successful. 5191 */ 5192 xc_one(cp->dev.portid, (xcfunc_t *)drmach_cpu_read, 5193 (uint64_t)scr, (uint64_t)&rv); 5194 5195 return (rv); 5196 5197 } 5198 5199 static sbd_error_t * 5200 drmach_cpu_read_cpuid(drmach_cpu_t *cp, processorid_t *cpuid) 5201 { 5202 drmach_node_t *np; 5203 5204 np = cp->dev.node; 5205 5206 /* 5207 * If a CPU does not have a portid property, it must 5208 * be a CMP device with a cpuid property. 5209 */ 5210 if (np->n_getprop(np, "portid", cpuid, sizeof (*cpuid)) != 0) { 5211 5212 if (np->n_getprop(np, "cpuid", cpuid, sizeof (*cpuid)) != 0) { 5213 return (DRMACH_INTERNAL_ERROR()); 5214 } 5215 } 5216 5217 return (NULL); 5218 } 5219 5220 /* Starcat CMP core id is bit 2 of the cpuid */ 5221 #define DRMACH_COREID_MASK (1u << 2) 5222 #define DRMACH_CPUID2SRAM_IDX(id) \ 5223 ((id & DRMACH_COREID_MASK) >> 1 | (id & 0x1)) 5224 5225 static sbd_error_t * 5226 drmach_cpu_new(drmach_device_t *proto, drmachid_t *idp) 5227 { 5228 static void drmach_cpu_dispose(drmachid_t); 5229 static sbd_error_t *drmach_cpu_release(drmachid_t); 5230 static sbd_error_t *drmach_cpu_status(drmachid_t, drmach_status_t *); 5231 5232 sbd_error_t *err; 5233 uint64_t scr_pa; 5234 drmach_cpu_t *cp = NULL; 5235 pfn_t pfn; 5236 uint64_t cpu_stardrb_offset, cpu_sram_pa; 5237 int idx; 5238 int impl; 5239 processorid_t cpuid; 5240 5241 err = drmach_read_reg_addr(proto, &scr_pa); 5242 if (err) { 5243 goto fail; 5244 } 5245 5246 cp = kmem_zalloc(sizeof (drmach_cpu_t), KM_SLEEP); 5247 bcopy(proto, &cp->dev, sizeof (cp->dev)); 5248 cp->dev.node = drmach_node_dup(proto->node); 5249 cp->dev.cm.isa = (void *)drmach_cpu_new; 5250 cp->dev.cm.dispose = drmach_cpu_dispose; 5251 cp->dev.cm.release = drmach_cpu_release; 5252 cp->dev.cm.status = drmach_cpu_status; 5253 cp->scr_pa = scr_pa; 5254 5255 err = drmach_cpu_read_cpuid(cp, &cpuid); 5256 if (err) { 5257 goto fail; 5258 } 5259 5260 err = drmach_cpu_get_impl(cp, &impl); 5261 if (err) { 5262 goto fail; 5263 } 5264 5265 cp->cpuid = cpuid; 5266 cp->coreid = STARCAT_CPUID_TO_COREID(cp->cpuid); 5267 cp->dev.unum = STARCAT_CPUID_TO_AGENT(cp->cpuid); 5268 5269 /* 5270 * Init the board cpu type. Assumes all board cpus are the same type. 5271 */ 5272 if (cp->dev.bp->cpu_impl == 0) { 5273 cp->dev.bp->cpu_impl = impl; 5274 } 5275 ASSERT(cp->dev.bp->cpu_impl == impl); 5276 5277 /* 5278 * XXX CHEETAH SUPPORT 5279 * determine if the domain uses Cheetah procs 5280 */ 5281 if (drmach_is_cheetah < 0) { 5282 drmach_is_cheetah = IS_CHEETAH(impl); 5283 } 5284 5285 /* 5286 * Initialize TTE for mapping CPU SRAM STARDRB buffer. 5287 * The STARDRB buffer (16KB on Cheetah+ boards, 32KB on 5288 * Jaguar/Panther boards) is shared by all cpus in a Safari port 5289 * pair. Each cpu uses 8KB according to the following layout: 5290 * 5291 * Page 0: even numbered Cheetah+'s and Panther/Jaguar core 0's 5292 * Page 1: odd numbered Cheetah+'s and Panther/Jaguar core 0's 5293 * Page 2: even numbered Panther/Jaguar core 1's 5294 * Page 3: odd numbered Panther/Jaguar core 1's 5295 */ 5296 idx = DRMACH_CPUID2SRAM_IDX(cp->cpuid); 5297 cpu_stardrb_offset = cp->dev.bp->stardrb_offset + (PAGESIZE * idx); 5298 cpu_sram_pa = DRMACH_CPU_SRAM_ADDR + cpu_stardrb_offset; 5299 pfn = cpu_sram_pa >> PAGESHIFT; 5300 5301 ASSERT(drmach_cpu_sram_tte[cp->cpuid].tte_inthi == 0 && 5302 drmach_cpu_sram_tte[cp->cpuid].tte_intlo == 0); 5303 drmach_cpu_sram_tte[cp->cpuid].tte_inthi = TTE_PFN_INTHI(pfn) | 5304 TTE_VALID_INT | TTE_SZ_INT(TTE8K); 5305 drmach_cpu_sram_tte[cp->cpuid].tte_intlo = TTE_PFN_INTLO(pfn) | 5306 TTE_HWWR_INT | TTE_PRIV_INT | TTE_LCK_INT; 5307 5308 DRMACH_PR("drmach_cpu_new: cpuid=%d, coreid=%d, stardrb_offset=0x%lx, " 5309 "cpu_sram_offset=0x%lx, idx=%d\n", cp->cpuid, cp->coreid, 5310 cp->dev.bp->stardrb_offset, cpu_stardrb_offset, idx); 5311 5312 (void) snprintf(cp->dev.cm.name, sizeof (cp->dev.cm.name), "%s%d", 5313 cp->dev.type, cp->dev.unum); 5314 5315 *idp = (drmachid_t)cp; 5316 return (NULL); 5317 5318 fail: 5319 if (cp) { 5320 drmach_node_dispose(cp->dev.node); 5321 kmem_free(cp, sizeof (*cp)); 5322 } 5323 5324 *idp = (drmachid_t)0; 5325 return (err); 5326 } 5327 5328 static void 5329 drmach_cpu_dispose(drmachid_t id) 5330 { 5331 drmach_cpu_t *self; 5332 processorid_t cpuid; 5333 5334 ASSERT(DRMACH_IS_CPU_ID(id)); 5335 5336 self = id; 5337 if (self->dev.node) 5338 drmach_node_dispose(self->dev.node); 5339 5340 cpuid = self->cpuid; 5341 ASSERT(TTE_IS_VALID(&drmach_cpu_sram_tte[cpuid]) && 5342 TTE_IS_8K(&drmach_cpu_sram_tte[cpuid]) && 5343 TTE_IS_PRIVILEGED(&drmach_cpu_sram_tte[cpuid]) && 5344 TTE_IS_LOCKED(&drmach_cpu_sram_tte[cpuid])); 5345 drmach_cpu_sram_tte[cpuid].tte_inthi = 0; 5346 drmach_cpu_sram_tte[cpuid].tte_intlo = 0; 5347 5348 kmem_free(self, sizeof (*self)); 5349 } 5350 5351 static int 5352 drmach_cpu_start(struct cpu *cp) 5353 { 5354 extern xcfunc_t drmach_set_lpa; 5355 extern void restart_other_cpu(int); 5356 int cpuid = cp->cpu_id; 5357 int rv, bnum; 5358 drmach_board_t *bp; 5359 5360 ASSERT(MUTEX_HELD(&cpu_lock)); 5361 ASSERT(cpunodes[cpuid].nodeid != (pnode_t)0); 5362 5363 cp->cpu_flags &= ~CPU_POWEROFF; 5364 5365 /* 5366 * NOTE: restart_other_cpu pauses cpus during the 5367 * slave cpu start. This helps to quiesce the 5368 * bus traffic a bit which makes the tick sync 5369 * routine in the prom more robust. 5370 */ 5371 DRMACH_PR("COLD START for cpu (%d)\n", cpuid); 5372 5373 if (prom_hotaddcpu(cpuid) != 0) { 5374 cmn_err(CE_PANIC, "prom_hotaddcpu() for cpuid=%d failed.", 5375 cpuid); 5376 } 5377 5378 restart_other_cpu(cpuid); 5379 5380 bnum = drmach_portid2bnum(cpunodes[cpuid].portid); 5381 rv = drmach_array_get(drmach_boards, bnum, (drmachid_t)&bp); 5382 if (rv == -1 || bp == NULL) { 5383 DRMACH_PR("drmach_cpu_start: cannot read board info for " 5384 "cpuid=%d: rv=%d, bp=%p\n", cpuid, rv, (void *)bp); 5385 } else if (DRMACH_L1_SET_LPA(bp) && drmach_reprogram_lpa) { 5386 int exp; 5387 int ntries; 5388 5389 mutex_enter(&drmach_xt_mb_lock); 5390 mutex_enter(&drmach_slice_table_lock); 5391 bzero((void *)drmach_xt_mb, drmach_xt_mb_size); 5392 5393 /* 5394 * drmach_slice_table[*] 5395 * bit 5 valid 5396 * bit 0:4 slice number 5397 * 5398 * drmach_xt_mb[*] format for drmach_set_lpa 5399 * bit 7 valid 5400 * bit 6 set null LPA (overrides bits 0:4) 5401 * bit 0:4 slice number 5402 * 5403 * drmach_set_lpa derives processor CBASE and CBND 5404 * from bits 6 and 0:4 of drmach_xt_mb. If bit 6 is 5405 * set, then CBASE = CBND = 0. Otherwise, CBASE = slice 5406 * number; CBND = slice number + 1. 5407 * No action is taken if bit 7 is zero. 5408 */ 5409 exp = (cpuid >> 5) & 0x1f; 5410 if (drmach_slice_table[exp] & 0x20) { 5411 drmach_xt_mb[cpuid] = 0x80 | 5412 (drmach_slice_table[exp] & 0x1f); 5413 } else { 5414 drmach_xt_mb[cpuid] = 0x80 | 0x40; 5415 } 5416 5417 drmach_xt_ready = 0; 5418 5419 xt_one(cpuid, drmach_set_lpa, NULL, NULL); 5420 5421 ntries = drmach_cpu_ntries; 5422 while (!drmach_xt_ready && ntries) { 5423 DELAY(drmach_cpu_delay); 5424 ntries--; 5425 } 5426 5427 mutex_exit(&drmach_slice_table_lock); 5428 mutex_exit(&drmach_xt_mb_lock); 5429 5430 DRMACH_PR( 5431 "waited %d out of %d tries for drmach_set_lpa on cpu%d", 5432 drmach_cpu_ntries - ntries, drmach_cpu_ntries, 5433 cp->cpu_id); 5434 } 5435 5436 xt_one(cpuid, vtag_flushpage_tl1, (uint64_t)drmach_cpu_sram_va, 5437 (uint64_t)ksfmmup); 5438 5439 return (0); 5440 } 5441 5442 /* 5443 * A detaching CPU is xcalled with an xtrap to drmach_cpu_stop_self() after 5444 * it has been offlined. The function of this routine is to get the cpu 5445 * spinning in a safe place. The requirement is that the system will not 5446 * reference anything on the detaching board (memory and i/o is detached 5447 * elsewhere) and that the CPU not reference anything on any other board 5448 * in the system. This isolation is required during and after the writes 5449 * to the domain masks to remove the board from the domain. 5450 * 5451 * To accomplish this isolation the following is done: 5452 * 1) Create a locked mapping to the STARDRB data buffer located 5453 * in this cpu's sram. There is one TTE per cpu, initialized in 5454 * drmach_cpu_new(). The cpuid is used to select which TTE to use. 5455 * Each Safari port pair shares the CPU SRAM on a Serengeti CPU/MEM 5456 * board. The STARDRB buffer is 16KB on Cheetah+ boards, 32KB on Jaguar 5457 * boards. Each STARDRB buffer is logically divided by DR into one 5458 * 8KB page per cpu (or Jaguar core). 5459 * 2) Copy the target function (drmach_shutdown_asm) into buffer. 5460 * 3) Jump to function now in the cpu sram. 5461 * Function will: 5462 * 3.1) Flush its Ecache (displacement). 5463 * 3.2) Flush its Dcache with HW mechanism. 5464 * 3.3) Flush its Icache with HW mechanism. 5465 * 3.4) Flush all valid and _unlocked_ D-TLB and I-TLB entries. 5466 * 3.5) Set LPA to NULL 5467 * 3.6) Clear xt_mb to signal completion. Note: cache line is 5468 * recovered by drmach_cpu_poweroff(). 5469 * 4) Jump into an infinite loop. 5470 */ 5471 5472 static void 5473 drmach_cpu_stop_self(void) 5474 { 5475 extern void drmach_shutdown_asm(uint64_t, uint64_t, int, int, uint64_t); 5476 extern void drmach_shutdown_asm_end(void); 5477 5478 tte_t *tte; 5479 uint_t *p, *q; 5480 uint64_t stack_pointer; 5481 5482 ASSERT(((ptrdiff_t)drmach_shutdown_asm_end - 5483 (ptrdiff_t)drmach_shutdown_asm) < PAGESIZE); 5484 5485 tte = &drmach_cpu_sram_tte[CPU->cpu_id]; 5486 ASSERT(TTE_IS_VALID(tte) && TTE_IS_8K(tte) && TTE_IS_PRIVILEGED(tte) && 5487 TTE_IS_LOCKED(tte)); 5488 sfmmu_dtlb_ld_kva(drmach_cpu_sram_va, tte); 5489 sfmmu_itlb_ld_kva(drmach_cpu_sram_va, tte); 5490 5491 /* copy text. standard bcopy not designed to work in nc space */ 5492 p = (uint_t *)drmach_cpu_sram_va; 5493 q = (uint_t *)drmach_shutdown_asm; 5494 while (q < (uint_t *)drmach_shutdown_asm_end) 5495 *p++ = *q++; 5496 5497 /* zero to assist debug */ 5498 q = (uint_t *)(drmach_cpu_sram_va + PAGESIZE); 5499 while (p < q) 5500 *p++ = 0; 5501 5502 /* a parking spot for the stack pointer */ 5503 stack_pointer = (uint64_t)q; 5504 5505 /* call copy of drmach_shutdown_asm */ 5506 (*(void (*)())drmach_cpu_sram_va)( 5507 stack_pointer, 5508 drmach_iocage_paddr, 5509 cpunodes[CPU->cpu_id].ecache_size, 5510 cpunodes[CPU->cpu_id].ecache_linesize, 5511 va_to_pa((void *)&drmach_xt_mb[CPU->cpu_id])); 5512 } 5513 5514 static void 5515 drmach_cpu_shutdown_self(void) 5516 { 5517 cpu_t *cp = CPU; 5518 int cpuid = cp->cpu_id; 5519 extern void flush_windows(void); 5520 5521 flush_windows(); 5522 5523 (void) spl8(); 5524 5525 ASSERT(cp->cpu_intr_actv == 0); 5526 ASSERT(cp->cpu_thread == cp->cpu_idle_thread || 5527 cp->cpu_thread == cp->cpu_startup_thread); 5528 5529 cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF; 5530 5531 drmach_cpu_stop_self(); 5532 5533 cmn_err(CE_PANIC, "CPU %d FAILED TO SHUTDOWN", cpuid); 5534 } 5535 5536 static sbd_error_t * 5537 drmach_cpu_release(drmachid_t id) 5538 { 5539 drmach_cpu_t *cp; 5540 struct cpu *cpu; 5541 sbd_error_t *err; 5542 5543 if (!DRMACH_IS_CPU_ID(id)) 5544 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5545 cp = id; 5546 5547 ASSERT(MUTEX_HELD(&cpu_lock)); 5548 5549 cpu = cpu_get(cp->cpuid); 5550 if (cpu == NULL) 5551 err = DRMACH_INTERNAL_ERROR(); 5552 else 5553 err = NULL; 5554 5555 return (err); 5556 } 5557 5558 static sbd_error_t * 5559 drmach_cpu_status(drmachid_t id, drmach_status_t *stat) 5560 { 5561 drmach_cpu_t *cp; 5562 drmach_device_t *dp; 5563 5564 ASSERT(DRMACH_IS_CPU_ID(id)); 5565 cp = id; 5566 dp = &cp->dev; 5567 5568 stat->assigned = dp->bp->assigned; 5569 stat->powered = dp->bp->powered; 5570 mutex_enter(&cpu_lock); 5571 stat->configured = (cpu_get(cp->cpuid) != NULL); 5572 mutex_exit(&cpu_lock); 5573 stat->busy = dp->busy; 5574 (void) strncpy(stat->type, dp->type, sizeof (stat->type)); 5575 stat->info[0] = '\0'; 5576 5577 return (NULL); 5578 } 5579 5580 sbd_error_t * 5581 drmach_cpu_disconnect(drmachid_t id) 5582 { 5583 if (!DRMACH_IS_CPU_ID(id)) 5584 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5585 5586 return (NULL); 5587 } 5588 5589 sbd_error_t * 5590 drmach_cpu_get_id(drmachid_t id, processorid_t *cpuid) 5591 { 5592 drmach_cpu_t *cpu; 5593 5594 if (!DRMACH_IS_CPU_ID(id)) 5595 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5596 cpu = id; 5597 5598 *cpuid = cpu->cpuid; 5599 return (NULL); 5600 } 5601 5602 sbd_error_t * 5603 drmach_cpu_get_impl(drmachid_t id, int *ip) 5604 { 5605 drmach_node_t *np; 5606 int impl; 5607 5608 if (!DRMACH_IS_CPU_ID(id)) 5609 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5610 5611 np = ((drmach_device_t *)id)->node; 5612 5613 if (np->n_getprop(np, "implementation#", &impl, sizeof (impl)) == -1) { 5614 return (DRMACH_INTERNAL_ERROR()); 5615 } 5616 5617 *ip = impl; 5618 5619 return (NULL); 5620 } 5621 5622 /* 5623 * Flush this cpu's ecache, then ensure all outstanding safari 5624 * transactions have retired. 5625 */ 5626 void 5627 drmach_cpu_flush_ecache_sync(void) 5628 { 5629 uint64_t *p; 5630 5631 ASSERT(curthread->t_bound_cpu == CPU); 5632 5633 cpu_flush_ecache(); 5634 5635 mutex_enter(&drmach_bus_sync_lock); 5636 for (p = drmach_bus_sync_list; *p; p++) 5637 (void) ldphys(*p); 5638 mutex_exit(&drmach_bus_sync_lock); 5639 5640 cpu_flush_ecache(); 5641 } 5642 5643 sbd_error_t * 5644 drmach_get_dip(drmachid_t id, dev_info_t **dip) 5645 { 5646 drmach_device_t *dp; 5647 5648 if (!DRMACH_IS_DEVICE_ID(id)) 5649 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5650 dp = id; 5651 5652 *dip = dp->node->n_getdip(dp->node); 5653 return (NULL); 5654 } 5655 5656 sbd_error_t * 5657 drmach_io_is_attached(drmachid_t id, int *yes) 5658 { 5659 drmach_device_t *dp; 5660 dev_info_t *dip; 5661 int state; 5662 5663 if (!DRMACH_IS_IO_ID(id)) 5664 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5665 dp = id; 5666 5667 dip = dp->node->n_getdip(dp->node); 5668 if (dip == NULL) { 5669 *yes = 0; 5670 return (NULL); 5671 } 5672 5673 state = ddi_get_devstate(dip); 5674 *yes = i_ddi_devi_attached(dip) || (state == DDI_DEVSTATE_UP); 5675 5676 return (NULL); 5677 } 5678 5679 static int 5680 drmach_dip_is_schizo_xmits_0_pci_b(dev_info_t *dip) 5681 { 5682 char dtype[OBP_MAXPROPNAME]; 5683 int portid; 5684 uint_t pci_csr_base; 5685 struct pci_phys_spec *regbuf = NULL; 5686 int rv, len; 5687 5688 ASSERT(dip != NULL); 5689 rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "device_type", &len); 5690 if ((rv != DDI_PROP_SUCCESS) || (len > sizeof (dtype))) 5691 return (0); 5692 5693 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, "device_type", 5694 (caddr_t)dtype, &len) == DDI_PROP_SUCCESS) { 5695 5696 if (strncmp(dtype, "pci", 3) == 0) { 5697 5698 /* 5699 * Get safari portid. All schizo/xmits 0 5700 * safari IDs end in 0x1C. 5701 */ 5702 rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "portid", 5703 &len); 5704 5705 if ((rv != DDI_PROP_SUCCESS) || 5706 (len > sizeof (portid))) 5707 return (0); 5708 5709 rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, 5710 "portid", (caddr_t)&portid, &len); 5711 5712 if (rv != DDI_PROP_SUCCESS) 5713 return (0); 5714 5715 if ((portid & 0x1F) != 0x1C) 5716 return (0); 5717 5718 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, 5719 DDI_PROP_DONTPASS, "reg", (caddr_t)®buf, 5720 &len) == DDI_PROP_SUCCESS) { 5721 5722 pci_csr_base = regbuf[0].pci_phys_mid & 5723 PCI_CONF_ADDR_MASK; 5724 kmem_free(regbuf, len); 5725 /* 5726 * All PCI B-Leafs are at configspace 0x70.0000. 5727 */ 5728 if (pci_csr_base == 0x700000) 5729 return (1); 5730 } 5731 } 5732 } 5733 return (0); 5734 } 5735 5736 #define SCHIZO_BINDING_NAME "pci108e,8001" 5737 #define XMITS_BINDING_NAME "pci108e,8002" 5738 5739 /* 5740 * Verify if the dip is an instance of MAN 'eri'. 5741 */ 5742 static int 5743 drmach_dip_is_man_eri(dev_info_t *dip) 5744 { 5745 struct pci_phys_spec *regbuf = NULL; 5746 dev_info_t *parent_dip; 5747 char *name; 5748 uint_t pci_device; 5749 uint_t pci_function; 5750 int len; 5751 5752 if (dip == NULL) 5753 return (0); 5754 /* 5755 * Verify if the parent is schizo(xmits)0 and pci B leaf. 5756 */ 5757 if (((parent_dip = ddi_get_parent(dip)) == NULL) || 5758 ((name = ddi_binding_name(parent_dip)) == NULL)) 5759 return (0); 5760 if (strcmp(name, SCHIZO_BINDING_NAME) != 0) { 5761 /* 5762 * This RIO could be on XMITS, so get the dip to 5763 * XMITS PCI Leaf. 5764 */ 5765 if ((parent_dip = ddi_get_parent(parent_dip)) == NULL) 5766 return (0); 5767 if (((name = ddi_binding_name(parent_dip)) == NULL) || 5768 (strcmp(name, XMITS_BINDING_NAME) != 0)) { 5769 return (0); 5770 } 5771 } 5772 if (!drmach_dip_is_schizo_xmits_0_pci_b(parent_dip)) 5773 return (0); 5774 /* 5775 * Finally make sure it is the MAN eri. 5776 */ 5777 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 5778 "reg", (caddr_t)®buf, &len) == DDI_PROP_SUCCESS) { 5779 5780 pci_device = PCI_REG_DEV_G(regbuf->pci_phys_hi); 5781 pci_function = PCI_REG_FUNC_G(regbuf->pci_phys_hi); 5782 kmem_free(regbuf, len); 5783 5784 /* 5785 * The network function of the RIO ASIC will always be 5786 * device 3 and function 1 ("network@3,1"). 5787 */ 5788 if ((pci_device == 3) && (pci_function == 1)) 5789 return (1); 5790 } 5791 return (0); 5792 } 5793 5794 typedef struct { 5795 int iosram_inst; 5796 dev_info_t *eri_dip; 5797 int bnum; 5798 } drmach_io_inst_t; 5799 5800 int 5801 drmach_board_find_io_insts(dev_info_t *dip, void *args) 5802 { 5803 drmach_io_inst_t *ios = (drmach_io_inst_t *)args; 5804 5805 int rv; 5806 int len; 5807 int portid; 5808 char name[OBP_MAXDRVNAME]; 5809 5810 rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "portid", &len); 5811 5812 if ((rv != DDI_PROP_SUCCESS) || (len > sizeof (portid))) { 5813 return (DDI_WALK_CONTINUE); 5814 } 5815 5816 rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 0, 5817 "portid", (caddr_t)&portid, &len); 5818 if (rv != DDI_PROP_SUCCESS) 5819 return (DDI_WALK_CONTINUE); 5820 5821 /* ignore devices that are not on this board */ 5822 if (drmach_portid2bnum(portid) != ios->bnum) 5823 return (DDI_WALK_CONTINUE); 5824 5825 if ((ios->iosram_inst < 0) || (ios->eri_dip == NULL)) { 5826 rv = ddi_getproplen(DDI_DEV_T_ANY, dip, 0, "name", &len); 5827 if (rv == DDI_PROP_SUCCESS) { 5828 5829 rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 5830 0, "name", 5831 (caddr_t)name, &len); 5832 if (rv != DDI_PROP_SUCCESS) 5833 return (DDI_WALK_CONTINUE); 5834 5835 if (strncmp("iosram", name, 6) == 0) { 5836 ios->iosram_inst = ddi_get_instance(dip); 5837 if (ios->eri_dip == NULL) 5838 return (DDI_WALK_CONTINUE); 5839 else 5840 return (DDI_WALK_TERMINATE); 5841 } else { 5842 if (drmach_dip_is_man_eri(dip)) { 5843 ASSERT(ios->eri_dip == NULL); 5844 ndi_hold_devi(dip); 5845 ios->eri_dip = dip; 5846 if (ios->iosram_inst < 0) 5847 return (DDI_WALK_CONTINUE); 5848 else 5849 return (DDI_WALK_TERMINATE); 5850 } 5851 } 5852 } 5853 } 5854 return (DDI_WALK_CONTINUE); 5855 } 5856 5857 sbd_error_t * 5858 drmach_io_pre_release(drmachid_t id) 5859 { 5860 drmach_io_inst_t ios; 5861 drmach_board_t *bp; 5862 int rv = 0; 5863 sbd_error_t *err = NULL; 5864 drmach_device_t *dp; 5865 dev_info_t *rdip; 5866 int circ; 5867 5868 if (!DRMACH_IS_IO_ID(id)) 5869 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5870 dp = id; 5871 bp = dp->bp; 5872 5873 rdip = dp->node->n_getdip(dp->node); 5874 5875 /* walk device tree to find iosram instance for the board */ 5876 ios.iosram_inst = -1; 5877 ios.eri_dip = NULL; 5878 ios.bnum = bp->bnum; 5879 5880 ndi_devi_enter(rdip, &circ); 5881 ddi_walk_devs(ddi_get_child(rdip), drmach_board_find_io_insts, 5882 (void *)&ios); 5883 5884 DRMACH_PR("drmach_io_pre_release: bnum=%d iosram=%d eri=0x%p\n", 5885 ios.bnum, ios.iosram_inst, (void *)ios.eri_dip); 5886 ndi_devi_exit(rdip, circ); 5887 5888 if (ios.eri_dip) { 5889 /* 5890 * Release hold acquired in drmach_board_find_io_insts() 5891 */ 5892 ndi_rele_devi(ios.eri_dip); 5893 } 5894 if (ios.iosram_inst >= 0) { 5895 /* call for tunnel switch */ 5896 do { 5897 DRMACH_PR("calling iosram_switchfrom(%d)\n", 5898 ios.iosram_inst); 5899 rv = iosram_switchfrom(ios.iosram_inst); 5900 if (rv) 5901 DRMACH_PR("iosram_switchfrom returned %d\n", 5902 rv); 5903 } while (rv == EAGAIN); 5904 5905 if (rv) 5906 err = drerr_new(0, ESTC_IOSWITCH, NULL); 5907 } 5908 return (err); 5909 } 5910 5911 sbd_error_t * 5912 drmach_io_unrelease(drmachid_t id) 5913 { 5914 dev_info_t *dip; 5915 sbd_error_t *err = NULL; 5916 drmach_device_t *dp; 5917 5918 if (!DRMACH_IS_IO_ID(id)) 5919 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5920 dp = id; 5921 5922 dip = dp->node->n_getdip(dp->node); 5923 5924 if (dip == NULL) 5925 err = DRMACH_INTERNAL_ERROR(); 5926 else { 5927 int (*func)(dev_info_t *dip); 5928 5929 func = (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_attach", 5930 0); 5931 5932 if (func) { 5933 drmach_io_inst_t ios; 5934 dev_info_t *pdip; 5935 int circ; 5936 5937 /* 5938 * Walk device tree to find rio dip for the board 5939 * Since we are not interested in iosram instance here, 5940 * initialize it to 0, so that the walk terminates as 5941 * soon as eri dip is found. 5942 */ 5943 ios.iosram_inst = 0; 5944 ios.eri_dip = NULL; 5945 ios.bnum = dp->bp->bnum; 5946 5947 if (pdip = ddi_get_parent(dip)) { 5948 ndi_hold_devi(pdip); 5949 ndi_devi_enter(pdip, &circ); 5950 } 5951 /* 5952 * Root node doesn't have to be held in any way. 5953 */ 5954 ddi_walk_devs(dip, drmach_board_find_io_insts, 5955 (void *)&ios); 5956 5957 if (pdip) { 5958 ndi_devi_exit(pdip, circ); 5959 ndi_rele_devi(pdip); 5960 } 5961 5962 DRMACH_PR("drmach_io_unrelease: bnum=%d eri=0x%p\n", 5963 ios.bnum, (void *)ios.eri_dip); 5964 5965 if (ios.eri_dip) { 5966 DRMACH_PR("calling man_dr_attach\n"); 5967 if ((*func)(ios.eri_dip)) 5968 err = drerr_new(0, ESTC_NWSWITCH, NULL); 5969 /* 5970 * Release hold acquired in 5971 * drmach_board_find_io_insts() 5972 */ 5973 ndi_rele_devi(ios.eri_dip); 5974 } 5975 } else 5976 DRMACH_PR("man_dr_attach NOT present\n"); 5977 } 5978 return (err); 5979 } 5980 5981 static sbd_error_t * 5982 drmach_io_release(drmachid_t id) 5983 { 5984 dev_info_t *dip; 5985 sbd_error_t *err = NULL; 5986 drmach_device_t *dp; 5987 5988 if (!DRMACH_IS_IO_ID(id)) 5989 return (drerr_new(0, ESTC_INAPPROP, NULL)); 5990 dp = id; 5991 5992 dip = dp->node->n_getdip(dp->node); 5993 5994 if (dip == NULL) 5995 err = DRMACH_INTERNAL_ERROR(); 5996 else { 5997 int (*func)(dev_info_t *dip); 5998 5999 func = (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_detach", 6000 0); 6001 6002 if (func) { 6003 drmach_io_inst_t ios; 6004 dev_info_t *pdip; 6005 int circ; 6006 6007 /* 6008 * Walk device tree to find rio dip for the board 6009 * Since we are not interested in iosram instance here, 6010 * initialize it to 0, so that the walk terminates as 6011 * soon as eri dip is found. 6012 */ 6013 ios.iosram_inst = 0; 6014 ios.eri_dip = NULL; 6015 ios.bnum = dp->bp->bnum; 6016 6017 if (pdip = ddi_get_parent(dip)) { 6018 ndi_hold_devi(pdip); 6019 ndi_devi_enter(pdip, &circ); 6020 } 6021 /* 6022 * Root node doesn't have to be held in any way. 6023 */ 6024 ddi_walk_devs(dip, drmach_board_find_io_insts, 6025 (void *)&ios); 6026 6027 if (pdip) { 6028 ndi_devi_exit(pdip, circ); 6029 ndi_rele_devi(pdip); 6030 } 6031 6032 DRMACH_PR("drmach_io_release: bnum=%d eri=0x%p\n", 6033 ios.bnum, (void *)ios.eri_dip); 6034 6035 if (ios.eri_dip) { 6036 DRMACH_PR("calling man_dr_detach\n"); 6037 if ((*func)(ios.eri_dip)) 6038 err = drerr_new(0, ESTC_NWSWITCH, NULL); 6039 /* 6040 * Release hold acquired in 6041 * drmach_board_find_io_insts() 6042 */ 6043 ndi_rele_devi(ios.eri_dip); 6044 } 6045 } else 6046 DRMACH_PR("man_dr_detach NOT present\n"); 6047 } 6048 return (err); 6049 } 6050 6051 sbd_error_t * 6052 drmach_io_post_release(drmachid_t id) 6053 { 6054 char *path; 6055 dev_info_t *rdip; 6056 drmach_device_t *dp; 6057 6058 if (!DRMACH_IS_DEVICE_ID(id)) 6059 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6060 dp = id; 6061 6062 rdip = dp->node->n_getdip(dp->node); 6063 6064 /* 6065 * Always called after drmach_unconfigure() which on Starcat 6066 * unconfigures the branch but doesn't remove it so the 6067 * dip must always exist. 6068 */ 6069 ASSERT(rdip); 6070 6071 ASSERT(e_ddi_branch_held(rdip)); 6072 #ifdef DEBUG 6073 path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 6074 (void) ddi_pathname(rdip, path); 6075 DRMACH_PR("post_release dip path is: %s\n", path); 6076 kmem_free(path, MAXPATHLEN); 6077 #endif 6078 6079 if (strcmp(dp->type, DRMACH_DEVTYPE_PCI) == 0) { 6080 if (schpc_remove_pci(rdip)) { 6081 DRMACH_PR("schpc_remove_pci failed\n"); 6082 return (drerr_new(0, ESBD_OFFLINE, NULL)); 6083 } else { 6084 DRMACH_PR("schpc_remove_pci succeeded\n"); 6085 } 6086 } 6087 6088 return (NULL); 6089 } 6090 6091 sbd_error_t * 6092 drmach_io_post_attach(drmachid_t id) 6093 { 6094 int circ; 6095 dev_info_t *dip; 6096 dev_info_t *pdip; 6097 drmach_device_t *dp; 6098 drmach_io_inst_t ios; 6099 6100 if (!DRMACH_IS_DEVICE_ID(id)) 6101 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6102 dp = id; 6103 6104 dip = dp->node->n_getdip(dp->node); 6105 6106 /* 6107 * We held the branch rooted at dip earlier, so at a minimum the 6108 * root i.e. dip must be present in the device tree. 6109 */ 6110 ASSERT(dip); 6111 6112 if (strcmp(dp->type, DRMACH_DEVTYPE_PCI) == 0) { 6113 if (schpc_add_pci(dip)) { 6114 DRMACH_PR("schpc_add_pci failed\n"); 6115 } else { 6116 DRMACH_PR("schpc_add_pci succeeded\n"); 6117 } 6118 } 6119 6120 /* 6121 * Walk device tree to find rio dip for the board 6122 * Since we are not interested in iosram instance here, 6123 * initialize it to 0, so that the walk terminates as 6124 * soon as eri dip is found. 6125 */ 6126 ios.iosram_inst = 0; 6127 ios.eri_dip = NULL; 6128 ios.bnum = dp->bp->bnum; 6129 6130 if (pdip = ddi_get_parent(dip)) { 6131 ndi_hold_devi(pdip); 6132 ndi_devi_enter(pdip, &circ); 6133 } 6134 /* 6135 * Root node doesn't have to be held in any way. 6136 */ 6137 ddi_walk_devs(dip, drmach_board_find_io_insts, (void *)&ios); 6138 if (pdip) { 6139 ndi_devi_exit(pdip, circ); 6140 ndi_rele_devi(pdip); 6141 } 6142 6143 DRMACH_PR("drmach_io_post_attach: bnum=%d eri=0x%p\n", 6144 ios.bnum, (void *)ios.eri_dip); 6145 6146 if (ios.eri_dip) { 6147 int (*func)(dev_info_t *dip); 6148 6149 func = 6150 (int (*)(dev_info_t *))kobj_getsymvalue("man_dr_attach", 0); 6151 6152 if (func) { 6153 DRMACH_PR("calling man_dr_attach\n"); 6154 (void) (*func)(ios.eri_dip); 6155 } else { 6156 DRMACH_PR("man_dr_attach NOT present\n"); 6157 } 6158 6159 /* 6160 * Release hold acquired in drmach_board_find_io_insts() 6161 */ 6162 ndi_rele_devi(ios.eri_dip); 6163 6164 } 6165 6166 return (NULL); 6167 } 6168 6169 static sbd_error_t * 6170 drmach_io_status(drmachid_t id, drmach_status_t *stat) 6171 { 6172 drmach_device_t *dp; 6173 sbd_error_t *err; 6174 int configured; 6175 6176 ASSERT(DRMACH_IS_IO_ID(id)); 6177 dp = id; 6178 6179 err = drmach_io_is_attached(id, &configured); 6180 if (err) 6181 return (err); 6182 6183 stat->assigned = dp->bp->assigned; 6184 stat->powered = dp->bp->powered; 6185 stat->configured = (configured != 0); 6186 stat->busy = dp->busy; 6187 (void) strncpy(stat->type, dp->type, sizeof (stat->type)); 6188 stat->info[0] = '\0'; 6189 6190 return (NULL); 6191 } 6192 6193 sbd_error_t * 6194 drmach_mem_init_size(drmachid_t id) 6195 { 6196 drmach_mem_t *mp; 6197 sbd_error_t *err; 6198 gdcd_t *gdcd; 6199 mem_chunk_t *chunk; 6200 uint64_t chunks, pa, mask, sz; 6201 6202 if (!DRMACH_IS_MEM_ID(id)) 6203 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6204 mp = id; 6205 6206 err = drmach_mem_get_base_physaddr(id, &pa); 6207 if (err) 6208 return (err); 6209 6210 mask = ~ (DRMACH_MEM_SLICE_SIZE - 1); 6211 pa &= mask; 6212 6213 gdcd = drmach_gdcd_new(); 6214 if (gdcd == NULL) 6215 return (DRMACH_INTERNAL_ERROR()); 6216 6217 sz = 0; 6218 chunk = gdcd->dcd_chunk_list.dcl_chunk; 6219 chunks = gdcd->dcd_chunk_list.dcl_chunks; 6220 while (chunks-- != 0) { 6221 if ((chunk->mc_base_pa & mask) == pa) { 6222 sz += chunk->mc_mbytes * 1048576; 6223 } 6224 6225 ++chunk; 6226 } 6227 mp->nbytes = sz; 6228 6229 drmach_gdcd_dispose(gdcd); 6230 return (NULL); 6231 } 6232 6233 /* 6234 * Hardware registers are organized into consecutively 6235 * addressed registers. The reg property's hi and lo fields 6236 * together describe the base address of the register set for 6237 * this memory-controller. Register descriptions and offsets 6238 * (from the base address) are as follows: 6239 * 6240 * Description Offset Size (bytes) 6241 * Memory Timing Control Register I 0x00 8 6242 * Memory Timing Control Register II 0x08 8 6243 * Memory Address Decoding Register I 0x10 8 6244 * Memory Address Decoding Register II 0x18 8 6245 * Memory Address Decoding Register III 0x20 8 6246 * Memory Address Decoding Register IV 0x28 8 6247 * Memory Address Control Register 0x30 8 6248 * Memory Timing Control Register III 0x38 8 6249 * Memory Timing Control Register IV 0x40 8 6250 * Memory Timing Control Register V 0x48 8 (Jaguar, Panther only) 6251 * EMU Activity Status Register 0x50 8 (Panther only) 6252 * 6253 * Only the Memory Address Decoding Register and EMU Activity Status 6254 * Register addresses are needed for DRMACH. 6255 */ 6256 static sbd_error_t * 6257 drmach_mem_new(drmach_device_t *proto, drmachid_t *idp) 6258 { 6259 static void drmach_mem_dispose(drmachid_t); 6260 static sbd_error_t *drmach_mem_release(drmachid_t); 6261 static sbd_error_t *drmach_mem_status(drmachid_t, drmach_status_t *); 6262 6263 sbd_error_t *err; 6264 uint64_t madr_pa; 6265 drmach_mem_t *mp; 6266 int bank, count; 6267 6268 err = drmach_read_reg_addr(proto, &madr_pa); 6269 if (err) 6270 return (err); 6271 6272 mp = kmem_zalloc(sizeof (drmach_mem_t), KM_SLEEP); 6273 bcopy(proto, &mp->dev, sizeof (mp->dev)); 6274 mp->dev.node = drmach_node_dup(proto->node); 6275 mp->dev.cm.isa = (void *)drmach_mem_new; 6276 mp->dev.cm.dispose = drmach_mem_dispose; 6277 mp->dev.cm.release = drmach_mem_release; 6278 mp->dev.cm.status = drmach_mem_status; 6279 mp->madr_pa = madr_pa; 6280 6281 (void) snprintf(mp->dev.cm.name, 6282 sizeof (mp->dev.cm.name), "%s", mp->dev.type); 6283 6284 for (count = bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 6285 uint64_t madr; 6286 6287 drmach_mem_read_madr(mp, bank, &madr); 6288 if (madr & DRMACH_MC_VALID_MASK) { 6289 count += 1; 6290 break; 6291 } 6292 } 6293 6294 /* 6295 * If none of the banks had their valid bit set, that means 6296 * post did not configure this MC to participate in the 6297 * domain. So, pretend this node does not exist by returning 6298 * a drmachid of zero. 6299 */ 6300 if (count == 0) { 6301 /* drmach_mem_dispose frees board mem list */ 6302 drmach_node_dispose(mp->dev.node); 6303 kmem_free(mp, sizeof (*mp)); 6304 *idp = (drmachid_t)0; 6305 return (NULL); 6306 } 6307 6308 /* 6309 * Only one mem unit per board is exposed to the 6310 * PIM layer. The first mem unit encountered during 6311 * tree walk is used to represent all mem units on 6312 * the same board. 6313 */ 6314 if (mp->dev.bp->mem == NULL) { 6315 /* start list of mem units on this board */ 6316 mp->dev.bp->mem = mp; 6317 6318 /* 6319 * force unum to zero since this is the only mem unit 6320 * that will be visible to the PIM layer. 6321 */ 6322 mp->dev.unum = 0; 6323 6324 /* 6325 * board memory size kept in this mem unit only 6326 */ 6327 err = drmach_mem_init_size(mp); 6328 if (err) { 6329 mp->dev.bp->mem = NULL; 6330 /* drmach_mem_dispose frees board mem list */ 6331 drmach_node_dispose(mp->dev.node); 6332 kmem_free(mp, sizeof (*mp)); 6333 *idp = (drmachid_t)0; 6334 return (NULL); 6335 } 6336 6337 /* 6338 * allow this instance (the first encountered on this board) 6339 * to be visible to the PIM layer. 6340 */ 6341 *idp = (drmachid_t)mp; 6342 } else { 6343 drmach_mem_t *lp; 6344 6345 /* hide this mem instance behind the first. */ 6346 for (lp = mp->dev.bp->mem; lp->next; lp = lp->next) 6347 ; 6348 lp->next = mp; 6349 6350 /* 6351 * hide this instance from the caller. 6352 * See drmach_board_find_devices_cb() for details. 6353 */ 6354 *idp = (drmachid_t)0; 6355 } 6356 6357 return (NULL); 6358 } 6359 6360 static void 6361 drmach_mem_dispose(drmachid_t id) 6362 { 6363 drmach_mem_t *mp, *next; 6364 drmach_board_t *bp; 6365 6366 ASSERT(DRMACH_IS_MEM_ID(id)); 6367 6368 mutex_enter(&drmach_bus_sync_lock); 6369 6370 mp = id; 6371 bp = mp->dev.bp; 6372 6373 do { 6374 if (mp->dev.node) 6375 drmach_node_dispose(mp->dev.node); 6376 6377 next = mp->next; 6378 kmem_free(mp, sizeof (*mp)); 6379 mp = next; 6380 } while (mp); 6381 6382 bp->mem = NULL; 6383 6384 drmach_bus_sync_list_update(); 6385 mutex_exit(&drmach_bus_sync_lock); 6386 } 6387 6388 sbd_error_t * 6389 drmach_mem_add_span(drmachid_t id, uint64_t basepa, uint64_t size) 6390 { 6391 pfn_t basepfn = (pfn_t)(basepa >> PAGESHIFT); 6392 pgcnt_t npages = (pgcnt_t)(size >> PAGESHIFT); 6393 int rv; 6394 6395 ASSERT(size != 0); 6396 6397 if (!DRMACH_IS_MEM_ID(id)) 6398 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6399 6400 rv = kcage_range_add(basepfn, npages, KCAGE_DOWN); 6401 if (rv == ENOMEM) { 6402 cmn_err(CE_WARN, "%lu megabytes not available" 6403 " to kernel cage", size >> 20); 6404 } else if (rv != 0) { 6405 /* catch this in debug kernels */ 6406 ASSERT(0); 6407 6408 cmn_err(CE_WARN, "unexpected kcage_range_add" 6409 " return value %d", rv); 6410 } 6411 6412 return (NULL); 6413 } 6414 6415 sbd_error_t * 6416 drmach_mem_del_span(drmachid_t id, uint64_t basepa, uint64_t size) 6417 { 6418 pfn_t basepfn = (pfn_t)(basepa >> PAGESHIFT); 6419 pgcnt_t npages = (pgcnt_t)(size >> PAGESHIFT); 6420 int rv; 6421 6422 if (!DRMACH_IS_MEM_ID(id)) 6423 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6424 6425 if (size > 0) { 6426 rv = kcage_range_delete_post_mem_del(basepfn, npages); 6427 if (rv != 0) { 6428 cmn_err(CE_WARN, 6429 "unexpected kcage_range_delete_post_mem_del" 6430 " return value %d", rv); 6431 return (DRMACH_INTERNAL_ERROR()); 6432 } 6433 } 6434 6435 return (NULL); 6436 } 6437 6438 sbd_error_t * 6439 drmach_mem_disable(drmachid_t id) 6440 { 6441 if (!DRMACH_IS_MEM_ID(id)) 6442 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6443 else 6444 return (NULL); 6445 } 6446 6447 sbd_error_t * 6448 drmach_mem_enable(drmachid_t id) 6449 { 6450 if (!DRMACH_IS_MEM_ID(id)) 6451 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6452 else 6453 return (NULL); 6454 } 6455 6456 sbd_error_t * 6457 drmach_mem_get_alignment(drmachid_t id, uint64_t *mask) 6458 { 6459 #define MB(mb) ((mb) * 1048576ull) 6460 6461 static struct { 6462 uint_t uk; 6463 uint64_t segsz; 6464 } uk2segsz[] = { 6465 { 0x003, MB(256) }, 6466 { 0x007, MB(512) }, 6467 { 0x00f, MB(1024) }, 6468 { 0x01f, MB(2048) }, 6469 { 0x03f, MB(4096) }, 6470 { 0x07f, MB(8192) }, 6471 { 0x0ff, MB(16384) }, 6472 { 0x1ff, MB(32768) }, 6473 { 0x3ff, MB(65536) }, 6474 { 0x7ff, MB(131072) } 6475 }; 6476 static int len = sizeof (uk2segsz) / sizeof (uk2segsz[0]); 6477 6478 #undef MB 6479 6480 uint64_t largest_sz = 0; 6481 drmach_mem_t *mp; 6482 6483 if (!DRMACH_IS_MEM_ID(id)) 6484 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6485 6486 /* prime the result with a default value */ 6487 *mask = (DRMACH_MEM_SLICE_SIZE - 1); 6488 6489 for (mp = id; mp; mp = mp->next) { 6490 int bank; 6491 6492 for (bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 6493 int i; 6494 uint_t uk; 6495 uint64_t madr; 6496 6497 /* get register value, extract uk and normalize */ 6498 drmach_mem_read_madr(mp, bank, &madr); 6499 6500 if (!(madr & DRMACH_MC_VALID_MASK)) 6501 continue; 6502 6503 uk = DRMACH_MC_UK(madr); 6504 6505 /* match uk value */ 6506 for (i = 0; i < len; i++) 6507 if (uk == uk2segsz[i].uk) 6508 break; 6509 6510 if (i < len) { 6511 uint64_t sz = uk2segsz[i].segsz; 6512 6513 /* 6514 * remember largest segment size, 6515 * update mask result 6516 */ 6517 if (sz > largest_sz) { 6518 largest_sz = sz; 6519 *mask = sz - 1; 6520 } 6521 } else { 6522 /* 6523 * uk not in table, punt using 6524 * entire slice size. no longer any 6525 * reason to check other banks. 6526 */ 6527 *mask = (DRMACH_MEM_SLICE_SIZE - 1); 6528 return (NULL); 6529 } 6530 } 6531 } 6532 6533 return (NULL); 6534 } 6535 6536 sbd_error_t * 6537 drmach_mem_get_base_physaddr(drmachid_t id, uint64_t *base_addr) 6538 { 6539 drmach_mem_t *mp; 6540 6541 if (!DRMACH_IS_MEM_ID(id)) 6542 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6543 6544 *base_addr = (uint64_t)-1; 6545 for (mp = id; mp; mp = mp->next) { 6546 int bank; 6547 6548 for (bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 6549 uint64_t addr, madr; 6550 6551 drmach_mem_read_madr(mp, bank, &madr); 6552 if (madr & DRMACH_MC_VALID_MASK) { 6553 addr = DRMACH_MC_UM_TO_PA(madr) | 6554 DRMACH_MC_LM_TO_PA(madr); 6555 6556 if (addr < *base_addr) 6557 *base_addr = addr; 6558 } 6559 } 6560 } 6561 6562 /* should not happen, but ... */ 6563 if (*base_addr == (uint64_t)-1) 6564 return (DRMACH_INTERNAL_ERROR()); 6565 6566 return (NULL); 6567 } 6568 6569 void 6570 drmach_bus_sync_list_update(void) 6571 { 6572 int rv, idx, cnt = 0; 6573 drmachid_t id; 6574 6575 ASSERT(MUTEX_HELD(&drmach_bus_sync_lock)); 6576 6577 rv = drmach_array_first(drmach_boards, &idx, &id); 6578 while (rv == 0) { 6579 drmach_board_t *bp = id; 6580 drmach_mem_t *mp = bp->mem; 6581 6582 while (mp) { 6583 int bank; 6584 6585 for (bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 6586 uint64_t madr; 6587 6588 drmach_mem_read_madr(mp, bank, &madr); 6589 if (madr & DRMACH_MC_VALID_MASK) { 6590 uint64_t pa; 6591 6592 pa = DRMACH_MC_UM_TO_PA(madr); 6593 pa |= DRMACH_MC_LM_TO_PA(madr); 6594 6595 /* 6596 * The list is zero terminated. 6597 * Offset the pa by a doubleword 6598 * to avoid confusing a pa value of 6599 * of zero with the terminator. 6600 */ 6601 pa += sizeof (uint64_t); 6602 6603 drmach_bus_sync_list[cnt++] = pa; 6604 } 6605 } 6606 6607 mp = mp->next; 6608 } 6609 6610 rv = drmach_array_next(drmach_boards, &idx, &id); 6611 } 6612 6613 drmach_bus_sync_list[cnt] = 0; 6614 } 6615 6616 sbd_error_t * 6617 drmach_mem_get_memlist(drmachid_t id, struct memlist **ml) 6618 { 6619 sbd_error_t *err; 6620 struct memlist *mlist; 6621 gdcd_t *gdcd; 6622 mem_chunk_t *chunk; 6623 uint64_t chunks, pa, mask; 6624 6625 err = drmach_mem_get_base_physaddr(id, &pa); 6626 if (err) 6627 return (err); 6628 6629 gdcd = drmach_gdcd_new(); 6630 if (gdcd == NULL) 6631 return (DRMACH_INTERNAL_ERROR()); 6632 6633 mask = ~ (DRMACH_MEM_SLICE_SIZE - 1); 6634 pa &= mask; 6635 6636 mlist = NULL; 6637 chunk = gdcd->dcd_chunk_list.dcl_chunk; 6638 chunks = gdcd->dcd_chunk_list.dcl_chunks; 6639 while (chunks-- != 0) { 6640 if ((chunk->mc_base_pa & mask) == pa) { 6641 mlist = memlist_add_span(mlist, chunk->mc_base_pa, 6642 chunk->mc_mbytes * 1048576); 6643 } 6644 6645 ++chunk; 6646 } 6647 6648 drmach_gdcd_dispose(gdcd); 6649 6650 #ifdef DEBUG 6651 DRMACH_PR("GDCD derived memlist:"); 6652 memlist_dump(mlist); 6653 #endif 6654 6655 *ml = mlist; 6656 return (NULL); 6657 } 6658 6659 sbd_error_t * 6660 drmach_mem_get_size(drmachid_t id, uint64_t *bytes) 6661 { 6662 drmach_mem_t *mp; 6663 6664 if (!DRMACH_IS_MEM_ID(id)) 6665 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6666 mp = id; 6667 6668 ASSERT(mp->nbytes != 0); 6669 *bytes = mp->nbytes; 6670 6671 return (NULL); 6672 } 6673 6674 sbd_error_t * 6675 drmach_mem_get_slice_size(drmachid_t id, uint64_t *bytes) 6676 { 6677 sbd_error_t *err; 6678 drmach_device_t *mp; 6679 6680 if (!DRMACH_IS_MEM_ID(id)) 6681 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6682 mp = id; 6683 6684 switch (DRMACH_BNUM2SLOT(mp->bp->bnum)) { 6685 case 0: *bytes = DRMACH_MEM_USABLE_SLICE_SIZE; 6686 err = NULL; 6687 break; 6688 6689 case 1: *bytes = 0; 6690 err = NULL; 6691 break; 6692 6693 default: 6694 err = DRMACH_INTERNAL_ERROR(); 6695 break; 6696 } 6697 6698 return (err); 6699 } 6700 6701 processorid_t drmach_mem_cpu_affinity_nail; 6702 6703 processorid_t 6704 drmach_mem_cpu_affinity(drmachid_t id) 6705 { 6706 drmach_device_t *mp; 6707 drmach_board_t *bp; 6708 processorid_t cpuid; 6709 6710 if (!DRMACH_IS_MEM_ID(id)) 6711 return (CPU_CURRENT); 6712 6713 if (drmach_mem_cpu_affinity_nail) { 6714 cpuid = drmach_mem_cpu_affinity_nail; 6715 6716 if (cpuid < 0 || cpuid > NCPU) 6717 return (CPU_CURRENT); 6718 6719 mutex_enter(&cpu_lock); 6720 if (cpu[cpuid] == NULL || !CPU_ACTIVE(cpu[cpuid])) 6721 cpuid = CPU_CURRENT; 6722 mutex_exit(&cpu_lock); 6723 6724 return (cpuid); 6725 } 6726 6727 /* try to choose a proc on the target board */ 6728 mp = id; 6729 bp = mp->bp; 6730 if (bp->devices) { 6731 int rv; 6732 int d_idx; 6733 drmachid_t d_id; 6734 6735 rv = drmach_array_first(bp->devices, &d_idx, &d_id); 6736 while (rv == 0) { 6737 if (DRMACH_IS_CPU_ID(d_id)) { 6738 drmach_cpu_t *cp = d_id; 6739 6740 mutex_enter(&cpu_lock); 6741 cpuid = cp->cpuid; 6742 if (cpu[cpuid] && CPU_ACTIVE(cpu[cpuid])) { 6743 mutex_exit(&cpu_lock); 6744 return (cpuid); 6745 } else { 6746 mutex_exit(&cpu_lock); 6747 } 6748 } 6749 6750 rv = drmach_array_next(bp->devices, &d_idx, &d_id); 6751 } 6752 } 6753 6754 /* otherwise, this proc, wherever it is */ 6755 return (CPU_CURRENT); 6756 } 6757 6758 static sbd_error_t * 6759 drmach_mem_release(drmachid_t id) 6760 { 6761 if (!DRMACH_IS_MEM_ID(id)) 6762 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6763 return (NULL); 6764 } 6765 6766 static sbd_error_t * 6767 drmach_mem_status(drmachid_t id, drmach_status_t *stat) 6768 { 6769 drmach_mem_t *mp; 6770 sbd_error_t *err; 6771 uint64_t pa, slice_size; 6772 struct memlist *ml; 6773 6774 ASSERT(DRMACH_IS_MEM_ID(id)); 6775 mp = id; 6776 6777 /* get starting physical address of target memory */ 6778 err = drmach_mem_get_base_physaddr(id, &pa); 6779 if (err) 6780 return (err); 6781 6782 /* round down to slice boundary */ 6783 slice_size = DRMACH_MEM_SLICE_SIZE; 6784 pa &= ~ (slice_size - 1); 6785 6786 /* stop at first span that is in slice */ 6787 memlist_read_lock(); 6788 for (ml = phys_install; ml; ml = ml->next) 6789 if (ml->address >= pa && ml->address < pa + slice_size) 6790 break; 6791 memlist_read_unlock(); 6792 6793 stat->assigned = mp->dev.bp->assigned; 6794 stat->powered = mp->dev.bp->powered; 6795 stat->configured = (ml != NULL); 6796 stat->busy = mp->dev.busy; 6797 (void) strncpy(stat->type, mp->dev.type, sizeof (stat->type)); 6798 stat->info[0] = '\0'; 6799 6800 return (NULL); 6801 } 6802 6803 sbd_error_t * 6804 drmach_board_deprobe(drmachid_t id) 6805 { 6806 drmach_board_t *bp; 6807 sbd_error_t *err = NULL; 6808 6809 if (!DRMACH_IS_BOARD_ID(id)) 6810 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6811 bp = id; 6812 6813 if (bp->tree) { 6814 drmach_node_dispose(bp->tree); 6815 bp->tree = NULL; 6816 } 6817 if (bp->devices) { 6818 drmach_array_dispose(bp->devices, drmach_device_dispose); 6819 bp->devices = NULL; 6820 bp->mem = NULL; /* TODO: still needed? */ 6821 } 6822 return (err); 6823 } 6824 6825 /*ARGSUSED1*/ 6826 static sbd_error_t * 6827 drmach_pt_showlpa(drmachid_t id, drmach_opts_t *opts) 6828 { 6829 drmach_device_t *dp; 6830 uint64_t val; 6831 int err = 1; 6832 6833 if (DRMACH_IS_CPU_ID(id)) { 6834 drmach_cpu_t *cp = id; 6835 if (drmach_cpu_read_scr(cp, &val)) 6836 err = 0; 6837 } else if (DRMACH_IS_IO_ID(id) && ((drmach_io_t *)id)->scsr_pa != 0) { 6838 drmach_io_t *io = id; 6839 val = lddphysio(io->scsr_pa); 6840 err = 0; 6841 } 6842 if (err) 6843 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6844 6845 dp = id; 6846 uprintf("showlpa %s::%s portid %d, base pa %lx, bound pa %lx\n", 6847 dp->bp->cm.name, 6848 dp->cm.name, 6849 dp->portid, 6850 (long)(DRMACH_LPA_BASE_TO_PA(val)), 6851 (long)(DRMACH_LPA_BND_TO_PA(val))); 6852 6853 return (NULL); 6854 } 6855 6856 /*ARGSUSED*/ 6857 static sbd_error_t * 6858 drmach_pt_ikprobe(drmachid_t id, drmach_opts_t *opts) 6859 { 6860 drmach_board_t *bp = (drmach_board_t *)id; 6861 sbd_error_t *err; 6862 sc_gptwocfg_cookie_t scc; 6863 6864 if (!DRMACH_IS_BOARD_ID(id)) 6865 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6866 6867 /* do saf configurator stuff */ 6868 DRMACH_PR("calling sc_probe_board for bnum=%d\n", bp->bnum); 6869 scc = sc_probe_board(bp->bnum); 6870 if (scc == NULL) { 6871 err = drerr_new(0, ESTC_PROBE, bp->cm.name); 6872 return (err); 6873 } 6874 6875 return (err); 6876 } 6877 6878 /*ARGSUSED*/ 6879 static sbd_error_t * 6880 drmach_pt_ikdeprobe(drmachid_t id, drmach_opts_t *opts) 6881 { 6882 drmach_board_t *bp; 6883 sbd_error_t *err = NULL; 6884 sc_gptwocfg_cookie_t scc; 6885 6886 if (!DRMACH_IS_BOARD_ID(id)) 6887 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6888 bp = id; 6889 6890 cmn_err(CE_CONT, "DR: in-kernel unprobe board %d\n", bp->bnum); 6891 scc = sc_unprobe_board(bp->bnum); 6892 if (scc != NULL) { 6893 err = drerr_new(0, ESTC_DEPROBE, bp->cm.name); 6894 } 6895 6896 if (err == NULL) 6897 err = drmach_board_deprobe(id); 6898 6899 return (err); 6900 } 6901 6902 static sbd_error_t * 6903 drmach_pt_readmem(drmachid_t id, drmach_opts_t *opts) 6904 { 6905 _NOTE(ARGUNUSED(id)) 6906 _NOTE(ARGUNUSED(opts)) 6907 6908 struct memlist *ml; 6909 uint64_t src_pa; 6910 uint64_t dst_pa; 6911 uint64_t dst; 6912 6913 dst_pa = va_to_pa(&dst); 6914 6915 memlist_read_lock(); 6916 for (ml = phys_install; ml; ml = ml->next) { 6917 uint64_t nbytes; 6918 6919 src_pa = ml->address; 6920 nbytes = ml->size; 6921 6922 while (nbytes != 0ull) { 6923 6924 /* copy 32 bytes at src_pa to dst_pa */ 6925 bcopy32_il(src_pa, dst_pa); 6926 6927 /* increment by 32 bytes */ 6928 src_pa += (4 * sizeof (uint64_t)); 6929 6930 /* decrement by 32 bytes */ 6931 nbytes -= (4 * sizeof (uint64_t)); 6932 } 6933 } 6934 memlist_read_unlock(); 6935 6936 return (NULL); 6937 } 6938 6939 static sbd_error_t * 6940 drmach_pt_recovercpu(drmachid_t id, drmach_opts_t *opts) 6941 { 6942 _NOTE(ARGUNUSED(opts)) 6943 6944 drmach_cpu_t *cp; 6945 6946 if (!DRMACH_IS_CPU_ID(id)) 6947 return (drerr_new(0, ESTC_INAPPROP, NULL)); 6948 cp = id; 6949 6950 mutex_enter(&cpu_lock); 6951 (void) drmach_iocage_cpu_return(&(cp->dev), 6952 CPU_ENABLE | CPU_EXISTS | CPU_READY | CPU_RUNNING); 6953 mutex_exit(&cpu_lock); 6954 6955 return (NULL); 6956 } 6957 6958 /* 6959 * Starcat DR passthrus are for debugging purposes only. 6960 */ 6961 static struct { 6962 const char *name; 6963 sbd_error_t *(*handler)(drmachid_t id, drmach_opts_t *opts); 6964 } drmach_pt_arr[] = { 6965 { "showlpa", drmach_pt_showlpa }, 6966 { "ikprobe", drmach_pt_ikprobe }, 6967 { "ikdeprobe", drmach_pt_ikdeprobe }, 6968 { "readmem", drmach_pt_readmem }, 6969 { "recovercpu", drmach_pt_recovercpu }, 6970 6971 /* the following line must always be last */ 6972 { NULL, NULL } 6973 }; 6974 6975 /*ARGSUSED*/ 6976 sbd_error_t * 6977 drmach_passthru(drmachid_t id, drmach_opts_t *opts) 6978 { 6979 int i; 6980 sbd_error_t *err; 6981 6982 i = 0; 6983 while (drmach_pt_arr[i].name != NULL) { 6984 int len = strlen(drmach_pt_arr[i].name); 6985 6986 if (strncmp(drmach_pt_arr[i].name, opts->copts, len) == 0) 6987 break; 6988 6989 i += 1; 6990 } 6991 6992 if (drmach_pt_arr[i].name == NULL) 6993 err = drerr_new(0, ESTC_UNKPTCMD, opts->copts); 6994 else 6995 err = (*drmach_pt_arr[i].handler)(id, opts); 6996 6997 return (err); 6998 } 6999 7000 sbd_error_t * 7001 drmach_release(drmachid_t id) 7002 { 7003 drmach_common_t *cp; 7004 7005 if (!DRMACH_IS_DEVICE_ID(id)) 7006 return (drerr_new(0, ESTC_INAPPROP, NULL)); 7007 cp = id; 7008 7009 return (cp->release(id)); 7010 } 7011 7012 sbd_error_t * 7013 drmach_status(drmachid_t id, drmach_status_t *stat) 7014 { 7015 drmach_common_t *cp; 7016 sbd_error_t *err; 7017 7018 rw_enter(&drmach_boards_rwlock, RW_READER); 7019 7020 if (!DRMACH_IS_ID(id)) { 7021 rw_exit(&drmach_boards_rwlock); 7022 return (drerr_new(0, ESTC_NOTID, NULL)); 7023 } 7024 7025 cp = id; 7026 7027 err = cp->status(id, stat); 7028 rw_exit(&drmach_boards_rwlock); 7029 return (err); 7030 } 7031 7032 static sbd_error_t * 7033 drmach_i_status(drmachid_t id, drmach_status_t *stat) 7034 { 7035 drmach_common_t *cp; 7036 7037 if (!DRMACH_IS_ID(id)) 7038 return (drerr_new(0, ESTC_NOTID, NULL)); 7039 cp = id; 7040 7041 return (cp->status(id, stat)); 7042 } 7043 7044 /*ARGSUSED*/ 7045 sbd_error_t * 7046 drmach_unconfigure(drmachid_t id, int flags) 7047 { 7048 drmach_device_t *dp; 7049 dev_info_t *rdip; 7050 7051 char name[OBP_MAXDRVNAME]; 7052 int rv; 7053 7054 /* 7055 * Since CPU nodes are not configured, it is 7056 * necessary to skip the unconfigure step as 7057 * well. 7058 */ 7059 if (DRMACH_IS_CPU_ID(id)) { 7060 return (NULL); 7061 } 7062 7063 for (; id; ) { 7064 dev_info_t *fdip = NULL; 7065 7066 if (!DRMACH_IS_DEVICE_ID(id)) 7067 return (drerr_new(0, ESTC_INAPPROP, NULL)); 7068 dp = id; 7069 7070 rdip = dp->node->n_getdip(dp->node); 7071 7072 /* 7073 * drmach_unconfigure() is always called on a configured branch. 7074 * So the root of the branch was held earlier and must exist. 7075 */ 7076 ASSERT(rdip); 7077 7078 DRMACH_PR("drmach_unconfigure: unconfiguring DDI branch"); 7079 7080 rv = dp->node->n_getprop(dp->node, 7081 "name", name, OBP_MAXDRVNAME); 7082 7083 /* The node must have a name */ 7084 if (rv) 7085 return (0); 7086 7087 if (drmach_name2type_idx(name) < 0) { 7088 if (DRMACH_IS_MEM_ID(id)) { 7089 drmach_mem_t *mp = id; 7090 id = mp->next; 7091 } else { 7092 id = NULL; 7093 } 7094 continue; 7095 } 7096 7097 /* 7098 * NOTE: FORCE flag is no longer needed under devfs 7099 */ 7100 ASSERT(e_ddi_branch_held(rdip)); 7101 if (e_ddi_branch_unconfigure(rdip, &fdip, 0) != 0) { 7102 sbd_error_t *err = NULL; 7103 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP); 7104 7105 /* 7106 * If non-NULL, fdip is returned held and must be 7107 * released. 7108 */ 7109 if (fdip != NULL) { 7110 (void) ddi_pathname(fdip, path); 7111 ddi_release_devi(fdip); 7112 } else { 7113 (void) ddi_pathname(rdip, path); 7114 } 7115 7116 err = drerr_new(1, ESTC_DRVFAIL, path); 7117 7118 kmem_free(path, MAXPATHLEN); 7119 7120 /* 7121 * If we were unconfiguring an IO board, a call was 7122 * made to man_dr_detach. We now need to call 7123 * man_dr_attach to regain man use of the eri. 7124 */ 7125 if (DRMACH_IS_IO_ID(id)) { 7126 int (*func)(dev_info_t *dip); 7127 7128 func = (int (*)(dev_info_t *))kobj_getsymvalue\ 7129 ("man_dr_attach", 0); 7130 7131 if (func) { 7132 drmach_io_inst_t ios; 7133 dev_info_t *pdip; 7134 int circ; 7135 7136 /* 7137 * Walk device tree to find rio dip for 7138 * the board 7139 * Since we are not interested in iosram 7140 * instance here, initialize it to 0, so 7141 * that the walk terminates as soon as 7142 * eri dip is found. 7143 */ 7144 ios.iosram_inst = 0; 7145 ios.eri_dip = NULL; 7146 ios.bnum = dp->bp->bnum; 7147 7148 if (pdip = ddi_get_parent(rdip)) { 7149 ndi_hold_devi(pdip); 7150 ndi_devi_enter(pdip, &circ); 7151 } 7152 /* 7153 * Root node doesn't have to be held in 7154 * any way. 7155 */ 7156 ASSERT(e_ddi_branch_held(rdip)); 7157 ddi_walk_devs(rdip, 7158 drmach_board_find_io_insts, 7159 (void *)&ios); 7160 7161 DRMACH_PR("drmach_unconfigure: bnum=%d" 7162 " eri=0x%p\n", 7163 ios.bnum, (void *)ios.eri_dip); 7164 7165 if (pdip) { 7166 ndi_devi_exit(pdip, circ); 7167 ndi_rele_devi(pdip); 7168 } 7169 7170 if (ios.eri_dip) { 7171 DRMACH_PR("calling" 7172 " man_dr_attach\n"); 7173 (void) (*func)(ios.eri_dip); 7174 /* 7175 * Release hold acquired in 7176 * drmach_board_find_io_insts() 7177 */ 7178 ndi_rele_devi(ios.eri_dip); 7179 } 7180 } 7181 } 7182 return (err); 7183 } 7184 7185 if (DRMACH_IS_MEM_ID(id)) { 7186 drmach_mem_t *mp = id; 7187 id = mp->next; 7188 } else { 7189 id = NULL; 7190 } 7191 } 7192 7193 return (NULL); 7194 } 7195 7196 /* 7197 * drmach interfaces to legacy Starfire platmod logic 7198 * linkage via runtime symbol look up, called from plat_cpu_power* 7199 */ 7200 7201 /* 7202 * Start up a cpu. It is possible that we're attempting to restart 7203 * the cpu after an UNCONFIGURE in which case the cpu will be 7204 * spinning in its cache. So, all we have to do is wakeup him up. 7205 * Under normal circumstances the cpu will be coming from a previous 7206 * CONNECT and thus will be spinning in OBP. In both cases, the 7207 * startup sequence is the same. 7208 */ 7209 int 7210 drmach_cpu_poweron(struct cpu *cp) 7211 { 7212 DRMACH_PR("drmach_cpu_poweron: starting cpuid %d\n", cp->cpu_id); 7213 7214 ASSERT(MUTEX_HELD(&cpu_lock)); 7215 7216 if (drmach_cpu_start(cp) != 0) 7217 return (EBUSY); 7218 else 7219 return (0); 7220 } 7221 7222 int 7223 drmach_cpu_poweroff(struct cpu *cp) 7224 { 7225 int ntries; 7226 processorid_t cpuid; 7227 void drmach_cpu_shutdown_self(void); 7228 7229 DRMACH_PR("drmach_cpu_poweroff: stopping cpuid %d\n", cp->cpu_id); 7230 7231 ASSERT(MUTEX_HELD(&cpu_lock)); 7232 7233 /* 7234 * XXX CHEETAH SUPPORT 7235 * for cheetah, we need to grab the iocage lock since iocage 7236 * memory is used for e$ flush. 7237 */ 7238 if (drmach_is_cheetah) { 7239 mutex_enter(&drmach_iocage_lock); 7240 while (drmach_iocage_is_busy) 7241 cv_wait(&drmach_iocage_cv, &drmach_iocage_lock); 7242 drmach_iocage_is_busy = 1; 7243 drmach_iocage_mem_scrub(ecache_size * 2); 7244 mutex_exit(&drmach_iocage_lock); 7245 } 7246 7247 cpuid = cp->cpu_id; 7248 7249 /* 7250 * Set affinity to ensure consistent reading and writing of 7251 * drmach_xt_mb[cpuid] by one "master" CPU directing 7252 * the shutdown of the target CPU. 7253 */ 7254 affinity_set(CPU->cpu_id); 7255 7256 /* 7257 * Capture all CPUs (except for detaching proc) to prevent 7258 * crosscalls to the detaching proc until it has cleared its 7259 * bit in cpu_ready_set. 7260 * 7261 * The CPUs remain paused and the prom_mutex is known to be free. 7262 * This prevents blocking when doing prom IEEE-1275 calls at a 7263 * high PIL level. 7264 */ 7265 promsafe_pause_cpus(); 7266 7267 /* 7268 * Quiesce interrupts on the target CPU. We do this by setting 7269 * the CPU 'not ready'- (i.e. removing the CPU from cpu_ready_set) to 7270 * prevent it from receiving cross calls and cross traps. 7271 * This prevents the processor from receiving any new soft interrupts. 7272 */ 7273 mp_cpu_quiesce(cp); 7274 7275 (void) prom_hotremovecpu(cpuid); 7276 7277 start_cpus(); 7278 7279 /* setup xt_mb, will be cleared by drmach_shutdown_asm when ready */ 7280 drmach_xt_mb[cpuid] = 0x80; 7281 7282 xt_one_unchecked(cp->cpu_id, (xcfunc_t *)idle_stop_xcall, 7283 (uint64_t)drmach_cpu_shutdown_self, NULL); 7284 7285 ntries = drmach_cpu_ntries; 7286 while (drmach_xt_mb[cpuid] && ntries) { 7287 DELAY(drmach_cpu_delay); 7288 ntries--; 7289 } 7290 7291 drmach_xt_mb[cpuid] = 0; /* steal the cache line back */ 7292 7293 membar_sync(); /* make sure copy-back retires */ 7294 7295 affinity_clear(); 7296 7297 /* 7298 * XXX CHEETAH SUPPORT 7299 */ 7300 if (drmach_is_cheetah) { 7301 mutex_enter(&drmach_iocage_lock); 7302 drmach_iocage_mem_scrub(ecache_size * 2); 7303 drmach_iocage_is_busy = 0; 7304 cv_signal(&drmach_iocage_cv); 7305 mutex_exit(&drmach_iocage_lock); 7306 } 7307 7308 DRMACH_PR("waited %d out of %d tries for " 7309 "drmach_cpu_shutdown_self on cpu%d", 7310 drmach_cpu_ntries - ntries, drmach_cpu_ntries, cp->cpu_id); 7311 7312 /* 7313 * Do this here instead of drmach_cpu_shutdown_self() to 7314 * avoid an assertion failure panic in turnstile.c. 7315 */ 7316 CPU_SIGNATURE(OS_SIG, SIGST_DETACHED, SIGSUBST_NULL, cpuid); 7317 7318 return (0); 7319 } 7320 7321 void 7322 drmach_iocage_mem_scrub(uint64_t nbytes) 7323 { 7324 extern uint32_t drmach_bc_bzero(void*, size_t); 7325 uint32_t rv; 7326 7327 ASSERT(MUTEX_HELD(&cpu_lock)); 7328 7329 affinity_set(CPU->cpu_id); 7330 7331 rv = drmach_bc_bzero(drmach_iocage_vaddr, nbytes); 7332 if (rv != 0) { 7333 DRMACH_PR( 7334 "iocage scrub failed, drmach_bc_bzero returned %d\n", rv); 7335 rv = drmach_bc_bzero(drmach_iocage_vaddr, drmach_iocage_size); 7336 if (rv != 0) 7337 cmn_err(CE_PANIC, 7338 "iocage scrub failed, drmach_bc_bzero rv=%d\n", 7339 rv); 7340 } 7341 7342 cpu_flush_ecache(); 7343 7344 affinity_clear(); 7345 } 7346 7347 #define ALIGN(x, a) ((a) == 0 ? (uintptr_t)(x) : \ 7348 (((uintptr_t)(x) + (uintptr_t)(a) - 1l) & ~((uintptr_t)(a) - 1l))) 7349 7350 static sbd_error_t * 7351 drmach_iocage_mem_get(dr_testboard_req_t *tbrq) 7352 { 7353 pfn_t basepfn; 7354 pgcnt_t npages; 7355 extern int memscrub_delete_span(pfn_t, pgcnt_t); 7356 uint64_t drmach_iocage_paddr_mbytes; 7357 7358 ASSERT(drmach_iocage_paddr != -1); 7359 7360 basepfn = (pfn_t)(drmach_iocage_paddr >> PAGESHIFT); 7361 npages = (pgcnt_t)(drmach_iocage_size >> PAGESHIFT); 7362 7363 (void) memscrub_delete_span(basepfn, npages); 7364 7365 mutex_enter(&cpu_lock); 7366 drmach_iocage_mem_scrub(drmach_iocage_size); 7367 mutex_exit(&cpu_lock); 7368 7369 /* 7370 * HPOST wants the address of the cage to be 64 megabyte-aligned 7371 * and in megabyte units. 7372 * The size of the cage is also in megabyte units. 7373 */ 7374 ASSERT(drmach_iocage_paddr == ALIGN(drmach_iocage_paddr, 0x4000000)); 7375 7376 drmach_iocage_paddr_mbytes = drmach_iocage_paddr / 0x100000; 7377 7378 tbrq->memaddrhi = (uint32_t)(drmach_iocage_paddr_mbytes >> 32); 7379 tbrq->memaddrlo = (uint32_t)drmach_iocage_paddr_mbytes; 7380 tbrq->memlen = drmach_iocage_size / 0x100000; 7381 7382 DRMACH_PR("drmach_iocage_mem_get: hi: 0x%x", tbrq->memaddrhi); 7383 DRMACH_PR("drmach_iocage_mem_get: lo: 0x%x", tbrq->memaddrlo); 7384 DRMACH_PR("drmach_iocage_mem_get: size: 0x%x", tbrq->memlen); 7385 7386 return (NULL); 7387 } 7388 7389 static sbd_error_t * 7390 drmach_iocage_mem_return(dr_testboard_reply_t *tbr) 7391 { 7392 _NOTE(ARGUNUSED(tbr)) 7393 7394 pfn_t basepfn; 7395 pgcnt_t npages; 7396 extern int memscrub_add_span(pfn_t, pgcnt_t); 7397 7398 ASSERT(drmach_iocage_paddr != -1); 7399 7400 basepfn = (pfn_t)(drmach_iocage_paddr >> PAGESHIFT); 7401 npages = (pgcnt_t)(drmach_iocage_size >> PAGESHIFT); 7402 7403 (void) memscrub_add_span(basepfn, npages); 7404 7405 mutex_enter(&cpu_lock); 7406 mutex_enter(&drmach_iocage_lock); 7407 drmach_iocage_mem_scrub(drmach_iocage_size); 7408 drmach_iocage_is_busy = 0; 7409 cv_signal(&drmach_iocage_cv); 7410 mutex_exit(&drmach_iocage_lock); 7411 mutex_exit(&cpu_lock); 7412 7413 return (NULL); 7414 } 7415 7416 static int 7417 drmach_cpu_intr_disable(cpu_t *cp) 7418 { 7419 if (cpu_intr_disable(cp) != 0) 7420 return (-1); 7421 return (0); 7422 } 7423 7424 static int 7425 drmach_iocage_cpu_acquire(drmach_device_t *dp, cpu_flag_t *oflags) 7426 { 7427 struct cpu *cp; 7428 processorid_t cpuid; 7429 static char *fn = "drmach_iocage_cpu_acquire"; 7430 sbd_error_t *err; 7431 int impl; 7432 7433 ASSERT(DRMACH_IS_CPU_ID(dp)); 7434 ASSERT(MUTEX_HELD(&cpu_lock)); 7435 7436 cpuid = ((drmach_cpu_t *)dp)->cpuid; 7437 7438 DRMACH_PR("%s: attempting to acquire CPU id %d", fn, cpuid); 7439 7440 if (dp->busy) 7441 return (-1); 7442 7443 if ((cp = cpu_get(cpuid)) == NULL) { 7444 DRMACH_PR("%s: cpu_get(%d) returned NULL", fn, cpuid); 7445 return (-1); 7446 } 7447 7448 if (!CPU_ACTIVE(cp)) { 7449 DRMACH_PR("%s: skipping offlined CPU id %d", fn, cpuid); 7450 return (-1); 7451 } 7452 7453 /* 7454 * There is a known HW bug where a Jaguar CPU in Safari port 0 (SBX/P0) 7455 * can fail to receive an XIR. To workaround this issue until a hardware 7456 * fix is implemented, we will exclude the selection of these CPUs. 7457 * 7458 * Once a fix is implemented in hardware, this code should be updated 7459 * to allow Jaguar CPUs that have the fix to be used. However, support 7460 * must be retained to skip revisions that do not have this fix. 7461 */ 7462 7463 err = drmach_cpu_get_impl(dp, &impl); 7464 if (err) { 7465 DRMACH_PR("%s: error getting impl. of CPU id %d", fn, cpuid); 7466 sbd_err_clear(&err); 7467 return (-1); 7468 } 7469 7470 if (IS_JAGUAR(impl) && (STARCAT_CPUID_TO_LPORT(cpuid) == 0) && 7471 drmach_iocage_exclude_jaguar_port_zero) { 7472 DRMACH_PR("%s: excluding CPU id %d: port 0 on jaguar", 7473 fn, cpuid); 7474 return (-1); 7475 } 7476 7477 ASSERT(oflags); 7478 *oflags = cp->cpu_flags; 7479 7480 if (cpu_offline(cp, 0)) { 7481 DRMACH_PR("%s: cpu_offline failed for CPU id %d", fn, cpuid); 7482 return (-1); 7483 } 7484 7485 if (cpu_poweroff(cp)) { 7486 DRMACH_PR("%s: cpu_poweroff failed for CPU id %d", fn, cpuid); 7487 if (cpu_online(cp)) { 7488 cmn_err(CE_WARN, "failed to online CPU id %d " 7489 "during I/O cage test selection", cpuid); 7490 } 7491 if (CPU_ACTIVE(cp) && cpu_flagged_nointr(*oflags) && 7492 drmach_cpu_intr_disable(cp) != 0) { 7493 cmn_err(CE_WARN, "failed to restore CPU id %d " 7494 "no-intr during I/O cage test selection", cpuid); 7495 } 7496 return (-1); 7497 } 7498 7499 if (cpu_unconfigure(cpuid)) { 7500 DRMACH_PR("%s: cpu_unconfigure failed for CPU id %d", fn, 7501 cpuid); 7502 (void) cpu_configure(cpuid); 7503 if ((cp = cpu_get(cpuid)) == NULL) { 7504 cmn_err(CE_WARN, "failed to reconfigure CPU id %d " 7505 "during I/O cage test selection", cpuid); 7506 dp->busy = 1; 7507 return (-1); 7508 } 7509 if (cpu_poweron(cp) || cpu_online(cp)) { 7510 cmn_err(CE_WARN, "failed to %s CPU id %d " 7511 "during I/O cage test selection", 7512 cpu_is_poweredoff(cp) ? 7513 "poweron" : "online", cpuid); 7514 } 7515 if (CPU_ACTIVE(cp) && cpu_flagged_nointr(*oflags) && 7516 drmach_cpu_intr_disable(cp) != 0) { 7517 cmn_err(CE_WARN, "failed to restore CPU id %d " 7518 "no-intr during I/O cage test selection", cpuid); 7519 } 7520 return (-1); 7521 } 7522 7523 dp->busy = 1; 7524 7525 DRMACH_PR("%s: acquired CPU id %d", fn, cpuid); 7526 7527 return (0); 7528 } 7529 7530 /* 7531 * Attempt to acquire all the CPU devices passed in. It is 7532 * assumed that all the devices in the list are the cores of 7533 * a single CMP device. Non CMP devices can be handled as a 7534 * single core CMP by passing in a one element list. 7535 * 7536 * Success is only returned if *all* the devices in the list 7537 * can be acquired. In the failure case, none of the devices 7538 * in the list will be held as acquired. 7539 */ 7540 static int 7541 drmach_iocage_cmp_acquire(drmach_device_t **dpp, cpu_flag_t *oflags) 7542 { 7543 int curr; 7544 int i; 7545 int rv = 0; 7546 7547 ASSERT((dpp != NULL) && (*dpp != NULL)); 7548 7549 /* 7550 * Walk the list of CPU devices (cores of a CMP) 7551 * and attempt to acquire them. Bail out if an 7552 * error is encountered. 7553 */ 7554 for (curr = 0; curr < MAX_CORES_PER_CMP; curr++) { 7555 7556 /* check for the end of the list */ 7557 if (dpp[curr] == NULL) { 7558 break; 7559 } 7560 7561 ASSERT(DRMACH_IS_CPU_ID(dpp[curr])); 7562 ASSERT(dpp[curr]->portid == (*dpp)->portid); 7563 7564 rv = drmach_iocage_cpu_acquire(dpp[curr], &oflags[curr]); 7565 if (rv != 0) { 7566 break; 7567 } 7568 } 7569 7570 /* 7571 * Check for an error. 7572 */ 7573 if (rv != 0) { 7574 /* 7575 * Make a best effort attempt to return any cores 7576 * that were already acquired before the error was 7577 * encountered. 7578 */ 7579 for (i = 0; i < curr; i++) { 7580 (void) drmach_iocage_cpu_return(dpp[i], oflags[i]); 7581 } 7582 } 7583 7584 return (rv); 7585 } 7586 7587 static int 7588 drmach_iocage_cpu_return(drmach_device_t *dp, cpu_flag_t oflags) 7589 { 7590 processorid_t cpuid; 7591 struct cpu *cp; 7592 int rv = 0; 7593 static char *fn = "drmach_iocage_cpu_return"; 7594 7595 ASSERT(DRMACH_IS_CPU_ID(dp)); 7596 ASSERT(MUTEX_HELD(&cpu_lock)); 7597 7598 cpuid = ((drmach_cpu_t *)dp)->cpuid; 7599 7600 DRMACH_PR("%s: attempting to return CPU id: %d", fn, cpuid); 7601 7602 if (cpu_configure(cpuid)) { 7603 cmn_err(CE_WARN, "failed to reconfigure CPU id %d " 7604 "after I/O cage test", cpuid); 7605 /* 7606 * The component was never set to unconfigured during the IO 7607 * cage test, so we need to leave marked as busy to prevent 7608 * further DR operations involving this component. 7609 */ 7610 return (-1); 7611 } 7612 7613 if ((cp = cpu_get(cpuid)) == NULL) { 7614 cmn_err(CE_WARN, "cpu_get failed on CPU id %d after " 7615 "I/O cage test", cpuid); 7616 dp->busy = 0; 7617 return (-1); 7618 } 7619 7620 if (cpu_poweron(cp) || cpu_online(cp)) { 7621 cmn_err(CE_WARN, "failed to %s CPU id %d after I/O " 7622 "cage test", cpu_is_poweredoff(cp) ? 7623 "poweron" : "online", cpuid); 7624 rv = -1; 7625 } 7626 7627 /* 7628 * drmach_iocage_cpu_acquire will accept cpus in state P_ONLINE or 7629 * P_NOINTR. Need to return to previous user-visible state. 7630 */ 7631 if (CPU_ACTIVE(cp) && cpu_flagged_nointr(oflags) && 7632 drmach_cpu_intr_disable(cp) != 0) { 7633 cmn_err(CE_WARN, "failed to restore CPU id %d " 7634 "no-intr after I/O cage test", cpuid); 7635 rv = -1; 7636 } 7637 7638 dp->busy = 0; 7639 7640 DRMACH_PR("%s: returned CPU id: %d", fn, cpuid); 7641 7642 return (rv); 7643 } 7644 7645 static sbd_error_t * 7646 drmach_iocage_cpu_get(dr_testboard_req_t *tbrq, drmach_device_t **dpp, 7647 cpu_flag_t *oflags) 7648 { 7649 drmach_board_t *bp; 7650 int b_rv; 7651 int b_idx; 7652 drmachid_t b_id; 7653 int found; 7654 7655 mutex_enter(&cpu_lock); 7656 7657 ASSERT(drmach_boards != NULL); 7658 7659 found = 0; 7660 7661 /* 7662 * Walk the board list. 7663 */ 7664 b_rv = drmach_array_first(drmach_boards, &b_idx, &b_id); 7665 7666 while (b_rv == 0) { 7667 7668 int d_rv; 7669 int d_idx; 7670 drmachid_t d_id; 7671 7672 bp = b_id; 7673 7674 if (bp->connected == 0 || bp->devices == NULL) { 7675 b_rv = drmach_array_next(drmach_boards, &b_idx, &b_id); 7676 continue; 7677 } 7678 7679 /* An AXQ restriction disqualifies MCPU's as candidates. */ 7680 if (DRMACH_BNUM2SLOT(bp->bnum) == 1) { 7681 b_rv = drmach_array_next(drmach_boards, &b_idx, &b_id); 7682 continue; 7683 } 7684 7685 /* 7686 * Walk the device list of this board. 7687 */ 7688 d_rv = drmach_array_first(bp->devices, &d_idx, &d_id); 7689 7690 while (d_rv == 0) { 7691 7692 drmach_device_t *ndp; 7693 7694 /* only interested in CPU devices */ 7695 if (!DRMACH_IS_CPU_ID(d_id)) { 7696 d_rv = drmach_array_next(bp->devices, &d_idx, 7697 &d_id); 7698 continue; 7699 } 7700 7701 /* 7702 * The following code assumes two properties 7703 * of a CMP device: 7704 * 7705 * 1. All cores of a CMP are grouped together 7706 * in the device list. 7707 * 7708 * 2. There will only be a maximum of two cores 7709 * present in the CMP. 7710 * 7711 * If either of these two properties change, 7712 * this code will have to be revisited. 7713 */ 7714 7715 dpp[0] = d_id; 7716 dpp[1] = NULL; 7717 7718 /* 7719 * Get the next device. It may or may not be used. 7720 */ 7721 d_rv = drmach_array_next(bp->devices, &d_idx, &d_id); 7722 ndp = d_id; 7723 7724 if ((d_rv == 0) && DRMACH_IS_CPU_ID(d_id)) { 7725 /* 7726 * The second device is only interesting for 7727 * this pass if it has the same portid as the 7728 * first device. This implies that both are 7729 * cores of the same CMP. 7730 */ 7731 if (dpp[0]->portid == ndp->portid) { 7732 dpp[1] = d_id; 7733 } 7734 } 7735 7736 /* 7737 * Attempt to acquire all cores of the CMP. 7738 */ 7739 if (drmach_iocage_cmp_acquire(dpp, oflags) == 0) { 7740 found = 1; 7741 break; 7742 } 7743 7744 /* 7745 * Check if the search for the second core was 7746 * successful. If not, the next iteration should 7747 * use that device. 7748 */ 7749 if (dpp[1] == NULL) { 7750 continue; 7751 } 7752 7753 d_rv = drmach_array_next(bp->devices, &d_idx, &d_id); 7754 } 7755 7756 if (found) 7757 break; 7758 7759 b_rv = drmach_array_next(drmach_boards, &b_idx, &b_id); 7760 } 7761 7762 mutex_exit(&cpu_lock); 7763 7764 if (!found) { 7765 return (drerr_new(1, ESTC_IOCAGE_NO_CPU_AVAIL, NULL)); 7766 } 7767 7768 tbrq->cpu_portid = (*dpp)->portid; 7769 7770 return (NULL); 7771 } 7772 7773 /* 7774 * Setup an iocage by acquiring a cpu and memory. 7775 */ 7776 static sbd_error_t * 7777 drmach_iocage_setup(dr_testboard_req_t *tbrq, drmach_device_t **dpp, 7778 cpu_flag_t *oflags) 7779 { 7780 sbd_error_t *err; 7781 7782 err = drmach_iocage_cpu_get(tbrq, dpp, oflags); 7783 if (!err) { 7784 mutex_enter(&drmach_iocage_lock); 7785 while (drmach_iocage_is_busy) 7786 cv_wait(&drmach_iocage_cv, &drmach_iocage_lock); 7787 drmach_iocage_is_busy = 1; 7788 mutex_exit(&drmach_iocage_lock); 7789 err = drmach_iocage_mem_get(tbrq); 7790 if (err) { 7791 mutex_enter(&drmach_iocage_lock); 7792 drmach_iocage_is_busy = 0; 7793 cv_signal(&drmach_iocage_cv); 7794 mutex_exit(&drmach_iocage_lock); 7795 } 7796 } 7797 return (err); 7798 } 7799 7800 #define DRMACH_SCHIZO_PCI_LEAF_MAX 2 7801 #define DRMACH_SCHIZO_PCI_SLOT_MAX 8 7802 #define DRMACH_S1P_SAMPLE_MAX 2 7803 7804 typedef enum { 7805 DRMACH_POST_SUSPEND = 0, 7806 DRMACH_PRE_RESUME 7807 } drmach_sr_iter_t; 7808 7809 typedef struct { 7810 dev_info_t *dip; 7811 uint32_t portid; 7812 uint32_t pcr_sel_save; 7813 uint32_t pic_l2_io_q[DRMACH_S1P_SAMPLE_MAX]; 7814 uint64_t reg_basepa; 7815 } drmach_s1p_axq_t; 7816 7817 typedef struct { 7818 dev_info_t *dip; 7819 uint32_t portid; 7820 uint64_t csr_basepa; 7821 struct { 7822 uint64_t slot_intr_state_diag; 7823 uint64_t obio_intr_state_diag; 7824 uint_t nmap_regs; 7825 uint64_t *intr_map_regs; 7826 } regs[DRMACH_S1P_SAMPLE_MAX]; 7827 } drmach_s1p_pci_t; 7828 7829 typedef struct { 7830 uint64_t csr_basepa; 7831 struct { 7832 uint64_t csr; 7833 uint64_t errctrl; 7834 uint64_t errlog; 7835 } regs[DRMACH_S1P_SAMPLE_MAX]; 7836 drmach_s1p_pci_t pci[DRMACH_SCHIZO_PCI_LEAF_MAX]; 7837 } drmach_s1p_schizo_t; 7838 7839 typedef struct { 7840 drmach_s1p_axq_t axq; 7841 drmach_s1p_schizo_t schizo[STARCAT_SLOT1_IO_MAX]; 7842 } drmach_slot1_pause_t; 7843 7844 /* 7845 * Table of saved state for paused slot1 devices. 7846 */ 7847 static drmach_slot1_pause_t *drmach_slot1_paused[STARCAT_BDSET_MAX]; 7848 static int drmach_slot1_pause_init = 1; 7849 7850 #ifdef DEBUG 7851 int drmach_slot1_pause_debug = 1; 7852 #else 7853 int drmach_slot1_pause_debug = 0; 7854 #endif /* DEBUG */ 7855 7856 static int 7857 drmach_is_slot1_pause_axq(dev_info_t *dip, char *name, int *id, uint64_t *reg) 7858 { 7859 int portid, exp, slot, i; 7860 drmach_reg_t regs[2]; 7861 int reglen = sizeof (regs); 7862 7863 if ((portid = ddi_getprop(DDI_DEV_T_ANY, dip, 7864 DDI_PROP_DONTPASS, "portid", -1)) == -1) { 7865 return (0); 7866 } 7867 7868 exp = (portid >> 5) & 0x1f; 7869 slot = portid & 0x1; 7870 7871 if (slot == 0 || strncmp(name, DRMACH_AXQ_NAMEPROP, 7872 strlen(DRMACH_AXQ_NAMEPROP))) { 7873 return (0); 7874 } 7875 7876 mutex_enter(&cpu_lock); 7877 for (i = 0; i < STARCAT_SLOT1_CPU_MAX; i++) { 7878 if (cpu[MAKE_CPUID(exp, slot, i)]) { 7879 /* maxcat cpu present */ 7880 mutex_exit(&cpu_lock); 7881 return (0); 7882 } 7883 } 7884 mutex_exit(&cpu_lock); 7885 7886 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7887 "reg", (caddr_t)regs, ®len) != DDI_PROP_SUCCESS) { 7888 DRMACH_PR("drmach_is_slot1_pause_axq: no reg prop for " 7889 "axq dip=%p\n", (void *)dip); 7890 return (0); 7891 } 7892 7893 ASSERT(id && reg); 7894 *reg = (uint64_t)regs[0].reg_addr_hi << 32; 7895 *reg |= (uint64_t)regs[0].reg_addr_lo; 7896 *id = portid; 7897 7898 return (1); 7899 } 7900 7901 /* 7902 * Allocate an entry in the slot1_paused state table. 7903 */ 7904 static void 7905 drmach_slot1_pause_add_axq(dev_info_t *axq_dip, char *axq_name, int axq_portid, 7906 uint64_t reg, drmach_slot1_pause_t **slot1_paused) 7907 { 7908 int axq_exp; 7909 drmach_slot1_pause_t *slot1; 7910 7911 axq_exp = (axq_portid >> 5) & 0x1f; 7912 7913 ASSERT(axq_portid & 0x1); 7914 ASSERT(slot1_paused[axq_exp] == NULL); 7915 ASSERT(strncmp(axq_name, DRMACH_AXQ_NAMEPROP, 7916 strlen(DRMACH_AXQ_NAMEPROP)) == 0); 7917 7918 slot1 = kmem_zalloc(sizeof (*slot1), KM_SLEEP); 7919 7920 /* 7921 * XXX This dip should really be held (via ndi_hold_devi()) 7922 * before saving it in the axq pause structure. However that 7923 * would prevent DR as the pause data structures persist until 7924 * the next suspend. drmach code should be modified to free the 7925 * the slot 1 pause data structures for a boardset when its 7926 * slot 1 board is DRed out. The dip can then be released via 7927 * ndi_rele_devi() when the pause data structure is freed 7928 * allowing DR to proceed. Until this change is made, drmach 7929 * code should be careful about dereferencing the saved dip 7930 * as it may no longer exist. 7931 */ 7932 slot1->axq.dip = axq_dip; 7933 slot1->axq.portid = axq_portid; 7934 slot1->axq.reg_basepa = reg; 7935 slot1_paused[axq_exp] = slot1; 7936 } 7937 7938 static void 7939 drmach_s1p_pci_free(drmach_s1p_pci_t *pci) 7940 { 7941 int i; 7942 7943 for (i = 0; i < DRMACH_S1P_SAMPLE_MAX; i++) { 7944 if (pci->regs[i].intr_map_regs != NULL) { 7945 ASSERT(pci->regs[i].nmap_regs > 0); 7946 kmem_free(pci->regs[i].intr_map_regs, 7947 pci->regs[i].nmap_regs * sizeof (uint64_t)); 7948 } 7949 } 7950 } 7951 7952 static void 7953 drmach_slot1_pause_free(drmach_slot1_pause_t **slot1_paused) 7954 { 7955 int i, j, k; 7956 drmach_slot1_pause_t *slot1; 7957 7958 for (i = 0; i < STARCAT_BDSET_MAX; i++) { 7959 if ((slot1 = slot1_paused[i]) == NULL) 7960 continue; 7961 7962 for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++) 7963 for (k = 0; k < DRMACH_SCHIZO_PCI_LEAF_MAX; k++) 7964 drmach_s1p_pci_free(&slot1->schizo[j].pci[k]); 7965 7966 kmem_free(slot1, sizeof (*slot1)); 7967 slot1_paused[i] = NULL; 7968 } 7969 } 7970 7971 /* 7972 * Tree walk callback routine. If dip represents a Schizo PCI leaf, 7973 * fill in the appropriate info in the slot1_paused state table. 7974 */ 7975 static int 7976 drmach_find_slot1_io(dev_info_t *dip, void *arg) 7977 { 7978 int portid, exp, ioc_unum, leaf_unum; 7979 char buf[OBP_MAXDRVNAME]; 7980 int buflen = sizeof (buf); 7981 drmach_reg_t regs[3]; 7982 int reglen = sizeof (regs); 7983 uint32_t leaf_offset; 7984 uint64_t schizo_csr_pa, pci_csr_pa; 7985 drmach_s1p_pci_t *pci; 7986 drmach_slot1_pause_t **slot1_paused = (drmach_slot1_pause_t **)arg; 7987 7988 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 7989 "name", (caddr_t)buf, &buflen) != DDI_PROP_SUCCESS || 7990 strncmp(buf, DRMACH_PCI_NAMEPROP, strlen(DRMACH_PCI_NAMEPROP))) { 7991 return (DDI_WALK_CONTINUE); 7992 } 7993 7994 if ((portid = ddi_getprop(DDI_DEV_T_ANY, dip, 7995 DDI_PROP_DONTPASS, "portid", -1)) == -1) { 7996 return (DDI_WALK_CONTINUE); 7997 } 7998 7999 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 8000 "reg", (caddr_t)regs, ®len) != DDI_PROP_SUCCESS) { 8001 DRMACH_PR("drmach_find_slot1_io: no reg prop for pci " 8002 "dip=%p\n", (void *)dip); 8003 return (DDI_WALK_CONTINUE); 8004 } 8005 8006 exp = portid >> 5; 8007 ioc_unum = portid & 0x1; 8008 leaf_offset = regs[0].reg_addr_lo & 0x7fffff; 8009 pci_csr_pa = (uint64_t)regs[0].reg_addr_hi << 32; 8010 pci_csr_pa |= (uint64_t)regs[0].reg_addr_lo; 8011 schizo_csr_pa = (uint64_t)regs[1].reg_addr_hi << 32; 8012 schizo_csr_pa |= (uint64_t)regs[1].reg_addr_lo; 8013 8014 ASSERT(exp >= 0 && exp < STARCAT_BDSET_MAX); 8015 ASSERT(slot1_paused[exp] != NULL); 8016 ASSERT(leaf_offset == 0x600000 || leaf_offset == 0x700000); 8017 ASSERT(slot1_paused[exp]->schizo[ioc_unum].csr_basepa == 0x0UL || 8018 slot1_paused[exp]->schizo[ioc_unum].csr_basepa == schizo_csr_pa); 8019 8020 leaf_unum = (leaf_offset == 0x600000) ? 0 : 1; 8021 slot1_paused[exp]->schizo[ioc_unum].csr_basepa = schizo_csr_pa; 8022 pci = &slot1_paused[exp]->schizo[ioc_unum].pci[leaf_unum]; 8023 8024 /* 8025 * XXX This dip should really be held (via ndi_hold_devi()) 8026 * before saving it in the pci pause structure. However that 8027 * would prevent DR as the pause data structures persist until 8028 * the next suspend. drmach code should be modified to free the 8029 * the slot 1 pause data structures for a boardset when its 8030 * slot 1 board is DRed out. The dip can then be released via 8031 * ndi_rele_devi() when the pause data structure is freed 8032 * allowing DR to proceed. Until this change is made, drmach 8033 * code should be careful about dereferencing the saved dip as 8034 * it may no longer exist. 8035 */ 8036 pci->dip = dip; 8037 pci->portid = portid; 8038 pci->csr_basepa = pci_csr_pa; 8039 8040 DRMACH_PR("drmach_find_slot1_io: name=%s, portid=0x%x, dip=%p\n", 8041 buf, portid, (void *)dip); 8042 8043 return (DDI_WALK_PRUNECHILD); 8044 } 8045 8046 static void 8047 drmach_slot1_pause_add_io(drmach_slot1_pause_t **slot1_paused) 8048 { 8049 /* 8050 * Root node doesn't have to be held 8051 */ 8052 ddi_walk_devs(ddi_root_node(), drmach_find_slot1_io, 8053 (void *)slot1_paused); 8054 } 8055 8056 /* 8057 * Save the interrupt mapping registers for each non-idle interrupt 8058 * represented by the bit pairs in the saved interrupt state 8059 * diagnostic registers for this PCI leaf. 8060 */ 8061 static void 8062 drmach_s1p_intr_map_reg_save(drmach_s1p_pci_t *pci, drmach_sr_iter_t iter) 8063 { 8064 int i, cnt, ino; 8065 uint64_t reg; 8066 char *dname; 8067 uchar_t Xmits; 8068 8069 dname = ddi_binding_name(pci->dip); 8070 Xmits = (strcmp(dname, XMITS_BINDING_NAME) == 0) ? 1 : 0; 8071 8072 /* 8073 * 1st pass allocates, 2nd pass populates. 8074 */ 8075 for (i = 0; i < 2; i++) { 8076 cnt = ino = 0; 8077 8078 /* 8079 * PCI slot interrupts 8080 */ 8081 reg = pci->regs[iter].slot_intr_state_diag; 8082 while (reg) { 8083 /* 8084 * Xmits Interrupt Number Offset(ino) Assignments 8085 * 00-17 PCI Slot Interrupts 8086 * 18-1f Not Used 8087 */ 8088 if ((Xmits) && (ino > 0x17)) 8089 break; 8090 if ((reg & COMMON_CLEAR_INTR_REG_MASK) != 8091 COMMON_CLEAR_INTR_REG_IDLE) { 8092 if (i) { 8093 pci->regs[iter].intr_map_regs[cnt] = 8094 lddphysio(pci->csr_basepa + 8095 SCHIZO_IB_INTR_MAP_REG_OFFSET + 8096 ino * sizeof (reg)); 8097 } 8098 ++cnt; 8099 } 8100 ++ino; 8101 reg >>= 2; 8102 } 8103 8104 /* 8105 * Xmits Interrupt Number Offset(ino) Assignments 8106 * 20-2f Not Used 8107 * 30-37 Internal interrupts 8108 * 38-3e Not Used 8109 */ 8110 ino = (Xmits) ? 0x30 : 0x20; 8111 8112 /* 8113 * OBIO and internal schizo interrupts 8114 * Each PCI leaf has a set of mapping registers for all 8115 * possible interrupt sources except the NewLink interrupts. 8116 */ 8117 reg = pci->regs[iter].obio_intr_state_diag; 8118 while (reg && ino <= 0x38) { 8119 if ((reg & COMMON_CLEAR_INTR_REG_MASK) != 8120 COMMON_CLEAR_INTR_REG_IDLE) { 8121 if (i) { 8122 pci->regs[iter].intr_map_regs[cnt] = 8123 lddphysio(pci->csr_basepa + 8124 SCHIZO_IB_INTR_MAP_REG_OFFSET + 8125 ino * sizeof (reg)); 8126 } 8127 ++cnt; 8128 } 8129 ++ino; 8130 reg >>= 2; 8131 } 8132 8133 if (!i) { 8134 pci->regs[iter].nmap_regs = cnt; 8135 pci->regs[iter].intr_map_regs = 8136 kmem_zalloc(cnt * sizeof (reg), KM_SLEEP); 8137 } 8138 } 8139 } 8140 8141 static void 8142 drmach_s1p_axq_update(drmach_s1p_axq_t *axq, drmach_sr_iter_t iter) 8143 { 8144 uint32_t reg; 8145 8146 if (axq->reg_basepa == 0x0UL) 8147 return; 8148 8149 if (iter == DRMACH_POST_SUSPEND) { 8150 axq->pcr_sel_save = ldphysio(axq->reg_basepa + 8151 AXQ_SLOT1_PERFCNT_SEL); 8152 /* 8153 * Select l2_io_queue counter by writing L2_IO_Q mux 8154 * input to bits 0-6 of perf cntr select reg. 8155 */ 8156 reg = axq->pcr_sel_save; 8157 reg &= ~AXQ_PIC_CLEAR_MASK; 8158 reg |= L2_IO_Q; 8159 8160 stphysio(axq->reg_basepa + AXQ_SLOT1_PERFCNT_SEL, reg); 8161 } 8162 8163 axq->pic_l2_io_q[iter] = ldphysio(axq->reg_basepa + AXQ_SLOT1_PERFCNT0); 8164 8165 if (iter == DRMACH_PRE_RESUME) { 8166 stphysio(axq->reg_basepa + AXQ_SLOT1_PERFCNT_SEL, 8167 axq->pcr_sel_save); 8168 } 8169 8170 DRMACH_PR("drmach_s1p_axq_update: axq #%d pic_l2_io_q[%d]=%d\n", 8171 ddi_get_instance(axq->dip), iter, axq->pic_l2_io_q[iter]); 8172 } 8173 8174 static void 8175 drmach_s1p_schizo_update(drmach_s1p_schizo_t *schizo, drmach_sr_iter_t iter) 8176 { 8177 int i; 8178 drmach_s1p_pci_t *pci; 8179 8180 if (schizo->csr_basepa == 0x0UL) 8181 return; 8182 8183 schizo->regs[iter].csr = 8184 lddphysio(schizo->csr_basepa + SCHIZO_CB_CSR_OFFSET); 8185 schizo->regs[iter].errctrl = 8186 lddphysio(schizo->csr_basepa + SCHIZO_CB_ERRCTRL_OFFSET); 8187 schizo->regs[iter].errlog = 8188 lddphysio(schizo->csr_basepa + SCHIZO_CB_ERRLOG_OFFSET); 8189 8190 for (i = 0; i < DRMACH_SCHIZO_PCI_LEAF_MAX; i++) { 8191 pci = &schizo->pci[i]; 8192 if (pci->dip != NULL && pci->csr_basepa != 0x0UL) { 8193 pci->regs[iter].slot_intr_state_diag = 8194 lddphysio(pci->csr_basepa + 8195 COMMON_IB_SLOT_INTR_STATE_DIAG_REG); 8196 8197 pci->regs[iter].obio_intr_state_diag = 8198 lddphysio(pci->csr_basepa + 8199 COMMON_IB_OBIO_INTR_STATE_DIAG_REG); 8200 8201 drmach_s1p_intr_map_reg_save(pci, iter); 8202 } 8203 } 8204 } 8205 8206 /* 8207 * Called post-suspend and pre-resume to snapshot the suspend state 8208 * of slot1 AXQs and Schizos. 8209 */ 8210 static void 8211 drmach_slot1_pause_update(drmach_slot1_pause_t **slot1_paused, 8212 drmach_sr_iter_t iter) 8213 { 8214 int i, j; 8215 drmach_slot1_pause_t *slot1; 8216 8217 for (i = 0; i < STARCAT_BDSET_MAX; i++) { 8218 if ((slot1 = slot1_paused[i]) == NULL) 8219 continue; 8220 8221 drmach_s1p_axq_update(&slot1->axq, iter); 8222 for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++) 8223 drmach_s1p_schizo_update(&slot1->schizo[j], iter); 8224 } 8225 } 8226 8227 /* 8228 * Starcat hPCI Schizo devices. 8229 * 8230 * The name field is overloaded. NULL means the slot (interrupt concentrator 8231 * bus) is not used. intr_mask is a bit mask representing the 4 possible 8232 * interrupts per slot, on if valid (rio does not use interrupt lines 0, 1). 8233 */ 8234 static struct { 8235 char *name; 8236 uint8_t intr_mask; 8237 } drmach_schz_slot_intr[][DRMACH_SCHIZO_PCI_LEAF_MAX] = { 8238 /* Schizo 0 */ /* Schizo 1 */ 8239 {{"C3V0", 0xf}, {"C3V1", 0xf}}, /* slot 0 */ 8240 {{"C5V0", 0xf}, {"C5V1", 0xf}}, /* slot 1 */ 8241 {{"rio", 0xc}, {NULL, 0x0}}, /* slot 2 */ 8242 {{NULL, 0x0}, {NULL, 0x0}}, /* slot 3 */ 8243 {{"sbbc", 0xf}, {NULL, 0x0}}, /* slot 4 */ 8244 {{NULL, 0x0}, {NULL, 0x0}}, /* slot 5 */ 8245 {{NULL, 0x0}, {NULL, 0x0}}, /* slot 6 */ 8246 {{NULL, 0x0}, {NULL, 0x0}} /* slot 7 */ 8247 }; 8248 8249 /* 8250 * See Schizo Specification, Revision 51 (May 23, 2001), Section 22.4.4 8251 * "Interrupt Registers", Table 22-69, page 306. 8252 */ 8253 static char * 8254 drmach_schz_internal_ino2str(int ino) 8255 { 8256 int intr; 8257 8258 ASSERT(ino >= 0x30 && ino <= 0x37); 8259 8260 intr = ino & 0x7; 8261 switch (intr) { 8262 case (0x0): return ("Uncorrectable ECC error"); 8263 case (0x1): return ("Correctable ECC error"); 8264 case (0x2): return ("PCI Bus A Error"); 8265 case (0x3): return ("PCI Bus B Error"); 8266 case (0x4): return ("Safari Bus Error"); 8267 default: return ("Reserved"); 8268 } 8269 } 8270 8271 #define DRMACH_INTR_MASK_SHIFT(ino) ((ino) << 1) 8272 8273 static void 8274 drmach_s1p_decode_slot_intr(int exp, int unum, drmach_s1p_pci_t *pci, 8275 int ino, drmach_sr_iter_t iter) 8276 { 8277 uint8_t intr_mask; 8278 char *slot_devname; 8279 char namebuf[OBP_MAXDRVNAME]; 8280 int slot, intr_line, slot_valid, intr_valid; 8281 8282 ASSERT(ino >= 0 && ino <= 0x1f); 8283 ASSERT((pci->regs[iter].slot_intr_state_diag & 8284 (COMMON_CLEAR_INTR_REG_MASK << DRMACH_INTR_MASK_SHIFT(ino))) != 8285 COMMON_CLEAR_INTR_REG_IDLE); 8286 8287 slot = (ino >> 2) & 0x7; 8288 intr_line = ino & 0x3; 8289 8290 slot_devname = drmach_schz_slot_intr[slot][unum].name; 8291 slot_valid = (slot_devname == NULL) ? 0 : 1; 8292 if (!slot_valid) { 8293 (void) snprintf(namebuf, sizeof (namebuf), "slot %d (INVALID)", 8294 slot); 8295 slot_devname = namebuf; 8296 } 8297 8298 intr_mask = drmach_schz_slot_intr[slot][unum].intr_mask; 8299 intr_valid = (1 << intr_line) & intr_mask; 8300 8301 prom_printf("IO%d/P%d PCI slot interrupt: ino=0x%x, source device=%s, " 8302 "interrupt line=%d%s\n", exp, unum, ino, slot_devname, intr_line, 8303 (slot_valid && !intr_valid) ? " (INVALID)" : ""); 8304 } 8305 8306 /* 8307 * Log interrupt source device info for all valid, pending interrupts 8308 * on each Schizo PCI leaf. Called if Schizo has logged a Safari bus 8309 * error in the error ctrl reg. 8310 */ 8311 static void 8312 drmach_s1p_schizo_log_intr(drmach_s1p_schizo_t *schizo, int exp, 8313 int unum, drmach_sr_iter_t iter) 8314 { 8315 uint64_t reg; 8316 int i, n, ino; 8317 drmach_s1p_pci_t *pci; 8318 8319 ASSERT(exp >= 0 && exp < STARCAT_BDSET_MAX); 8320 ASSERT(unum < STARCAT_SLOT1_IO_MAX); 8321 8322 /* 8323 * Check the saved interrupt mapping registers. If interrupt is valid, 8324 * map the ino to the Schizo source device and check that the pci 8325 * slot and interrupt line are valid. 8326 */ 8327 for (i = 0; i < DRMACH_SCHIZO_PCI_LEAF_MAX; i++) { 8328 pci = &schizo->pci[i]; 8329 for (n = 0; n < pci->regs[iter].nmap_regs; n++) { 8330 reg = pci->regs[iter].intr_map_regs[n]; 8331 if (reg & COMMON_INTR_MAP_REG_VALID) { 8332 ino = reg & COMMON_INTR_MAP_REG_INO; 8333 8334 if (ino <= 0x1f) { 8335 /* 8336 * PCI slot interrupt 8337 */ 8338 drmach_s1p_decode_slot_intr(exp, unum, 8339 pci, ino, iter); 8340 } else if (ino <= 0x2f) { 8341 /* 8342 * OBIO interrupt 8343 */ 8344 prom_printf("IO%d/P%d OBIO interrupt: " 8345 "ino=0x%x\n", exp, unum, ino); 8346 } else if (ino <= 0x37) { 8347 /* 8348 * Internal interrupt 8349 */ 8350 prom_printf("IO%d/P%d Internal " 8351 "interrupt: ino=0x%x (%s)\n", 8352 exp, unum, ino, 8353 drmach_schz_internal_ino2str(ino)); 8354 } else { 8355 /* 8356 * NewLink interrupt 8357 */ 8358 prom_printf("IO%d/P%d NewLink " 8359 "interrupt: ino=0x%x\n", exp, 8360 unum, ino); 8361 } 8362 8363 DRMACH_PR("drmach_s1p_schizo_log_intr: " 8364 "exp=%d, schizo=%d, pci_leaf=%c, " 8365 "ino=0x%x, intr_map_reg=0x%lx\n", 8366 exp, unum, (i == 0) ? 'A' : 'B', ino, reg); 8367 } 8368 } 8369 } 8370 } 8371 8372 /* 8373 * See Schizo Specification, Revision 51 (May 23, 2001), Section 22.2.4 8374 * "Safari Error Control/Log Registers", Table 22-11, page 248. 8375 */ 8376 #define DRMACH_SCHIZO_SAFARI_UNMAPPED_ERR (0x1ull << 4) 8377 8378 /* 8379 * Check for possible error indicators prior to resuming the 8380 * AXQ driver, which will de-assert slot1 AXQ_DOMCTRL_PAUSE. 8381 */ 8382 static void 8383 drmach_slot1_pause_verify(drmach_slot1_pause_t **slot1_paused, 8384 drmach_sr_iter_t iter) 8385 { 8386 int i, j; 8387 int errflag = 0; 8388 drmach_slot1_pause_t *slot1; 8389 8390 /* 8391 * Check for logged schizo bus error and pending interrupts. 8392 */ 8393 for (i = 0; i < STARCAT_BDSET_MAX; i++) { 8394 if ((slot1 = slot1_paused[i]) == NULL) 8395 continue; 8396 8397 for (j = 0; j < STARCAT_SLOT1_IO_MAX; j++) { 8398 if (slot1->schizo[j].csr_basepa == 0x0UL) 8399 continue; 8400 8401 if (slot1->schizo[j].regs[iter].errlog & 8402 DRMACH_SCHIZO_SAFARI_UNMAPPED_ERR) { 8403 if (!errflag) { 8404 prom_printf("DR WARNING: interrupt " 8405 "attempt detected during " 8406 "copy-rename (%s):\n", 8407 (iter == DRMACH_POST_SUSPEND) ? 8408 "post suspend" : "pre resume"); 8409 ++errflag; 8410 } 8411 drmach_s1p_schizo_log_intr(&slot1->schizo[j], 8412 i, j, iter); 8413 } 8414 } 8415 } 8416 8417 /* 8418 * Check for changes in axq l2_io_q performance counters (2nd pass only) 8419 */ 8420 if (iter == DRMACH_PRE_RESUME) { 8421 for (i = 0; i < STARCAT_BDSET_MAX; i++) { 8422 if ((slot1 = slot1_paused[i]) == NULL) 8423 continue; 8424 8425 if (slot1->axq.pic_l2_io_q[DRMACH_POST_SUSPEND] != 8426 slot1->axq.pic_l2_io_q[DRMACH_PRE_RESUME]) { 8427 prom_printf("DR WARNING: IO transactions " 8428 "detected on IO%d during copy-rename: " 8429 "AXQ l2_io_q performance counter " 8430 "start=%d, end=%d\n", i, 8431 slot1->axq.pic_l2_io_q[DRMACH_POST_SUSPEND], 8432 slot1->axq.pic_l2_io_q[DRMACH_PRE_RESUME]); 8433 } 8434 } 8435 } 8436 } 8437 8438 struct drmach_sr_list { 8439 dev_info_t *dip; 8440 struct drmach_sr_list *next; 8441 struct drmach_sr_list *prev; 8442 }; 8443 8444 static struct drmach_sr_ordered { 8445 char *name; 8446 struct drmach_sr_list *ring; 8447 } drmach_sr_ordered[] = { 8448 { "iosram", NULL }, 8449 { "address-extender-queue", NULL }, 8450 { NULL, NULL }, /* terminator -- required */ 8451 }; 8452 8453 static void 8454 drmach_sr_insert(struct drmach_sr_list **lp, dev_info_t *dip) 8455 { 8456 struct drmach_sr_list *np; 8457 8458 DRMACH_PR("drmach_sr_insert: adding dip %p\n", (void *)dip); 8459 8460 np = (struct drmach_sr_list *)kmem_alloc( 8461 sizeof (struct drmach_sr_list), KM_SLEEP); 8462 8463 ndi_hold_devi(dip); 8464 np->dip = dip; 8465 8466 if (*lp == NULL) { 8467 /* establish list */ 8468 *lp = np->next = np->prev = np; 8469 } else { 8470 /* place new node behind head node on ring list */ 8471 np->prev = (*lp)->prev; 8472 np->next = *lp; 8473 np->prev->next = np; 8474 np->next->prev = np; 8475 } 8476 } 8477 8478 static void 8479 drmach_sr_delete(struct drmach_sr_list **lp, dev_info_t *dip) 8480 { 8481 DRMACH_PR("drmach_sr_delete: searching for dip %p\n", (void *)dip); 8482 8483 if (*lp) { 8484 struct drmach_sr_list *xp; 8485 8486 /* start search with mostly likely node */ 8487 xp = (*lp)->prev; 8488 do { 8489 if (xp->dip == dip) { 8490 xp->prev->next = xp->next; 8491 xp->next->prev = xp->prev; 8492 8493 if (xp == *lp) 8494 *lp = xp->next; 8495 if (xp == *lp) 8496 *lp = NULL; 8497 xp->dip = NULL; 8498 ndi_rele_devi(dip); 8499 kmem_free(xp, sizeof (*xp)); 8500 8501 DRMACH_PR("drmach_sr_delete:" 8502 " disposed sr node for dip %p", 8503 (void *)dip); 8504 return; 8505 } 8506 8507 DRMACH_PR("drmach_sr_delete: still searching\n"); 8508 8509 xp = xp->prev; 8510 } while (xp != (*lp)->prev); 8511 } 8512 8513 /* every dip should be found during resume */ 8514 DRMACH_PR("ERROR: drmach_sr_delete: can't find dip %p", (void *)dip); 8515 } 8516 8517 int 8518 drmach_verify_sr(dev_info_t *dip, int sflag) 8519 { 8520 int rv; 8521 int len; 8522 char name[OBP_MAXDRVNAME]; 8523 8524 if (drmach_slot1_pause_debug) { 8525 if (sflag && drmach_slot1_pause_init) { 8526 drmach_slot1_pause_free(drmach_slot1_paused); 8527 drmach_slot1_pause_init = 0; 8528 } else if (!sflag && !drmach_slot1_pause_init) { 8529 /* schedule init for next suspend */ 8530 drmach_slot1_pause_init = 1; 8531 } 8532 } 8533 8534 rv = ddi_getproplen(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 8535 "name", &len); 8536 if (rv == DDI_PROP_SUCCESS) { 8537 int portid; 8538 uint64_t reg; 8539 struct drmach_sr_ordered *op; 8540 8541 rv = ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, 8542 DDI_PROP_DONTPASS, "name", (caddr_t)name, &len); 8543 8544 if (rv != DDI_PROP_SUCCESS) 8545 return (0); 8546 8547 if (drmach_slot1_pause_debug && sflag && 8548 drmach_is_slot1_pause_axq(dip, name, &portid, ®)) { 8549 drmach_slot1_pause_add_axq(dip, name, portid, reg, 8550 drmach_slot1_paused); 8551 } 8552 8553 for (op = drmach_sr_ordered; op->name; op++) { 8554 if (strncmp(op->name, name, strlen(op->name)) == 0) { 8555 if (sflag) 8556 drmach_sr_insert(&op->ring, dip); 8557 else 8558 drmach_sr_delete(&op->ring, dip); 8559 return (1); 8560 } 8561 } 8562 } 8563 8564 return (0); 8565 } 8566 8567 static void 8568 drmach_sr_dip(dev_info_t *dip, int suspend) 8569 { 8570 int rv; 8571 major_t maj; 8572 char *name, *name_addr, *aka; 8573 8574 if ((name = ddi_get_name(dip)) == NULL) 8575 name = "<null name>"; 8576 else if ((maj = ddi_name_to_major(name)) != -1) 8577 aka = ddi_major_to_name(maj); 8578 else 8579 aka = "<unknown>"; 8580 8581 if ((name_addr = ddi_get_name_addr(dip)) == NULL) 8582 name_addr = "<null>"; 8583 8584 prom_printf("\t%s %s@%s (aka %s)\n", 8585 suspend ? "suspending" : "resuming", 8586 name, name_addr, aka); 8587 8588 if (suspend) { 8589 rv = devi_detach(dip, DDI_SUSPEND); 8590 } else { 8591 rv = devi_attach(dip, DDI_RESUME); 8592 } 8593 8594 if (rv != DDI_SUCCESS) { 8595 prom_printf("\tFAILED to %s %s@%s\n", 8596 suspend ? "suspend" : "resume", 8597 name, name_addr); 8598 } 8599 } 8600 8601 void 8602 drmach_suspend_last() 8603 { 8604 struct drmach_sr_ordered *op; 8605 8606 if (drmach_slot1_pause_debug) 8607 drmach_slot1_pause_add_io(drmach_slot1_paused); 8608 8609 /* 8610 * The ordering array declares the strict sequence in which 8611 * the named drivers are to suspended. Each element in 8612 * the array may have a double-linked ring list of driver 8613 * instances (dip) in the order in which they were presented 8614 * to drmach_verify_sr. If present, walk the list in the 8615 * forward direction to suspend each instance. 8616 */ 8617 for (op = drmach_sr_ordered; op->name; op++) { 8618 if (op->ring) { 8619 struct drmach_sr_list *rp; 8620 8621 rp = op->ring; 8622 do { 8623 drmach_sr_dip(rp->dip, 1); 8624 rp = rp->next; 8625 } while (rp != op->ring); 8626 } 8627 } 8628 8629 if (drmach_slot1_pause_debug) { 8630 drmach_slot1_pause_update(drmach_slot1_paused, 8631 DRMACH_POST_SUSPEND); 8632 drmach_slot1_pause_verify(drmach_slot1_paused, 8633 DRMACH_POST_SUSPEND); 8634 } 8635 } 8636 8637 void 8638 drmach_resume_first() 8639 { 8640 struct drmach_sr_ordered *op = drmach_sr_ordered + 8641 (sizeof (drmach_sr_ordered) / sizeof (drmach_sr_ordered[0])); 8642 8643 if (drmach_slot1_pause_debug) { 8644 drmach_slot1_pause_update(drmach_slot1_paused, 8645 DRMACH_PRE_RESUME); 8646 drmach_slot1_pause_verify(drmach_slot1_paused, 8647 DRMACH_PRE_RESUME); 8648 } 8649 8650 op -= 1; /* point at terminating element */ 8651 8652 /* 8653 * walk ordering array and rings backwards to resume dips 8654 * in reverse order in which they were suspended 8655 */ 8656 while (--op >= drmach_sr_ordered) { 8657 if (op->ring) { 8658 struct drmach_sr_list *rp; 8659 8660 rp = op->ring->prev; 8661 do { 8662 drmach_sr_dip(rp->dip, 0); 8663 rp = rp->prev; 8664 } while (rp != op->ring->prev); 8665 } 8666 } 8667 } 8668 8669 /* 8670 * Log a DR sysevent. 8671 * Return value: 0 success, non-zero failure. 8672 */ 8673 int 8674 drmach_log_sysevent(int board, char *hint, int flag, int verbose) 8675 { 8676 sysevent_t *ev; 8677 sysevent_id_t eid; 8678 int rv, km_flag; 8679 sysevent_value_t evnt_val; 8680 sysevent_attr_list_t *evnt_attr_list = NULL; 8681 char attach_pnt[MAXNAMELEN]; 8682 8683 km_flag = (flag == SE_SLEEP) ? KM_SLEEP : KM_NOSLEEP; 8684 attach_pnt[0] = '\0'; 8685 if (drmach_board_name(board, attach_pnt, MAXNAMELEN)) { 8686 rv = -1; 8687 goto logexit; 8688 } 8689 if (verbose) 8690 DRMACH_PR("drmach_log_sysevent: %s %s, flag: %d, verbose: %d\n", 8691 attach_pnt, hint, flag, verbose); 8692 8693 if ((ev = sysevent_alloc(EC_DR, ESC_DR_AP_STATE_CHANGE, 8694 SUNW_KERN_PUB"dr", km_flag)) == NULL) { 8695 rv = -2; 8696 goto logexit; 8697 } 8698 evnt_val.value_type = SE_DATA_TYPE_STRING; 8699 evnt_val.value.sv_string = attach_pnt; 8700 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_AP_ID, 8701 &evnt_val, km_flag)) != 0) 8702 goto logexit; 8703 8704 evnt_val.value_type = SE_DATA_TYPE_STRING; 8705 evnt_val.value.sv_string = hint; 8706 if ((rv = sysevent_add_attr(&evnt_attr_list, DR_HINT, 8707 &evnt_val, km_flag)) != 0) { 8708 sysevent_free_attr(evnt_attr_list); 8709 goto logexit; 8710 } 8711 8712 (void) sysevent_attach_attributes(ev, evnt_attr_list); 8713 8714 /* 8715 * Log the event but do not sleep waiting for its 8716 * delivery. This provides insulation from syseventd. 8717 */ 8718 rv = log_sysevent(ev, SE_NOSLEEP, &eid); 8719 8720 logexit: 8721 if (ev) 8722 sysevent_free(ev); 8723 if ((rv != 0) && verbose) 8724 cmn_err(CE_WARN, 8725 "drmach_log_sysevent failed (rv %d) for %s %s\n", 8726 rv, attach_pnt, hint); 8727 8728 return (rv); 8729 } 8730 8731 /* 8732 * Initialize the mem_slice portion of a claim/unconfig/unclaim mailbox message. 8733 * Only the valid entries are modified, so the array should be zeroed out 8734 * initially. 8735 */ 8736 static void 8737 drmach_msg_memslice_init(dr_memslice_t slice_arr[]) { 8738 int i; 8739 char c; 8740 8741 ASSERT(mutex_owned(&drmach_slice_table_lock)); 8742 8743 for (i = 0; i < AXQ_MAX_EXP; i++) { 8744 c = drmach_slice_table[i]; 8745 8746 if (c & 0x20) { 8747 slice_arr[i].valid = 1; 8748 slice_arr[i].slice = c & 0x1f; 8749 } 8750 } 8751 } 8752 8753 /* 8754 * Initialize the mem_regs portion of a claim/unconfig/unclaim mailbox message. 8755 * Only the valid entries are modified, so the array should be zeroed out 8756 * initially. 8757 */ 8758 static void 8759 drmach_msg_memregs_init(dr_memregs_t regs_arr[]) { 8760 int rv, exp, mcnum, bank; 8761 uint64_t madr; 8762 drmachid_t id; 8763 drmach_board_t *bp; 8764 drmach_mem_t *mp; 8765 dr_memregs_t *memregs; 8766 8767 /* CONSTCOND */ 8768 ASSERT(DRMACH_MC_NBANKS == (PMBANKS_PER_PORT * LMBANKS_PER_PMBANK)); 8769 8770 for (exp = 0; exp < 18; exp++) { 8771 rv = drmach_array_get(drmach_boards, 8772 DRMACH_EXPSLOT2BNUM(exp, 0), &id); 8773 ASSERT(rv == 0); /* should never be out of bounds */ 8774 if (id == NULL) { 8775 continue; 8776 } 8777 8778 memregs = ®s_arr[exp]; 8779 bp = (drmach_board_t *)id; 8780 for (mp = bp->mem; mp != NULL; mp = mp->next) { 8781 mcnum = mp->dev.portid & 0x3; 8782 for (bank = 0; bank < DRMACH_MC_NBANKS; bank++) { 8783 drmach_mem_read_madr(mp, bank, &madr); 8784 if (madr & DRMACH_MC_VALID_MASK) { 8785 DRMACH_PR("%d.%d.%d.madr = 0x%lx\n", 8786 exp, mcnum, bank, madr); 8787 memregs->madr[mcnum][bank].hi = 8788 DRMACH_U64_TO_MCREGHI(madr); 8789 memregs->madr[mcnum][bank].lo = 8790 DRMACH_U64_TO_MCREGLO(madr); 8791 } 8792 } 8793 } 8794 } 8795 } 8796 8797 /* 8798 * Do not allow physical address range modification if either board on this 8799 * expander has processors in NULL LPA mode (CBASE=CBND=NULL). 8800 * 8801 * A side effect of NULL proc LPA mode in Starcat SSM is that local reads will 8802 * install the cache line as owned/dirty as a result of the RTSR transaction. 8803 * See section 5.2.3 of the Safari spec. All processors will read the bus sync 8804 * list before the rename after flushing local caches. When copy-rename 8805 * requires changing the physical address ranges (i.e. smaller memory target), 8806 * the bus sync list contains physical addresses that will not exist after the 8807 * rename. If these cache lines are owned due to a RTSR, a system error can 8808 * occur following the rename when these cache lines are evicted and a writeback 8809 * is attempted. 8810 * 8811 * Incoming parameter represents either the copy-rename source or a candidate 8812 * target memory board. On Starcat, only slot0 boards may have memory. 8813 */ 8814 int 8815 drmach_allow_memrange_modify(drmachid_t s0id) 8816 { 8817 drmach_board_t *s0bp, *s1bp; 8818 drmachid_t s1id; 8819 int rv; 8820 8821 s0bp = s0id; 8822 8823 ASSERT(DRMACH_IS_BOARD_ID(s0id)); 8824 ASSERT(DRMACH_BNUM2SLOT(s0bp->bnum) == 0); 8825 8826 if (s0bp->flags & DRMACH_NULL_PROC_LPA) { 8827 /* 8828 * This is reason enough to fail the request, no need 8829 * to check the device list for cpus. 8830 */ 8831 return (0); 8832 } 8833 8834 /* 8835 * Check for MCPU board on the same expander. 8836 * 8837 * The board flag DRMACH_NULL_PROC_LPA can be set for all board 8838 * types, as it is derived at from the POST gdcd board flag 8839 * L1SSFLG_THIS_L1_NULL_PROC_LPA, which can be set (and should be 8840 * ignored) for boards with no processors. Since NULL proc LPA 8841 * applies only to processors, we walk the devices array to detect 8842 * MCPUs. 8843 */ 8844 rv = drmach_array_get(drmach_boards, s0bp->bnum + 1, &s1id); 8845 s1bp = s1id; 8846 if (rv == 0 && s1bp != NULL) { 8847 8848 ASSERT(DRMACH_IS_BOARD_ID(s1id)); 8849 ASSERT(DRMACH_BNUM2SLOT(s1bp->bnum) == 1); 8850 ASSERT(DRMACH_BNUM2EXP(s0bp->bnum) == 8851 DRMACH_BNUM2EXP(s1bp->bnum)); 8852 8853 if ((s1bp->flags & DRMACH_NULL_PROC_LPA) && 8854 s1bp->devices != NULL) { 8855 int d_idx; 8856 drmachid_t d_id; 8857 8858 rv = drmach_array_first(s1bp->devices, &d_idx, &d_id); 8859 while (rv == 0) { 8860 if (DRMACH_IS_CPU_ID(d_id)) { 8861 /* 8862 * Fail MCPU in NULL LPA mode. 8863 */ 8864 return (0); 8865 } 8866 8867 rv = drmach_array_next(s1bp->devices, &d_idx, 8868 &d_id); 8869 } 8870 } 8871 } 8872 8873 return (1); 8874 } 8875