/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/open.h>
#include <sys/param.h>
#include <sys/machparam.h>
#include <sys/systm.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/strsun.h>
#include <inet/common.h>
#include <inet/mi.h>
#include <inet/nd.h>
#include <sys/poll.h>
#include <sys/utsname.h>
#include <sys/debug.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/machsystm.h>
#include <sys/promif.h>
#include <sys/prom_plat.h>
#include <sys/obpdefs.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/kstat.h>
#include <sys/membar.h>
#include <sys/ivintr.h>
#include <sys/vm_machparam.h>
#include <sys/x_call.h>
#include <sys/cpuvar.h>
#include <sys/archsystm.h>
#include <sys/dmv.h>

#include <sys/idn.h>
#include <sys/idn_xf.h>
#include <sys/cpu_sgnblk_defs.h>
#include <sys/cpu_sgn.h>

struct idn_gkstat	sg_kstat;

#define	MBXTBL_PART_REPORT	((caddr_t)1)
#define	MBXTBL_FULL_REPORT	((caddr_t)2)

idn_domain_t	idn_domain[MAX_DOMAINS];
idn_global_t	idn;
int		idn_debug;
int		idn_snoop;
int		idn_history;

typedef enum {
	IDN_GPROPS_OKAY,
	IDN_GPROPS_UNCHECKED,
	IDN_GPROPS_ERROR
} idn_gprops_t;

struct idn_history	idnhlog;

/*
 * IDN "tunables".
 */
int		idn_smr_size;
int		idn_nwr_size;
int		idn_lowat;
int		idn_hiwat;
int		idn_protocol_nservers;
int		idn_awolmsg_interval;
int		idn_smr_bufsize;
int		idn_slab_bufcount;
int		idn_slab_prealloc;
int		idn_slab_maxperdomain;
int		idn_slab_mintotal;
int		idn_window_max;
int		idn_window_incr;
int		idn_window_emax;
int		idn_reclaim_min;
int		idn_reclaim_max;
int		idn_mbox_per_net;
int		idn_max_nets;

int		idn_netsvr_spin_count;
int		idn_netsvr_wait_min;
int		idn_netsvr_wait_max;
int		idn_netsvr_wait_shift;

int		idn_checksum;

int		idn_msgwait_nego;
int		idn_msgwait_cfg;
int		idn_msgwait_con;
int		idn_msgwait_fin;
int		idn_msgwait_cmd;
int		idn_msgwait_data;

int		idn_retryfreq_nego;
int		idn_retryfreq_con;
int		idn_retryfreq_fin;

int		idn_window_emax;	/* calculated */
int		idn_slab_maxperdomain;	/* calculated */

/*
 * DMV interrupt support.
 */
int		idn_pil;
int		idn_dmv_pending_max;
idn_dmv_msg_t	*idn_iv_queue[NCPU];
int		idn_intr_index[NCPU];	/* idn_handler ONLY */
static idn_dmv_data_t	*idn_dmv_data;

int		idn_sigbpil;

idnparam_t	idn_param_arr[] = {
	{ 0,	1,	0,	/* 0 */	"idn_modunloadable" },
};

/*
 * Parameters that are only accessible in a DEBUG driver.
 */
static char *idn_param_debug_only[] = {
#if 0
	"idn_checksum",
#endif /* 0 */
	0
};

/*
 * Parameters that are READ-ONLY.
 */
static char *idn_param_read_only[] = {
#if 0
	"idn_window_emax",
	"idn_slab_maxperdomain",
#endif /* 0 */
	0
};

static struct idn_global_props {
	int	p_min, p_max, p_def;
	char	*p_string;
	int	*p_var;
} idn_global_props[] = {
	{ 0,	0,	0,	"idn_debug",	&idn_debug },
	{ 0,	1,	0,	"idn_history",	&idn_history },
	{ 0,	IDN_SMR_MAXSIZE,
		0,	"idn_smr_size",		&idn_smr_size },
	{ 0,	IDN_SMR_MAXSIZE,
		0,	"idn_nwr_size",		&idn_nwr_size },
	{ 1,	512*1024,
		1,	"idn_lowat",		&idn_lowat },
	{ 1*1024,
		1*1024*1024,
		256*1024,
		"idn_hiwat",		&idn_hiwat },
	{ IDN_SMR_BUFSIZE_MIN,
		IDN_SMR_BUFSIZE_MAX,
		IDN_SMR_BUFSIZE_DEF,
		"idn_smr_bufsize",	&idn_smr_bufsize },
	{ 4,	1024,	32,	"idn_slab_bufcount",	&idn_slab_bufcount },
	{ 0,	10,	0,	"idn_slab_prealloc",	&idn_slab_prealloc },
	{ 2,	MAX_DOMAINS,
		8,	"idn_slab_mintotal",	&idn_slab_mintotal },
	{ 8,	256,	64,	"idn_window_max",	&idn_window_max },
	{ 0,	32,	8,	"idn_window_incr",	&idn_window_incr },
	{ 1,	128,	5,	"idn_reclaim_min",	&idn_reclaim_min },
	{ 0,	128,	0,	"idn_reclaim_max",	&idn_reclaim_max },
	{ 1,	IDN_MAXMAX_NETS,
		8,	"idn_max_nets",		&idn_max_nets },
	{ 31,	511,	127,	"idn_mbox_per_net",	&idn_mbox_per_net },
	{ 0,	1,	1,	"idn_checksum",		&idn_checksum },
	{ 0,	10000,	500,	"idn_netsvr_spin_count",
				&idn_netsvr_spin_count },
	{ 0,	30*100,	40,	"idn_netsvr_wait_min",	&idn_netsvr_wait_min },
	{ 0,	60*100,	16*100,	"idn_netsvr_wait_max",	&idn_netsvr_wait_max },
	{ 1,	5,	1,	"idn_netsvr_wait_shift",
				&idn_netsvr_wait_shift },
	{ 1,	MAX_DOMAINS,
		IDN_PROTOCOL_NSERVERS,
		"idn_protocol_nservers",
		&idn_protocol_nservers },
	{ 0,	3600,	IDN_AWOLMSG_INTERVAL,
		"idn_awolmsg_interval",	&idn_awolmsg_interval },
	{ 10,	300,	IDN_MSGWAIT_NEGO,
		"idn_msgwait_nego",	&idn_msgwait_nego },
	{ 10,	300,	IDN_MSGWAIT_CFG,
		"idn_msgwait_cfg",	&idn_msgwait_cfg },
	{ 10,	300,	IDN_MSGWAIT_CON,
		"idn_msgwait_con",	&idn_msgwait_con },
	{ 10,	300,	IDN_MSGWAIT_FIN,
		"idn_msgwait_fin",	&idn_msgwait_fin },
	{ 10,	300,	IDN_MSGWAIT_CMD,
		"idn_msgwait_cmd",	&idn_msgwait_cmd },
	{ 10,	300,	IDN_MSGWAIT_DATA,
		"idn_msgwait_data",	&idn_msgwait_data },
	{ 1,	60,	IDN_RETRYFREQ_NEGO,
		"idn_retryfreq_nego",	&idn_retryfreq_nego },
	{ 1,	60,	IDN_RETRYFREQ_CON,
		"idn_retryfreq_con",	&idn_retryfreq_con },
	{ 1,	60,	IDN_RETRYFREQ_FIN,
		"idn_retryfreq_fin",	&idn_retryfreq_fin },
	{ 1,	9,	IDN_PIL,
		"idn_pil",		&idn_pil },
	{ 1,	9,	IDN_SIGBPIL,
		"idn_sigbpil",		&idn_sigbpil },
	{ 8,	512,	IDN_DMV_PENDING_MAX,
		"idn_dmv_pending_max",	&idn_dmv_pending_max },
	{ 0,	0,	0,	NULL,	NULL }
};

struct idn	*idn_i2s_table[IDN_MAXMAX_NETS << 1];
clock_t		idn_msg_waittime[IDN_NUM_MSGTYPES];
clock_t		idn_msg_retrytime[(int)IDN_NUM_RETRYTYPES];

static caddr_t	idn_ndlist;	/* head of 'named dispatch' var list */

static int	idnattach(dev_info_t *, ddi_attach_cmd_t);
static int	idndetach(dev_info_t *, ddi_detach_cmd_t);
static int	idnopen(register queue_t *, dev_t *, int, int, cred_t *);
static int	idnclose(queue_t *, int, cred_t *);
static int	idnwput(queue_t *, mblk_t *);
static int	idnwsrv(queue_t *);
static int	idnrput(queue_t *, mblk_t *);
static void	idnioctl(queue_t *, mblk_t *);
static idn_gprops_t	idn_check_conf(dev_info_t *dip, processorid_t *cpuid);
static int	idn_size_check();
static void	idn_xmit_monitor_init();
static void	idn_xmit_monitor_deinit();
static void	idn_init_msg_waittime();
static void	idn_init_msg_retrytime();
static void	idn_sigb_setup(cpu_sgnblk_t *sigbp, void *arg);
static int	idn_init(dev_info_t *dip);
static int	idn_deinit();
static void	idn_sigbhandler_create();
static void	idn_sigbhandler_kill();
static uint_t	idn_sigbhandler_wakeup(caddr_t arg1, caddr_t arg2);
static void	idn_sigbhandler_thread(struct sigbintr **sbpp);
static void	idn_sigbhandler(processorid_t cpuid, cpu_sgnblk_t *sgnblkp);
static int	idn_info(idnsb_info_t *sfp);
static int	idn_init_smr();
static void	idn_deinit_smr();
static int	idn_prom_getsmr(uint_t *smrsz, uint64_t *paddrp,
			uint64_t *sizep);
static int	idn_init_handler();
static void	idn_deinit_handler();
static uint_t	idn_handler(caddr_t unused, caddr_t unused2);
/*
 * ioctl services
 */
static int	idnioc_link(idnop_t *idnop);
static int	idnioc_unlink(idnop_t *idnop);
static int	idn_rw_mem(idnop_t *idnop);
static int	idn_send_ping(idnop_t *idnop);

static void	idn_domains_init(struct hwconfig *local_hw);
static void	idn_domains_deinit();
static void	idn_retrytask_init();
static void	idn_retrytask_deinit();
static void	idn_gkstat_init();
static void	idn_gkstat_deinit();
static int	idn_gkstat_update();
static void	idn_timercache_init();
static void	idn_timercache_deinit();
static void	idn_dopers_init();
static void	idn_dopers_deinit();

static void	idn_param_cleanup();
static int	idn_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
static int	idn_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
			cred_t *cr);
static int	idn_param_register(register idnparam_t *idnpa, int count);
static int	idn_slabpool_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_buffer_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_mboxtbl_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_mainmbox_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static void	idn_mainmbox_domain_report(queue_t *wq, mblk_t *mp, int domid,
			idn_mainmbox_t *mmp, char *mbxtype);
static int	idn_global_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_domain_report(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_get_net_binding(queue_t *wq, mblk_t *mp, caddr_t cp,
			cred_t *cr);
static int	idn_set_net_binding(queue_t *wq, mblk_t *mp, char *value,
			caddr_t cp, cred_t *cr);

/*
 * String definitions used for DEBUG and non-DEBUG.
 */
const char	*idnm_str[] = {
	/*  0 */	"null",
	/*  1 */	"nego",
	/*  2 */	"con",
	/*  3 */	"cfg",
	/*  4 */	"fin",
	/*  5 */	"cmd",
	/*  6 */	"data",
};

const char	*idnds_str[] = {
	/*  0 */	"CLOSED",
	/*  1 */	"NEGO_PEND",
	/*  2 */	"NEGO_SENT",
	/*  3 */	"NEGO_RCVD",
	/*  4 */	"CONFIG",
	/*  5 */	"CON_PEND",
	/*  6 */	"CON_SENT",
	/*  7 */	"CON_RCVD",
	/*  8 */	"CON_READY",
	/*  9 */	"CONNECTED",
	/* 10 */	"FIN_PEND",
	/* 11 */	"FIN_SENT",
	/* 12 */	"FIN_RCVD",
	/* 13 */	"DMAP"
};

const char	*idnxs_str[] = {
	/* 0 */		"PEND",
	/* 1 */		"SENT",
	/* 2 */		"RCVD",
	/* 3 */		"FINAL",
	/* 4 */		"NIL"
};

const char	*idngs_str[] = {
	/*  0 */	"OFFLINE",
	/*  1 */	"CONNECT",
	/*  2 */	"ONLINE",
	/*  3 */	"DISCONNECT",
	/*  4 */	"RECONFIG",
	/*  5 */	"unknown",
	/*  6 */	"unknown",
	/*  7 */	"unknown",
	/*  8 */	"unknown",
	/*  9 */	"unknown",
	/* 10 */	"IGNORE"
};

const char	*idncmd_str[] = {
	/* 0 */		"unknown",
	/* 1 */		"SLABALLOC",
	/* 2 */		"SLABFREE",
	/* 3 */		"SLABREAP",
	/* 4 */		"NODENAME"
};

const char	*idncon_str[] = {
	/* 0 */		"OFF",
	/* 1 */		"NORMAL",
	/* 2 */		"QUERY"
};

const char	*idnfin_str[] = {
	/* 0 */		"OFF",
	/* 1 */		"NORMAL",
	/* 2 */		"FORCE_SOFT",
	/* 3 */		"FORCE_HARD",
	/* 4 */		"QUERY"
};

const char	*idnfinopt_str[] = {
	/* 0 */		"NONE",
	/* 1 */		"UNLINK",
	/* 2 */		"RELINK"
};

const char	*idnfinarg_str[] = {
	/*  0 */	"NONE",
	/*  1 */	"SMRBAD",
	/*  2 */	"CPUCFG",
	/*  3 */	"HWERR",
	/*  4 */	"CFGERR_FATAL",
	/*  5 */	"CFGERR_MTU",
	/*  6 */	"CFGERR_BUF",
	/*  7 */	"CFGERR_SLAB",
	/*  8 */	"CFGERR_NWR",
	/*  9 */	"CFGERR_NETS",
	/* 10 */	"CFGERR_MBOX",
	/* 11 */	"CFGERR_NMCADR",
	/* 12 */	"CFGERR_MCADR",
	/* 13 */	"CFGERR_CKSUM",
	/* 14 */	"CFGERR_SMR",
};

const char	*idnsync_str[] = {
	/* 0 */		"NIL",
	/* 1 */		"CONNECT",
	/* 2 */		"DISCONNECT"
};

const char	*idnreg_str[] = {
	/* 0 */		"REG",
	/* 1 */		"NEW",
	/* 2 */		"QUERY"
};

const char	*idnnack_str[] = {
	/*  0 */	"unknown",
	/*  1 */	"NOCONN",
	/*  2 */	"BADCHAN",
	/*  3 */	"BADCFG",
	/*  4 */	"BADCMD",
	/*  5 */	"RETRY",
	/*  6 */	"DUP",
	/*  7 */	"EXIT",
	/*  8 */	"--reserved1",
	/*  9 */	"--reserved2",
	/* 10 */	"--reserved3"
};

const char	*idnop_str[] = {
	/* 0 */		"DISCONNECTED",
	/* 1 */		"CONNECTED",
	/* 2 */		"ERROR"
};

const char	*chanop_str[] = {
	/* 0 */		"OPEN",
	/* 1 */		"SOFT_CLOSE",
	/* 2 */		"HARD_CLOSE",
	/* 3 */		"OFFLINE",
	/* 4 */		"ONLINE"
};

const char	*chanaction_str[] = {
	/* 0 */		"DETACH",
	/* 1 */		"STOP",
	/* 2 */		"SUSPEND",
	/* 3 */		"RESUME",
	/* 4 */		"RESTART",
	/* 5 */		"ATTACH"
};

const char	*timer_str[] = {
	/* 0 */		"NIL",
	/* 1 */		"MSG"
};

static struct module_info idnrinfo = {
	IDNIDNUM,		/* mi_idnum */
	IDNNAME,		/* mi_idname */
	IDNMINPSZ,		/* mi_minpsz */
	IDNMAXPSZ,		/* mi_maxpsz */
	0,			/* mi_hiwat - see IDN_HIWAT */
	0			/* mi_lowat - see IDN_LOWAT */
};

static struct module_info idnwinfo = {
	IDNIDNUM,		/* mi_idnum */
	IDNNAME,		/* mi_idname */
	IDNMINPSZ,		/* mi_minpsz */
	IDNMAXPSZ,		/* mi_maxpsz */
	0,			/* mi_hiwat - see IDN_HIWAT */
	0			/* mi_lowat - see IDN_LOWAT */
};

static struct qinit idnrinit = {
	idnrput,		/* qi_putp */
	NULL,			/* qi_srvp */
	idnopen,		/* qi_qopen */
	idnclose,		/* qi_qclose */
	NULL,			/* qi_qadmin */
	&idnrinfo,		/* qi_minfo */
	NULL,			/* qi_mstat */
	NULL,			/* qi_rwp */
	NULL,			/* qi_infop */
	STRUIOT_DONTCARE	/* qi_struiot */
};

static struct qinit idnwinit = {
	idnwput,		/* qi_putp */
	idnwsrv,		/* qi_srvp */
	NULL,			/* qi_qopen */
	NULL,			/* qi_qclose */
	NULL,			/* qi_qadmin */
	&idnwinfo,		/* qi_minfo */
	NULL,			/* qi_mstat */
	NULL,			/* qi_rwp */
	NULL,			/* qi_infop */
	STRUIOT_DONTCARE	/* qi_struiot */
};

struct streamtab idninfo = {
	&idnrinit,		/* st_rdinit */
	&idnwinit,		/* st_wrinit */
	NULL,			/* st_muxrinit */
	NULL,			/* st_muxwinit */
};

/*
 * Module linkage information (cb_ops & dev_ops) for the kernel.
 */

static struct cb_ops cb_idnops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	&idninfo,		/* cb_stream */
	D_MP,			/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev,			/* cb_awrite */
};

static struct dev_ops idnops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	ddi_no_info,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	idnattach,		/* devo_attach */
	idndetach,		/* devo_detach */
	nodev,			/* devo_reset */
	&cb_idnops,		/* devo_cb_ops */
	(struct bus_ops *)NULL,	/* devo_bus_ops */
	NULL			/* devo_power */
};

extern cpuset_t cpu_ready_set;

static struct modldrv modldrv = {
	&mod_driverops,		/* This module is a pseudo driver */
	IDNDESC " %I%",
	&idnops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * --------------------------------------------------
 */
int
_init(void)
{
	idn.version = IDN_VERSION;

	return (mod_install(&modlinkage));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * ----------------------------------------------
 */
static int
idnattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int		instance;
	int		doinit = 0;
	processorid_t	bcpuid;
	struct idn	*sip;
	struct idnstr	*stp;
	procname_t	proc = "idnattach";


#ifndef lint
	ASSERT(sizeof (idnsb_t) == IDNSB_SIZE);
	ASSERT((uint_t)&((struct idnsb *)0)->id_hwchkpt[0] == 0x40);
#endif /* lint */

	switch (cmd) {
	case DDI_RESUME:
		sip = ddi_get_driver_private(dip);
		/*
		 * sip may have not yet been set if the
		 * OBP environment variable (idn-smr-size)
		 * was not set.
		 */
		if (sip == NULL)
			return (DDI_FAILURE);
		/*
		 * RESUME IDN services.
		 */
		IDN_GLOCK_SHARED();
		if (idn.state != IDNGS_OFFLINE) {
			cmn_err(CE_WARN,
			    "IDN: 101: not in expected OFFLINE state "
			    "for DDI_RESUME");
			ASSERT(0);
		}
		IDN_GUNLOCK();

		/*
		 * RESUME DLPI services.
		 */
		sip->si_flags &= ~IDNSUSPENDED;

		rw_enter(&idn.struprwlock, RW_READER);
		for (stp = idn.strup; stp; stp = stp->ss_nextp)
			if (stp->ss_sip == sip) {
				doinit = 1;
				break;
			}
		rw_exit(&idn.struprwlock);
		if (doinit)
			(void) idndl_init(sip);

		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	PR_DRV("%s: instance = %d\n", proc, instance);

	if (idn_check_conf(dip, &bcpuid) == IDN_GPROPS_ERROR)
		return (DDI_FAILURE);

	mutex_enter(&idn.siplock);

	if (ddi_create_minor_node(dip, IDNNAME, S_IFCHR, instance,
	    DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
		mutex_exit(&idn.siplock);
		return (DDI_FAILURE);
	}

	if (idn.smr.ready == 0) {
		if (idn_init_smr() == 0) {
			idn.enabled = 1;
#ifdef DEBUG
			cmn_err(CE_NOTE, "!IDN: Driver enabled");
#endif /* DEBUG */
		} else {
			cmn_err(CE_NOTE,
			    "!IDN: 102: driver disabled "
			    "- check OBP environment "
			    "(idn-smr-size)");
			mutex_exit(&idn.siplock);
			return (DDI_SUCCESS);
		}
	}

	ASSERT(idn.smr.ready || idn.enabled);

	if (idn.dip == NULL) {
		doinit = 1;

		if (idn_size_check()) {
			idn_deinit_smr();
			ddi_remove_minor_node(dip, NULL);
			mutex_exit(&idn.siplock);
			return (DDI_FAILURE);
		}

		if (idn_init(dip)) {
			idn_deinit_smr();
			ddi_remove_minor_node(dip, NULL);
			mutex_exit(&idn.siplock);
			return (DDI_FAILURE);
		}
	}

	ASSERT(idn.dip);

	/*
	 * This must occur _after_ idn_init() since
	 * it assumes idn_chanservers_init() has been
	 * called.
	 */
	idn_chanserver_bind(ddi_get_instance(dip), bcpuid);

	/*
	 * DLPI supporting stuff.
	 */
	sip = GETSTRUCT(struct idn, 1);
	sip->si_dip = dip;
	ddi_set_driver_private(dip, sip);
	sip->si_nextp = idn.sip;
	idn.sip = sip;
	IDN_SET_INST2SIP(instance, sip);
	mutex_exit(&idn.siplock);

	if (doinit)
		idndl_dlpi_init();	/* initializes idninfoack */
	/*
	 * Get our local IDN ethernet address.
	 */
	idndl_localetheraddr(sip, &sip->si_ouraddr);
	idndl_statinit(sip);

	if (doinit) {
		idn_gkstat_init();
		/*
		 * Add our sigblock SSP interrupt handler.
		 */
		mutex_enter(&idn.sigbintr.sb_mutex);
		idn_sigbhandler_create();
		mutex_exit(&idn.sigbintr.sb_mutex);

		if (sgnblk_poll_register(idn_sigbhandler) == 0) {
			mutex_enter(&idn.sigbintr.sb_mutex);
			idn_sigbhandler_kill();
			idn.sigbintr.sb_cpuid = (uchar_t)-1;
			idn.sigbintr.sb_busy = IDNSIGB_INACTIVE;
			mutex_exit(&idn.sigbintr.sb_mutex);

			idn_gkstat_deinit();

			mutex_enter(&idn.siplock);
			(void) idn_deinit();
			IDN_SET_INST2SIP(instance, NULL);
			idn.sip = sip->si_nextp;
			mutex_exit(&idn.siplock);

			ddi_remove_minor_node(dip, NULL);

			return (DDI_FAILURE);
		}
		/*
		 * We require sigblkp[cpu0] to be mapped for hardware
		 * configuration determination and also auto-linking
		 * on bootup.
		 */
		if (sgnblk_poll_reference(idn_sigb_setup, NULL) != 0) {
			sgnblk_poll_unregister(idn_sigbhandler);
			mutex_enter(&idn.sigbintr.sb_mutex);
			idn_sigbhandler_kill();
			idn.sigbintr.sb_cpuid = (uchar_t)-1;
			idn.sigbintr.sb_busy = IDNSIGB_INACTIVE;
			mutex_exit(&idn.sigbintr.sb_mutex);

			idn_gkstat_deinit();

			mutex_enter(&idn.siplock);
			(void) idn_deinit();
			IDN_SET_INST2SIP(instance, NULL);
			idn.sip = sip->si_nextp;
			mutex_exit(&idn.siplock);

			ddi_remove_minor_node(dip, NULL);

			cmn_err(CE_WARN,
			    "IDN: 103: unable to reference sigblock area");

			return (DDI_FAILURE);
		}

		idn_init_autolink();
	}

	ddi_report_dev(dip);

	return (DDI_SUCCESS);
}

/*
 * ----------------------------------------------
 */
static int
idndetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int		err = 0;
	int		instance;
	struct idn	*sip, *hsip, *tsip;
	procname_t	proc = "idndetach";

	sip = ddi_get_driver_private(dip);
	instance = ddi_get_instance(dip);

	switch (cmd) {
	case DDI_SUSPEND:
		if (sip == NULL)
			return (DDI_FAILURE);
		/*
		 * SUSPEND IDN services.
		 * - Actually don't suspend anything, we just
		 *   make sure we're not connected per DR protocol.
		 *   If we really wanted to suspend it should
		 *   be done _after_ DLPI is suspended so that
		 *   we're not competing with that traffic.
		 */
		IDN_GLOCK_SHARED();

		if (idn.state != IDNGS_OFFLINE) {
			int	d;

			cmn_err(CE_WARN,
			    "IDN: 104: cannot suspend while active "
			    "(state = %s)",
			    idngs_str[idn.state]);

			for (d = 0; d < MAX_DOMAINS; d++) {
				idn_domain_t	*dp;

				dp = &idn_domain[d];
				if (dp->dcpu < 0)
					continue;

				cmn_err(CE_CONT,
				    "IDN: 121: domain %d (CPU %d, name "
				    "\"%s\", state %s)\n",
				    d, dp->dcpu, dp->dname,
				    idnds_str[dp->dstate]);
			}
			err = 1;
		}

		IDN_GUNLOCK();

		if (err)
			return (DDI_FAILURE);
		/*
		 * SUSPEND DLPI services.
		 */
		sip->si_flags |= IDNSUSPENDED;

		idndl_uninit(sip);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		if (idn.enabled == 0) {
			ddi_remove_minor_node(dip, NULL);
			ASSERT(idn.dip == NULL);
			return (DDI_SUCCESS);
		}
		if (!IDN_MODUNLOADABLE)
			return (DDI_FAILURE);
		break;

	default:
		return (DDI_FAILURE);
	}

	PR_DRV("%s: instance = %d\n", proc, instance);

	if (sip == NULL) {
		/*
		 * No resources allocated.
		 */
		return (DDI_SUCCESS);
	}

	mutex_enter(&idn.siplock);
	if (idn.sip && (idn.sip->si_nextp == NULL)) {
		/*
		 * This is our last stream connection
		 * going away.  Time to deinit and flag
		 * the SSP we're (IDN) DOWN.
		 */
		if (idn_deinit()) {
			/*
			 * Must still be active.
			 */
			mutex_exit(&idn.siplock);
			return (DDI_FAILURE);
		}
		idn_deinit_autolink();
		/*
		 * Remove our sigblock SSP interrupt handler.
		 */
		sgnblk_poll_unregister(idn_sigbhandler);
		mutex_enter(&idn.sigbintr.sb_mutex);
		idn_sigbhandler_kill();
		idn.sigbintr.sb_cpuid = (uchar_t)-1;
		idn.sigbintr.sb_busy = IDNSIGB_NOTREADY;
		mutex_exit(&idn.sigbintr.sb_mutex);
		/*
		 * Remove our reference to the sigblock area.
		 */
		sgnblk_poll_unreference(idn_sigb_setup);
		idn_gkstat_deinit();
	}

	ddi_remove_minor_node(dip, NULL);

	/*
	 * Remove this instance from our linked list.
	 */
	IDN_SET_INST2SIP(instance, NULL);
	if ((hsip = tsip = idn.sip) == sip) {
		idn.sip = sip->si_nextp;
	} else {
		for (; hsip && (sip != hsip); tsip = hsip,
		    hsip = hsip->si_nextp)
			;
		if (hsip)
			tsip->si_nextp = hsip->si_nextp;
	}
	mutex_exit(&idn.siplock);
	if (sip->si_ksp)
		kstat_delete(sip->si_ksp);

	ddi_set_driver_private(dip, NULL);

	FREESTRUCT(sip, struct idn, 1);

	return (DDI_SUCCESS);
}

/*
 * ----------------------------------------------
 */
static idn_gprops_t
idn_check_conf(dev_info_t *dip, processorid_t *cpuid)
{
	static idn_gprops_t	global_props = IDN_GPROPS_UNCHECKED;

	if (global_props == IDN_GPROPS_UNCHECKED) {
		int	p;

		global_props = IDN_GPROPS_OKAY;

		for (p = 0; idn_global_props[p].p_string; p++) {
			char	*str;
			int	*var;
			int	val, v_min, v_max, v_def;

			str = idn_global_props[p].p_string;
			var = (int *)idn_global_props[p].p_var;
			v_min = idn_global_props[p].p_min;
			v_max = idn_global_props[p].p_max;
			v_def = idn_global_props[p].p_def;
			ASSERT(str && var);

			val = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
			    DDI_PROP_DONTPASS |
			    DDI_PROP_NOTPROM,
			    str, v_def);
			if ((v_min != v_max) &&
			    ((val < v_min) || (val > v_max))) {
				cmn_err(CE_WARN,
				    "IDN: 105: driver parameter "
				    "(%s) specified (%d) out of "
				    "range [%d - %d]",
				    str, val, v_min, v_max);
				global_props = IDN_GPROPS_ERROR;
			} else {
				*var = val;
			}
		}
	}

	*cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    "bind_cpu", -1);

	return (global_props);
}

static int
idn_size_check()
{
	int		i, cnt;
	int		rv = 0;
	ulong_t		mboxareasize;
	int		max_num_slabs;
	procname_t	proc = "idn_size_check";

	if (IDN_NWR_SIZE == 0)
		IDN_NWR_SIZE = IDN_SMR_SIZE;

	if (IDN_NWR_SIZE > IDN_SMR_SIZE) {
		cmn_err(CE_WARN,
		    "IDN: 106: idn_nwr_size(%d) > idn_smr_size(%d)"
		    " - Limiting to %d MB",
		    IDN_NWR_SIZE, IDN_SMR_SIZE, IDN_SMR_SIZE);
		IDN_NWR_SIZE = IDN_SMR_SIZE;
	}

	if (MB2B(IDN_NWR_SIZE) < IDN_SLAB_SIZE) {
		cmn_err(CE_WARN,
		    "IDN: 107: memory region(%lu) < slab size(%u)",
		    MB2B(IDN_NWR_SIZE), IDN_SLAB_SIZE);
		rv = -1;
	}

	if (IDN_LOWAT >= IDN_HIWAT) {
		cmn_err(CE_WARN,
		    "IDN: 108: idn_lowat(%d) >= idn_hiwat(%d)",
		    IDN_LOWAT, IDN_HIWAT);
		rv = -1;
	}

	mboxareasize = (ulong_t)(IDN_MBOXAREA_SIZE + (IDN_SMR_BUFSIZE - 1));
	mboxareasize &= ~((ulong_t)IDN_SMR_BUFSIZE - 1);
#ifdef DEBUG
	if ((ulong_t)IDN_SLAB_SIZE < mboxareasize) {
		PR_DRV("%s: slab size(%ld) < mailbox area(%ld)",
		    proc, IDN_SLAB_SIZE, mboxareasize);
		/* not fatal */
	}
#endif /* DEBUG */

	if ((mboxareasize + (ulong_t)IDN_SLAB_SIZE) > MB2B(IDN_NWR_SIZE)) {
		cmn_err(CE_WARN,
		    "IDN: 109: mailbox area(%lu) + slab size(%u) "
		    "> nwr region(%lu)",
		    mboxareasize, IDN_SLAB_SIZE,
		    MB2B(IDN_NWR_SIZE));
		rv = -1;
	}

	max_num_slabs = (int)((MB2B(IDN_NWR_SIZE) - mboxareasize) /
	    (ulong_t)IDN_SLAB_SIZE);
	if (max_num_slabs < IDN_SLAB_MINTOTAL) {
		cmn_err(CE_WARN,
		    "IDN: 110: maximum number of slabs(%d) < "
		    "minimum required(%d)",
		    max_num_slabs, IDN_SLAB_MINTOTAL);
		rv = -1;
	} else {
		IDN_SLAB_MAXPERDOMAIN = max_num_slabs / IDN_SLAB_MINTOTAL;
	}

#if 0
	if ((IDN_MTU + sizeof (struct ether_header)) > IDN_DATA_SIZE) {
		cmn_err(CE_WARN,
		    "IDN: (IDN_MTU(%d) + ether_header(%d)) "
		    "> IDN_DATA_SIZE(%lu)",
		    IDN_MTU, sizeof (struct ether_header),
		    IDN_DATA_SIZE);
		rv = -1;
	}
#endif /* 0 */

	if (IDN_SMR_BUFSIZE & (IDN_ALIGNSIZE - 1)) {
		cmn_err(CE_WARN,
		    "IDN: 111: idn_smr_bufsize(%d) not on a "
		    "64 byte boundary", IDN_SMR_BUFSIZE);
		rv = -1;
	}

	for (i = cnt = 0;
	    (cnt <= 1) && (((ulong_t)1 << i) < MB2B(IDN_NWR_SIZE));
	    i++)
		if ((1 << i) & IDN_SMR_BUFSIZE)
			cnt++;
	if ((i > 0) && (!cnt || (cnt > 1))) {
		cmn_err(CE_WARN,
		    "IDN: 112: idn_smr_bufsize(%d) not a power of 2",
		    IDN_SMR_BUFSIZE);
		rv = -1;
	}

	if ((IDN_MBOX_PER_NET & 1) == 0) {
		cmn_err(CE_WARN,
		    "IDN: 113: idn_mbox_per_net(%d) must be an "
		    "odd number", IDN_MBOX_PER_NET);
		rv = -1;
	}

	if (idn.nchannels > 0)
		IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
		    ((idn.nchannels - 1) * IDN_WINDOW_INCR);

	if (IDN_NETSVR_WAIT_MIN > IDN_NETSVR_WAIT_MAX) {
		cmn_err(CE_WARN,
		    "IDN: 115: idn_netsvr_wait_min(%d) cannot be "
		    "greater than idn_netsvr_wait_max(%d)",
		    IDN_NETSVR_WAIT_MIN,
		    IDN_NETSVR_WAIT_MAX);
		rv = -1;
	}

	return (rv);
}

static int
idn_init_smr()
{
	uint64_t	obp_paddr;
	uint64_t	obp_size;	/* in Bytes */
	uint_t		smr_size;	/* in MBytes */
	pgcnt_t		npages;
	procname_t	proc = "idn_init_smr";

	if (idn.smr.ready)
		return (0);

	if (idn_prom_getsmr(&smr_size, &obp_paddr, &obp_size) < 0)
		return (-1);

	PR_PROTO("%s: smr_size = %d, obp_paddr = 0x%llx, obp_size = 0x%llx\n",
	    proc, smr_size, obp_paddr, obp_size);

	if (IDN_SMR_SIZE)
		smr_size = MIN(smr_size, IDN_SMR_SIZE);

	npages = btopr(MB2B(smr_size));

	idn.smr.prom_paddr = obp_paddr;
	idn.smr.prom_size = obp_size;
	idn.smr.vaddr = vmem_alloc(heap_arena, ptob(npages), VM_SLEEP);
	ASSERT(((ulong_t)idn.smr.vaddr & MMU_PAGEOFFSET) == 0);
	idn.smr.locpfn = (pfn_t)(obp_paddr >> MMU_PAGESHIFT);
	idn.smr.rempfn = idn.smr.rempfnlim = PFN_INVALID;
	IDN_SMR_SIZE = smr_size;

	PR_PROTO("%s: smr vaddr = %p\n", proc, idn.smr.vaddr);

	smr_remap(&kas, idn.smr.vaddr, idn.smr.locpfn, IDN_SMR_SIZE);

	idn.localid = PADDR_TO_DOMAINID(obp_paddr);

	idn.smr.ready = 1;

	return (0);
}

static void
idn_deinit_smr()
{
	pgcnt_t		npages;

	if (idn.smr.ready == 0)
		return;

	smr_remap(&kas, idn.smr.vaddr, PFN_INVALID, IDN_SMR_SIZE);

	npages = btopr(MB2B(IDN_SMR_SIZE));

	vmem_free(heap_arena, idn.smr.vaddr, ptob(npages));

	idn.localid = IDN_NIL_DOMID;

	IDN_SMR_SIZE = 0;

	idn.smr.ready = 0;
}

/*ARGSUSED1*/
static void
idn_sigb_setup(cpu_sgnblk_t *sigbp, void *arg)
{
	procname_t	proc = "idn_sigb_setup";

	PR_PROTO("%s: Setting sigb to %p\n", proc, sigbp);

	mutex_enter(&idn.idnsb_mutex);
	if (sigbp == NULL) {
		idn.idnsb = NULL;
		idn.idnsb_eventp = NULL;
		mutex_exit(&idn.idnsb_mutex);
		return;
	}
	idn.idnsb_eventp = (idnsb_event_t *)sigbp->sigb_idn;
	idn.idnsb = (idnsb_t *)&idn.idnsb_eventp->idn_reserved1;
	mutex_exit(&idn.idnsb_mutex);
}

static int
idn_init(dev_info_t *dip)
{
	struct hwconfig	local_hw;
	procname_t	proc = "idn_init";


	ASSERT(MUTEX_HELD(&idn.siplock));

	if (!idn.enabled) {
		cmn_err(CE_WARN,
		    "IDN: 117: IDN not enabled");
		return (-1);
	}

	if (idn.dip != NULL) {
		PR_DRV("%s: already initialized (dip = 0x%x)\n",
		    proc, (uint_t)idn.dip);
		return (0);
	}

	/*
	 * Determine our local domain's hardware configuration.
	 */
	if (get_hw_config(&local_hw)) {
		cmn_err(CE_WARN,
		    "IDN: 118: hardware config not appropriate");
		return (-1);
	}

	PR_DRV("%s: locpfn = 0x%lx\n", proc, idn.smr.locpfn);
	PR_DRV("%s: rempfn = 0x%lx\n", proc, idn.smr.rempfn);
	PR_DRV("%s: smrsize = %d MB\n", proc, IDN_SMR_SIZE);

	rw_init(&idn.grwlock, NULL, RW_DEFAULT, NULL);
	rw_init(&idn.struprwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&idn.sync.sz_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&idn.sipwenlock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Calculate proper value for idn.bframe_shift.
	 * Kind of hokey as it assumes knowledge of the format
	 * of the idnparam_t structure.
	 */
	{
		int	s;

		for (s = 0; (1 << s) < IDN_SMR_BUFSIZE_MIN; s++)
			;
		idn.bframe_shift = s;
		PR_DRV("%s: idn.bframe_shift = %d, minbuf = %ld\n",
		    proc, idn.bframe_shift, IDN_SMR_BUFSIZE_MIN);

		ASSERT((uint_t)IDN_OFFSET2BFRAME(MB2B(idn_smr_size)) <
		    (1 << 24));
	}

	idn_xmit_monitor_init();

	/*
	 * Initialize the domain op (dopers) stuff.
	 */
	idn_dopers_init();

	/*
	 * Initialize the timer (kmem) cache used for timeout
	 * structures.
	 */
	idn_timercache_init();

	/*
	 * Initialize the slab waiting areas.
	 */
	(void) smr_slabwaiter_init();

	/*
	 * Initialize retryjob kmem cache.
	 */
	idn_retrytask_init();

	idn_init_msg_waittime();
	idn_init_msg_retrytime();

	/*
	 * Initialize idn_domain[] and the local domain's information,
	 * including the idn_global information.
	 */
	idn_domains_init(&local_hw);

	/*
	 * Start up IDN protocol servers.
	 */
	if (idn_protocol_init(idn_protocol_nservers) <= 0) {
		cmn_err(CE_WARN,
		    "IDN: 119: failed to initialize %d protocol servers",
		    idn_protocol_nservers);
		idn_domains_deinit();
		idn_retrytask_deinit();
		smr_slabwaiter_deinit();
		idn_timercache_deinit();
		idn_dopers_deinit();
		idn_xmit_monitor_deinit();
		mutex_destroy(&idn.sipwenlock);
		mutex_destroy(&idn.sync.sz_mutex);
		rw_destroy(&idn.grwlock);
		rw_destroy(&idn.struprwlock);
		return (-1);
	}

	/*
	 * Initialize chan_servers array.
	 */
	(void) idn_chanservers_init();

	/*
	 * Need to register the IDN handler with the DMV subsystem.
	 *
	 * Need to prevent the IDN driver from being unloaded
	 * once loaded since DMV's may come in at any time.
	 * If the driver is not loaded and the idn_dmv_handler
	 * has been registered with the DMV, system will crash.
	 */
	(void) idn_init_handler();

	idn.dip = dip;
	IDN_GLOCK_EXCL();
	IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
	IDN_GUNLOCK();

	return (0);
}

static int
idn_deinit()
{
	procname_t	proc = "idn_deinit";

	ASSERT(MUTEX_HELD(&idn.siplock));

	IDN_GLOCK_EXCL();

	if (idn.state != IDNGS_OFFLINE) {
		int	d;

		cmn_err(CE_WARN,
		    "IDN: 120: cannot deinit while active "
		    "(state = %s)", idngs_str[idn.state]);

		for (d = 0; d < MAX_DOMAINS; d++) {
			idn_domain_t	*dp;

			dp = &idn_domain[d];
			if (dp->dcpu < 0)
				continue;

			cmn_err(CE_CONT,
			    "IDN: 121: domain %d (CPU %d, "
			    "name \"%s\", state %s)\n",
			    d, dp->dcpu, dp->dname,
			    idnds_str[dp->dstate]);
		}
		IDN_GUNLOCK();
		return (-1);
	}

	if (idn.dip == NULL) {
		PR_DRV("%s: already deinitialized\n", proc);
		IDN_GUNLOCK();
		return (0);
	}

	IDN_GSTATE_TRANSITION(IDNGS_IGNORE);

	IDN_GUNLOCK();

	idn_xmit_monitor_deinit();

	idn_deinit_handler();

	idn_chanservers_deinit();

	idn.nchannels = 0;
	ASSERT(idn.chan_servers == NULL);

	smr_slabpool_deinit();

	idn_protocol_deinit();

	idn_domains_deinit();

	smr_slabwaiter_deinit();

	idn_retrytask_deinit();

	idn_timercache_deinit();

	idn_dopers_deinit();

	ASSERT(idn.localid == IDN_NIL_DOMID);

	IDN_SET_MASTERID(IDN_NIL_DOMID);

	idn_deinit_smr();

	mutex_destroy(&idn.sipwenlock);
	mutex_destroy(&idn.sync.sz_mutex);
	rw_destroy(&idn.grwlock);
	rw_destroy(&idn.struprwlock);

	idn.dip = NULL;

	return (0);
}

static void
idn_xmit_monitor_init()
{
	mutex_init(&idn.xmit_lock, NULL, MUTEX_DEFAULT, NULL);
	idn.xmit_tid = (timeout_id_t)NULL;
	CHANSET_ZERO(idn.xmit_chanset_wanted);
}

static void
idn_xmit_monitor_deinit()
{
	timeout_id_t	tid;

	mutex_enter(&idn.xmit_lock);
	CHANSET_ZERO(idn.xmit_chanset_wanted);
	if ((tid = idn.xmit_tid) != (timeout_id_t)NULL) {
		idn.xmit_tid = (timeout_id_t)NULL;
		mutex_exit(&idn.xmit_lock);
		(void) untimeout(tid);
	} else {
		mutex_exit(&idn.xmit_lock);
	}
	mutex_destroy(&idn.xmit_lock);
}

static void
idn_init_msg_waittime()
{
	idn_msg_waittime[IDNP_NULL] = -1;
	idn_msg_waittime[IDNP_NEGO] = idn_msgwait_nego * hz;
	idn_msg_waittime[IDNP_CFG] = idn_msgwait_cfg * hz;
	idn_msg_waittime[IDNP_CON] = idn_msgwait_con * hz;
	idn_msg_waittime[IDNP_FIN] = idn_msgwait_fin * hz;
	idn_msg_waittime[IDNP_CMD] = idn_msgwait_cmd * hz;
	idn_msg_waittime[IDNP_DATA] = idn_msgwait_data * hz;
}

static void
idn_init_msg_retrytime()
{
	idn_msg_retrytime[(int)IDNRETRY_NIL] = -1;
	idn_msg_retrytime[(int)IDNRETRY_NEGO] = idn_retryfreq_nego * hz;
	idn_msg_retrytime[(int)IDNRETRY_CON] = idn_retryfreq_con * hz;
	idn_msg_retrytime[(int)IDNRETRY_CONQ] = idn_retryfreq_con * hz;
	idn_msg_retrytime[(int)IDNRETRY_FIN] = idn_retryfreq_fin * hz;
	idn_msg_retrytime[(int)IDNRETRY_FINQ] = idn_retryfreq_fin * hz;
}

/*
 * ----------------------------------------------
 */
/*ARGSUSED*/
static int
idnopen(register queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *crp)
{
	register int	err = 0;
	int		minordev;
	struct idnstr	*stp, **pstp;
	procname_t	proc = "idnopen";

	ASSERT(sflag != MODOPEN);

	IDN_GLOCK_EXCL();

	rw_enter(&idn.struprwlock, RW_WRITER);
	mutex_enter(&idn.sipwenlock);
	pstp = &idn.strup;

	if (idn.enabled == 0) {
		PR_DRV("%s: Driver disabled (check OBP:idn-smr-size)\n",
		    proc);
		mutex_exit(&idn.sipwenlock);
		rw_exit(&idn.struprwlock);
		IDN_GUNLOCK();
		return (EACCES);
	}

	if (!idn_ndlist &&
	    idn_param_register(idn_param_arr, A_CNT(idn_param_arr))) {
		PR_DRV("%s: failed to register ndd parameters\n", proc);
		mutex_exit(&idn.sipwenlock);
		rw_exit(&idn.struprwlock);
		IDN_GUNLOCK();
		return (ENOMEM);
	}
	IDN_GUNLOCK();

	if (sflag == CLONEOPEN) {
		minordev = 0;
		for (stp = *pstp; stp; pstp = &stp->ss_nextp, stp = *pstp) {
			if (minordev < stp->ss_minor)
				break;
			minordev++;
		}
		*devp = makedevice(getmajor(*devp), minordev);
	} else {
		minordev = getminor(*devp);
	}
	if (rq->q_ptr)
		goto done;

	stp = GETSTRUCT(struct idnstr, 1);
	stp->ss_rq = rq;
	stp->ss_minor = minordev;
	rw_init(&stp->ss_rwlock, NULL, RW_DEFAULT, NULL);
	/*
	 * DLPI stuff
	 */
	stp->ss_sip = NULL;
	stp->ss_state = DL_UNATTACHED;
	stp->ss_sap = 0;
	stp->ss_flags = 0;
	stp->ss_mccount = 0;
	stp->ss_mctab = NULL;

	/*
	 * Link new entry into list of actives.
	 */
	stp->ss_nextp = *pstp;
	*pstp = stp;

	WR(rq)->q_ptr = rq->q_ptr = (void *)stp;
	/*
	 * Disable automatic enabling of our write service
	 * procedure.  We control this explicitly.
	 */
	noenable(WR(rq));

	/*
	 * Set our STREAMs queue maximum packet size that
	 * we'll accept and our high/low water marks.
	 */
	(void) strqset(WR(rq), QMAXPSZ, 0, IDN_DATA_SIZE);
	(void) strqset(WR(rq), QLOWAT, 0, IDN_LOWAT);
	(void) strqset(WR(rq), QHIWAT, 0, IDN_HIWAT);
	(void) strqset(rq, QMAXPSZ, 0, IDN_DATA_SIZE);
	(void) strqset(rq, QLOWAT, 0, IDN_LOWAT);
	(void) strqset(rq, QHIWAT, 0, IDN_HIWAT);

done:
	mutex_exit(&idn.sipwenlock);
	rw_exit(&idn.struprwlock);

	(void) qassociate(rq, -1);
	qprocson(rq);

	return (err);
}

/*
 * ----------------------------------------------
 */
/*ARGSUSED1*/
static int
idnclose(queue_t *rq, int flag, cred_t *crp)
{
	struct idnstr	*stp, **pstp;

	ASSERT(rq->q_ptr);

	qprocsoff(rq);
	/*
	 * Guaranteed to be single threaded with respect
	 * to this stream at this point.
	 */

	stp = (struct idnstr *)rq->q_ptr;

	if (stp->ss_sip)
		idndl_dodetach(stp);

	rw_enter(&idn.struprwlock, RW_WRITER);
	mutex_enter(&idn.sipwenlock);
	pstp = &idn.strup;
	for (stp = *pstp; stp; pstp = &stp->ss_nextp, stp = *pstp)
		if (stp == (struct idnstr *)rq->q_ptr)
			break;
	ASSERT(stp);
	ASSERT(stp->ss_rq == rq);
	*pstp = stp->ss_nextp;

	rw_destroy(&stp->ss_rwlock);
	FREESTRUCT(stp, struct idnstr, 1);

	WR(rq)->q_ptr = rq->q_ptr = NULL;
	mutex_exit(&idn.sipwenlock);
	rw_exit(&idn.struprwlock);

	idn_param_cleanup();
	(void) qassociate(rq, -1);

	return (0);
}

/*
 * ----------------------------------------------
 */
static int
idnwput(register queue_t *wq, register mblk_t *mp)
{
	register struct idnstr	*stp;
	struct idn		*sip;
	procname_t		proc = "idnwput";

	stp = (struct idnstr *)wq->q_ptr;
	sip = stp->ss_sip;

	switch (DB_TYPE(mp)) {
	case M_IOCTL:
		idnioctl(wq, mp);
		break;

	case M_DATA:
		if (((stp->ss_flags & (IDNSFAST|IDNSRAW)) == 0) ||
		    (stp->ss_state != DL_IDLE) ||
		    (sip == NULL)) {
			PR_DLPI("%s: fl=0x%lx, st=0x%lx, ret(EPROTO)\n",
			    proc, stp->ss_flags, stp->ss_state);
			merror(wq, mp, EPROTO);

		} else if (wq->q_first) {
			if (putq(wq, mp) == 0)
				freemsg(mp);
			/*
			 * We're only holding the reader lock,
			 * but that's okay since this field
			 * is just a soft-flag.
			 */
			sip->si_wantw = 1;
			qenable(wq);

		} else if (sip->si_flags & IDNPROMISC) {
			if (putq(wq, mp) == 0) {
				PR_DLPI("%s: putq failed\n", proc);
				freemsg(mp);
			} else {
				PR_DLPI("%s: putq succeeded\n", proc);
			}
			qenable(wq);

		} else {
			PR_DLPI("%s: idndl_start(sip=0x%x)\n",
			    proc, (uint_t)sip);
			rw_enter(&stp->ss_rwlock, RW_READER);
			(void) idndl_start(wq, mp, sip);
			rw_exit(&stp->ss_rwlock);
		}
		break;

	case M_PROTO:
	case M_PCPROTO:
		/*
		 * Break the association between the current thread
		 * and the thread that calls idndl_proto() to resolve
		 * the problem of idn_chan_server() threads which
		 * loop back around to call idndl_proto and try to
		 * recursively acquire internal locks.
		 */
		if (putq(wq, mp) == 0)
			freemsg(mp);
		qenable(wq);
		break;

	case M_FLUSH:
		PR_STR("%s: M_FLUSH request (flush = %d)\n",
		    proc, (int)*mp->b_rptr);
		if (*mp->b_rptr & FLUSHW) {
			flushq(wq, FLUSHALL);
			*mp->b_rptr &= ~FLUSHW;
		}
		if (*mp->b_rptr & FLUSHR)
			qreply(wq, mp);
		else
			freemsg(mp);
		break;

	default:
		PR_STR("%s: unexpected DB_TYPE 0x%x\n",
		    proc, DB_TYPE(mp));
		freemsg(mp);
		break;
	}

	return (0);
}

/*
 * ----------------------------------------------
 */
static int
idnwsrv(queue_t *wq)
{
	mblk_t		*mp;
	int		err = 0;
	struct idnstr	*stp;
	struct idn	*sip;
	procname_t	proc = "idnwsrv";

	stp = (struct idnstr *)wq->q_ptr;
	sip = stp->ss_sip;

	while (mp = getq(wq)) {
		switch (DB_TYPE(mp)) {
		case M_DATA:
			if (sip) {
				PR_DLPI("%s: idndl_start(sip=0x%x)\n",
				    proc, (uint_t)sip);
				rw_enter(&stp->ss_rwlock, RW_READER);
				err = idndl_start(wq, mp, sip);
				rw_exit(&stp->ss_rwlock);
				if (err)
					goto done;
			} else {
				PR_DLPI("%s: NO sip to start msg\n", proc);
				freemsg(mp);
			}
			break;

		case M_PROTO:
		case M_PCPROTO:
			idndl_proto(wq, mp);
			break;

		default:
			ASSERT(0);
			PR_STR("%s: unexpected db_type (%d)\n",
			    proc, DB_TYPE(mp));
			freemsg(mp);
			break;
		}
	}
done:
	return (0);
}

/*
 * ----------------------------------------------
 */
static int
idnrput(register queue_t *rq, register mblk_t *mp)
{
	register int	err = 0;
	procname_t	proc = "idnrput";

	switch (DB_TYPE(mp)) {
	case M_DATA:
		/*
		 * Should not reach here with data packets
		 * if running DLPI.
		 */
		cmn_err(CE_WARN,
		    "IDN: 123: unexpected M_DATA packets for "
		    "q_stream 0x%x", (uint_t)rq->q_stream);
		freemsg(mp);
		err = ENXIO;
		break;

	case M_FLUSH:
		PR_STR("%s: M_FLUSH request (flush = %d)\n",
		    proc, (int)*mp->b_rptr);
		if (*mp->b_rptr & FLUSHR)
			flushq(rq, FLUSHALL);
		(void) putnext(rq, mp);
		break;

	case M_ERROR:
		PR_STR("%s: M_ERROR (error = %d) coming through\n",
		    proc, (int)*mp->b_rptr);
		(void) putnext(rq, mp);
		break;
	default:
		PR_STR("%s: unexpected DB_TYPE 0x%x\n",
		    proc, DB_TYPE(mp));
		freemsg(mp);
		err = ENXIO;
		break;
	}

	return (err);
}

/*
 * ----------------------------------------------
 * Not allowed to enqueue messages!  Only M_DATA messages
 * can be enqueued on the write stream.
 * ----------------------------------------------
 */
static void
idnioctl(register queue_t *wq, register mblk_t *mp)
{
	register struct iocblk	*iocp;
	register int		cmd;
	idnop_t			*idnop = NULL;
	int			error = 0;
	int			argsize;
	procname_t		proc = "idnioctl";

	iocp = (struct iocblk *)mp->b_rptr;
	cmd = iocp->ioc_cmd;

	/*
	 * Intercept DLPI ioctl's.
	 */
	if (VALID_DLPIOP(cmd)) {
		PR_STR("%s: DLPI ioctl(%d)\n", proc, cmd);
		error = idnioc_dlpi(wq, mp, &argsize);
		goto done;
	}

	/*
	 * Validate expected arguments.
	 */
	if (!VALID_IDNIOCTL(cmd)) {
		PR_STR("%s: invalid cmd (0x%x)\n", proc, cmd);
		error = EINVAL;
		goto done;

	} else if (!VALID_NDOP(cmd)) {
		error = miocpullup(mp, sizeof (idnop_t));
		if (error != 0) {
			PR_STR("%s: idnioc(cmd = 0x%x) miocpullup "
			    "failed (%d)\n", proc, cmd, error);
			goto done;
		}
	}

	argsize = mp->b_cont->b_wptr - mp->b_cont->b_rptr;
	idnop = (idnop_t *)mp->b_cont->b_rptr;

	switch (cmd) {
	case IDNIOC_LINK:
		error = idnioc_link(idnop);
		break;

	case IDNIOC_UNLINK:
		error = idnioc_unlink(idnop);
		break;

	case IDNIOC_MEM_RW:
		error = idn_rw_mem(idnop);
		break;

	case IDNIOC_PING:
		error = idn_send_ping(idnop);
		break;

	case ND_SET:
		IDN_GLOCK_EXCL();
		if (!nd_getset(wq, idn_ndlist, mp)) {
			IDN_GUNLOCK();
			error = ENOENT;
			break;
		}
		IDN_GUNLOCK();
		qreply(wq, mp);
		return;

	case ND_GET:
		IDN_GLOCK_SHARED();
		if (!nd_getset(wq, idn_ndlist, mp)) {
			IDN_GUNLOCK();
			error = ENOENT;
			break;
		}
		IDN_GUNLOCK();
		qreply(wq, mp);
		return;

	default:
		PR_STR("%s: invalid cmd 0x%x\n", proc, cmd);
		error = EINVAL;
		break;
	}

done:
	if (error == 0)
		miocack(wq, mp, argsize, 0);
	else
		miocnak(wq, mp, 0, error);
}

/*
 * This thread actually services the SSI_LINK/UNLINK calls
 * asynchronously that come via BBSRAM.  This is necessary
 * since we can't process them from within the context of
 * the interrupt handler in which idn_sigbhandler() is
 * called.
 */
static void
idn_sigbhandler_thread(struct sigbintr **sbpp)
{
	int		d, pri, rv;
	struct sigbintr	*sbp;
	sigbmbox_t	*mbp;
	idn_fin_t	fintype;
	idnsb_data_t	*sdp;
	idnsb_info_t	*sfp;
	idnsb_error_t	*sep;
	idn_domain_t	*dp;
	procname_t	proc = "idn_sigbhandler_thread";


	sbp = *sbpp;

	PR_PROTO("%s: KICKED OFF (sigbintr pointer = 0x%x)\n",
	    proc, (uint_t)sbp);

	ASSERT(sbp == &idn.sigbintr);

	mutex_enter(&idn.sigbintr.sb_mutex);

	while (sbp->sb_busy != IDNSIGB_DIE) {
		cpu_sgnblk_t	*sigbp;

		while ((sbp->sb_busy != IDNSIGB_ACTIVE) &&
		    (sbp->sb_busy != IDNSIGB_DIE)) {
			cv_wait(&sbp->sb_cv, &idn.sigbintr.sb_mutex);
			PR_PROTO("%s: AWAKENED (busy = %d)\n",
			    proc, (int)sbp->sb_busy);
		}
		if (sbp->sb_busy == IDNSIGB_DIE) {
			PR_PROTO("%s: DIE REQUESTED\n", proc);
			break;
		}

		if ((sigbp = cpu_sgnblkp[sbp->sb_cpuid]) == NULL) {
			cmn_err(CE_WARN,
			    "IDN: 124: sigblk for CPU ID %d "
			    "is NULL", sbp->sb_cpuid);
			sbp->sb_busy = IDNSIGB_INACTIVE;
			continue;
		}

		mbp = &sigbp->sigb_host_mbox;

		if (mbp->flag != SIGB_MBOX_BUSY) {
			PR_PROTO("%s: sigblk mbox flag (%d) != BUSY (%d)\n",
			    proc, mbp->flag, SIGB_MBOX_BUSY);
			sbp->sb_busy = IDNSIGB_INACTIVE;
			continue;
		}
		/*
		 * The sb_busy bit is set and the mailbox flag
		 * indicates BUSY also, so we effectively have things locked.
		 * So, we can drop the critical sb_mutex which we want to
		 * do since it pushes us to PIL 14 while we hold it and we
		 * don't want to run at PIL 14 across IDN code.
		 */
		mutex_exit(&idn.sigbintr.sb_mutex);

		sdp = (idnsb_data_t *)mbp->data;
		sep = (idnsb_error_t *)&sdp->ssb_error;
		INIT_IDNKERR(sep);

		if (mbp->len != sizeof (idnsb_data_t)) {
			PR_PROTO("%s: sigblk mbox length (%d) != "
			    "expected (%d)\n", proc, mbp->len,
			    sizeof (idnsb_data_t));
			SET_IDNKERR_ERRNO(sep, EINVAL);
			SET_IDNKERR_IDNERR(sep, IDNKERR_DATA_LEN);
			SET_IDNKERR_PARAM0(sep, sizeof (idnsb_data_t));

			goto sberr;

		}
		if (idn.enabled == 0) {
#ifdef DEBUG
			cmn_err(CE_NOTE,
			    "IDN: 102: driver disabled "
			    "- check OBP environment "
			    "(idn-smr-size)");
#else /* DEBUG */
			cmn_err(CE_NOTE,
			    "!IDN: 102: driver disabled "
			    "- check OBP environment "
			    "(idn-smr-size)");
#endif /* DEBUG */
			SET_IDNKERR_ERRNO(sep, EACCES);
			SET_IDNKERR_IDNERR(sep, IDNKERR_DRV_DISABLED);

			goto sberr;

		}

		switch (mbp->cmd) {

		case SSI_LINK:
		{
			idnsb_link_t	slp;

			bcopy(&sdp->ssb_link, &slp, sizeof (slp));

			if (slp.master_pri < 0) {
				pri = IDNVOTE_MINPRI;
			} else if (slp.master_pri > 0) {
				/*
				 * If I'm already in a IDN network,
				 * then my vote priority is set to
				 * the max, otherwise it's one-less.
				 */
				pri = IDNVOTE_MAXPRI;
				IDN_GLOCK_SHARED();
				if (idn.ndomains <= 1)
					pri--;
				IDN_GUNLOCK();
			} else {
				pri = IDNVOTE_DEFPRI;
			}

			PR_PROTO("%s: SSI_LINK(cpuid = %d, domid = %d, "
			    "pri = %d (req = %d), t/o = %d)\n",
			    proc, slp.cpuid, slp.domid, pri,
			    slp.master_pri, slp.timeout);

			rv = idn_link(slp.domid, slp.cpuid, pri,
			    slp.timeout, sep);
			SET_IDNKERR_ERRNO(sep, rv);
			(void) idn_info(&sdp->ssb_info);
			break;
		}

		case SSI_UNLINK:
		{
			idnsb_unlink_t	sup;
			idn_domain_t	*xdp;
			domainset_t	domset;

			bcopy(&sdp->ssb_unlink, &sup, sizeof (sup));

			PR_PROTO("%s: SSI_UNLINK(c = %d, d = %d, bs = 0x%x, "
			    "f = %d, is = 0x%x, t/o = %d)\n",
			    proc, sup.cpuid, sup.domid, sup.boardset,
			    sup.force, sup.idnset, sup.timeout);

			domset = idn.domset.ds_trans_on |
			    idn.domset.ds_connected |
			    idn.domset.ds_trans_off |
			    idn.domset.ds_awol |
			    idn.domset.ds_relink;

			if (VALID_DOMAINID(sup.domid)) {
				dp = &idn_domain[sup.domid];
			} else if (VALID_CPUID(sup.cpuid)) {
				for (d = 0; d < MAX_DOMAINS; d++) {
					xdp = &idn_domain[d];

					if ((xdp->dcpu == IDN_NIL_DCPU) &&
					    !DOMAIN_IN_SET(domset, d))
						continue;

					if (CPU_IN_SET(xdp->dcpuset,
					    sup.cpuid))
						break;
				}
				dp = (d == MAX_DOMAINS) ? NULL : xdp;
			}
			if ((dp == NULL) && sup.boardset) {
				for (d = 0; d < MAX_DOMAINS; d++) {
					xdp = &idn_domain[d];

					if ((xdp->dcpu == IDN_NIL_DCPU) &&
					    !DOMAIN_IN_SET(domset, d))
						continue;

					if (xdp->dhw.dh_boardset &
					    sup.boardset)
						break;
				}
				dp = (d == MAX_DOMAINS) ? NULL : xdp;
			}
			if (dp == NULL) {
				SET_IDNKERR_ERRNO(sep, EINVAL);
				SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
				SET_IDNKERR_PARAM0(sep, sup.domid);
				SET_IDNKERR_PARAM1(sep, sup.cpuid);
				(void) idn_info(&sdp->ssb_info);
				goto sberr;
			} else {
				sup.domid = dp->domid;
			}

			switch (sup.force) {
			case SSIFORCE_OFF:
				fintype = IDNFIN_NORMAL;
				break;

			case SSIFORCE_SOFT:
				fintype = IDNFIN_FORCE_SOFT;
				break;

			case SSIFORCE_HARD:
				fintype = IDNFIN_FORCE_HARD;
				break;
			default:
				SET_IDNKERR_ERRNO(sep, EINVAL);
				SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_FORCE);
				SET_IDNKERR_PARAM0(sep, sup.force);
				(void) idn_info(&sdp->ssb_info);
				goto sberr;
			}

			rv = idn_unlink(sup.domid, sup.idnset, fintype,
			    IDNFIN_OPT_UNLINK, sup.timeout, sep);
			SET_IDNKERR_ERRNO(sep, rv);
			(void) idn_info(&sdp->ssb_info);
			break;
		}

		case SSI_INFO:
			sfp = &sdp->ssb_info;

			PR_PROTO("%s: SSI_INFO\n", proc);

			rv = idn_info(sfp);
			SET_IDNKERR_ERRNO(sep, rv);
			if (rv != 0) {
				SET_IDNKERR_IDNERR(sep, IDNKERR_INFO_FAILED);
			}
			break;

		default:
			ASSERT(0);
			SET_IDNKERR_ERRNO(sep, EINVAL);
			SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_CMD);
			SET_IDNKERR_PARAM0(sep, mbp->cmd);
			break;
		}

sberr:

		if (GET_IDNKERR_ERRNO(sep) != 0) {
			cmn_err(CE_WARN,
#ifdef DEBUG
			    "IDN: 125: op (%s) failed, returning "
			    "(%d/0x%x [%d, %d, %d])",
#else /* DEBUG */
			    "!IDN: 125: op (%s) failed, returning "
			    "(%d/0x%x [%d, %d, %d])",
#endif /* DEBUG */
			    (mbp->cmd == SSI_LINK) ? "LINK" :
			    (mbp->cmd == SSI_UNLINK) ? "UNLINK" :
			    (mbp->cmd == SSI_INFO) ?
			    "INFO" : "UNKNOWN",
			    GET_IDNKERR_ERRNO(sep),
			    GET_IDNKERR_IDNERR(sep),
			    GET_IDNKERR_PARAM0(sep),
			    GET_IDNKERR_PARAM1(sep),
			    GET_IDNKERR_PARAM2(sep));
		}

		PR_PROTO("%s: returning errno = %d, idnerr = %d, "
		    "params = [%d, %d, %d]\n",
		    proc, GET_IDNKERR_ERRNO(sep), GET_IDNKERR_IDNERR(sep),
		    GET_IDNKERR_PARAM0(sep), GET_IDNKERR_PARAM1(sep),
		    GET_IDNKERR_PARAM2(sep));

		mutex_enter(&idn.sigbintr.sb_mutex);
		ASSERT((sbp->sb_busy == IDNSIGB_ACTIVE) ||
		    (sbp->sb_busy == IDNSIGB_DIE));
		mbp->cmd |= SSI_ACK;
		if (sbp->sb_busy == IDNSIGB_ACTIVE)
			sbp->sb_busy = IDNSIGB_INACTIVE;
		/*
		 * Set flag which kicks off response to SSP.
		 */
		membar_stst_ldst();
		mbp->flag = HOST_TO_CBS;
	}

	/*
	 * Wake up the dude that killed us!
	 */
	idn.sigb_threadp = NULL;
	cv_signal(&sbp->sb_cv);
	mutex_exit(&idn.sigbintr.sb_mutex);
	thread_exit();
}

/*
 * Create the thread that will service sigb interrupts.
 */
static void
idn_sigbhandler_create()
{
	struct sigbintr	*sbp;

	if (idn.sigb_threadp) {
		cmn_err(CE_WARN,
		    "IDN: 126: sigbhandler thread already "
		    "exists (0x%x)", (uint_t)idn.sigb_threadp);
		return;
	}
	cv_init(&idn.sigbintr.sb_cv, NULL, CV_DEFAULT, NULL);
	sbp = &idn.sigbintr;
	sbp->sb_busy = IDNSIGB_INACTIVE;
	idn.sigb_threadp = thread_create(NULL, 0,
	    idn_sigbhandler_thread, &sbp, sizeof (sbp), &p0,
	    TS_RUN, minclsyspri);
	sbp->sb_inum = add_softintr((uint_t)idn_sigbpil,
	    idn_sigbhandler_wakeup, 0);
}

static void
idn_sigbhandler_kill()
{
	if (idn.sigb_threadp) {
		struct sigbintr	*sbp;

		sbp = &idn.sigbintr;
		if (sbp->sb_inum != 0)
			rem_softintr(sbp->sb_inum);
		sbp->sb_inum = 0;
		sbp->sb_busy = IDNSIGB_DIE;
		cv_signal(&sbp->sb_cv);
		while (idn.sigb_threadp != NULL)
			cv_wait(&sbp->sb_cv, &idn.sigbintr.sb_mutex);
		sbp->sb_busy = IDNSIGB_INACTIVE;
		cv_destroy(&sbp->sb_cv);
	}
}

/*ARGSUSED0*/
static uint_t
idn_sigbhandler_wakeup(caddr_t arg1, caddr_t arg2)
{
	mutex_enter(&idn.sigbintr.sb_mutex);
	if (idn.sigbintr.sb_busy == IDNSIGB_STARTED) {
		idn.sigbintr.sb_busy = IDNSIGB_ACTIVE;
		cv_signal(&idn.sigbintr.sb_cv);
	}
	mutex_exit(&idn.sigbintr.sb_mutex);

	return (DDI_INTR_CLAIMED);
}

static void
idn_sigbhandler(processorid_t cpuid, cpu_sgnblk_t *sgnblkp)
{
	struct sigbintr	*sbp = &idn.sigbintr;
	sigbmbox_t	*mbp;
	idnsb_data_t	*sdp;
	idnsb_error_t	*sep;
	uint32_t	cmd;
	int		sigb_lock = 0;

	ASSERT(sgnblkp);

	mbp = &sgnblkp->sigb_host_mbox;
	sdp = (idnsb_data_t *)mbp->data;
	sep = &sdp->ssb_error;
	cmd = mbp->cmd;

	if ((mbp->flag != CBS_TO_HOST) || !VALID_IDNSIGBCMD(cmd)) {
		/*
		 * Not a valid IDN command.  Just bail out.
		 */
		return;
	}

	mbp->flag = SIGB_MBOX_BUSY;
	SET_IDNKERR_ERRNO(sep, 0);

	if (cmd & SSI_ACK) {
		/*
		 * Hmmm...weird, the ACK bit is set.
		 */
		SET_IDNKERR_ERRNO(sep, EPROTO);
		SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_CMD);
		SET_IDNKERR_PARAM0(sep, cmd);
		goto sigb_done;
	}

	if (!mutex_tryenter(&idn.sigbintr.sb_mutex)) {
		/*
		 * Couldn't get the lock.  Driver is either
		 * not quite all the way up or is shutting down
		 * for some reason.  Caller should spin again.
		 */
		cmd |= SSI_ACK;
		SET_IDNKERR_ERRNO(sep, EBUSY);
		SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_LOCKED);
		goto sigb_done;
	}
	sigb_lock = 1;

	if ((idn.sigb_threadp == NULL) ||
	    (sbp->sb_busy == IDNSIGB_NOTREADY)) {
		cmd |= SSI_ACK;
		SET_IDNKERR_ERRNO(sep, EAGAIN);
		SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_NOTRDY);
		goto sigb_done;
	}

	if (sbp->sb_busy != IDNSIGB_INACTIVE) {
		cmd |= SSI_ACK;
		SET_IDNKERR_ERRNO(sep, EBUSY);
		SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_BUSY);
		goto sigb_done;
	}

	sbp->sb_cpuid = (uchar_t)cpuid & 0xff;
	membar_stst_ldst();
	sbp->sb_busy = IDNSIGB_STARTED;
	/*
	 * The sb_busy bit is set and the mailbox flag
	 * indicates BUSY also, so we effectively have things locked.
	 * So, we can drop the critical sb_mutex which we want to
	 * do since it pushes us to PIL 14 while we hold it and we
	 * don't want to run at PIL 14 across IDN code.
2337 * 2338 * Send interrupt to cause idn_sigbhandler_thread to wakeup. 2339 * We cannot do wakeup (cv_signal) directly from here since 2340 * we're executing from a high-level (14) interrupt. 2341 */ 2342 setsoftint(sbp->sb_inum); 2343 2344 sigb_done: 2345 2346 if (GET_IDNKERR_ERRNO(sep) != 0) { 2347 mbp->len = sizeof (idnsb_data_t); 2348 mbp->cmd = cmd; 2349 membar_stst_ldst(); 2350 mbp->flag = HOST_TO_CBS; 2351 } 2352 2353 if (sigb_lock) 2354 mutex_exit(&idn.sigbintr.sb_mutex); 2355 } 2356 2357 static int 2358 idn_info(idnsb_info_t *sfp) 2359 { 2360 int count, d; 2361 idn_domain_t *dp; 2362 idnsb_info_t sinfo; 2363 int local_id, master_id; 2364 procname_t proc = "idn_info"; 2365 2366 bzero(&sinfo, sizeof (sinfo)); 2367 sinfo.master_index = (uchar_t)-1; 2368 sinfo.master_cpuid = (uchar_t)-1; 2369 sinfo.local_index = (uchar_t)-1; 2370 sinfo.local_cpuid = (uchar_t)-1; 2371 2372 IDN_GLOCK_SHARED(); 2373 2374 sinfo.idn_state = (uchar_t)idn.state; 2375 2376 switch (idn.state) { 2377 case IDNGS_OFFLINE: 2378 sinfo.idn_active = SSISTATE_INACTIVE; 2379 PR_PROTO("%s: idn_state (%s) = INACTIVE\n", 2380 proc, idngs_str[idn.state]); 2381 break; 2382 2383 case IDNGS_IGNORE: 2384 PR_PROTO("%s: IGNORING IDN_INFO call...\n", proc); 2385 IDN_GUNLOCK(); 2386 return (EIO); 2387 2388 default: 2389 sinfo.idn_active = SSISTATE_ACTIVE; 2390 PR_PROTO("%s: idn_state (%s) = ACTIVE\n", 2391 proc, idngs_str[idn.state]); 2392 break; 2393 } 2394 master_id = IDN_GET_MASTERID(); 2395 local_id = idn.localid; 2396 2397 /* 2398 * Need to drop idn.grwlock before acquiring domain locks. 2399 */ 2400 IDN_GUNLOCK(); 2401 2402 IDN_SYNC_LOCK(); 2403 2404 sinfo.awol_domset = (ushort_t)idn.domset.ds_awol; 2405 sinfo.conn_domset = (ushort_t)(idn.domset.ds_connected & 2406 ~idn.domset.ds_trans_on); 2407 DOMAINSET_ADD(sinfo.conn_domset, idn.localid); 2408 2409 count = 0; 2410 for (d = 0; d < MAX_DOMAINS; d++) { 2411 dp = &idn_domain[d]; 2412 2413 if (dp->dcpu == IDN_NIL_DCPU) 2414 continue; 2415 2416 IDN_DLOCK_SHARED(d); 2417 if ((dp->dcpu == IDN_NIL_DCPU) || 2418 (dp->dstate == IDNDS_CLOSED)) { 2419 IDN_DUNLOCK(d); 2420 continue; 2421 } 2422 2423 count++; 2424 if (d == local_id) { 2425 sinfo.local_index = (uchar_t)d; 2426 sinfo.local_cpuid = (uchar_t)dp->dcpu; 2427 PR_PROTO("%s: domid %d is LOCAL (cpuid = %d)\n", 2428 proc, d, dp->dcpu); 2429 } 2430 if (d == master_id) { 2431 sinfo.master_index = (uchar_t)d; 2432 sinfo.master_cpuid = (uchar_t)dp->dcpu; 2433 PR_PROTO("%s: domid %d is MASTER (cpuid = %d)\n", 2434 proc, d, dp->dcpu); 2435 } 2436 2437 sinfo.domain_boardset[d] = (ushort_t)dp->dhw.dh_boardset; 2438 2439 IDN_DUNLOCK(d); 2440 } 2441 2442 IDN_SYNC_UNLOCK(); 2443 2444 bcopy(&sinfo, sfp, sizeof (*sfp)); 2445 2446 PR_PROTO("%s: Found %d domains within IDNnet\n", proc, count); 2447 2448 return (0); 2449 } 2450 2451 /* 2452 * ---------------------------------------------- 2453 * ndd param support routines. 2454 * - Borrowed from tcp. 2455 * ---------------------------------------------- 2456 */ 2457 static void 2458 idn_param_cleanup() 2459 { 2460 IDN_GLOCK_EXCL(); 2461 if (!idn.strup && idn_ndlist) 2462 nd_free(&idn_ndlist); 2463 IDN_GUNLOCK(); 2464 } 2465 2466 /*ARGSUSED*/ 2467 static int 2468 idn_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 2469 { 2470 idnparam_t *idnpa = (idnparam_t *)cp; 2471 2472 /* 2473 * lock grabbed before calling nd_getset. 
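 * (The nd framework invokes these get/set routines on behalf of
 * nd_getset(), whose caller already holds the IDN global lock; hence
 * the ASSERTs here instead of explicit locking.  From userland the
 * same nodes are normally reached with ndd, e.g. -- assuming the
 * conventional /dev/idn device node --
 *      ndd /dev/idn idn_global
 *      ndd -set /dev/idn idn_bind_net 2=15
 * where the "net=cpuid" form of the set is parsed by
 * idn_set_net_binding() further below.)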
2474 */ 2475 ASSERT(IDN_GLOCK_IS_HELD()); 2476 2477 (void) mi_mpprintf(mp, "%ld", idnpa->sp_val); 2478 2479 return (0); 2480 } 2481 2482 /*ARGSUSED*/ 2483 static int 2484 idn_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 2485 { 2486 char *end; 2487 ulong_t new_value; 2488 idnparam_t *idnpa = (idnparam_t *)cp; 2489 2490 /* 2491 * lock grabbed before calling nd_getset. 2492 */ 2493 ASSERT(IDN_GLOCK_IS_EXCL()); 2494 2495 new_value = (ulong_t)mi_strtol(value, &end, 10); 2496 2497 if ((end == value) || 2498 (new_value < idnpa->sp_min) || 2499 (new_value > idnpa->sp_max)) 2500 return (EINVAL); 2501 2502 if (idn.enabled == 0) { 2503 #ifdef DEBUG 2504 cmn_err(CE_NOTE, 2505 "IDN: 102: driver disabled " 2506 "- check OBP environment " 2507 "(idn-smr-size)"); 2508 #else /* DEBUG */ 2509 cmn_err(CE_NOTE, 2510 "!IDN: 102: driver disabled " 2511 "- check OBP environment " 2512 "(idn-smr-size)"); 2513 #endif /* DEBUG */ 2514 return (EACCES); 2515 } 2516 2517 idnpa->sp_val = new_value; 2518 2519 return (0); 2520 } 2521 2522 static int 2523 idn_param_register(register idnparam_t *idnpa, int count) 2524 { 2525 ASSERT(IDN_GLOCK_IS_EXCL()); 2526 2527 for (; count > 0; count--, idnpa++) { 2528 if (idnpa->sp_name && idnpa->sp_name[0]) { 2529 register int i; 2530 ndsetf_t set_func; 2531 char *p; 2532 /* 2533 * Don't advertise in non-DEBUG parameters. 2534 */ 2535 for (i = 0; idn_param_debug_only[i]; i++) { 2536 p = idn_param_debug_only[i]; 2537 if (strcmp(idnpa->sp_name, p) == 0) 2538 break; 2539 } 2540 if (idn_param_debug_only[i]) 2541 continue; 2542 2543 /* 2544 * Do not register a "set" function for 2545 * read-only parameters. 2546 */ 2547 for (i = 0; idn_param_read_only[i]; i++) { 2548 p = idn_param_read_only[i]; 2549 if (strcmp(idnpa->sp_name, p) == 0) 2550 break; 2551 } 2552 if (idn_param_read_only[i]) 2553 set_func = NULL; 2554 else 2555 set_func = idn_param_set; 2556 2557 if (!nd_load(&idn_ndlist, idnpa->sp_name, 2558 idn_param_get, set_func, 2559 (caddr_t)idnpa)) { 2560 nd_free(&idn_ndlist); 2561 return (-1); 2562 } 2563 } 2564 } 2565 if (!nd_load(&idn_ndlist, "idn_slabpool", idn_slabpool_report, 2566 NULL, NULL)) { 2567 nd_free(&idn_ndlist); 2568 return (-1); 2569 } 2570 if (!nd_load(&idn_ndlist, "idn_buffers", idn_buffer_report, 2571 NULL, NULL)) { 2572 nd_free(&idn_ndlist); 2573 return (-1); 2574 } 2575 if (!nd_load(&idn_ndlist, "idn_mboxtbl", idn_mboxtbl_report, 2576 NULL, MBXTBL_PART_REPORT)) { 2577 nd_free(&idn_ndlist); 2578 return (-1); 2579 } 2580 if (!nd_load(&idn_ndlist, "idn_mboxtbl_all", idn_mboxtbl_report, 2581 NULL, MBXTBL_FULL_REPORT)) { 2582 nd_free(&idn_ndlist); 2583 return (-1); 2584 } 2585 if (!nd_load(&idn_ndlist, "idn_mainmbox", idn_mainmbox_report, 2586 NULL, NULL)) { 2587 nd_free(&idn_ndlist); 2588 return (-1); 2589 } 2590 if (!nd_load(&idn_ndlist, "idn_global", idn_global_report, 2591 NULL, NULL)) { 2592 nd_free(&idn_ndlist); 2593 return (-1); 2594 } 2595 if (!nd_load(&idn_ndlist, "idn_domain", idn_domain_report, 2596 NULL, (caddr_t)0)) { 2597 nd_free(&idn_ndlist); 2598 return (-1); 2599 } 2600 if (!nd_load(&idn_ndlist, "idn_domain_all", idn_domain_report, 2601 NULL, (caddr_t)1)) { 2602 nd_free(&idn_ndlist); 2603 return (-1); 2604 } 2605 if (!nd_load(&idn_ndlist, "idn_bind_net", idn_get_net_binding, 2606 idn_set_net_binding, NULL)) { 2607 nd_free(&idn_ndlist); 2608 return (-1); 2609 } 2610 2611 return (0); 2612 } 2613 2614 /*ARGSUSED*/ 2615 static int 2616 idn_set_net_binding(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr) 2617 { 2618 char 
*end, *cpup; 2619 long net; 2620 processorid_t cpuid; 2621 2622 /* 2623 * lock grabbed before calling nd_getset. 2624 */ 2625 ASSERT(IDN_GLOCK_IS_EXCL()); 2626 2627 if ((cpup = strchr(value, '=')) == NULL) 2628 return (EINVAL); 2629 2630 *cpup++ = '\0'; 2631 2632 net = mi_strtol(value, &end, 10); 2633 if ((end == value) || (net < 0) || (net >= IDN_MAX_NETS) || 2634 !CHAN_IN_SET(idn.chanset, net)) 2635 return (EINVAL); 2636 2637 cpuid = (processorid_t)mi_strtol(cpup, &end, 10); 2638 if ((end == cpup) || ((cpuid != -1) && 2639 (!VALID_CPUID(cpuid) || 2640 !CPU_IN_SET(cpu_ready_set, cpuid)))) 2641 return (EINVAL); 2642 2643 idn_chanserver_bind(net, cpuid); 2644 2645 return (0); 2646 } 2647 2648 /*ARGSUSED*/ 2649 static int 2650 idn_get_net_binding(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr) 2651 { 2652 int c; 2653 2654 /* 2655 * lock grabbed before calling nd_getset. 2656 */ 2657 ASSERT(IDN_GLOCK_IS_HELD()); 2658 2659 (void) mi_mpprintf(mp, 2660 "IDN network interfaces/channels active = %d", 2661 idn.nchannels); 2662 2663 if (idn.nchannels == 0) 2664 return (0); 2665 2666 mi_mpprintf(mp, "Net Cpu"); 2667 2668 for (c = 0; c < IDN_MAX_NETS; c++) { 2669 int bc; 2670 idn_chansvr_t *csp; 2671 2672 if (!CHAN_IN_SET(idn.chanset, c)) 2673 continue; 2674 2675 csp = &idn.chan_servers[c]; 2676 2677 if ((bc = csp->ch_bound_cpuid) == -1) 2678 bc = csp->ch_bound_cpuid_pending; 2679 2680 if (c < 10) 2681 mi_mpprintf(mp, " %d %d", c, bc); 2682 else 2683 mi_mpprintf(mp, " %d %d", c, bc); 2684 } 2685 2686 return (0); 2687 } 2688 2689 static int 2690 idnioc_link(idnop_t *idnop) 2691 { 2692 int rv; 2693 int pri; 2694 idnsb_error_t err; 2695 procname_t proc = "idnioc_link"; 2696 2697 if (idnop->link.master < 0) 2698 pri = IDNVOTE_MINPRI; 2699 else if (idnop->link.master > 0) 2700 pri = IDNVOTE_MAXPRI; 2701 else 2702 pri = IDNVOTE_DEFPRI; 2703 2704 PR_DRV("%s: domid = %d, cpuid = %d, pri = %d\n", 2705 proc, idnop->link.domid, idnop->link.cpuid, pri); 2706 2707 rv = idn_link(idnop->link.domid, idnop->link.cpuid, 2708 pri, idnop->link.wait, &err); 2709 2710 return (rv); 2711 } 2712 2713 static int 2714 idnioc_unlink(idnop_t *idnop) 2715 { 2716 int d, cpuid, domid, rv; 2717 boardset_t idnset; 2718 idn_fin_t fintype; 2719 idn_domain_t *dp, *xdp; 2720 idnsb_error_t err; 2721 procname_t proc = "idnioc_unlink"; 2722 2723 PR_DRV("%s: domid = %d, cpuid = %d, force = %d\n", 2724 proc, idnop->unlink.domid, idnop->unlink.cpuid, 2725 idnop->unlink.force); 2726 2727 idnset = BOARDSET_ALL; 2728 domid = idnop->unlink.domid; 2729 cpuid = idnop->unlink.cpuid; 2730 dp = NULL; 2731 2732 if (domid == IDN_NIL_DOMID) 2733 domid = idn.localid; 2734 2735 if (VALID_DOMAINID(domid)) { 2736 dp = &idn_domain[domid]; 2737 if (VALID_CPUID(cpuid) && (dp->dcpu != IDN_NIL_DCPU) && 2738 !CPU_IN_SET(dp->dcpuset, cpuid)) { 2739 dp = NULL; 2740 PR_PROTO("%s: ERROR: invalid cpuid " 2741 "(%d) for domain (%d) [cset = 0x%x.x%x]\n", 2742 proc, cpuid, domid, 2743 UPPER32_CPUMASK(dp->dcpuset), 2744 LOWER32_CPUMASK(dp->dcpuset)); 2745 } 2746 } else if (VALID_CPUID(cpuid)) { 2747 for (d = 0; d < MAX_DOMAINS; d++) { 2748 xdp = &idn_domain[d]; 2749 2750 if (xdp->dcpu == IDN_NIL_DCPU) 2751 continue; 2752 2753 if (CPU_IN_SET(xdp->dcpuset, cpuid)) 2754 break; 2755 } 2756 dp = (d == MAX_DOMAINS) ? 
NULL : xdp; 2757 } 2758 2759 if ((dp == NULL) || (dp->dcpu == IDN_NIL_DCPU)) 2760 return (0); 2761 2762 domid = dp->domid; 2763 2764 switch (idnop->unlink.force) { 2765 case SSIFORCE_OFF: 2766 fintype = IDNFIN_NORMAL; 2767 break; 2768 2769 case SSIFORCE_SOFT: 2770 fintype = IDNFIN_FORCE_SOFT; 2771 break; 2772 2773 case SSIFORCE_HARD: 2774 fintype = IDNFIN_FORCE_HARD; 2775 break; 2776 default: 2777 PR_PROTO("%s: invalid force parameter \"%d\"", 2778 proc, idnop->unlink.force); 2779 return (EINVAL); 2780 } 2781 2782 rv = idn_unlink(domid, idnset, fintype, IDNFIN_OPT_UNLINK, 2783 idnop->unlink.wait, &err); 2784 2785 return (rv); 2786 } 2787 2788 static int 2789 idn_send_ping(idnop_t *idnop) 2790 { 2791 int domid = idnop->ping.domid; 2792 int cpuid = idnop->ping.cpuid; 2793 int ocpuid; 2794 idn_domain_t *dp; 2795 idn_msgtype_t mt; 2796 procname_t proc = "idn_send_ping"; 2797 2798 if ((domid == IDN_NIL_DOMID) && (cpuid == IDN_NIL_DCPU)) { 2799 cmn_err(CE_WARN, 2800 "IDN: %s: no valid domain ID or CPU ID given", 2801 proc); 2802 return (EINVAL); 2803 } 2804 if (domid == IDN_NIL_DOMID) 2805 domid = MAX_DOMAINS - 1; 2806 2807 dp = &idn_domain[domid]; 2808 IDN_DLOCK_EXCL(domid); 2809 if ((dp->dcpu == IDN_NIL_DCPU) && (cpuid == IDN_NIL_DCPU)) { 2810 cmn_err(CE_WARN, 2811 "IDN: %s: no valid target CPU specified", 2812 proc); 2813 IDN_DUNLOCK(domid); 2814 return (EINVAL); 2815 } 2816 if (cpuid == IDN_NIL_DCPU) 2817 cpuid = dp->dcpu; 2818 2819 ocpuid = dp->dcpu; 2820 dp->dcpu = cpuid; 2821 2822 /* 2823 * XXX - Need a special PING IDN command. 2824 */ 2825 mt.mt_mtype = IDNP_DATA | IDNP_ACK; 2826 mt.mt_atype = 0; 2827 2828 (void) IDNXDC(domid, &mt, 0x100, 0x200, 0x300, 0x400); 2829 2830 dp->dcpu = ocpuid; 2831 IDN_DUNLOCK(domid); 2832 2833 return (0); 2834 } 2835 2836 /* 2837 * ---------------------------------------------- 2838 */ 2839 static void 2840 idn_dopers_init() 2841 { 2842 int i; 2843 dop_waitlist_t *dwl; 2844 2845 if (idn.dopers) 2846 return; 2847 2848 idn.dopers = GETSTRUCT(struct dopers, 1); 2849 2850 bzero(idn.dopers, sizeof (struct dopers)); 2851 2852 dwl = &idn.dopers->_dop_wcache[0]; 2853 for (i = 0; i < (IDNOP_CACHE_SIZE-1); i++) 2854 dwl[i].dw_next = &dwl[i+1]; 2855 dwl[i].dw_next = NULL; 2856 2857 idn.dopers->dop_freelist = &idn.dopers->_dop_wcache[0]; 2858 idn.dopers->dop_waitcount = 0; 2859 idn.dopers->dop_domset = 0; 2860 idn.dopers->dop_waitlist = NULL; 2861 2862 cv_init(&idn.dopers->dop_cv, NULL, CV_DEFAULT, NULL); 2863 mutex_init(&idn.dopers->dop_mutex, NULL, MUTEX_DEFAULT, NULL); 2864 } 2865 2866 static void 2867 idn_dopers_deinit() 2868 { 2869 dop_waitlist_t *dwl, *next_dwl; 2870 2871 2872 if (idn.dopers == NULL) 2873 return; 2874 2875 for (dwl = idn.dopers->dop_waitlist; dwl; dwl = next_dwl) { 2876 next_dwl = dwl->dw_next; 2877 if (!IDNOP_IN_CACHE(dwl)) 2878 FREESTRUCT(dwl, dop_waitlist_t, 1); 2879 } 2880 2881 cv_destroy(&idn.dopers->dop_cv); 2882 mutex_destroy(&idn.dopers->dop_mutex); 2883 2884 FREESTRUCT(idn.dopers, struct dopers, 1); 2885 idn.dopers = NULL; 2886 } 2887 2888 /* 2889 * Reset the dop_errset field in preparation for an 2890 * IDN operation attempt. This is only called from 2891 * idn_link() and idn_unlink(). 2892 */ 2893 void * 2894 idn_init_op(idn_opflag_t opflag, domainset_t domset, idnsb_error_t *sep) 2895 { 2896 dop_waitlist_t *dwl; 2897 /* 2898 * Clear any outstanding error ops in preparation 2899 * for an IDN (link/unlink) operation. 
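 * The dop_waitlist_t allocated (or pulled off the free cache) here is
 * returned to the caller as an opaque cookie; the caller later hands
 * it to idn_wait_op(), which releases it through idn_deinit_op() once
 * the operation has been resolved.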
2900 */ 2901 mutex_enter(&idn.dopers->dop_mutex); 2902 if ((dwl = idn.dopers->dop_freelist) == NULL) { 2903 dwl = GETSTRUCT(dop_waitlist_t, 1); 2904 } else { 2905 idn.dopers->dop_freelist = dwl->dw_next; 2906 bzero(dwl, sizeof (*dwl)); 2907 } 2908 dwl->dw_op = opflag; 2909 dwl->dw_reqset = domset; 2910 dwl->dw_idnerr = sep; 2911 dwl->dw_next = idn.dopers->dop_waitlist; 2912 2913 idn.dopers->dop_waitlist = dwl; 2914 idn.dopers->dop_waitcount++; 2915 idn.dopers->dop_domset |= domset; 2916 mutex_exit(&idn.dopers->dop_mutex); 2917 2918 return (dwl); 2919 } 2920 2921 /* 2922 * Anybody waiting on a opflag operation for any one 2923 * of the domains in domset, needs to be updated to 2924 * additionally wait for new domains in domset. 2925 * This is used, for example, when needing to connect 2926 * to more domains than known at the time of the 2927 * original request. 2928 */ 2929 void 2930 idn_add_op(idn_opflag_t opflag, domainset_t domset) 2931 { 2932 dop_waitlist_t *dwl; 2933 2934 mutex_enter(&idn.dopers->dop_mutex); 2935 if ((idn.dopers->dop_waitcount == 0) || 2936 ((idn.dopers->dop_domset & domset) == 0)) { 2937 mutex_exit(&idn.dopers->dop_mutex); 2938 return; 2939 } 2940 for (dwl = idn.dopers->dop_waitlist; dwl; dwl = dwl->dw_next) 2941 if ((dwl->dw_op == opflag) && (dwl->dw_reqset & domset)) 2942 dwl->dw_reqset |= domset; 2943 mutex_exit(&idn.dopers->dop_mutex); 2944 } 2945 2946 /* 2947 * Mechanism to wakeup any potential users which may be waiting 2948 * for a link/unlink operation to complete. If an error occurred 2949 * don't update dop_errset unless there was no previous error. 2950 */ 2951 void 2952 idn_update_op(idn_opflag_t opflag, domainset_t domset, idnsb_error_t *sep) 2953 { 2954 int do_wakeup = 0; 2955 dop_waitlist_t *dw; 2956 procname_t proc = "idn_update_op"; 2957 2958 mutex_enter(&idn.dopers->dop_mutex); 2959 /* 2960 * If there are no waiters, or nobody is waiting for 2961 * the particular domainset in question, then 2962 * just bail. 2963 */ 2964 if ((idn.dopers->dop_waitcount == 0) || 2965 ((idn.dopers->dop_domset & domset) == 0)) { 2966 mutex_exit(&idn.dopers->dop_mutex); 2967 PR_PROTO("%s: NO waiters exist (domset=0x%x)\n", 2968 proc, domset); 2969 return; 2970 } 2971 2972 for (dw = idn.dopers->dop_waitlist; dw; dw = dw->dw_next) { 2973 int d; 2974 domainset_t dset, rset; 2975 2976 if ((dset = dw->dw_reqset & domset) == 0) 2977 continue; 2978 2979 if (opflag == IDNOP_ERROR) { 2980 dw->dw_errset |= dset; 2981 if (sep) { 2982 for (d = 0; d < MAX_DOMAINS; d++) { 2983 if (!DOMAIN_IN_SET(dset, d)) 2984 continue; 2985 2986 dw->dw_errors[d] = 2987 (short)GET_IDNKERR_ERRNO(sep); 2988 } 2989 bcopy(sep, dw->dw_idnerr, sizeof (*sep)); 2990 } 2991 } else if (opflag == dw->dw_op) { 2992 dw->dw_domset |= dset; 2993 } 2994 2995 /* 2996 * Check if all the domains are spoken for that 2997 * a particular waiter may have been waiting for. 2998 * If there's at least one, we'll need to broadcast. 
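 * All waiters share the single dop_cv, so one cv_broadcast() below
 * wakes everybody and each waiter re-evaluates its own request set.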
2999 */ 3000 rset = (dw->dw_errset | dw->dw_domset) & dw->dw_reqset; 3001 if (rset == dw->dw_reqset) 3002 do_wakeup++; 3003 } 3004 3005 PR_PROTO("%s: found %d waiters ready for wakeup\n", proc, do_wakeup); 3006 3007 if (do_wakeup > 0) 3008 cv_broadcast(&idn.dopers->dop_cv); 3009 3010 mutex_exit(&idn.dopers->dop_mutex); 3011 } 3012 3013 void 3014 idn_deinit_op(void *cookie) 3015 { 3016 domainset_t domset; 3017 dop_waitlist_t *hw, *tw; 3018 dop_waitlist_t *dwl = (dop_waitlist_t *)cookie; 3019 3020 mutex_enter(&idn.dopers->dop_mutex); 3021 3022 ASSERT(idn.dopers->dop_waitlist); 3023 3024 if (dwl == idn.dopers->dop_waitlist) { 3025 idn.dopers->dop_waitlist = dwl->dw_next; 3026 if (IDNOP_IN_CACHE(dwl)) { 3027 dwl->dw_next = idn.dopers->dop_freelist; 3028 idn.dopers->dop_freelist = dwl; 3029 } else { 3030 FREESTRUCT(dwl, dop_waitlist_t, 1); 3031 } 3032 } else { 3033 for (tw = idn.dopers->dop_waitlist, hw = tw->dw_next; 3034 hw; 3035 tw = hw, hw = hw->dw_next) { 3036 if (dwl == hw) 3037 break; 3038 } 3039 ASSERT(hw); 3040 3041 tw->dw_next = hw->dw_next; 3042 } 3043 3044 /* 3045 * Recompute domainset for which waiters might be waiting. 3046 * It's possible there may be other waiters waiting for 3047 * the same domainset that the current waiter that's leaving 3048 * may have been waiting for, so we can't simply delete 3049 * the leaving waiter's domainset from dop_domset. 3050 */ 3051 for (hw = idn.dopers->dop_waitlist, domset = 0; hw; hw = hw->dw_next) 3052 domset |= hw->dw_reqset; 3053 3054 idn.dopers->dop_waitcount--; 3055 idn.dopers->dop_domset = domset; 3056 3057 mutex_exit(&idn.dopers->dop_mutex); 3058 } 3059 3060 /* 3061 * Wait until the specified operation succeeds or fails with 3062 * respect to the given domains. Note the function terminates 3063 * if at least one error occurs. 3064 * This process is necessary since link/unlink operations occur 3065 * asynchronously and we need some way of waiting to find out 3066 * if it indeed completed. 3067 * Timeout value is received indirectly from the SSP and 3068 * represents seconds. 3069 */ 3070 int 3071 idn_wait_op(void *cookie, domainset_t *domsetp, int wait_timeout) 3072 { 3073 int d, rv, err = 0; 3074 dop_waitlist_t *dwl; 3075 3076 3077 dwl = (dop_waitlist_t *)cookie; 3078 3079 ASSERT(wait_timeout > 0); 3080 ASSERT((dwl->dw_op == IDNOP_CONNECTED) || 3081 (dwl->dw_op == IDNOP_DISCONNECTED)); 3082 3083 mutex_enter(&idn.dopers->dop_mutex); 3084 3085 while (((dwl->dw_domset | dwl->dw_errset) != dwl->dw_reqset) && !err) { 3086 rv = cv_timedwait_sig(&idn.dopers->dop_cv, 3087 &idn.dopers->dop_mutex, 3088 lbolt + (wait_timeout * hz)); 3089 3090 if ((dwl->dw_domset | dwl->dw_errset) == dwl->dw_reqset) 3091 break; 3092 3093 switch (rv) { 3094 case -1: 3095 /* 3096 * timed out 3097 */ 3098 cmn_err(CE_WARN, 3099 "!IDN: 129: %s operation timed out", 3100 (dwl->dw_op == IDNOP_CONNECTED) ? "LINK" : 3101 (dwl->dw_op == IDNOP_DISCONNECTED) ? "UNLINK" : 3102 "UNKNOWN"); 3103 /*FALLTHROUGH*/ 3104 case 0: 3105 /* 3106 * signal, e.g. kill(2) 3107 */ 3108 err = 1; 3109 break; 3110 3111 default: 3112 break; 3113 } 3114 } 3115 3116 if (dwl->dw_domset == dwl->dw_reqset) { 3117 rv = 0; 3118 } else { 3119 /* 3120 * Op failed for some domains or we were awakened. 
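 * Return the first non-zero per-domain errno recorded in dw_errors[];
 * if none was recorded we simply gave up waiting and return 0.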
3121 */ 3122 for (d = rv = 0; (d < MAX_DOMAINS) && !rv; d++) 3123 rv = dwl->dw_errors[d]; 3124 } 3125 *domsetp = dwl->dw_domset; 3126 3127 mutex_exit(&idn.dopers->dop_mutex); 3128 3129 idn_deinit_op(cookie); 3130 3131 return (rv); 3132 } 3133 3134 /* 3135 * -------------------------------------------------- 3136 * Return any valid (& ready) cpuid for the given board based on 3137 * the given cpuset. 3138 * -------------------------------------------------- 3139 */ 3140 int 3141 board_to_ready_cpu(int board, cpuset_t cpuset) 3142 { 3143 int base_cpuid; 3144 int ncpu_board = MAX_CPU_PER_BRD; 3145 3146 board *= ncpu_board; 3147 for (base_cpuid = board; 3148 base_cpuid < (board + ncpu_board); 3149 base_cpuid++) 3150 if (CPU_IN_SET(cpuset, base_cpuid)) 3151 return (base_cpuid); 3152 3153 return (-1); 3154 } 3155 3156 void 3157 idn_domain_resetentry(idn_domain_t *dp) 3158 { 3159 register int i; 3160 procname_t proc = "idn_domain_resetentry"; 3161 3162 ASSERT(dp); 3163 ASSERT(dp->dstate == IDNDS_CLOSED); 3164 ASSERT(IDN_DLOCK_IS_EXCL(dp->domid)); 3165 ASSERT(IDN_GLOCK_IS_EXCL()); 3166 3167 ASSERT(dp->domid == (dp - &idn_domain[0])); 3168 3169 IDN_FSTATE_TRANSITION(dp, IDNFIN_OFF); 3170 dp->dname[0] = '\0'; 3171 dp->dnetid = (ushort_t)-1; 3172 dp->dmtu = 0; 3173 dp->dbufsize = 0; 3174 dp->dslabsize = 0; 3175 dp->dnwrsize = 0; 3176 dp->dncpus = 0; 3177 dp->dcpuindex = 0; 3178 CPUSET_ZERO(dp->dcpuset); 3179 dp->dcpu = dp->dcpu_last = dp->dcpu_save = IDN_NIL_DCPU; 3180 dp->dvote.ticket = 0; 3181 dp->dslab = NULL; 3182 dp->dslab_state = DSLAB_STATE_UNKNOWN; 3183 dp->dnslabs = 0; 3184 dp->dio = 0; 3185 dp->dioerr = 0; 3186 lock_clear(&dp->diowanted); 3187 bzero(&dp->dhw, sizeof (dp->dhw)); 3188 dp->dxp = NULL; 3189 IDN_XSTATE_TRANSITION(dp, IDNXS_NIL); 3190 dp->dsync.s_cmd = IDNSYNC_NIL; 3191 dp->dfin_sync = IDNFIN_SYNC_OFF; 3192 IDN_RESET_COOKIES(dp->domid); 3193 dp->dcookie_err = 0; 3194 bzero(&dp->dawol, sizeof (dp->dawol)); 3195 dp->dtmp = -1; 3196 3197 if (dp->dtimerq.tq_queue != NULL) { 3198 PR_PROTO("%s: WARNING: MSG timerq not empty (count = %d)\n", 3199 proc, dp->dtimerq.tq_count); 3200 IDN_MSGTIMER_STOP(dp->domid, 0, 0); 3201 } 3202 3203 for (i = 0; i < NCPU; i++) 3204 dp->dcpumap[i] = (uchar_t)-1; 3205 } 3206 3207 int 3208 idn_open_domain(int domid, int cpuid, uint_t ticket) 3209 { 3210 int c, new_cpuid; 3211 idn_domain_t *dp, *ldp; 3212 procname_t proc = "idn_open_domain"; 3213 3214 ASSERT(IDN_SYNC_IS_LOCKED()); 3215 ASSERT(IDN_DLOCK_IS_EXCL(domid)); 3216 3217 if (!VALID_DOMAINID(domid)) { 3218 PR_PROTO("%s: INVALID domainid (%d) " 3219 "[cpuid = %d, ticket = 0x%x]\n", 3220 proc, domid, cpuid, ticket); 3221 return (-1); 3222 } 3223 3224 dp = &idn_domain[domid]; 3225 ldp = &idn_domain[idn.localid]; 3226 3227 if (dp->dcpu >= 0) { 3228 PR_PROTO("%s:%d: domain already OPEN (state = %s)\n", 3229 proc, domid, idnds_str[dp->dstate]); 3230 return (1); 3231 } 3232 3233 if (DOMAIN_IN_SET(idn.domset.ds_relink, domid)) { 3234 if (dp->dcpu_save == IDN_NIL_DCPU) 3235 new_cpuid = cpuid; 3236 else 3237 new_cpuid = dp->dcpu_save; 3238 } else { 3239 new_cpuid = cpuid; 3240 } 3241 3242 if (new_cpuid == IDN_NIL_DCPU) { 3243 PR_PROTO("%s:%d: WARNING: invalid cpuid (%d) specified\n", 3244 proc, domid, new_cpuid); 3245 return (-1); 3246 } 3247 3248 IDN_GLOCK_EXCL(); 3249 3250 idn_domain_resetentry(dp); 3251 3252 PR_STATE("%s:%d: requested cpuid %d, assigning cpuid %d\n", 3253 proc, domid, cpuid, new_cpuid); 3254 3255 idn_assign_cookie(domid); 3256 3257 dp->dcpu = dp->dcpu_save = new_cpuid; 3258 dp->dvote.ticket = 
ticket; 3259 CPUSET_ADD(dp->dcpuset, new_cpuid); 3260 dp->dncpus = 1; 3261 for (c = 0; c < NCPU; c++) 3262 dp->dcpumap[c] = (uchar_t)new_cpuid; 3263 dp->dhw.dh_nboards = 1; 3264 dp->dhw.dh_boardset = BOARDSET(CPUID_TO_BOARDID(new_cpuid)); 3265 3266 if (domid != idn.localid) 3267 IDN_DLOCK_EXCL(idn.localid); 3268 3269 if (idn.ndomains == 1) { 3270 struct hwconfig local_hw; 3271 3272 /* 3273 * We're attempting to connect to our first domain. 3274 * Recheck our local hardware configuration before 3275 * we go any further in case it changed due to a DR, 3276 * and update any structs dependent on this. 3277 * ASSUMPTION: 3278 * IDN is unlinked before performing any DRs. 3279 */ 3280 PR_PROTO("%s: RECHECKING local HW config.\n", proc); 3281 if (get_hw_config(&local_hw)) { 3282 dp->dcpu = IDN_NIL_DCPU; 3283 cmn_err(CE_WARN, 3284 "IDN: 118: hardware config not appropriate"); 3285 if (domid != idn.localid) 3286 IDN_DUNLOCK(idn.localid); 3287 IDN_GUNLOCK(); 3288 return (-1); 3289 } 3290 (void) update_local_hw_config(ldp, &local_hw); 3291 } 3292 3293 idn.ndomains++; 3294 3295 if (domid != idn.localid) 3296 IDN_DUNLOCK(idn.localid); 3297 IDN_GUNLOCK(); 3298 3299 IDN_MBOX_LOCK(domid); 3300 dp->dmbox.m_tbl = NULL; 3301 3302 if (domid != idn.localid) { 3303 dp->dmbox.m_send = idn_mainmbox_init(domid, 3304 IDNMMBOX_TYPE_SEND); 3305 dp->dmbox.m_recv = idn_mainmbox_init(domid, 3306 IDNMMBOX_TYPE_RECV); 3307 } else { 3308 /* 3309 * The local domain does not need send/recv 3310 * mailboxes in its idn_domain[] entry. 3311 */ 3312 dp->dmbox.m_send = NULL; 3313 dp->dmbox.m_recv = NULL; 3314 } 3315 IDN_MBOX_UNLOCK(domid); 3316 3317 PR_PROTO("%s:%d: new domain (cpu = %d, vote = 0x%x)\n", 3318 proc, domid, dp->dcpu, dp->dvote.ticket); 3319 3320 return (0); 3321 } 3322 3323 /* 3324 * The local domain never "closes" itself unless the driver 3325 * is doing a idndetach. It will be reopened during idnattach 3326 * when idn_domains_init is called. 
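 * idn_close_domain() expects the domain to already have transitioned
 * to IDNDS_CLOSED; it only tears down the mailbox state, removes the
 * domain from the various domsets and finally invalidates dcpu, which
 * is what marks the entry as closed to idn_open_domain().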
3327 */ 3328 void 3329 idn_close_domain(int domid) 3330 { 3331 uint_t token; 3332 idn_domain_t *dp; 3333 procname_t proc = "idn_close_domain"; 3334 3335 ASSERT(IDN_SYNC_IS_LOCKED()); 3336 ASSERT(IDN_DLOCK_IS_EXCL(domid)); 3337 3338 dp = &idn_domain[domid]; 3339 3340 ASSERT(dp->dstate == IDNDS_CLOSED); 3341 3342 if (dp->dcpu == IDN_NIL_DCPU) { 3343 PR_PROTO("%s:%d: DOMAIN ALREADY CLOSED!\n", 3344 proc, domid); 3345 return; 3346 } 3347 3348 token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL); 3349 3350 (void) idn_retry_terminate(token); 3351 3352 DOMAINSET_DEL(idn.domset.ds_trans_on, domid); 3353 DOMAINSET_DEL(idn.domset.ds_ready_on, domid); 3354 DOMAINSET_DEL(idn.domset.ds_connected, domid); 3355 DOMAINSET_DEL(idn.domset.ds_trans_off, domid); 3356 DOMAINSET_DEL(idn.domset.ds_ready_off, domid); 3357 DOMAINSET_DEL(idn.domset.ds_hwlinked, domid); 3358 DOMAINSET_DEL(idn.domset.ds_flush, domid); 3359 3360 idn_sync_exit(domid, IDNSYNC_CONNECT); 3361 idn_sync_exit(domid, IDNSYNC_DISCONNECT); 3362 3363 IDN_GLOCK_EXCL(); 3364 3365 if (DOMAIN_IN_SET(idn.domset.ds_awol, domid)) 3366 idn_clear_awol(domid); 3367 3368 idn.ndomains--; 3369 3370 IDN_GUNLOCK(); 3371 3372 IDN_MBOX_LOCK(domid); 3373 dp->dmbox.m_tbl = NULL; 3374 3375 if (dp->dmbox.m_send) { 3376 idn_mainmbox_deinit(domid, dp->dmbox.m_send); 3377 dp->dmbox.m_send = NULL; 3378 } 3379 3380 if (dp->dmbox.m_recv) { 3381 idn_mainmbox_deinit(domid, dp->dmbox.m_recv); 3382 dp->dmbox.m_recv = NULL; 3383 } 3384 3385 IDN_MBOX_UNLOCK(domid); 3386 3387 cmn_err(CE_NOTE, 3388 "!IDN: 142: link (domain %d, CPU %d) disconnected", 3389 dp->domid, dp->dcpu); 3390 3391 dp->dcpu = IDN_NIL_DCPU; /* ultimate demise */ 3392 3393 IDN_RESET_COOKIES(domid); 3394 3395 ASSERT(dp->dio <= 0); 3396 ASSERT(dp->dioerr == 0); 3397 ASSERT(dp->dslab == NULL); 3398 ASSERT(dp->dnslabs == 0); 3399 3400 IDN_GKSTAT_GLOBAL_EVENT(gk_unlinks, gk_unlink_last); 3401 } 3402 3403 3404 /* 3405 * ----------------------------------------------------------------------- 3406 */ 3407 static void 3408 idn_domains_init(struct hwconfig *local_hw) 3409 { 3410 register int i, d; 3411 idn_domain_t *ldp; 3412 uchar_t *cpumap; 3413 3414 ASSERT(local_hw != NULL); 3415 3416 cpumap = GETSTRUCT(uchar_t, NCPU * MAX_DOMAINS); 3417 3418 for (d = 0; d < MAX_DOMAINS; d++) { 3419 register idn_domain_t *dp; 3420 3421 dp = &idn_domain[d]; 3422 3423 dp->domid = d; 3424 3425 rw_init(&dp->drwlock, NULL, RW_DEFAULT, NULL); 3426 3427 IDN_TIMERQ_INIT(&dp->dtimerq); 3428 3429 dp->dstate = IDNDS_CLOSED; 3430 3431 mutex_init(&dp->dmbox.m_mutex, NULL, MUTEX_DEFAULT, NULL); 3432 3433 dp->dcpumap = cpumap; 3434 3435 rw_init(&dp->dslab_rwlock, NULL, RW_DEFAULT, NULL); 3436 3437 IDN_DLOCK_EXCL(d); 3438 IDN_GLOCK_EXCL(); 3439 3440 idn_domain_resetentry(dp); 3441 3442 IDN_GUNLOCK(); 3443 3444 IDNSB_DOMAIN_UPDATE(dp); 3445 3446 IDN_DUNLOCK(d); 3447 3448 cpumap += NCPU; 3449 } 3450 3451 IDN_SYNC_LOCK(); 3452 3453 /* 3454 * Update local domain information. 
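 * The local entry is (re)opened through idn_open_domain() below, then
 * filled in from utsname and the supplied hardware config, and left
 * in IDNDS_CONNECTED since a domain is always "linked" to itself.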
3455 */ 3456 ASSERT(idn.smr.locpfn); 3457 ASSERT(local_hw->dh_nboards && local_hw->dh_boardset); 3458 3459 idn.ndomains = 0; /* note that open_domain will get us to 1 */ 3460 3461 IDN_DLOCK_EXCL(idn.localid); 3462 d = idn_open_domain(idn.localid, (int)CPU->cpu_id, 0); 3463 ASSERT(d == 0); 3464 IDN_GLOCK_EXCL(); 3465 IDN_SET_MASTERID(IDN_NIL_DOMID); 3466 IDN_SET_NEW_MASTERID(IDN_NIL_DOMID); 3467 3468 ldp = &idn_domain[idn.localid]; 3469 3470 strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1); 3471 ldp->dname[MAXDNAME-1] = '\0'; 3472 bcopy(local_hw, &ldp->dhw, sizeof (ldp->dhw)); 3473 ASSERT(idn.ndomains == 1); 3474 ASSERT((ldp->dhw.dh_nboards > 0) && 3475 (ldp->dhw.dh_nboards <= MAX_BOARDS)); 3476 ldp->dnetid = IDN_DOMID2NETID(ldp->domid); 3477 ldp->dmtu = IDN_MTU; 3478 ldp->dbufsize = IDN_SMR_BUFSIZE; 3479 ldp->dslabsize = (short)IDN_SLAB_BUFCOUNT; 3480 ldp->dnwrsize = (short)IDN_NWR_SIZE; 3481 ldp->dcpuset = cpu_ready_set; 3482 ldp->dncpus = (short)ncpus; 3483 ldp->dvote.ticket = IDNVOTE_INITIAL_TICKET; 3484 ldp->dvote.v.master = 0; 3485 ldp->dvote.v.nmembrds = ldp->dhw.dh_nmcadr - 1; 3486 ldp->dvote.v.ncpus = (int)ldp->dncpus - 1; 3487 ldp->dvote.v.board = CPUID_TO_BOARDID(ldp->dcpu); 3488 i = -1; 3489 for (d = 0; d < NCPU; d++) { 3490 BUMP_INDEX(ldp->dcpuset, i); 3491 ldp->dcpumap[d] = (uchar_t)i; 3492 } 3493 3494 CPUSET_ZERO(idn.dc_cpuset); 3495 CPUSET_OR(idn.dc_cpuset, ldp->dcpuset); 3496 idn.dc_boardset = ldp->dhw.dh_boardset; 3497 3498 /* 3499 * Setting the state for ourselves is only relevant 3500 * for loopback performance testing. Anyway, it 3501 * makes sense that we always have an established 3502 * connection with ourself regardless of IDN :-o 3503 */ 3504 IDN_DSTATE_TRANSITION(ldp, IDNDS_CONNECTED); 3505 3506 IDN_GUNLOCK(); 3507 IDN_DUNLOCK(idn.localid); 3508 IDN_SYNC_UNLOCK(); 3509 } 3510 3511 static void 3512 idn_domains_deinit() 3513 { 3514 register int d; 3515 3516 IDN_SYNC_LOCK(); 3517 IDN_DLOCK_EXCL(idn.localid); 3518 IDN_DSTATE_TRANSITION(&idn_domain[idn.localid], IDNDS_CLOSED); 3519 idn_close_domain(idn.localid); 3520 IDN_DUNLOCK(idn.localid); 3521 IDN_SYNC_UNLOCK(); 3522 idn.localid = IDN_NIL_DOMID; 3523 3524 FREESTRUCT(idn_domain[0].dcpumap, uchar_t, NCPU * MAX_DOMAINS); 3525 3526 for (d = 0; d < MAX_DOMAINS; d++) { 3527 idn_domain_t *dp; 3528 3529 dp = &idn_domain[d]; 3530 3531 rw_destroy(&dp->dslab_rwlock); 3532 mutex_destroy(&dp->dmbox.m_mutex); 3533 rw_destroy(&dp->drwlock); 3534 IDN_TIMERQ_DEINIT(&dp->dtimerq); 3535 dp->dcpumap = NULL; 3536 } 3537 } 3538 3539 /* 3540 * ----------------------------------------------------------------------- 3541 */ 3542 static void 3543 idn_retrytask_init() 3544 { 3545 ASSERT(idn.retryqueue.rq_cache == NULL); 3546 3547 mutex_init(&idn.retryqueue.rq_mutex, NULL, MUTEX_DEFAULT, NULL); 3548 idn.retryqueue.rq_cache = kmem_cache_create("idn_retryjob_cache", 3549 sizeof (idn_retry_job_t), 3550 0, NULL, NULL, NULL, 3551 NULL, NULL, 0); 3552 } 3553 3554 static void 3555 idn_retrytask_deinit() 3556 { 3557 if (idn.retryqueue.rq_cache == NULL) 3558 return; 3559 3560 kmem_cache_destroy(idn.retryqueue.rq_cache); 3561 mutex_destroy(&idn.retryqueue.rq_mutex); 3562 3563 bzero(&idn.retryqueue, sizeof (idn.retryqueue)); 3564 } 3565 3566 /* 3567 * ----------------------------------------------------------------------- 3568 */ 3569 static void 3570 idn_timercache_init() 3571 { 3572 ASSERT(idn.timer_cache == NULL); 3573 3574 idn.timer_cache = kmem_cache_create("idn_timer_cache", 3575 sizeof (idn_timer_t), 3576 0, NULL, NULL, NULL, 3577 NULL, NULL, 
0); 3578 } 3579 3580 static void 3581 idn_timercache_deinit() 3582 { 3583 if (idn.timer_cache == NULL) 3584 return; 3585 3586 kmem_cache_destroy(idn.timer_cache); 3587 idn.timer_cache = NULL; 3588 } 3589 3590 idn_timer_t * 3591 idn_timer_alloc() 3592 { 3593 idn_timer_t *tp; 3594 3595 tp = kmem_cache_alloc(idn.timer_cache, KM_SLEEP); 3596 bzero(tp, sizeof (*tp)); 3597 tp->t_forw = tp->t_back = tp; 3598 3599 return (tp); 3600 } 3601 3602 void 3603 idn_timer_free(idn_timer_t *tp) 3604 { 3605 if (tp == NULL) 3606 return; 3607 kmem_cache_free(idn.timer_cache, tp); 3608 } 3609 3610 void 3611 idn_timerq_init(idn_timerq_t *tq) 3612 { 3613 mutex_init(&tq->tq_mutex, NULL, MUTEX_DEFAULT, NULL); 3614 tq->tq_count = 0; 3615 tq->tq_queue = NULL; 3616 } 3617 3618 void 3619 idn_timerq_deinit(idn_timerq_t *tq) 3620 { 3621 ASSERT(tq->tq_queue == NULL); 3622 mutex_destroy(&tq->tq_mutex); 3623 } 3624 3625 /* 3626 * Dequeue all the timers of the given subtype from the 3627 * given timerQ. If subtype is 0, then dequeue all the 3628 * timers. 3629 */ 3630 idn_timer_t * 3631 idn_timer_get(idn_timerq_t *tq, int type, ushort_t tcookie) 3632 { 3633 register idn_timer_t *tp, *tphead; 3634 3635 ASSERT(IDN_TIMERQ_IS_LOCKED(tq)); 3636 3637 if ((tp = tq->tq_queue) == NULL) 3638 return (NULL); 3639 3640 if (!type) { 3641 tq->tq_queue = NULL; 3642 tq->tq_count = 0; 3643 tphead = tp; 3644 } else { 3645 int count; 3646 idn_timer_t *tpnext; 3647 3648 tphead = NULL; 3649 count = tq->tq_count; 3650 do { 3651 tpnext = tp->t_forw; 3652 if ((tp->t_type == type) && 3653 (!tcookie || (tp->t_cookie == tcookie))) { 3654 tp->t_forw->t_back = tp->t_back; 3655 tp->t_back->t_forw = tp->t_forw; 3656 if (tphead == NULL) { 3657 tp->t_forw = tp->t_back = tp; 3658 } else { 3659 tp->t_forw = tphead; 3660 tp->t_back = tphead->t_back; 3661 tp->t_back->t_forw = tp; 3662 tphead->t_back = tp; 3663 } 3664 tphead = tp; 3665 if (--(tq->tq_count) == 0) 3666 tq->tq_queue = NULL; 3667 else if (tq->tq_queue == tp) 3668 tq->tq_queue = tpnext; 3669 } 3670 tp = tpnext; 3671 } while (--count > 0); 3672 } 3673 3674 if (tphead) { 3675 tphead->t_back->t_forw = NULL; 3676 3677 for (tp = tphead; tp; tp = tp->t_forw) 3678 tp->t_onq = 0; 3679 } 3680 3681 return (tphead); 3682 } 3683 3684 ushort_t 3685 idn_timer_start(idn_timerq_t *tq, idn_timer_t *tp, clock_t tval) 3686 { 3687 idn_timer_t *otp; 3688 ushort_t tcookie; 3689 procname_t proc = "idn_timer_start"; 3690 STRING(str); 3691 3692 ASSERT(tq && tp && (tval > 0)); 3693 ASSERT((tp->t_forw == tp) && (tp->t_back == tp)); 3694 ASSERT(tp->t_type != 0); 3695 3696 IDN_TIMERQ_LOCK(tq); 3697 /* 3698 * Assign a unique non-zero 8-bit cookie to this timer 3699 * if the caller hasn't already preassigned one. 3700 */ 3701 while ((tcookie = tp->t_cookie) == 0) { 3702 tp->t_cookie = (tp->t_type & 0xf) | 3703 ((++tq->tq_cookie & 0xf) << 4); 3704 /* 3705 * Calculated cookie must never conflict 3706 * with the public timer cookie. 3707 */ 3708 ASSERT(tp->t_cookie != IDN_TIMER_PUBLIC_COOKIE); 3709 } 3710 3711 /* 3712 * First have to remove old timers of the 3713 * same type and cookie, and get rid of them. 3714 */ 3715 otp = idn_timer_get(tq, tp->t_type, tcookie); 3716 3717 tq->tq_count++; 3718 3719 if (tq->tq_queue == NULL) { 3720 tq->tq_queue = tp; 3721 ASSERT((tp->t_forw == tp) && (tp->t_back == tp)); 3722 } else { 3723 /* 3724 * Put me at the end of the list. 
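 * (The timerQ is a circular doubly-linked list anchored at tq_queue,
 * so linking in just ahead of the head entry places us at the tail.)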
3725 */ 3726 tp->t_forw = tq->tq_queue; 3727 tp->t_back = tq->tq_queue->t_back; 3728 tp->t_back->t_forw = tp; 3729 tp->t_forw->t_back = tp; 3730 } 3731 3732 tp->t_onq = 1; 3733 tp->t_q = tq; 3734 tp->t_id = timeout(idn_timer_expired, (caddr_t)tp, tval); 3735 3736 3737 INUM2STR(tp->t_type, str); 3738 PR_TIMER("%s: started %s timer (domain = %d, cookie = 0x%x)\n", 3739 proc, str, tp->t_domid, tcookie); 3740 3741 IDN_TIMERQ_UNLOCK(tq); 3742 3743 if (otp) 3744 (void) idn_timer_stopall(otp); 3745 3746 return (tcookie); 3747 } 3748 3749 /* 3750 * Stop all timers of the given subtype. 3751 * If subtype is 0, then stop all timers 3752 * in this timerQ. 3753 */ 3754 void 3755 idn_timer_stop(idn_timerq_t *tq, int type, ushort_t tcookie) 3756 { 3757 idn_timer_t *tphead; 3758 procname_t proc = "idn_timer_stop"; 3759 STRING(str); 3760 3761 ASSERT(tq); 3762 3763 INUM2STR(type, str); 3764 3765 IDN_TIMERQ_LOCK(tq); 3766 3767 if (tq->tq_count == 0) { 3768 PR_TIMER("%s: found no %s timers (count=0)\n", proc, str); 3769 IDN_TIMERQ_UNLOCK(tq); 3770 return; 3771 } 3772 tphead = idn_timer_get(tq, type, tcookie); 3773 #ifdef DEBUG 3774 if (tphead == NULL) 3775 PR_TIMER("%s: found no %s (cookie = 0x%x) " 3776 "timers (count=%d)!!\n", 3777 proc, str, tcookie, tq->tq_count); 3778 #endif /* DEBUG */ 3779 IDN_TIMERQ_UNLOCK(tq); 3780 3781 if (tphead) 3782 (void) idn_timer_stopall(tphead); 3783 } 3784 3785 int 3786 idn_timer_stopall(idn_timer_t *tp) 3787 { 3788 int count = 0; 3789 int nonactive; 3790 uint_t type; 3791 idn_timer_t *ntp; 3792 procname_t proc = "idn_timer_stopall"; 3793 STRING(str); 3794 3795 nonactive = 0; 3796 3797 if (tp) { 3798 /* 3799 * Circle should have been broken. 3800 */ 3801 ASSERT(tp->t_back->t_forw == NULL); 3802 type = tp->t_type; 3803 INUM2STR(type, str); 3804 } 3805 3806 for (; tp; tp = ntp) { 3807 ntp = tp->t_forw; 3808 count++; 3809 ASSERT(tp->t_id != (timeout_id_t)0); 3810 if (untimeout(tp->t_id) < 0) { 3811 nonactive++; 3812 PR_TIMER("%s: bad %s untimeout (domain=%d)\n", 3813 proc, str, tp->t_domid); 3814 } else { 3815 PR_TIMER("%s: good %s untimeout (domain=%d)\n", 3816 proc, str, tp->t_domid); 3817 } 3818 /* 3819 * There are two possible outcomes from 3820 * the untimeout(). Each ultimately result 3821 * in us having to free the timeout structure. 3822 * 3823 * 1. We successfully aborted a timeout call. 3824 * 3825 * 2. We failed to find the given timer. He 3826 * probably just fired off. 3827 */ 3828 idn_timer_free(tp); 3829 } 3830 PR_TIMER("%s: stopped %d of %d %s timers\n", 3831 proc, count - nonactive, count, str); 3832 3833 return (count); 3834 } 3835 3836 void 3837 idn_timer_dequeue(idn_timerq_t *tq, idn_timer_t *tp) 3838 { 3839 ASSERT(tq && tp); 3840 ASSERT(IDN_TIMERQ_IS_LOCKED(tq)); 3841 3842 ASSERT(tp->t_q == tq); 3843 3844 if (tp->t_onq == 0) { 3845 /* 3846 * We've already been dequeued. 3847 */ 3848 ASSERT(tp == tp->t_forw); 3849 ASSERT(tp == tp->t_back); 3850 } else { 3851 /* 3852 * We're still in the queue, get out. 
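 * Unlink ourselves and leave t_forw/t_back pointing back at the timer
 * itself, which is the state the "already dequeued" check above expects.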
3853 */ 3854 if (tq->tq_queue == tp) 3855 tq->tq_queue = tp->t_forw; 3856 tp->t_forw->t_back = tp->t_back; 3857 tp->t_back->t_forw = tp->t_forw; 3858 tp->t_onq = 0; 3859 if (--(tq->tq_count) == 0) { 3860 ASSERT(tq->tq_queue == tp); 3861 tq->tq_queue = NULL; 3862 } 3863 tp->t_forw = tp->t_back = tp; 3864 } 3865 } 3866 3867 /* 3868 * ----------------------------------------------------------------------- 3869 */ 3870 /*ARGSUSED*/ 3871 static int 3872 idn_slabpool_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 3873 { 3874 register int p, nfree; 3875 char dsetstr[128]; 3876 3877 ASSERT(IDN_GLOCK_IS_HELD()); 3878 3879 if (idn.slabpool == NULL) { 3880 (void) mi_mpprintf(mp, 3881 "IDN slabpool not initialized (masterid = %d)", 3882 IDN_GET_MASTERID()); 3883 return (0); 3884 } 3885 3886 for (p = nfree = 0; p < idn.slabpool->npools; p++) 3887 nfree += idn.slabpool->pool[p].nfree; 3888 3889 (void) mi_mpprintf(mp, 3890 "IDN slabpool (ntotal_slabs = %d, nalloc = %d, " 3891 "npools = %d)", 3892 idn.slabpool->ntotslabs, 3893 idn.slabpool->ntotslabs - nfree, 3894 idn.slabpool->npools); 3895 3896 (void) mi_mpprintf(mp, "pool nslabs nfree domains"); 3897 3898 for (p = 0; p < idn.slabpool->npools; p++) { 3899 register int d, s; 3900 uint_t domset; 3901 3902 domset = 0; 3903 for (s = 0; s < idn.slabpool->pool[p].nslabs; s++) { 3904 short dd; 3905 3906 dd = idn.slabpool->pool[p].sarray[s].sl_domid; 3907 if (dd != (short)IDN_NIL_DOMID) 3908 DOMAINSET_ADD(domset, dd); 3909 } 3910 dsetstr[0] = '\0'; 3911 if (domset) { 3912 for (d = 0; d < MAX_DOMAINS; d++) { 3913 if (!DOMAIN_IN_SET(domset, d)) 3914 continue; 3915 3916 if (dsetstr[0] == '\0') 3917 (void) sprintf(dsetstr, "%d", d); 3918 else 3919 (void) sprintf(dsetstr, "%s %d", 3920 dsetstr, d); 3921 } 3922 } 3923 3924 if (p < 10) 3925 (void) mi_mpprintf(mp, " %d %d %d %s", 3926 p, idn.slabpool->pool[p].nslabs, 3927 idn.slabpool->pool[p].nfree, 3928 dsetstr); 3929 else 3930 (void) mi_mpprintf(mp, " %d %d %d %s", 3931 p, idn.slabpool->pool[p].nslabs, 3932 idn.slabpool->pool[p].nfree, 3933 dsetstr); 3934 } 3935 return (0); 3936 } 3937 3938 /*ARGSUSED*/ 3939 static int 3940 idn_buffer_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 3941 { 3942 smr_slab_t *sp; 3943 register int d, cnt; 3944 int bufcount[MAX_DOMAINS]; 3945 int spl; 3946 3947 ASSERT(IDN_GLOCK_IS_HELD()); 3948 3949 if (idn.localid == IDN_NIL_DOMID) { 3950 (void) mi_mpprintf(mp, "IDN not initialized (localid = %d)", 3951 idn.localid); 3952 return (0); 3953 } 3954 3955 (void) mi_mpprintf(mp, "Local domain has %d slabs allocated.", 3956 idn_domain[idn.localid].dnslabs); 3957 3958 DSLAB_LOCK_SHARED(idn.localid); 3959 if ((sp = idn_domain[idn.localid].dslab) == NULL) { 3960 DSLAB_UNLOCK(idn.localid); 3961 return (0); 3962 } 3963 3964 bzero(bufcount, sizeof (bufcount)); 3965 cnt = 0; 3966 3967 spl = splhi(); 3968 for (; sp; sp = sp->sl_next) { 3969 smr_slabbuf_t *bp; 3970 3971 while (!lock_try(&sp->sl_lock)) 3972 ; 3973 for (bp = sp->sl_inuse; bp; bp = bp->sb_next) { 3974 bufcount[bp->sb_domid]++; 3975 cnt++; 3976 } 3977 lock_clear(&sp->sl_lock); 3978 } 3979 splx(spl); 3980 3981 DSLAB_UNLOCK(idn.localid); 3982 3983 (void) mi_mpprintf(mp, "Local domain has %d buffers outstanding.", cnt); 3984 if (cnt == 0) 3985 return (0); 3986 3987 (void) mi_mpprintf(mp, "Domain nbufs"); 3988 for (d = 0; d < MAX_DOMAINS; d++) 3989 if (bufcount[d]) { 3990 if (d < 10) 3991 (void) mi_mpprintf(mp, " %d %d", 3992 d, bufcount[d]); 3993 else 3994 (void) mi_mpprintf(mp, " %d %d", 3995 d, bufcount[d]); 3996 } 3997 
3998 return (0); 3999 } 4000 4001 static const char * 4002 _get_spaces(int w, int s, int W) 4003 { 4004 static const char *const _spaces[] = { 4005 "", /* 0 */ 4006 " ", /* 1 */ 4007 " ", /* 2 */ 4008 " ", /* 3 */ 4009 " ", /* 4 */ 4010 " ", /* 5 */ 4011 " ", /* 6 */ 4012 " ", /* 7 */ 4013 " ", /* 8 */ 4014 " ", /* 9 */ 4015 " ", /* 10 */ 4016 " ", /* 11 */ 4017 " ", /* 12 */ 4018 " ", /* 13 */ 4019 " ", /* 14 */ 4020 " ", /* 15 */ 4021 " ", /* 16 */ 4022 " ", /* 17 */ 4023 " ", /* 18 */ 4024 " ", /* 19 */ 4025 }; 4026 return (_spaces[w+s-W]); 4027 } 4028 4029 #define _SSS(X, W, w, s) \ 4030 (((w) >= (W)) && (X)) ? _get_spaces((w), (s), (W)) 4031 4032 static const char * 4033 _hexspace(uint64_t v, int sz, int width, int padding) 4034 { 4035 int maxnbl = 16; 4036 int diff; 4037 uchar_t *np; 4038 4039 diff = sizeof (uint64_t) - sz; 4040 np = (uchar_t *)&v + diff; 4041 maxnbl -= diff << 1; 4042 while (sz-- > 0) { 4043 if ((*np & 0xf0) && (width >= maxnbl)) 4044 return (_get_spaces(width, padding, maxnbl)); 4045 maxnbl--; 4046 if ((*np & 0x0f) && (width >= maxnbl)) 4047 return (_get_spaces(width, padding, maxnbl)); 4048 maxnbl--; 4049 np++; 4050 } 4051 return (_get_spaces(width, padding, 1)); 4052 } 4053 4054 #define HEXSPACE(v, t, w, s) _hexspace((uint64_t)(v), sizeof (t), (w), (s)) 4055 4056 #define DECSPACE(n, w, s) \ 4057 (_SSS((n) >= 10000000, 8, (w), (s)) : \ 4058 _SSS((n) >= 1000000, 7, (w), (s)) : \ 4059 _SSS((n) >= 100000, 6, (w), (s)) : \ 4060 _SSS((n) >= 10000, 5, (w), (s)) : \ 4061 _SSS((n) >= 1000, 4, (w), (s)) : \ 4062 _SSS((n) >= 100, 3, (w), (s)) : \ 4063 _SSS((n) >= 10, 2, (w), (s)) : \ 4064 _get_spaces((w), (s), 1)) 4065 4066 #define MBXINFO(mtp) \ 4067 &mtp->mt_header, \ 4068 HEXSPACE(&mtp->mt_header, &mtp->mt_header, 16, 2), \ 4069 mtp->mt_header.mh_svr_ready_ptr, \ 4070 HEXSPACE(mtp->mt_header.mh_svr_ready_ptr, \ 4071 mtp->mt_header.mh_svr_ready_ptr, 8, 1), \ 4072 mtp->mt_header.mh_svr_active_ptr, \ 4073 HEXSPACE(mtp->mt_header.mh_svr_active_ptr, \ 4074 mtp->mt_header.mh_svr_active_ptr, 8, 2), \ 4075 *(ushort_t *)(IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_ready_ptr)), \ 4076 DECSPACE(*(ushort_t *) \ 4077 (IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_ready_ptr)), \ 4078 1, 1), \ 4079 *(ushort_t *)(IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_active_ptr)), \ 4080 DECSPACE(*(ushort_t *) \ 4081 (IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_active_ptr)), \ 4082 1, 5), \ 4083 mtp->mt_header.mh_cookie, \ 4084 HEXSPACE(mtp->mt_header.mh_cookie, \ 4085 mtp->mt_header.mh_cookie, 8, 2), \ 4086 &mtp->mt_queue[0], \ 4087 HEXSPACE(&mtp->mt_queue[0], &mtp->mt_queue[0], 16, 2) 4088 4089 /*ARGSUSED*/ 4090 static int 4091 idn_mboxtbl_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 4092 { 4093 register int c, n, domid, subdomid; 4094 register idn_mboxtbl_t *mtp; 4095 register idn_mboxmsg_t *msp; 4096 idn_mboxtbl_t *map, *mtbasep; 4097 4098 4099 ASSERT((cp == MBXTBL_PART_REPORT) || (cp == MBXTBL_FULL_REPORT)); 4100 4101 if (IDN_GLOCK_TRY_SHARED() == 0) { 4102 (void) mi_mpprintf(mp, "Local domain busy, try again."); 4103 return (0); 4104 } 4105 4106 if ((map = idn.mboxarea) == NULL) { 4107 (void) mi_mpprintf(mp, 4108 "WARNING: Local domain is not master, " 4109 "ASSUMING idn.smr.vaddr."); 4110 map = (idn_mboxtbl_t *)idn.smr.vaddr; 4111 } 4112 4113 if (map) { 4114 (void) mi_mpprintf(mp, "Mailbox Area starts @ 0x%p", 4115 map); 4116 } else { 4117 (void) mi_mpprintf(mp, "Mailbox Area not found."); 4118 goto repdone; 4119 } 4120 4121 if (!idn.nchannels) { 4122 (void) mi_mpprintf(mp, "No OPEN channels found"); 4123 
goto repdone; 4124 } 4125 4126 for (c = 0; c < IDN_MAX_NETS; c++) { 4127 4128 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]); 4129 if (!IDN_CHANNEL_IS_ATTACHED(&idn.chan_servers[c])) { 4130 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]); 4131 continue; 4132 } 4133 4134 (void) mi_mpprintf(mp, 4135 "Channel %d ---------------------------" 4136 "--------------------------" 4137 "-----------------------------", c); 4138 (void) mi_mpprintf(mp, 4139 " Domain Header " 4140 "Ready/Active Ptrs " 4141 "rdy/actv cookie Queue " 4142 "busy"); 4143 4144 for (domid = 0; domid < MAX_DOMAINS; domid++) { 4145 register int busy_count; 4146 4147 if ((cp == MBXTBL_PART_REPORT) && 4148 (idn_domain[domid].dcpu == IDN_NIL_DCPU)) 4149 continue; 4150 4151 mtbasep = IDN_MBOXAREA_BASE(map, domid); 4152 4153 for (subdomid = 0; subdomid < MAX_DOMAINS; 4154 subdomid++) { 4155 mtp = IDN_MBOXTBL_PTR(mtbasep, subdomid); 4156 mtp = IDN_MBOXTBL_PTR_CHAN(mtp, c); 4157 4158 if (subdomid == domid) { 4159 if (subdomid == 0) 4160 (void) mi_mpprintf(mp, 4161 " %x.%x-%d%s%s", 4162 domid, subdomid, c, 4163 /*CONSTCOND*/ 4164 DECSPACE(c, 2, 2), 4165 "-- unused --"); 4166 else 4167 (void) mi_mpprintf(mp, 4168 " .%x-%d%s%s", 4169 subdomid, c, 4170 /*CONSTCOND*/ 4171 DECSPACE(c, 2, 2), 4172 "-- unused --"); 4173 continue; 4174 } 4175 busy_count = 0; 4176 msp = &mtp->mt_queue[0]; 4177 for (n = 0; n < IDN_MMBOX_NUMENTRIES; n++) { 4178 if (msp[n].ms_owner) 4179 busy_count++; 4180 } 4181 if (subdomid == 0) { 4182 (void) mi_mpprintf(mp, 4183 " %x.%x-%d%s%p%s%x%s/ %x%s" 4184 "%d%s/ %d%s%x%s%p%s%d%s", 4185 domid, subdomid, c, 4186 /*CONSTCOND*/ 4187 DECSPACE(c, 2, 2), 4188 MBXINFO(mtp), busy_count, 4189 busy_count ? " <<<<<":""); 4190 } else { 4191 (void) mi_mpprintf(mp, 4192 " .%x-%d%s%p%s%x%s/ %x%s" 4193 "%d%s/ %d%s%x%s%p%s%d%s", 4194 subdomid, c, 4195 /*CONSTCOND*/ 4196 DECSPACE(c, 2, 2), 4197 MBXINFO(mtp), busy_count, 4198 busy_count ? " <<<<<":""); 4199 } 4200 } 4201 } 4202 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]); 4203 } 4204 4205 repdone: 4206 IDN_GUNLOCK(); 4207 4208 return (0); 4209 } 4210 4211 /*ARGSUSED*/ 4212 static void 4213 idn_mainmbox_domain_report(queue_t *wq, mblk_t *mp, int domid, 4214 idn_mainmbox_t *mmp, char *mbxtype) 4215 { 4216 register int c; 4217 4218 if (mmp == NULL) { 4219 (void) mi_mpprintf(mp, " %x.%s -- none --", domid, mbxtype); 4220 return; 4221 } 4222 4223 for (c = 0; c < IDN_MAX_NETS; mmp++, c++) { 4224 int mm_count; 4225 4226 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]); 4227 if (IDN_CHANNEL_IS_DETACHED(&idn.chan_servers[c])) { 4228 (void) mi_mpprintf(mp, " %x.%s %u -- not open --", 4229 domid, mbxtype, (int)mmp->mm_channel); 4230 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]); 4231 continue; 4232 } 4233 4234 mm_count = ((mmp->mm_count < 0) ? 
0 : mmp->mm_count) / 1000; 4235 4236 (void) mi_mpprintf(mp, " %x.%s %d%s%d%s%d%s%p%s%p%s%p%s%d/%d", 4237 domid, mbxtype, 4238 (int)mmp->mm_channel, 4239 /*CONSTCOND*/ 4240 DECSPACE((int)mmp->mm_channel, 5, 2), 4241 mm_count, DECSPACE(mm_count, 8, 2), 4242 mmp->mm_dropped, 4243 DECSPACE(mmp->mm_dropped, 8, 2), 4244 mmp->mm_smr_mboxp, 4245 HEXSPACE(mmp->mm_smr_mboxp, 4246 mmp->mm_smr_mboxp, 16, 2), 4247 mmp->mm_smr_readyp, 4248 HEXSPACE(mmp->mm_smr_readyp, 4249 mmp->mm_smr_readyp, 16, 2), 4250 mmp->mm_smr_activep, 4251 HEXSPACE(mmp->mm_smr_activep, 4252 mmp->mm_smr_activep, 16, 2), 4253 mmp->mm_qiget, mmp->mm_qiput); 4254 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]); 4255 } 4256 } 4257 4258 /*ARGSUSED2*/ 4259 static int 4260 idn_mainmbox_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 4261 { 4262 int domid; 4263 int header = 0; 4264 4265 /* 4266 * Domain 0 never has a send/recv mainmbox so 4267 * don't bother printing him. 4268 */ 4269 for (domid = 1; domid < MAX_DOMAINS; domid++) { 4270 idn_domain_t *dp; 4271 4272 dp = &idn_domain[domid]; 4273 4274 if (dp->dcpu == IDN_NIL_DCPU) 4275 continue; 4276 IDN_DLOCK_SHARED(domid); 4277 if (dp->dcpu == IDN_NIL_DCPU) { 4278 IDN_DUNLOCK(domid); 4279 continue; 4280 } 4281 if (!header) { 4282 (void) mi_mpprintf(mp, 4283 "Domain Chan PktCntK " 4284 "PktDrop SMRMbox " 4285 "ReadyPtr " 4286 "ActvPtr Miget/Miput"); 4287 header = 1; 4288 } 4289 4290 mutex_enter(&dp->dmbox.m_mutex); 4291 idn_mainmbox_domain_report(wq, mp, domid, 4292 idn_domain[domid].dmbox.m_send, 4293 "snd"); 4294 idn_mainmbox_domain_report(wq, mp, domid, 4295 idn_domain[domid].dmbox.m_recv, 4296 "rcv"); 4297 mutex_exit(&dp->dmbox.m_mutex); 4298 4299 IDN_DUNLOCK(domid); 4300 4301 (void) mi_mpprintf(mp, 4302 " ---------------------------------------" 4303 "------------------------" 4304 "----------------------------"); 4305 } 4306 4307 if (!header) 4308 (void) mi_mpprintf(mp, "No ACTIVE domain connections exist"); 4309 4310 return (0); 4311 } 4312 4313 /*ARGSUSED*/ 4314 static int 4315 idn_global_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 4316 { 4317 int i, nactive, masterid, nretry; 4318 uint_t locpfn_upper, locpfn_lower, 4319 rempfn_upper, rempfn_lower; 4320 uint_t marea_upper, marea_lower, 4321 iarea_upper, iarea_lower; 4322 char alt_dbuffer[64]; 4323 idn_retry_job_t *rp; 4324 domainset_t retryset; 4325 domainset_t connected; 4326 idn_synczone_t *zp; 4327 idn_syncop_t *sp; 4328 idn_domain_t *dp; 4329 char *dbp, *dbuffer; 4330 4331 if (IDN_SYNC_TRYLOCK() == 0) { 4332 (void) mi_mpprintf(mp, "Sync lock busy, try again."); 4333 return (0); 4334 } 4335 4336 if (IDN_GLOCK_TRY_SHARED() == 0) { 4337 (void) mi_mpprintf(mp, "Local domain busy, try again."); 4338 IDN_SYNC_UNLOCK(); 4339 return (0); 4340 } 4341 if ((dbp = dbuffer = ALLOC_DISPSTRING()) == NULL) 4342 dbp = alt_dbuffer; 4343 4344 (void) mi_mpprintf(mp, "IDN\n Global State = %s (%d)", 4345 idngs_str[idn.state], idn.state); 4346 4347 (void) mi_mpprintf(mp, "SMR"); 4348 (void) mi_mpprintf(mp, " vaddr "); 4349 (void) mi_mpprintf(mp, " 0x%p", idn.smr.vaddr); 4350 4351 (void) mi_mpprintf(mp, " paddr-local paddr-remote"); 4352 masterid = IDN_GET_MASTERID(); 4353 locpfn_upper = (uint_t)(idn.smr.locpfn >> (32 - PAGESHIFT)); 4354 locpfn_lower = (uint_t)(idn.smr.locpfn << PAGESHIFT); 4355 if (idn.smr.rempfn == PFN_INVALID) { 4356 rempfn_upper = rempfn_lower = 0; 4357 } else { 4358 rempfn_upper = (uint_t)(idn.smr.rempfn >> (32 - PAGESHIFT)); 4359 rempfn_lower = (uint_t)(idn.smr.rempfn << PAGESHIFT); 4360 } 4361 (void) 
mi_mpprintf(mp, " 0x%x.%x%s0x%x.%x", 4362 locpfn_upper, locpfn_lower, 4363 HEXSPACE(locpfn_lower, locpfn_lower, 8, 4364 (locpfn_upper < 0x10) ? 4 : 3), 4365 rempfn_upper, rempfn_lower); 4366 4367 (void) mi_mpprintf(mp, " SMR length = %d MBytes", IDN_SMR_SIZE); 4368 (void) mi_mpprintf(mp, " SMR bufsize = %d Bytes", IDN_SMR_BUFSIZE); 4369 (void) mi_mpprintf(mp, " NWR length = %d MBytes", IDN_NWR_SIZE); 4370 marea_upper = (uint_t)((uint64_t)IDN_MBOXAREA_SIZE >> 32); 4371 marea_lower = (uint_t)((uint64_t)IDN_MBOXAREA_SIZE & 0xffffffff); 4372 iarea_upper = (uint_t)((uint64_t)(MB2B(IDN_NWR_SIZE) - 4373 (size_t)IDN_MBOXAREA_SIZE) >> 32); 4374 iarea_lower = (uint_t)((MB2B(IDN_NWR_SIZE) - 4375 (size_t)IDN_MBOXAREA_SIZE) & 0xffffffff); 4376 (void) mi_mpprintf(mp, 4377 " [ mbox area = 0x%x.%x Bytes, " 4378 "iobuf area = 0x%x.%x Bytes ]", 4379 marea_upper, marea_lower, iarea_upper, iarea_lower); 4380 4381 (void) mi_mpprintf(mp, 4382 "\nIDNnet (local domain [id:%d] [name:%s] is %s)", 4383 idn.localid, 4384 idn_domain[idn.localid].dname, 4385 (masterid == IDN_NIL_DOMID) ? "IDLE" : 4386 (idn.localid == masterid) ? "MASTER" : 4387 "SLAVE"); 4388 nactive = 0; 4389 for (i = 0; i < IDN_MAX_NETS; i++) { 4390 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[i]); 4391 if (IDN_CHANNEL_IS_ACTIVE(&idn.chan_servers[i])) 4392 nactive++; 4393 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[i]); 4394 } 4395 (void) mi_mpprintf(mp, " I/O Networks: (Open = %d, " 4396 "Active = %d, Max = %d)", 4397 idn.nchannels, nactive, IDN_MAX_NETS); 4398 (void) mi_mpprintf(mp, " Number of Domains = %d", idn.ndomains); 4399 (void) mi_mpprintf(mp, " Number of AWOLs = %d", idn.nawols); 4400 /* 4401 * During connect domains can possibly be in ds_connected 4402 * while still in ds_trans_on. Only once they leave ds_trans_on 4403 * are they really connected. 4404 */ 4405 connected = idn.domset.ds_connected & ~idn.domset.ds_trans_on; 4406 DOMAINSET_ADD(connected, idn.localid); 4407 boardset2str(connected, dbp); 4408 (void) mi_mpprintf(mp, " Connected Domains = %s", dbp); 4409 domainset2str(idn.domset.ds_trans_on, dbp); 4410 (void) mi_mpprintf(mp, " Pending Domain Links = %s", 4411 idn.domset.ds_trans_on ? dbp : "<>"); 4412 domainset2str(idn.domset.ds_trans_off, dbp); 4413 (void) mi_mpprintf(mp, " Pending Domain Unlinks = %s", 4414 idn.domset.ds_trans_off ? dbp : "<>"); 4415 mutex_enter(&idn.retryqueue.rq_mutex); 4416 nretry = idn.retryqueue.rq_count; 4417 retryset = 0; 4418 for (i = 0, rp = idn.retryqueue.rq_jobs; i < nretry; i++, 4419 rp = rp->rj_next) { 4420 int domid; 4421 4422 domid = IDN_RETRY_TOKEN2DOMID(rp->rj_token); 4423 if (VALID_DOMAINID(domid)) { 4424 DOMAINSET_ADD(retryset, domid); 4425 } 4426 } 4427 mutex_exit(&idn.retryqueue.rq_mutex); 4428 domainset2str(retryset, dbp); 4429 (void) mi_mpprintf(mp, " Retry Jobs:Domains = %d:%s", 4430 nretry, retryset ? dbp : "<>"); 4431 domainset2str(idn.domset.ds_hitlist, dbp); 4432 (void) mi_mpprintf(mp, " Hitlist Domains = %s", 4433 idn.domset.ds_hitlist ? dbp : "<>"); 4434 domainset2str(idn.domset.ds_relink, dbp); 4435 (void) mi_mpprintf(mp, " Reconfig Domains = %s", 4436 idn.domset.ds_relink ? 
dbp : "<>"); 4437 if (idn.domset.ds_relink) 4438 (void) mi_mpprintf(mp, " new master id = %d", 4439 IDN_GET_NEW_MASTERID()); 4440 if (masterid == IDN_NIL_DOMID) { 4441 (void) mi_mpprintf(mp, " Master Domain: no master"); 4442 } else { 4443 idn_domain_t *mdp; 4444 4445 mdp = &idn_domain[masterid]; 4446 4447 (void) mi_mpprintf(mp, 4448 " Master Domain (id:name/brds - state):"); 4449 4450 if (strlen(mdp->dname) > 0) 4451 strcpy(dbp, mdp->dname); 4452 else 4453 boardset2str(mdp->dhw.dh_boardset, dbp); 4454 if (masterid < 10) 4455 (void) mi_mpprintf(mp, " %d: %s - %s", 4456 masterid, dbp, 4457 idnds_str[mdp->dstate]); 4458 else 4459 (void) mi_mpprintf(mp, " %d: %s - %s", 4460 masterid, dbp, 4461 idnds_str[mdp->dstate]); 4462 } 4463 if (idn.ndomains <= 1) { 4464 (void) mi_mpprintf(mp, " Slave Domains: none"); 4465 } else { 4466 int d; 4467 4468 (void) mi_mpprintf(mp, 4469 " Slave Domains (id:name/brds - state):"); 4470 for (d = 0; d < MAX_DOMAINS; d++) { 4471 dp = &idn_domain[d]; 4472 4473 if ((dp->dcpu == IDN_NIL_DCPU) || (d == masterid)) 4474 continue; 4475 4476 if (strlen(dp->dname) > 0) 4477 strcpy(dbp, dp->dname); 4478 else 4479 boardset2str(dp->dhw.dh_boardset, dbp); 4480 if (d < 10) 4481 (void) mi_mpprintf(mp, " %d: %s - %s", 4482 d, dbp, 4483 idnds_str[dp->dstate]); 4484 else 4485 (void) mi_mpprintf(mp, " %d: %s - %s", 4486 d, dbp, 4487 idnds_str[dp->dstate]); 4488 } 4489 } 4490 4491 if (idn.nawols == 0) { 4492 (void) mi_mpprintf(mp, " AWOL Domains: none"); 4493 } else { 4494 int d; 4495 4496 (void) mi_mpprintf(mp, " AWOL Domains (id:name/brds):"); 4497 for (d = 0; d < MAX_DOMAINS; d++) { 4498 dp = &idn_domain[d]; 4499 4500 if (!DOMAIN_IN_SET(idn.domset.ds_awol, d) || 4501 (dp->dcpu == IDN_NIL_DCPU)) 4502 continue; 4503 4504 if (strlen(dp->dname) > 0) 4505 strcpy(dbp, dp->dname); 4506 else 4507 boardset2str(dp->dhw.dh_boardset, dbp); 4508 if (d < 10) 4509 (void) mi_mpprintf(mp, " %d: %s", 4510 d, dbp); 4511 else 4512 (void) mi_mpprintf(mp, " %d: %s", 4513 d, dbp); 4514 } 4515 } 4516 4517 /*CONSTCOND*/ 4518 i = IDN_SYNC_GETZONE(IDNSYNC_CONNECT); 4519 zp = &idn.sync.sz_zone[i]; 4520 if (zp->sc_cnt == 0) { 4521 (void) mi_mpprintf(mp, " Sync Zone (con): [empty]"); 4522 } else { 4523 (void) mi_mpprintf(mp, " Sync Zone (con): [%d domains]", 4524 zp->sc_cnt); 4525 sp = zp->sc_op; 4526 for (i = 0; (i < zp->sc_cnt) && sp; i++) { 4527 (void) mi_mpprintf(mp, 4528 " " 4529 "%x: x_set =%s0x%x, r_set =%s0x%x", 4530 sp->s_domid, 4531 HEXSPACE(sp->s_set_exp, 4532 sp->s_set_exp, 4, 1), 4533 sp->s_set_exp, 4534 HEXSPACE(sp->s_set_rdy, 4535 sp->s_set_rdy, 4, 1), 4536 sp->s_set_rdy); 4537 sp = sp->s_next; 4538 } 4539 } 4540 /*CONSTCOND*/ 4541 i = IDN_SYNC_GETZONE(IDNSYNC_DISCONNECT); 4542 zp = &idn.sync.sz_zone[i]; 4543 if (zp->sc_cnt == 0) { 4544 (void) mi_mpprintf(mp, " Sync Zone (dis): [empty]"); 4545 } else { 4546 (void) mi_mpprintf(mp, " Sync Zone (dis): [%d domains]", 4547 zp->sc_cnt); 4548 sp = zp->sc_op; 4549 for (i = 0; (i < zp->sc_cnt) && sp; i++) { 4550 (void) mi_mpprintf(mp, 4551 " " 4552 "%x: x_set =%s0x%x, r_set =%s0x%x", 4553 sp->s_domid, 4554 HEXSPACE(sp->s_set_exp, 4555 sp->s_set_exp, 4, 1), 4556 sp->s_set_exp, 4557 HEXSPACE(sp->s_set_rdy, 4558 sp->s_set_rdy, 4, 1), 4559 sp->s_set_rdy); 4560 sp = sp->s_next; 4561 } 4562 } 4563 4564 IDN_GUNLOCK(); 4565 IDN_SYNC_UNLOCK(); 4566 4567 if (dbuffer) { 4568 FREE_DISPSTRING(dbuffer); 4569 } 4570 4571 return (0); 4572 } 4573 4574 /*ARGSUSED*/ 4575 static int 4576 idn_domain_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr) 4577 { 4578 int d, 
/*ARGSUSED*/
static int
idn_domain_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
{
	int		d, nchan;
	uint_t		domset;
	idn_chanset_t	chanset;
	idn_domain_t	*dp;
	uint_t		pset_upper, pset_lower;
	char		*dbuffer, *dbp;
	char		alt_dbuffer[64];

	if (IDN_SYNC_TRYLOCK() == 0) {
		(void) mi_mpprintf(mp, "Sync lock busy, try again.");
		return (0);
	}

	if (IDN_GLOCK_TRY_SHARED() == 0) {
		(void) mi_mpprintf(mp, "Local domain busy, try again.");
		IDN_SYNC_UNLOCK();
		return (0);
	}

	if ((dbp = dbuffer = ALLOC_DISPSTRING()) == NULL)
		dbp = alt_dbuffer;

	if ((int)cp == 0)
		domset = DOMAINSET(idn.localid);
	else
		domset = DOMAINSET_ALL;

	for (d = 0; d < MAX_DOMAINS; d++) {

		if (DOMAIN_IN_SET(domset, d) == 0)
			continue;

		dp = &idn_domain[d];

		if (dp->dcpu == IDN_NIL_DCPU)
			continue;

		if (IDN_DLOCK_TRY_SHARED(d) == 0) {
			if (d < 10)
				(void) mi_mpprintf(mp,
				    "Domain %d (0x%p) busy...",
				    d, dp);
			else
				(void) mi_mpprintf(mp,
				    "Domain %d (0x%p) busy...",
				    d, dp);
			continue;
		}
		if (dp->dcpu == IDN_NIL_DCPU) {
			IDN_DUNLOCK(d);
			continue;
		}
		if (d < 10)
			(void) mi_mpprintf(mp, "%sDomain %d (0x%p)",
			    (d && (idn.ndomains > 1)) ? "\n" : "",
			    d, dp);
		else
			(void) mi_mpprintf(mp, "%sDomain %d (0x%p)",
			    (d && (idn.ndomains > 1)) ? "\n" : "",
			    d, dp);

		if (d == idn.localid)
			(void) mi_mpprintf(mp, " (local) State = %s (%d)",
			    idnds_str[dp->dstate], dp->dstate);
		else
			(void) mi_mpprintf(mp, " State = %s (%d)",
			    idnds_str[dp->dstate], dp->dstate);
		(void) mi_mpprintf(mp, " Name = %s, Netid = %d",
		    (strlen(dp->dname) > 0) ? dp->dname : "<>",
		    (int)dp->dnetid);

		CHANSET_ZERO(chanset);
		nchan = idn_domain_is_registered(d, -1, &chanset);
		if (dbuffer)
			mask2str(chanset, dbp, 32);
		else
			(void) sprintf(dbp, "0x%x", chanset);
		(void) mi_mpprintf(mp, " Nchans = %d, Chanset = %s",
		    nchan, nchan ? dbp : "<>");
		pset_upper = UPPER32_CPUMASK(dp->dcpuset);
		pset_lower = LOWER32_CPUMASK(dp->dcpuset);
		if (dbuffer)
			boardset2str(dp->dhw.dh_boardset, dbp);
		else
			(void) sprintf(dbp, "0x%x", dp->dhw.dh_boardset);

		(void) mi_mpprintf(mp, " Nboards = %d, Brdset = %s",
		    dp->dhw.dh_nboards,
		    dp->dhw.dh_nboards ? dbp : "<>");
		(void) sprintf(dbp, "0x%x.%x", pset_upper, pset_lower);
		(void) mi_mpprintf(mp, " Ncpus = %d, Cpuset = %s",
		    dp->dncpus, dp->dncpus ? dbp : "<>");
		(void) mi_mpprintf(mp, " Nmcadr = %d",
		    dp->dhw.dh_nmcadr);
		(void) mi_mpprintf(mp,
		    " MsgTimer = %s (cnt = %d)",
		    (dp->dtimerq.tq_count > 0)
		    ? "active" : "idle",
		    dp->dtimerq.tq_count);
		(void) mi_mpprintf(mp, " Dcpu = %d "
		    "(lastcpu = %d, cpuindex = %d)",
		    dp->dcpu, dp->dcpu_last, dp->dcpuindex);
		(void) mi_mpprintf(mp, " Dio = %d "
		    "(ioerr = %d, iochk = %d, iowanted = %d)",
		    dp->dio, dp->dioerr, dp->diocheck ? 1 : 0,
		    dp->diowanted ? 1 : 0);
		if (dp->dsync.s_cmd == IDNSYNC_NIL) {
			(void) mi_mpprintf(mp, " Dsync = %s",
			    idnsync_str[IDNSYNC_NIL]);
		} else {
			(void) mi_mpprintf(mp,
			    " Dsync = %s "
			    "(x_set = 0x%x, r_set = 0x%x)",
			    idnsync_str[dp->dsync.s_cmd],
			    (uint_t)dp->dsync.s_set_exp,
			    (uint_t)dp->dsync.s_set_rdy);
		}
		(void) mi_mpprintf(mp, " Dvote = 0x%x",
		    dp->dvote.ticket);
		(void) mi_mpprintf(mp, " Dfin = %s (Sync = %s)",
		    idnfin_str[dp->dfin],
		    (dp->dfin_sync == IDNFIN_SYNC_OFF) ? "OFF" :
		    (dp->dfin_sync == IDNFIN_SYNC_YES) ? "YES" : "NO");
		(void) mi_mpprintf(mp, " Dcookie_err = %s (cnt = %d)",
		    dp->dcookie_err ? "YES" : "NO",
		    dp->dcookie_errcnt);
		IDN_DUNLOCK(d);
	}

	IDN_GUNLOCK();

	if (dbuffer) {
		FREE_DISPSTRING(dbuffer);
	}

	IDN_SYNC_UNLOCK();

	return (0);
}

#define	SNOOP_ENTRIES	2048	/* power of 2 */

struct snoop_buffer {
/*  0 */	char	io;
/*  1 */	char	board;
/*  2 */	char	trans[14];

/* 10 */	uint_t	xargs[4];
} *snoop_data, snoop_buffer[SNOOP_ENTRIES+1];


int		snoop_index;
kmutex_t	snoop_mutex;
static char	_bd2hexascii[] = {
	'0', '1', '2', '3', '4', '5', '6', '7',
	'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
};

#define	SNOOP_IDN(in, tr, bd, arg1, arg2, arg3, arg4) \
	{ \
		if (idn_snoop) { \
			mutex_enter(&snoop_mutex); \
			if (snoop_data == NULL) { \
				snoop_data = (struct snoop_buffer *) \
				    (((uint_t)snoop_buffer + 0xf) & ~0xf); \
			} \
			snoop_data[snoop_index].io = ((in) == 0) ? 'o' : 'i'; \
			snoop_data[snoop_index].board = \
			    ((bd) == -1) ? 'X' : _bd2hexascii[bd]; \
			(void) strncpy(snoop_data[snoop_index].trans, (tr), 14); \
			snoop_data[snoop_index].xargs[0] = (arg1); \
			snoop_data[snoop_index].xargs[1] = (arg2); \
			snoop_data[snoop_index].xargs[2] = (arg3); \
			snoop_data[snoop_index].xargs[3] = (arg4); \
			snoop_index++; \
			snoop_index &= SNOOP_ENTRIES - 1; \
			mutex_exit(&snoop_mutex); \
		} \
	}
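/*
 * Illustration (not part of the driver): since SNOOP_ENTRIES is a power
 * of two, the index update in SNOOP_IDN above,
 *
 *	snoop_index++;
 *	snoop_index &= SNOOP_ENTRIES - 1;
 *
 * is equivalent to (snoop_index + 1) % SNOOP_ENTRIES, so snoop_data acts
 * as a circular log holding the most recent SNOOP_ENTRIES transactions.
 * The (... + 0xf) & ~0xf expression merely rounds the statically declared
 * snoop_buffer up to a 16-byte boundary before first use.
 */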
/*
 * Allocate the circular buffers to be used for
 * DMV interrupt processing.
 */
static int
idn_init_handler()
{
	int		i, c;
	size_t		len;
	idn_dmv_msg_t	*basep, *ivp;
	uint32_t	ivp_offset;
	procname_t	proc = "idn_init_handler";

	if (idn.intr.dmv_data != NULL) {
		cmn_err(CE_WARN,
		    "IDN: 130: IDN DMV handler already initialized");
		return (-1);
	}

	/*
	 * This memory will be touched by the low-level
	 * DMV trap handler for IDN.
	 */
	len = sizeof (idn_dmv_data_t);
	len = roundup(len, sizeof (uint64_t));
	len += NCPU * idn_dmv_pending_max * sizeof (idn_dmv_msg_t);
	len = roundup(len, PAGESIZE);

	PR_PROTO("%s: sizeof (idn_dmv_data_t) = %d\n",
	    proc, sizeof (idn_dmv_data_t));
	PR_PROTO("%s: allocating %d bytes for dmv data area\n", proc, len);

	idn.intr.dmv_data_len = len;
	idn.intr.dmv_data = kmem_zalloc(len, KM_SLEEP);

	PR_PROTO("%s: DMV data area = %p\n", proc, idn.intr.dmv_data);

	idn_dmv_data = (idn_dmv_data_t *)idn.intr.dmv_data;
	basep = (idn_dmv_msg_t *)roundup((size_t)idn.intr.dmv_data +
	    sizeof (idn_dmv_data_t), sizeof (uint64_t));
	idn_dmv_data->idn_dmv_qbase = (uint64_t)basep;

	ivp = basep;
	ivp_offset = 0;
	/*
	 * The buffer queues are allocated per-cpu.
	 */
	for (c = 0, ivp = basep; c < NCPU; ivp++, c++) {
		idn_dmv_data->idn_dmv_cpu[c].idn_dmv_current = ivp_offset;
		idn_iv_queue[c] = ivp;
		ivp_offset += sizeof (idn_dmv_msg_t);
		for (i = 1; i < idn_dmv_pending_max; ivp++, i++) {
			ivp->iv_next = ivp_offset;
			ivp->iv_ready = 0;
			lock_set(&ivp->iv_ready);
			ivp_offset += sizeof (idn_dmv_msg_t);
		}
		ivp->iv_next = idn_dmv_data->idn_dmv_cpu[c].idn_dmv_current;
		ivp->iv_ready = 0;
		lock_set(&ivp->iv_ready);
	}

	idn.intr.dmv_inum = STARFIRE_DMV_IDN_BASE;
	idn.intr.soft_inum = add_softintr((uint_t)idn_pil, idn_handler, 0);
	idn_dmv_data->idn_soft_inum = idn.intr.soft_inum;
	/*
	 * Make sure everything is out there before
	 * we effectively set it free for use.
	 */
	membar_stld_stst();

	if (dmv_add_intr(idn.intr.dmv_inum, idn_dmv_handler,
	    (caddr_t)idn_dmv_data)) {
		idn_deinit_handler();
		cmn_err(CE_WARN, "IDN: 132: failed to add IDN DMV handler");
		return (-1);
	}

	return (0);
}
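/*
 * Illustration (not driver code) of the area initialized above: after
 * the idn_dmv_data_t header (rounded up to an 8-byte boundary) come
 * NCPU circular queues of idn_dmv_pending_max idn_dmv_msg_t entries
 * each.  Every entry's iv_next holds the byte offset, relative to
 * idn_dmv_qbase, of the next entry in that cpu's ring, and the last
 * entry points back to the ring's first entry (idn_dmv_current), e.g.
 *
 *	queue for cpu c:  idn_iv_queue[c] = basep + c * idn_dmv_pending_max
 *	next entry:       (idn_dmv_msg_t *)(qbase + ivp->iv_next)
 */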
static void
idn_deinit_handler()
{
	if (idn.intr.dmv_data == NULL)
		return;

	(void) dmv_rem_intr(idn.intr.dmv_inum);
	rem_softintr(idn.intr.soft_inum);
	kmem_free(idn.intr.dmv_data, idn.intr.dmv_data_len);
	idn.intr.dmv_data = NULL;
}

/*
 * High-level (soft interrupt) handler for DMV interrupts
 */
/*ARGSUSED0*/
static uint_t
idn_handler(caddr_t unused, caddr_t unused2)
{
#ifdef DEBUG
	int		count = 0;
#endif /* DEBUG */
	int		cpuid = (int)CPU->cpu_id;
	ushort_t	mtype, atype;
	idn_dmv_msg_t	*xp, *xplimit;
	procname_t	proc = "idn_handler";

	ASSERT(getpil() >= idn_pil);
	flush_windows();

	/*
	 * Clear the synchronization flag to indicate that
	 * processing has started.  As long as idn_dmv_active
	 * is non-zero, idn_dmv_handler will queue work without
	 * initiating a soft interrupt.  Since we clear it
	 * first thing at most one pil-interrupt for IDN will
	 * queue up behind the currently active one.  We don't
	 * want to clear this flag at the end because it leaves
	 * a window where an interrupt could get lost (unless it's
	 * pushed by a subsequent interrupt).  The objective in
	 * doing this is to prevent exhausting a cpu's intr_req
	 * structures with interrupts of the same pil level.
	 */
	lock_clear(&idn_dmv_data->idn_dmv_cpu[cpuid].idn_dmv_active);

	xp = idn_iv_queue[cpuid];
	xplimit = xp + idn_dmv_pending_max;
	xp += idn_intr_index[cpuid];
	/*
	 * As long as there's stuff that's READY in the
	 * queue, keep processing.
	 */
	while (lock_try(&xp->iv_ready)) {

		ASSERT(lock_try(&xp->iv_inuse) == 0);

		mtype = (ushort_t)xp->iv_mtype;
		mtype &= IDNP_MSGTYPE_MASK | IDNP_ACKNACK_MASK;
		atype = (ushort_t)xp->iv_atype;

		if (((int)xp->iv_ver == idn.version) && mtype) {
			idn_protojob_t	*jp;
#ifdef DEBUG
			STRING(mstr);
			STRING(astr);

			INUM2STR(mtype, mstr);
			if ((mtype & IDNP_MSGTYPE_MASK) == 0) {
				INUM2STR(atype, astr);
				strcat(mstr, "/");
				strcat(mstr, astr);
			}

			count++;

			PR_XDC("%s:%d:%d RECV: scpu = %d, msg = 0x%x(%s)\n",
			    proc, (int)xp->iv_domid, count,
			    (int)xp->iv_cpuid, mtype, mstr);
			PR_XDC("%s:%d:%d R-DATA: a0 = 0x%x, a1 = 0x%x\n",
			    proc, (int)xp->iv_domid, count,
			    xp->iv_xargs0, xp->iv_xargs1);
			PR_XDC("%s:%d:%d R-DATA: a2 = 0x%x, a3 = 0x%x\n",
			    proc, (int)xp->iv_domid, count,
			    xp->iv_xargs2, xp->iv_xargs3);
#endif /* DEBUG */

			if (mtype == IDNP_DATA) {
				jp = NULL;
				/*
				 * The only time we receive pure
				 * data messages at this level is
				 * to wake up the channel server.
				 * Since this is often an urgent
				 * request we'll do it from here
				 * instead of waiting for a proto
				 * server to do it.
				 */
				idn_signal_data_server((int)xp->iv_domid,
				    (ushort_t)xp->iv_xargs0);
			} else {
				jp = idn_protojob_alloc(KM_NOSLEEP);
				/*
				 * If the allocation fails, just drop
				 * the message and get on with life.
				 * If memory pressure is this great then
				 * dropping this message is probably
				 * the least of our worries!
				 */
				if (jp) {
					jp->j_msg.m_domid = (int)xp->iv_domid;
					jp->j_msg.m_cpuid = (int)xp->iv_cpuid;
					jp->j_msg.m_msgtype = mtype;
					jp->j_msg.m_acktype = atype;
					jp->j_msg.m_cookie = xp->iv_cookie;
					SET_XARGS(jp->j_msg.m_xargs,
					    xp->iv_xargs0, xp->iv_xargs1,
					    xp->iv_xargs2, xp->iv_xargs3);
				}

			}
			membar_ldst_stst();

			lock_clear(&xp->iv_inuse);

			if (jp)
				idn_protojob_submit(jp->j_msg.m_domid, jp);
		} else {
			membar_ldst_stst();
			IDN_GKSTAT_INC(gk_dropped_intrs);
			lock_clear(&xp->iv_inuse);
		}

		if (++xp == xplimit)
			xp = idn_iv_queue[cpuid];
	}

	idn_intr_index[cpuid] = xp - idn_iv_queue[cpuid];

	return (DDI_INTR_CLAIMED);
}
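/*
 * Illustration (not driver code): idn_intr_index[] records where the
 * scan above stopped, so the next soft interrupt on this cpu resumes at
 * the first entry that was not yet ready, matching the way xp is primed
 * at the top of idn_handler():
 *
 *	xp = idn_iv_queue[cpuid] + idn_intr_index[cpuid];
 */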
void
idn_awol_event_set(boardset_t boardset)
{
	idnsb_event_t	*sbp;
	procname_t	proc = "idn_awol_event_set";

	ASSERT(IDN_GLOCK_IS_EXCL());

	mutex_enter(&idn.idnsb_mutex);
	sbp = idn.idnsb_eventp;
	if (sbp == NULL) {
		cmn_err(CE_WARN, "IDN: 133: sigblock event area missing");
		cmn_err(CE_CONT,
		    "IDN: 134: unable to mark boardset (0x%x) AWOL\n",
		    boardset);
		mutex_exit(&idn.idnsb_mutex);
		return;
	}

	if (boardset == 0) {
		PR_PROTO("%s: AWOL BOARDSET is 0, NO EVENT <<<<<<<<<<<<<<<\n",
		    proc);
		mutex_exit(&idn.idnsb_mutex);
		return;
	} else {
		PR_PROTO("%s: MARKING BOARDSET (0x%x) AWOL\n", proc, boardset);
	}
	SSIEVENT_ADD(sbp, SSIEVENT_AWOL, boardset);
	mutex_exit(&idn.idnsb_mutex);
}

void
idn_awol_event_clear(boardset_t boardset)
{
	idnsb_event_t	*sbp;
	procname_t	proc = "idn_awol_event_clear";

	ASSERT(IDN_GLOCK_IS_EXCL());

	mutex_enter(&idn.idnsb_mutex);
	sbp = idn.idnsb_eventp;
	if (sbp == NULL) {
		cmn_err(CE_WARN, "IDN: 133: sigblock event area missing");
		cmn_err(CE_CONT,
		    "IDN: 134: unable to mark boardset (0x%x) AWOL\n",
		    boardset);
		mutex_exit(&idn.idnsb_mutex);
		return;
	}

	if (boardset == 0) {
		PR_PROTO("%s: AWOL BOARDSET is 0, NO EVENT <<<<<<<<<<<<<<<\n",
		    proc);
		mutex_exit(&idn.idnsb_mutex);
		return;
	} else {
		PR_PROTO("%s: CLEARING BOARDSET (0x%x) AWOL\n", proc,
		    boardset);
	}
	SSIEVENT_DEL(sbp, SSIEVENT_AWOL, boardset);
	mutex_exit(&idn.idnsb_mutex);
}

static void
idn_gkstat_init()
{
	struct kstat		*ksp;
	struct idn_gkstat_named	*sgkp;

#ifdef kstat
	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(idn.dip),
	    IDNNAME, "net", KSTAT_TYPE_NAMED,
	    sizeof (struct idn_gkstat_named) / sizeof (kstat_named_t),
	    KSTAT_FLAG_PERSISTENT)) == NULL) {
#else
	if ((ksp = kstat_create(IDNNAME, ddi_get_instance(idn.dip),
	    IDNNAME, "net", KSTAT_TYPE_NAMED,
	    sizeof (struct idn_gkstat_named) /
	    sizeof (kstat_named_t), 0)) == NULL) {
#endif /* kstat */
		cmn_err(CE_CONT, "IDN: 135: %s: %s\n",
		    IDNNAME, "kstat_create failed");
		return;
	}

	idn.ksp = ksp;
	sgkp = (struct idn_gkstat_named *)(ksp->ks_data);
	kstat_named_init(&sgkp->sk_curtime, "curtime",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_reconfigs, "reconfigs",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_reconfig_last, "reconfig_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_reaps, "reaps",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_reap_last, "reap_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_links, "links",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_link_last, "link_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_unlinks, "unlinks",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_unlink_last, "unlink_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_buffail, "buf_fail",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_buffail_last, "buf_fail_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_slabfail, "slab_fail",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_slabfail_last, "slab_fail_last",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_reap_count, "reap_count",
	    KSTAT_DATA_ULONG);
	kstat_named_init(&sgkp->sk_dropped_intrs, "dropped_intrs",
	    KSTAT_DATA_ULONG);
	ksp->ks_update = idn_gkstat_update;
	ksp->ks_private = (void *)NULL;
	kstat_install(ksp);
}

static void
idn_gkstat_deinit()
{
	if (idn.ksp)
		kstat_delete(idn.ksp);
	idn.ksp = NULL;
}

static int
idn_gkstat_update(kstat_t *ksp, int rw)
{
	struct idn_gkstat_named	*sgkp;

	sgkp = (struct idn_gkstat_named *)ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		sg_kstat.gk_reconfigs = sgkp->sk_reconfigs.value.ul;
		sg_kstat.gk_reconfig_last = sgkp->sk_reconfig_last.value.ul;
		sg_kstat.gk_reaps = sgkp->sk_reaps.value.ul;
		sg_kstat.gk_reap_last = sgkp->sk_reap_last.value.ul;
		sg_kstat.gk_links = sgkp->sk_links.value.ul;
		sg_kstat.gk_link_last = sgkp->sk_link_last.value.ul;
		sg_kstat.gk_unlinks = sgkp->sk_unlinks.value.ul;
		sg_kstat.gk_unlink_last = sgkp->sk_unlink_last.value.ul;
		sg_kstat.gk_buffail = sgkp->sk_buffail.value.ul;
		sg_kstat.gk_buffail_last = sgkp->sk_buffail_last.value.ul;
		sg_kstat.gk_slabfail = sgkp->sk_slabfail.value.ul;
		sg_kstat.gk_slabfail_last = sgkp->sk_slabfail_last.value.ul;
		sg_kstat.gk_reap_count = sgkp->sk_reap_count.value.ul;
		sg_kstat.gk_dropped_intrs = sgkp->sk_dropped_intrs.value.ul;
	} else {
		sgkp->sk_curtime.value.ul = lbolt;
		sgkp->sk_reconfigs.value.ul = sg_kstat.gk_reconfigs;
		sgkp->sk_reconfig_last.value.ul = sg_kstat.gk_reconfig_last;
		sgkp->sk_reaps.value.ul = sg_kstat.gk_reaps;
		sgkp->sk_reap_last.value.ul = sg_kstat.gk_reap_last;
		sgkp->sk_links.value.ul = sg_kstat.gk_links;
		sgkp->sk_link_last.value.ul = sg_kstat.gk_link_last;
		sgkp->sk_unlinks.value.ul = sg_kstat.gk_unlinks;
		sgkp->sk_unlink_last.value.ul = sg_kstat.gk_unlink_last;
		sgkp->sk_buffail.value.ul = sg_kstat.gk_buffail;
		sgkp->sk_buffail_last.value.ul = sg_kstat.gk_buffail_last;
		sgkp->sk_slabfail.value.ul = sg_kstat.gk_slabfail;
		sgkp->sk_slabfail_last.value.ul = sg_kstat.gk_slabfail_last;
		sgkp->sk_reap_count.value.ul = sg_kstat.gk_reap_count;
		sgkp->sk_dropped_intrs.value.ul = sg_kstat.gk_dropped_intrs;
	}

	return (0);
}
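/*
 * Usage note (illustration only): the named kstat created by
 * idn_gkstat_init() is registered with module/name IDNNAME and class
 * "net", so the counters mirrored by idn_gkstat_update() can be read
 * from userland with kstat(1M); assuming IDNNAME expands to "idn",
 * something like
 *
 *	# kstat -m idn
 *
 * would display them.
 */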
#ifdef DEBUG
#define	RW_HISTORY	100
static uint_t	rw_history[NCPU][RW_HISTORY];
static int	rw_index[NCPU];
#endif /* DEBUG */

static int
idn_rw_mem(idnop_t *idnop)
{
	uint_t		lo_off, hi_off;
	int		rw, blksize, num;
	int		cpuid;
	register int	n, idx;
	char		*ibuf, *obuf = NULL;	/* obuf stays NULL for read-only */
	char		*smraddr;
	struct seg	*segp;
	ulong_t		randx;
	kmutex_t	slock;
	kcondvar_t	scv;
	static int	orig_gstate = IDNGS_IGNORE;
	extern struct seg	ktextseg;

#define	RANDOM_INIT()	(randx = lbolt)
#define	RANDOM(a, b) \
	(((a) >= (b)) ? \
	(a) : (((randx = randx * 1103515245L + 12345) % ((b)-(a))) + (a)))

	RANDOM_INIT();

	lo_off = idnop->rwmem.lo_off;
	hi_off = idnop->rwmem.hi_off;
	blksize = idnop->rwmem.blksize;
	num = idnop->rwmem.num;
	rw = idnop->rwmem.rw;		/* 0 = rd, 1 = wr, 2 = rd/wr */

	if (((hi_off > (uint_t)MB2B(IDN_SMR_SIZE)) || (lo_off >= hi_off) ||
	    (blksize <= 0) || (blksize > (hi_off - lo_off)) || (num <= 0)) &&
	    (idnop->rwmem.goawol == -1)) {
		return (EINVAL);
	}

	if (idnop->rwmem.goawol && (orig_gstate == IDNGS_IGNORE)) {
		IDN_GLOCK_EXCL();
		cmn_err(CE_WARN, "IDN: Local domain going into IGNORE MODE!!");
		orig_gstate = idn.state;
		IDN_GSTATE_TRANSITION(IDNGS_IGNORE);
		IDN_GUNLOCK();

	} else if (!idnop->rwmem.goawol && (orig_gstate != IDNGS_IGNORE)) {
		IDN_GLOCK_EXCL();
		cmn_err(CE_WARN,
		    "IDN: Local domain restoring original state %s(%d)",
		    idngs_str[orig_gstate], (int)orig_gstate);
		IDN_GSTATE_TRANSITION(orig_gstate);
		orig_gstate = IDNGS_IGNORE;
		IDN_GUNLOCK();
	}
	/*
	 * Just requested AWOL.
	 */
	if (num == 0)
		return (0);
	/*
	 * Default READ only.
	 */
	ibuf = (char *)kmem_alloc(blksize, KM_SLEEP);
	if (rw == 1) {
		/*
		 * WRITE only.
		 */
		obuf = ibuf;
		ibuf = NULL;
	} else if (rw == 2) {
		/*
		 * READ/WRITE.
		 */
		obuf = (char *)kmem_alloc(blksize, KM_SLEEP);
		for (segp = &ktextseg; segp; segp = AS_SEGNEXT(&kas, segp)) {
			if (segp->s_size >= blksize)
				break;
		}
		if (segp == NULL) {
			cmn_err(CE_WARN,
			    "IDN: blksize (%d) too large", blksize);
			/*
			 * Don't leak the temporary buffers on this
			 * error path.
			 */
			kmem_free(ibuf, blksize);
			kmem_free(obuf, blksize);
			return (EINVAL);
		}
		bcopy(segp->s_base, obuf, blksize);
	}

	mutex_init(&slock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&scv, NULL, CV_DEFAULT, NULL);

	cmn_err(CE_NOTE,
	    "IDN: starting %s of %d blocks of %d bytes each...",
	    (rw == 1) ? "W-ONLY" : (rw == 2) ? "RW" : "R-ONLY",
	    num, blksize);

	for (n = 0; n < num; n++) {
		uint_t	rpos;

		if ((hi_off - lo_off) > blksize)
			rpos = RANDOM(lo_off, (hi_off - blksize));
		else
			rpos = lo_off;
		smraddr = IDN_OFFSET2ADDR(rpos);

		cpuid = (int)CPU->cpu_id;
#ifdef DEBUG
		idx = rw_index[cpuid]++ % RW_HISTORY;
		rw_history[cpuid][idx] = rpos;
#endif /* DEBUG */

		switch (rw) {
		case 0:
			bcopy(smraddr, ibuf, blksize);
			break;
		case 1:
			bcopy(obuf, smraddr, blksize);
			break;
		case 2:
			if (n & 1)
				bcopy(obuf, smraddr, blksize);
			else
				bcopy(smraddr, ibuf, blksize);
			break;
		default:
			break;
		}
		if (!(n % 1000)) {
			int	rv;

			mutex_enter(&slock);
			rv = cv_timedwait_sig(&scv, &slock, lbolt+hz);
			mutex_exit(&slock);
			if (rv == 0)
				break;
		}
	}

	cv_destroy(&scv);
	mutex_destroy(&slock);

	if (ibuf)
		kmem_free(ibuf, blksize);
	if (obuf)
		kmem_free(obuf, blksize);

	return (0);
}

void
inum2str(uint_t inum, char str[])
{
	uint_t	acknack;

	str[0] = '\0';

	acknack = (inum & IDNP_ACKNACK_MASK);
	inum &= ~IDNP_ACKNACK_MASK;

	if (!inum && !acknack) {
		strcpy(str, idnm_str[0]);
		return;
	}

	if (inum == 0) {
		strcpy(str, (acknack & IDNP_ACK) ? "ack" : "nack");
	} else {
		if (inum < IDN_NUM_MSGTYPES)
			strcpy(str, idnm_str[inum]);
		else
			sprintf(str, "0x%x?", inum);
		if (acknack) {
			if (acknack & IDNP_ACK)
				strcat(str, "+ack");
			else
				strcat(str, "+nack");
		}
	}
}

boardset_t
cpuset2boardset(cpuset_t portset)
{
	register int		c;
	register boardset_t	bset;

	bset = 0;
	for (c = 0; c < NCPU; )
		if (CPU_IN_SET(portset, c)) {
			BOARDSET_ADD(bset, CPUID_TO_BOARDID(c));
			c = (c + 4) & ~3;
		} else {
			c++;
		}

	return (bset);
}

void
cpuset2str(cpuset_t cset, char buffer[])
{
	register int	c, n;

	buffer[0] = '\0';
	for (c = n = 0; c < NCPU; c++) {
		if (!CPU_IN_SET(cset, c))
			continue;
#ifdef DEBUG
		if (strlen(buffer) >= _DSTRLEN) {
			PR_PROTO("************* WARNING WARNING WARNING\n");
			PR_PROTO("cpuset2str(cpu = %d) buffer "
			    "OVERFLOW <<<<<<\n", c);
			PR_PROTO("*******************************\n");
			(void) sprintf(&buffer[_DSTRLEN-6], "*OVER");
			return;
		}
#endif /* DEBUG */
		if (n == 0)
			(void) sprintf(buffer, "%d", c);
		else
			(void) sprintf(buffer + strlen(buffer), ", %d", c);
		n++;
	}
}
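/*
 * Example (illustration only): for a cpuset containing cpus 0, 1 and 33,
 * cpuset2str() above produces the string "0, 1, 33"; the mask2str()
 * routine below renders bit masks the same way, e.g. mask2str(0x15, buf, 8)
 * yields "0, 2, 4".
 */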
void
domainset2str(domainset_t dset, char buffer[])
{
	/*
	 * Since domainset_t and boardset_t are the
	 * same (max = MAX_DOMAINS = MAX_BOARDS) we
	 * can just overload boardset2str().
	 */
	mask2str((uint_t)dset, buffer, MAX_DOMAINS);
}

void
boardset2str(boardset_t bset, char buffer[])
{
	mask2str((uint_t)bset, buffer, MAX_BOARDS);
}

void
mask2str(uint_t mask, char buffer[], int maxnum)
{
	int	n, i;

	buffer[0] = '\0';
	for (i = n = 0; i < maxnum; i++) {
		if ((mask & (1 << i)) == 0)
			continue;
		if (n == 0)
			(void) sprintf(buffer, "%d", i);
		else
			(void) sprintf(buffer + strlen(buffer), ", %d", i);
		n++;
	}
}

int
idnxdc(int domid, idn_msgtype_t *mtp,
    uint_t arg1, uint_t arg2, uint_t arg3, uint_t arg4)
{
	int		rv, cpuid, tcpuid;
	uint_t		cookie;
	uint64_t	pdata;
	uint64_t	dmv_word0, dmv_word1, dmv_word2;
	idn_domain_t	*dp = &idn_domain[domid];
	extern kmutex_t	xc_sys_mutex;
	extern int	xc_spl_enter[];
	procname_t	proc = "idnxdc";

	if (idn_snoop) {
		int	bd;
		STRING(str);
		STRING(mstr);
		STRING(astr);

		INUM2STR(mtp->mt_mtype, mstr);
		if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == 0) {
			INUM2STR(arg1, astr);
			sprintf(str, "%s/%s", mstr, astr);
		} else {
			strcpy(str, mstr);
		}
		if (dp->dcpu == IDN_NIL_DCPU)
			bd = -1;
		else
			bd = CPUID_TO_BOARDID(dp->dcpu);
		SNOOP_IDN(0, str, bd, arg1, arg2, arg3, arg4);
	}

	/*
	 * For NEGO messages we send the remote domain the cookie we
	 * expect it to use in subsequent messages that it sends
	 * to us (dcookie_recv).
	 * For other messages, we must use the cookie that the
	 * remote domain assigned to us for sending (dcookie_send).
	 */
	if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == IDNP_NEGO)
		cookie = IDN_MAKE_COOKIE(dp->dcookie_recv, mtp->mt_cookie);
	else
		cookie = IDN_MAKE_COOKIE(dp->dcookie_send, mtp->mt_cookie);

	pdata = IDN_MAKE_PDATA(mtp->mt_mtype, mtp->mt_atype, cookie);

	dmv_word0 = DMV_MAKE_DMV(idn.intr.dmv_inum, pdata);
	dmv_word1 = ((uint64_t)arg1 << 32) | (uint64_t)arg2;
	dmv_word2 = ((uint64_t)arg3 << 32) | (uint64_t)arg4;

	ASSERT((dp->dcpu != IDN_NIL_DCPU) ||
	    (dp->dcpu_last != IDN_NIL_DCPU));

	tcpuid = (dp->dcpu == IDN_NIL_DCPU) ?
	    dp->dcpu_last : dp->dcpu;

	if (tcpuid == IDN_NIL_DCPU) {
		PR_PROTO("%s:%d: cpu/cpu_last == NIL_DCPU\n",
		    proc, domid);
		return (-1);
	}

	mutex_enter(&xc_sys_mutex);
	cpuid = (int)CPU->cpu_id;
	xc_spl_enter[cpuid] = 1;

	idnxf_init_mondo(dmv_word0, dmv_word1, dmv_word2);

	rv = idnxf_send_mondo(STARFIRE_UPAID2HWMID(tcpuid));

	xc_spl_enter[cpuid] = 0;
	mutex_exit(&xc_sys_mutex);

	return (rv);
}

void
idnxdc_broadcast(domainset_t domset, idn_msgtype_t *mtp,
    uint_t arg1, uint_t arg2, uint_t arg3, uint_t arg4)
{
	int	d;

	for (d = 0; d < MAX_DOMAINS; d++) {
		idn_domain_t	*dp;

		if (!DOMAIN_IN_SET(domset, d))
			continue;

		dp = &idn_domain[d];
		if (dp->dcpu == IDN_NIL_DCPU)
			continue;

		(void) IDNXDC(d, mtp, arg1, arg2, arg3, arg4);
	}
}
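/*
 * Illustration (not driver code): idnxdc() packs its four 32-bit
 * arguments into the two 64-bit mondo data words, so the receiving
 * side can recover them as
 *
 *	arg1 = (uint_t)(dmv_word1 >> 32);	arg2 = (uint_t)dmv_word1;
 *	arg3 = (uint_t)(dmv_word2 >> 32);	arg4 = (uint_t)dmv_word2;
 *
 * while the message type, ack type and cookie travel in the DMV payload
 * built by IDN_MAKE_PDATA()/DMV_MAKE_DMV() in dmv_word0.
 */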
#define	PROM_SMRSIZE	0x1
#define	PROM_SMRADDR	0x2
#define	PROM_SMRPROPS	(PROM_SMRSIZE | PROM_SMRADDR)
/*
 * Locate the idn-smr-size property to determine the size of the SMR
 * region for the SSI.  Value inherently enables/disables SSI capability.
 */
static int
idn_prom_getsmr(uint_t *smrsz, uint64_t *paddrp, uint64_t *sizep)
{
	dnode_t		nodeid;
	int		found = 0;
	int		len;
	uint_t		smrsize = 0;
	uint64_t	obpaddr, obpsize;
	struct smraddr {
		uint32_t	hi_addr;
		uint32_t	lo_addr;
		uint32_t	hi_size;
		uint32_t	lo_size;
	} smraddr;
	procname_t	proc = "idn_prom_getsmr";

	bzero(&smraddr, sizeof (smraddr));
	/*
	 * idn-smr-size is a property of the "memory" node and
	 * is defined in megabytes.
	 */
	nodeid = prom_finddevice("/memory");

	if (nodeid != OBP_NONODE) {
		len = prom_getproplen(nodeid, IDN_PROP_SMRSIZE);
		if (len == sizeof (smrsize)) {
			(void) prom_getprop(nodeid, IDN_PROP_SMRSIZE,
			    (caddr_t)&smrsize);
			found |= PROM_SMRSIZE;
		}
		len = prom_getproplen(nodeid, IDN_PROP_SMRADDR);
		if (len == sizeof (smraddr)) {
			(void) prom_getprop(nodeid, IDN_PROP_SMRADDR,
			    (caddr_t)&smraddr);
			found |= PROM_SMRADDR;
		}
	}

	if (found != PROM_SMRPROPS) {
		if ((found & PROM_SMRSIZE) == 0)
			cmn_err(CE_WARN,
			    "IDN: 136: \"%s\" property not found, "
			    "disabling IDN",
			    IDN_PROP_SMRSIZE);
		if (smrsize && ((found & PROM_SMRADDR) == 0))
			cmn_err(CE_WARN,
			    "IDN: 136: \"%s\" property not found, "
			    "disabling IDN",
			    IDN_PROP_SMRADDR);
		return (-1);
	}

	if (smrsize == 0) {
		PR_SMR("%s: IDN DISABLED (idn_smr_size = 0)\n", proc);
		cmn_err(CE_NOTE, "!IDN: 137: SMR size is 0, disabling IDN");

	} else if (smrsize > IDN_SMR_MAXSIZE) {
		PR_SMR("%s: IDN DISABLED (idn_smr_size too big %d > %d MB)\n",
		    proc, smrsize, IDN_SMR_MAXSIZE);
		cmn_err(CE_WARN,
		    "!IDN: 138: SMR size (%dMB) is too big (max = %dMB), "
		    "disabling IDN",
		    smrsize, IDN_SMR_MAXSIZE);
		smrsize = 0;
	} else {
		*smrsz = smrsize;
		found &= ~PROM_SMRSIZE;
	}

	obpaddr = ((uint64_t)smraddr.hi_addr << 32) |
	    (uint64_t)smraddr.lo_addr;
	obpsize = ((uint64_t)smraddr.hi_size << 32) |
	    (uint64_t)smraddr.lo_size;

	if (obpsize == 0) {
		if (smrsize > 0) {
			cmn_err(CE_WARN, "!IDN: 139: OBP region for "
			    "SMR is 0 length");
		}
	} else if (obpsize < (uint64_t)MB2B(smrsize)) {
		cmn_err(CE_WARN,
		    "!IDN: 140: OBP region (%lld B) smaller "
		    "than requested size (%ld B)",
		    obpsize, MB2B(smrsize));
	} else if ((obpaddr & ((uint64_t)IDN_SMR_ALIGN - 1)) != 0) {
		cmn_err(CE_WARN,
		    "!IDN: 141: OBP region (0x%llx) not on (0x%lx) "
		    "boundary", obpaddr, IDN_SMR_ALIGN);
	} else {
		*sizep = obpsize;
		*paddrp = obpaddr;
		found &= ~PROM_SMRADDR;
	}

	return (found ? -1 : 0);
}
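/*
 * Illustration (not driver code): "idn-smr-size" is a single 32-bit cell
 * expressed in megabytes, while "idn-smr-addr" is four 32-bit cells laid
 * out as in struct smraddr above.  As a made-up example, a 64 MB SMR at
 * physical address 0x7fc000000 would decode as
 *
 *	smrsize = 64
 *	obpaddr = ((uint64_t)hi_addr << 32) | lo_addr = 0x7fc000000
 *	obpsize = ((uint64_t)hi_size << 32) | lo_size >= MB2B(64)
 *
 * Only the decoding itself is taken from the code above; the property
 * values shown are invented for illustration.
 */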
void
idn_init_autolink()
{
	idnsb_event_t	*sbp;
	procname_t	proc = "idn_init_autolink";

	mutex_enter(&idn.idnsb_mutex);
	if ((sbp = idn.idnsb_eventp) == NULL) {
		PR_PROTO("%s: IDN private sigb (event) area is NULL\n", proc);
		mutex_exit(&idn.idnsb_mutex);
		return;
	}

	PR_PROTO("%s: marking domain IDN ready.\n", proc);

	bzero(sbp, sizeof (*sbp));

	sbp->idn_version = (uchar_t)idn.version;
	SSIEVENT_SET(sbp, SSIEVENT_BOOT, 0);
	(void) strncpy(sbp->idn_cookie_str, SSIEVENT_COOKIE,
	    SSIEVENT_COOKIE_LEN);
	mutex_exit(&idn.idnsb_mutex);
}

void
idn_deinit_autolink()
{
	idnsb_event_t	*sbp;
	procname_t	proc = "idn_deinit_autolink";

	mutex_enter(&idn.idnsb_mutex);
	if ((sbp = idn.idnsb_eventp) == NULL) {
		PR_PROTO("%s: IDN private sigb (event) area is NULL\n", proc);
		mutex_exit(&idn.idnsb_mutex);
		return;
	}

	PR_PROTO("%s: marking domain IDN unavailable.\n", proc);

	sbp->idn_version = (uchar_t)idn.version;
	SSIEVENT_CLEAR(sbp, SSIEVENT_BOOT, 0);
	(void) strncpy(sbp->idn_cookie_str, SSIEVENT_COOKIE,
	    SSIEVENT_COOKIE_LEN);
	mutex_exit(&idn.idnsb_mutex);
}

void
_make64cpumask(cpuset_t *csetp, uint_t upper, uint_t lower)
{
	int	c;

	CPUSET_ZERO(*csetp);

	for (c = 0; c < 32; c++) {
		if (lower & (1 << c)) {
			CPUSET_ADD(*csetp, c);
		}
		/*
		 * Bit c of "upper" corresponds to cpu (c + 32).
		 */
		if (upper & (1 << c)) {
			CPUSET_ADD(*csetp, c + 32);
		}
	}
}

uint_t
_lower32cpumask(cpuset_t cset)
{
	int	c;
	uint_t	set = 0;

	for (c = 0; c < 32; c++)
		if (CPU_IN_SET(cset, c))
			set |= 1 << c;

	return (set);
}

uint_t
_upper32cpumask(cpuset_t cset)
{
	int	c;
	uint_t	set = 0;

	for (c = 32; c < NCPU; c++)
		if (CPU_IN_SET(cset, c))
			set |= 1 << (c - 32);

	return (set);
}

#ifdef DEBUG
int
debug_idnxdc(char *f, int domid, idn_msgtype_t *mtp,
    uint_t a1, uint_t a2, uint_t a3, uint_t a4)
{
	idn_domain_t	*dp = &idn_domain[domid];
	int		rv, cpuid, bd;
	static int	xx = 0;
	STRING(str);
	STRING(mstr);
	STRING(astr);

	xx++;
	INUM2STR(mtp->mt_mtype, mstr);
	if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == 0) {
		INUM2STR(a1, astr);
		sprintf(str, "%s/%s", mstr, astr);
	} else {
		strcpy(str, mstr);
	}

	if ((cpuid = dp->dcpu) == IDN_NIL_DCPU)
		bd = -1;
	else
		bd = CPUID_TO_BOARDID(cpuid);

	SNOOP_IDN(0, str, bd, a1, a2, a3, a4);

	PR_XDC("%s:%d:%d SENT: scpu = %d, msg = 0x%x(%s)\n",
	    f, domid, xx, cpuid, mtp->mt_mtype, str);
	PR_XDC("%s:%d:%d S-DATA: a1 = 0x%x, a2 = 0x%x\n",
	    f, domid, xx, a1, a2);
	PR_XDC("%s:%d:%d S-DATA: a3 = 0x%x, a4 = 0x%x\n",
	    f, domid, xx, a3, a4);

	rv = idnxdc(domid, mtp, a1, a2, a3, a4);
	if (rv != 0) {
		PR_XDC("%s:%d:%d: WARNING: idnxdc(cpu %d) FAILED\n",
		    f, domid, xx, cpuid);
	}

	return (rv);
}

caddr_t
_idn_getstruct(char *structname, int size)
{
	caddr_t		ptr;
	procname_t	proc = "GETSTRUCT";

	ptr = kmem_zalloc(size, KM_SLEEP);

	PR_ALLOC("%s: ptr 0x%x, struct(%s), size = %d\n",
	    proc, (uint_t)ptr, structname, size);

	return (ptr);
}
void
_idn_freestruct(caddr_t ptr, char *structname, int size)
{
	procname_t	proc = "FREESTRUCT";

	PR_ALLOC("%s: ptr 0x%x, struct(%s), size = %d\n",
	    proc, (uint_t)ptr, structname, size);

	ASSERT(ptr != NULL);
	kmem_free(ptr, size);
}
#endif /* DEBUG */
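/*
 * Illustration (not driver code): _upper32cpumask(), _lower32cpumask()
 * and _make64cpumask() above are intended to round-trip, e.g.
 *
 *	upper = _upper32cpumask(cset);
 *	lower = _lower32cpumask(cset);
 *	_make64cpumask(&cset2, upper, lower);
 *
 * after which cset2 holds the same cpus (below NCPU) as cset.  The
 * UPPER32_CPUMASK/LOWER32_CPUMASK macros used elsewhere in this file
 * presumably wrap the first two helpers.
 */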