1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 #include <sys/types.h>
27 #include <sys/sysmacros.h>
28 #include <sys/open.h>
29 #include <sys/param.h>
30 #include <sys/machparam.h>
31 #include <sys/systm.h>
32 #include <sys/signal.h>
33 #include <sys/cred.h>
34 #include <sys/user.h>
35 #include <sys/proc.h>
36 #include <sys/vnode.h>
37 #include <sys/uio.h>
38 #include <sys/buf.h>
39 #include <sys/file.h>
40 #include <sys/kmem.h>
41 #include <sys/stat.h>
42 #include <sys/stream.h>
43 #include <sys/stropts.h>
44 #include <sys/strsubr.h>
45 #include <sys/strsun.h>
46 #include <inet/common.h>
47 #include <inet/mi.h>
48 #include <inet/nd.h>
49 #include <sys/poll.h>
50 #include <sys/utsname.h>
51 #include <sys/debug.h>
52 #include <sys/conf.h>
53 #include <sys/ddi.h>
54 #include <sys/sunddi.h>
55 #include <sys/errno.h>
56 #include <sys/modctl.h>
57 #include <sys/machsystm.h>
58 #include <sys/promif.h>
59 #include <sys/prom_plat.h>
60 #include <sys/obpdefs.h>
61 #include <vm/seg_kmem.h>
62 #include <vm/seg_kp.h>
63 #include <sys/kstat.h>
64 #include <sys/membar.h>
65 #include <sys/ivintr.h>
66 #include <sys/vm_machparam.h>
67 #include <sys/x_call.h>
68 #include <sys/cpuvar.h>
69 #include <sys/archsystm.h>
70 #include <sys/dmv.h>
71
72 #include <sys/idn.h>
73 #include <sys/idn_xf.h>
74 #include <sys/cpu_sgnblk_defs.h>
75 #include <sys/cpu_sgn.h>
76
77 struct idn_gkstat sg_kstat;
78
79 #define MBXTBL_PART_REPORT ((caddr_t)1)
80 #define MBXTBL_FULL_REPORT ((caddr_t)2)
81
82 idn_domain_t idn_domain[MAX_DOMAINS];
83 idn_global_t idn;
84 int idn_debug;
85 int idn_snoop;
86 int idn_history;
87
88 typedef enum {
89 IDN_GPROPS_OKAY,
90 IDN_GPROPS_UNCHECKED,
91 IDN_GPROPS_ERROR
92 } idn_gprops_t;
93
94 struct idn_history idnhlog;
95
96 /*
97 * IDN "tunables".
98 */
99 int idn_smr_size;
100 int idn_nwr_size;
101 int idn_lowat;
102 int idn_hiwat;
103 int idn_protocol_nservers;
104 int idn_awolmsg_interval;
105 int idn_smr_bufsize;
106 int idn_slab_bufcount;
107 int idn_slab_prealloc;
108 int idn_slab_maxperdomain;
109 int idn_slab_mintotal;
110 int idn_window_max;
111 int idn_window_incr;
112 int idn_window_emax;
113 int idn_reclaim_min;
114 int idn_reclaim_max;
115 int idn_mbox_per_net;
116 int idn_max_nets;
117
118 int idn_netsvr_spin_count;
119 int idn_netsvr_wait_min;
120 int idn_netsvr_wait_max;
121 int idn_netsvr_wait_shift;
122
123 int idn_checksum;
124
125 int idn_msgwait_nego;
126 int idn_msgwait_cfg;
127 int idn_msgwait_con;
128 int idn_msgwait_fin;
129 int idn_msgwait_cmd;
130 int idn_msgwait_data;
131
132 int idn_retryfreq_nego;
133 int idn_retryfreq_con;
134 int idn_retryfreq_fin;
135
136 int idn_window_emax; /* calculated */
137 int idn_slab_maxperdomain; /* calculated */
138
139 /*
140 * DMV interrupt support.
141 */
142 int idn_pil;
143 int idn_dmv_pending_max;
144 idn_dmv_msg_t *idn_iv_queue[NCPU];
145 int idn_intr_index[NCPU]; /* idn_handler ONLY */
146 static idn_dmv_data_t *idn_dmv_data;
147
148 int idn_sigbpil;
149
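/*
 * "Named dispatch" (ndd) parameters, registered from idnopen() via
 * idn_param_register() and accessed through idn_param_get/set().
 */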
150 idnparam_t idn_param_arr[] = {
151 { 0, 1, 0, /* 0 */ "idn_modunloadable" },
152 };
153
154 /*
155 * Parameters that are only accessible in a DEBUG driver.
156 */
157 static char *idn_param_debug_only[] = {
158 #if 0
159 "idn_checksum",
160 #endif /* 0 */
161 0
162 };
163
164 /*
165 * Parameters that are READ-ONLY.
166 */
167 static char *idn_param_read_only[] = {
168 #if 0
169 "idn_window_emax",
170 "idn_slab_maxperdomain",
171 #endif /* 0 */
172 0
173 };
174
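/*
 * Table of global driver properties settable from the driver's .conf
 * file: allowed range, default value, property name, and the variable
 * that receives the value.  Consumed by idn_check_conf().
 */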
175 static struct idn_global_props {
176 int p_min, p_max, p_def;
177 char *p_string;
178 int *p_var;
179 } idn_global_props[] = {
180 { 0, 0, 0, "idn_debug", &idn_debug },
181 { 0, 1, 0, "idn_history", &idn_history },
182 { 0, IDN_SMR_MAXSIZE,
183 0, "idn_smr_size", &idn_smr_size },
184 { 0, IDN_SMR_MAXSIZE,
185 0, "idn_nwr_size", &idn_nwr_size },
186 { 1, 512*1024,
187 1, "idn_lowat", &idn_lowat },
188 { 1*1024,
189 1*1024*1024,
190 256*1024,
191 "idn_hiwat", &idn_hiwat },
192 { IDN_SMR_BUFSIZE_MIN,
193 IDN_SMR_BUFSIZE_MAX,
194 IDN_SMR_BUFSIZE_DEF,
195 "idn_smr_bufsize", &idn_smr_bufsize },
196 { 4, 1024, 32, "idn_slab_bufcount", &idn_slab_bufcount },
197 { 0, 10, 0, "idn_slab_prealloc", &idn_slab_prealloc },
198 { 2, MAX_DOMAINS,
199 8, "idn_slab_mintotal", &idn_slab_mintotal },
200 { 8, 256, 64, "idn_window_max", &idn_window_max },
201 { 0, 32, 8, "idn_window_incr", &idn_window_incr },
202 { 1, 128, 5, "idn_reclaim_min", &idn_reclaim_min },
203 { 0, 128, 0, "idn_reclaim_max", &idn_reclaim_max },
204 { 1, IDN_MAXMAX_NETS,
205 8, "idn_max_nets", &idn_max_nets },
206 { 31, 511, 127, "idn_mbox_per_net", &idn_mbox_per_net },
207 { 0, 1, 1, "idn_checksum", &idn_checksum },
208 { 0, 10000, 500, "idn_netsvr_spin_count",
209 &idn_netsvr_spin_count },
210 { 0, 30*100, 40, "idn_netsvr_wait_min", &idn_netsvr_wait_min },
211 { 0, 60*100, 16*100, "idn_netsvr_wait_max", &idn_netsvr_wait_max },
212 { 1, 5, 1, "idn_netsvr_wait_shift",
213 &idn_netsvr_wait_shift },
214 { 1, MAX_DOMAINS,
215 IDN_PROTOCOL_NSERVERS,
216 "idn_protocol_nservers",
217 &idn_protocol_nservers },
218 { 0, 3600, IDN_AWOLMSG_INTERVAL,
219 "idn_awolmsg_interval", &idn_awolmsg_interval },
220 { 10, 300, IDN_MSGWAIT_NEGO,
221 "idn_msgwait_nego", &idn_msgwait_nego },
222 { 10, 300, IDN_MSGWAIT_CFG,
223 "idn_msgwait_cfg", &idn_msgwait_cfg },
224 { 10, 300, IDN_MSGWAIT_CON,
225 "idn_msgwait_con", &idn_msgwait_con },
226 { 10, 300, IDN_MSGWAIT_FIN,
227 "idn_msgwait_fin", &idn_msgwait_fin },
228 { 10, 300, IDN_MSGWAIT_CMD,
229 "idn_msgwait_cmd", &idn_msgwait_cmd },
230 { 10, 300, IDN_MSGWAIT_DATA,
231 "idn_msgwait_data", &idn_msgwait_data },
232 { 1, 60, IDN_RETRYFREQ_NEGO,
233 "idn_retryfreq_nego", &idn_retryfreq_nego },
234 { 1, 60, IDN_RETRYFREQ_CON,
235 "idn_retryfreq_con", &idn_retryfreq_con },
236 { 1, 60, IDN_RETRYFREQ_FIN,
237 "idn_retryfreq_fin", &idn_retryfreq_fin },
238 { 1, 9, IDN_PIL,
239 "idn_pil", &idn_pil },
240 { 1, 9, IDN_SIGBPIL,
241 "idn_sigbpil", &idn_sigbpil },
242 { 8, 512, IDN_DMV_PENDING_MAX,
243 "idn_dmv_pending_max", &idn_dmv_pending_max },
244 { 0, 0, 0, NULL, NULL }
245 };
246
247 struct idn *idn_i2s_table[IDN_MAXMAX_NETS << 1];
248 clock_t idn_msg_waittime[IDN_NUM_MSGTYPES];
249 clock_t idn_msg_retrytime[(int)IDN_NUM_RETRYTYPES];
250
251 static caddr_t idn_ndlist; /* head of 'named dispatch' var list */
252
253 static int idnattach(dev_info_t *, ddi_attach_cmd_t);
254 static int idndetach(dev_info_t *, ddi_detach_cmd_t);
255 static int idnopen(register queue_t *, dev_t *, int, int, cred_t *);
256 static int idnclose(queue_t *, int, cred_t *);
257 static int idnwput(queue_t *, mblk_t *);
258 static int idnwsrv(queue_t *);
259 static int idnrput(queue_t *, mblk_t *);
260 static void idnioctl(queue_t *, mblk_t *);
261 static idn_gprops_t idn_check_conf(dev_info_t *dip, processorid_t *cpuid);
262 static int idn_size_check();
263 static void idn_xmit_monitor_init();
264 static void idn_xmit_monitor_deinit();
265 static void idn_init_msg_waittime();
266 static void idn_init_msg_retrytime();
267 static void idn_sigb_setup(cpu_sgnblk_t *sigbp, void *arg);
268 static int idn_init(dev_info_t *dip);
269 static int idn_deinit();
270 static void idn_sigbhandler_create();
271 static void idn_sigbhandler_kill();
272 static uint_t idn_sigbhandler_wakeup(caddr_t arg1, caddr_t arg2);
273 static void idn_sigbhandler_thread(struct sigbintr **sbpp);
274 static void idn_sigbhandler(processorid_t cpuid, cpu_sgnblk_t *sgnblkp);
275 static int idn_info(idnsb_info_t *sfp);
276 static int idn_init_smr();
277 static void idn_deinit_smr();
278 static int idn_prom_getsmr(uint_t *smrsz, uint64_t *paddrp,
279 uint64_t *sizep);
280 static int idn_init_handler();
281 static void idn_deinit_handler();
282 static uint_t idn_handler(caddr_t unused, caddr_t unused2);
283 /*
284 * ioctl services
285 */
286 static int idnioc_link(idnop_t *idnop);
287 static int idnioc_unlink(idnop_t *idnop);
288 static int idn_rw_mem(idnop_t *idnop);
289 static int idn_send_ping(idnop_t *idnop);
290
291 static void idn_domains_init(struct hwconfig *local_hw);
292 static void idn_domains_deinit();
293 static void idn_retrytask_init();
294 static void idn_retrytask_deinit();
295 static void idn_gkstat_init();
296 static void idn_gkstat_deinit();
297 static int idn_gkstat_update();
298 static void idn_timercache_init();
299 static void idn_timercache_deinit();
300 static void idn_dopers_init();
301 static void idn_dopers_deinit();
302
303 static void idn_param_cleanup();
304 static int idn_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
305 static int idn_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
306 cred_t *cr);
307 static int idn_param_register(register idnparam_t *idnpa, int count);
308 static int idn_slabpool_report(queue_t *wq, mblk_t *mp, caddr_t cp,
309 cred_t *cr);
310 static int idn_buffer_report(queue_t *wq, mblk_t *mp, caddr_t cp,
311 cred_t *cr);
312 static int idn_mboxtbl_report(queue_t *wq, mblk_t *mp, caddr_t cp,
313 cred_t *cr);
314 static int idn_mainmbox_report(queue_t *wq, mblk_t *mp, caddr_t cp,
315 cred_t *cr);
316 static void idn_mainmbox_domain_report(queue_t *wq, mblk_t *mp, int domid,
317 idn_mainmbox_t *mmp, char *mbxtype);
318 static int idn_global_report(queue_t *wq, mblk_t *mp, caddr_t cp,
319 cred_t *cr);
320 static int idn_domain_report(queue_t *wq, mblk_t *mp, caddr_t cp,
321 cred_t *cr);
322 static int idn_get_net_binding(queue_t *wq, mblk_t *mp, caddr_t cp,
323 cred_t *cr);
324 static int idn_set_net_binding(queue_t *wq, mblk_t *mp, char *value,
325 caddr_t cp, cred_t *cr);
326
327 /*
328 * String definitions used for DEBUG and non-DEBUG.
329 */
330 const char *idnm_str[] = {
331 /* 0 */ "null",
332 /* 1 */ "nego",
333 /* 2 */ "con",
334 /* 3 */ "cfg",
335 /* 4 */ "fin",
336 /* 5 */ "cmd",
337 /* 6 */ "data",
338 };
339
340 const char *idnds_str[] = {
341 /* 0 */ "CLOSED",
342 /* 1 */ "NEGO_PEND",
343 /* 2 */ "NEGO_SENT",
344 /* 3 */ "NEGO_RCVD",
345 /* 4 */ "CONFIG",
346 /* 5 */ "CON_PEND",
347 /* 6 */ "CON_SENT",
348 /* 7 */ "CON_RCVD",
349 /* 8 */ "CON_READY",
350 /* 9 */ "CONNECTED",
351 /* 10 */ "FIN_PEND",
352 /* 11 */ "FIN_SENT",
353 /* 12 */ "FIN_RCVD",
354 /* 13 */ "DMAP"
355 };
356
357 const char *idnxs_str[] = {
358 /* 0 */ "PEND",
359 /* 1 */ "SENT",
360 /* 2 */ "RCVD",
361 /* 3 */ "FINAL",
362 /* 4 */ "NIL"
363 };
364
365 const char *idngs_str[] = {
366 /* 0 */ "OFFLINE",
367 /* 1 */ "CONNECT",
368 /* 2 */ "ONLINE",
369 /* 3 */ "DISCONNECT",
370 /* 4 */ "RECONFIG",
371 /* 5 */ "unknown",
372 /* 6 */ "unknown",
373 /* 7 */ "unknown",
374 /* 8 */ "unknown",
375 /* 9 */ "unknown",
376 /* 10 */ "IGNORE"
377 };
378
379 const char *idncmd_str[] = {
380 /* 0 */ "unknown",
381 /* 1 */ "SLABALLOC",
382 /* 2 */ "SLABFREE",
383 /* 3 */ "SLABREAP",
384 /* 4 */ "NODENAME"
385 };
386
387 const char *idncon_str[] = {
388 /* 0 */ "OFF",
389 /* 1 */ "NORMAL",
390 /* 2 */ "QUERY"
391 };
392
393 const char *idnfin_str[] = {
394 /* 0 */ "OFF",
395 /* 1 */ "NORMAL",
396 /* 2 */ "FORCE_SOFT",
397 /* 3 */ "FORCE_HARD",
398 /* 4 */ "QUERY"
399 };
400
401 const char *idnfinopt_str[] = {
402 /* 0 */ "NONE",
403 /* 1 */ "UNLINK",
404 /* 2 */ "RELINK"
405 };
406
407 const char *idnfinarg_str[] = {
408 /* 0 */ "NONE",
409 /* 1 */ "SMRBAD",
410 /* 2 */ "CPUCFG",
411 /* 3 */ "HWERR",
412 /* 4 */ "CFGERR_FATAL",
413 /* 5 */ "CFGERR_MTU",
414 /* 6 */ "CFGERR_BUF",
415 /* 7 */ "CFGERR_SLAB",
416 /* 8 */ "CFGERR_NWR",
417 /* 9 */ "CFGERR_NETS",
418 /* 10 */ "CFGERR_MBOX",
419 /* 11 */ "CFGERR_NMCADR",
420 /* 12 */ "CFGERR_MCADR",
421 /* 13 */ "CFGERR_CKSUM",
422 /* 14 */ "CFGERR_SMR",
423 };
424
425 const char *idnsync_str[] = {
426 /* 0 */ "NIL",
427 /* 1 */ "CONNECT",
428 /* 2 */ "DISCONNECT"
429 };
430
431 const char *idnreg_str[] = {
432 /* 0 */ "REG",
433 /* 1 */ "NEW",
434 /* 2 */ "QUERY"
435 };
436
437 const char *idnnack_str[] = {
438 /* 0 */ "unknown",
439 /* 1 */ "NOCONN",
440 /* 2 */ "BADCHAN",
441 /* 3 */ "BADCFG",
442 /* 4 */ "BADCMD",
443 /* 5 */ "RETRY",
444 /* 6 */ "DUP",
445 /* 7 */ "EXIT",
446 /* 8 */ "--reserved1",
447 /* 9 */ "--reserved2",
448 /* 10 */ "--reserved3"
449 };
450
451 const char *idnop_str[] = {
452 /* 0 */ "DISCONNECTED",
453 /* 1 */ "CONNECTED",
454 /* 2 */ "ERROR"
455 };
456
457 const char *chanop_str[] = {
458 /* 0 */ "OPEN",
459 /* 1 */ "SOFT_CLOSE",
460 /* 2 */ "HARD_CLOSE",
461 /* 3 */ "OFFLINE",
462 /* 4 */ "ONLINE"
463 };
464
465 const char *chanaction_str[] = {
466 /* 0 */ "DETACH",
467 /* 1 */ "STOP",
468 /* 2 */ "SUSPEND",
469 /* 3 */ "RESUME",
470 /* 4 */ "RESTART",
471 /* 5 */ "ATTACH"
472 };
473
474 const char *timer_str[] = {
475 /* 0 */ "NIL",
476 /* 1 */ "MSG"
477 };
478
479 static struct module_info idnrinfo = {
480 IDNIDNUM, /* mi_idnum */
481 IDNNAME, /* mi_idname */
482 IDNMINPSZ, /* mi_minpsz */
483 IDNMAXPSZ, /* mi_maxpsz */
484 0, /* mi_hiwat - see IDN_HIWAT */
485 0 /* mi_lowat - see IDN_LOWAT */
486 };
487
488 static struct module_info idnwinfo = {
489 IDNIDNUM, /* mi_idnum */
490 IDNNAME, /* mi_idname */
491 IDNMINPSZ, /* mi_minpsz */
492 IDNMAXPSZ, /* mi_maxpsz */
493 0, /* mi_hiwat - see IDN_HIWAT */
494 0 /* mi_lowat - see IDN_LOWAT */
495 };
496
497 static struct qinit idnrinit = {
498 idnrput, /* qi_putp */
499 NULL, /* qi_srvp */
500 idnopen, /* qi_qopen */
501 idnclose, /* qi_qclose */
502 NULL, /* qi_qadmin */
503 &idnrinfo, /* qi_minfo */
504 NULL, /* qi_mstat */
505 NULL, /* qi_rwp */
506 NULL, /* qi_infop */
507 STRUIOT_DONTCARE /* qi_struiot */
508 };
509
510 static struct qinit idnwinit = {
511 idnwput, /* qi_putp */
512 idnwsrv, /* qi_srvp */
513 NULL, /* qi_qopen */
514 NULL, /* qi_qclose */
515 NULL, /* qi_qadmin */
516 &idnwinfo, /* qi_minfo */
517 NULL, /* qi_mstat */
518 NULL, /* qi_rwp */
519 NULL, /* qi_infop */
520 STRUIOT_DONTCARE /* qi_struiot */
521 };
522
523 struct streamtab idninfo = {
524 &idnrinit, /* st_rdinit */
525 &idnwinit, /* st_wrinit */
526 NULL, /* st_muxrinit */
527 NULL, /* st_muxwinit */
528 };
529
530 /*
531 * Module linkage information (cb_ops & dev_ops) for the kernel.
532 */
533
534 static struct cb_ops cb_idnops = {
535 nulldev, /* cb_open */
536 nulldev, /* cb_close */
537 nodev, /* cb_strategy */
538 nodev, /* cb_print */
539 nodev, /* cb_dump */
540 nodev, /* cb_read */
541 nodev, /* cb_write */
542 nodev, /* cb_ioctl */
543 nodev, /* cb_devmap */
544 nodev, /* cb_mmap */
545 nodev, /* cb_segmap */
546 nochpoll, /* cb_chpoll */
547 ddi_prop_op, /* cb_prop_op */
548 &idninfo, /* cb_stream */
549 D_MP, /* cb_flag */
550 CB_REV, /* cb_rev */
551 nodev, /* cb_aread */
552 nodev, /* cb_awrite */
553 };
554
555 static struct dev_ops idnops = {
556 DEVO_REV, /* devo_rev */
557 0, /* devo_refcnt */
558 ddi_no_info, /* devo_getinfo */
559 nulldev, /* devo_identify */
560 nulldev, /* devo_probe */
561 idnattach, /* devo_attach */
562 idndetach, /* devo_detach */
563 nodev, /* devo_reset */
564 &cb_idnops, /* devo_cb_ops */
565 (struct bus_ops *)NULL, /* devo_bus_ops */
566 NULL, /* devo_power */
567 ddi_quiesce_not_needed, /* quiesce */
568 };
569
570 extern cpuset_t cpu_ready_set;
571
572 static struct modldrv modldrv = {
573 &mod_driverops, /* This module is a pseudo driver */
574 IDNDESC " 1.58",
575 &idnops
576 };
577
578 static struct modlinkage modlinkage = {
579 MODREV_1,
580 &modldrv,
581 NULL
582 };
583
584 /*
585 * --------------------------------------------------
586 */
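/*
 * Loadable module entry points.
 */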
587 int
588 _init(void)
589 {
590 idn.version = IDN_VERSION;
591
592 return (mod_install(&modlinkage));
593 }
594
595 int
596 _fini(void)
597 {
598 return (mod_remove(&modlinkage));
599 }
600
601 int
602 _info(struct modinfo *modinfop)
603 {
604 return (mod_info(&modlinkage, modinfop));
605 }
606
607 /*
608 * ----------------------------------------------
609 */
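/*
 * DDI attach entry point.  DDI_RESUME simply re-enables DLPI service
 * on an already-attached instance.  DDI_ATTACH validates the .conf
 * properties, creates the minor node, maps the SMR and initializes
 * the driver core on the first instance, and registers the sigblock
 * (SSP) interrupt handlers.
 */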
610 static int
611 idnattach(dev_info_t *dip, ddi_attach_cmd_t cmd)
612 {
613 int instance;
614 int doinit = 0;
615 processorid_t bcpuid;
616 struct idn *sip;
617 struct idnstr *stp;
618 procname_t proc = "idnattach";
619
620
621 #ifndef lint
622 ASSERT(sizeof (idnsb_t) == IDNSB_SIZE);
623 ASSERT(offsetof(struct idnsb, id_hwchkpt[0]) == 0x40);
624 #endif /* lint */
625
626 switch (cmd) {
627 case DDI_RESUME:
628 sip = ddi_get_driver_private(dip);
629 /*
630 		 * sip may not yet have been set if the
631 * OBP environment variable (idn-smr-size)
632 * was not set.
633 */
634 if (sip == NULL)
635 return (DDI_FAILURE);
636 /*
637 * RESUME IDN services.
638 */
639 IDN_GLOCK_SHARED();
640 if (idn.state != IDNGS_OFFLINE) {
641 cmn_err(CE_WARN,
642 "IDN: 101: not in expected OFFLINE state "
643 "for DDI_RESUME");
644 ASSERT(0);
645 }
646 IDN_GUNLOCK();
647
648 /*
649 * RESUME DLPI services.
650 */
651 sip->si_flags &= ~IDNSUSPENDED;
652
653 rw_enter(&idn.struprwlock, RW_READER);
654 for (stp = idn.strup; stp; stp = stp->ss_nextp)
655 if (stp->ss_sip == sip) {
656 doinit = 1;
657 break;
658 }
659 rw_exit(&idn.struprwlock);
660 if (doinit)
661 (void) idndl_init(sip);
662
663 return (DDI_SUCCESS);
664
665 case DDI_ATTACH:
666 break;
667
668 default:
669 return (DDI_FAILURE);
670 }
671
672 instance = ddi_get_instance(dip);
673
674 PR_DRV("%s: instance = %d\n", proc, instance);
675
676 if (idn_check_conf(dip, &bcpuid) == IDN_GPROPS_ERROR)
677 return (DDI_FAILURE);
678
679 mutex_enter(&idn.siplock);
680
681 if (ddi_create_minor_node(dip, IDNNAME, S_IFCHR, instance,
682 DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
683 mutex_exit(&idn.siplock);
684 return (DDI_FAILURE);
685 }
686
687 if (idn.smr.ready == 0) {
688 if (idn_init_smr() == 0) {
689 idn.enabled = 1;
690 #ifdef DEBUG
691 cmn_err(CE_NOTE, "!IDN: Driver enabled");
692 #endif /* DEBUG */
693 } else {
694 cmn_err(CE_NOTE,
695 "!IDN: 102: driver disabled "
696 "- check OBP environment "
697 "(idn-smr-size)");
698 mutex_exit(&idn.siplock);
699 return (DDI_SUCCESS);
700 }
701 }
702
703 ASSERT(idn.smr.ready || idn.enabled);
704
705 if (idn.dip == NULL) {
706 doinit = 1;
707
708 if (idn_size_check()) {
709 idn_deinit_smr();
710 ddi_remove_minor_node(dip, NULL);
711 mutex_exit(&idn.siplock);
712 return (DDI_FAILURE);
713 }
714
715 if (idn_init(dip)) {
716 idn_deinit_smr();
717 ddi_remove_minor_node(dip, NULL);
718 mutex_exit(&idn.siplock);
719 return (DDI_FAILURE);
720 }
721 }
722
723 ASSERT(idn.dip);
724
725 /*
726 * This must occur _after_ idn_init() since
727 * it assumes idn_chanservers_init() has been
728 * called.
729 */
730 idn_chanserver_bind(ddi_get_instance(dip), bcpuid);
731
732 /*
733 * DLPI supporting stuff.
734 */
735 sip = GETSTRUCT(struct idn, 1);
736 sip->si_dip = dip;
737 ddi_set_driver_private(dip, sip);
738 sip->si_nextp = idn.sip;
739 idn.sip = sip;
740 IDN_SET_INST2SIP(instance, sip);
741 mutex_exit(&idn.siplock);
742
743 if (doinit)
744 idndl_dlpi_init(); /* initializes idninfoack */
745 /*
746 * Get our local IDN ethernet address.
747 */
748 idndl_localetheraddr(sip, &sip->si_ouraddr);
749 idndl_statinit(sip);
750
751 if (doinit) {
752 idn_gkstat_init();
753 /*
754 * Add our sigblock SSP interrupt handler.
755 */
756 mutex_enter(&idn.sigbintr.sb_mutex);
757 idn_sigbhandler_create();
758 mutex_exit(&idn.sigbintr.sb_mutex);
759
760 if (sgnblk_poll_register(idn_sigbhandler) == 0) {
761 mutex_enter(&idn.sigbintr.sb_mutex);
762 idn_sigbhandler_kill();
763 idn.sigbintr.sb_cpuid = (uchar_t)-1;
764 idn.sigbintr.sb_busy = IDNSIGB_INACTIVE;
765 mutex_exit(&idn.sigbintr.sb_mutex);
766
767 idn_gkstat_deinit();
768
769 mutex_enter(&idn.siplock);
770 (void) idn_deinit();
771 IDN_SET_INST2SIP(instance, NULL);
772 idn.sip = sip->si_nextp;
773 mutex_exit(&idn.siplock);
774
775 ddi_remove_minor_node(dip, NULL);
776
777 return (DDI_FAILURE);
778 }
779 /*
780 * We require sigblkp[cpu0] to be mapped for hardware
781 * configuration determination and also auto-linking
782 * on bootup.
783 */
784 if (sgnblk_poll_reference(idn_sigb_setup, NULL) != 0) {
785 (void) sgnblk_poll_unregister(idn_sigbhandler);
786 mutex_enter(&idn.sigbintr.sb_mutex);
787 idn_sigbhandler_kill();
788 idn.sigbintr.sb_cpuid = (uchar_t)-1;
789 idn.sigbintr.sb_busy = IDNSIGB_INACTIVE;
790 mutex_exit(&idn.sigbintr.sb_mutex);
791
792 idn_gkstat_deinit();
793
794 mutex_enter(&idn.siplock);
795 (void) idn_deinit();
796 IDN_SET_INST2SIP(instance, NULL);
797 idn.sip = sip->si_nextp;
798 mutex_exit(&idn.siplock);
799
800 ddi_remove_minor_node(dip, NULL);
801
802 cmn_err(CE_WARN,
803 "IDN: 103: unable to reference sigblock area");
804
805 return (DDI_FAILURE);
806 }
807
808 idn_init_autolink();
809 }
810
811 ddi_report_dev(dip);
812
813 return (DDI_SUCCESS);
814 }
815
816 /*
817 * ----------------------------------------------
818 */
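/*
 * DDI detach entry point.  DDI_SUSPEND refuses while any domain is
 * linked; otherwise it marks DLPI suspended.  DDI_DETACH tears down
 * the driver core and sigblock handlers when the last stream instance
 * goes away, then unlinks and frees the per-instance structure.
 */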
819 static int
820 idndetach(dev_info_t *dip, ddi_detach_cmd_t cmd)
821 {
822 int err = 0;
823 int instance;
824 struct idn *sip, *hsip, *tsip;
825 procname_t proc = "idndetach";
826
827 sip = ddi_get_driver_private(dip);
828 instance = ddi_get_instance(dip);
829
830 switch (cmd) {
831 case DDI_SUSPEND:
832 if (sip == NULL)
833 return (DDI_FAILURE);
834 /*
835 * SUSPEND IDN services.
836 		 * - We don't actually suspend anything; we just
837 		 * make sure we're not connected per the DR protocol.
838 		 * If we really wanted to suspend, it should
839 		 * be done _after_ DLPI is suspended so that
840 		 * we're not competing with that traffic.
841 */
842 IDN_GLOCK_SHARED();
843
844 if (idn.state != IDNGS_OFFLINE) {
845 int d;
846
847 cmn_err(CE_WARN,
848 "IDN: 104: cannot suspend while active "
849 "(state = %s)",
850 idngs_str[idn.state]);
851
852 for (d = 0; d < MAX_DOMAINS; d++) {
853 idn_domain_t *dp;
854
855 dp = &idn_domain[d];
856 if (dp->dcpu < 0)
857 continue;
858
859 cmn_err(CE_CONT,
860 "IDN: 121: domain %d (CPU %d, name "
861 "\"%s\", state %s)\n",
862 d, dp->dcpu, dp->dname,
863 idnds_str[dp->dstate]);
864 }
865 err = 1;
866 }
867
868 IDN_GUNLOCK();
869
870 if (err)
871 return (DDI_FAILURE);
872 /*
873 * SUSPEND DLPI services.
874 */
875 sip->si_flags |= IDNSUSPENDED;
876
877 idndl_uninit(sip);
878
879 		return (DDI_SUCCESS);
880
881 case DDI_DETACH:
882 if (idn.enabled == 0) {
883 ddi_remove_minor_node(dip, NULL);
884 ASSERT(idn.dip == NULL);
885 return (DDI_SUCCESS);
886 }
887 if (!IDN_MODUNLOADABLE)
888 return (DDI_FAILURE);
889 break;
890
891 default:
892 return (DDI_FAILURE);
893 }
894
895 PR_DRV("%s: instance = %d\n", proc, instance);
896
897 if (sip == NULL) {
898 /*
899 * No resources allocated.
900 */
901 return (DDI_SUCCESS);
902 }
903
904 mutex_enter(&idn.siplock);
905 if (idn.sip && (idn.sip->si_nextp == NULL)) {
906 /*
907 * This is our last stream connection
908 * going away. Time to deinit and flag
909 * the SSP we're (IDN) DOWN.
910 */
911 if (idn_deinit()) {
912 /*
913 * Must still be active.
914 */
915 mutex_exit(&idn.siplock);
916 return (DDI_FAILURE);
917 }
918 idn_deinit_autolink();
919 /*
920 * Remove our sigblock SSP interrupt handler.
921 */
922 (void) sgnblk_poll_unregister(idn_sigbhandler);
923 mutex_enter(&idn.sigbintr.sb_mutex);
924 idn_sigbhandler_kill();
925 idn.sigbintr.sb_cpuid = (uchar_t)-1;
926 idn.sigbintr.sb_busy = IDNSIGB_NOTREADY;
927 mutex_exit(&idn.sigbintr.sb_mutex);
928 /*
929 * Remove our reference to the sigblock area.
930 */
931 sgnblk_poll_unreference(idn_sigb_setup);
932 idn_gkstat_deinit();
933 }
934
935 ddi_remove_minor_node(dip, NULL);
936
937 /*
938 * Remove this instance from our linked list.
939 */
940 IDN_SET_INST2SIP(instance, NULL);
941 if ((hsip = tsip = idn.sip) == sip) {
942 idn.sip = sip->si_nextp;
943 } else {
944 for (; hsip && (sip != hsip); tsip = hsip,
945 hsip = hsip->si_nextp)
946 ;
947 if (hsip)
948 tsip->si_nextp = hsip->si_nextp;
949 }
950 mutex_exit(&idn.siplock);
951 if (sip->si_ksp)
952 kstat_delete(sip->si_ksp);
953
954 ddi_set_driver_private(dip, NULL);
955
956 FREESTRUCT(sip, struct idn, 1);
957
958 return (DDI_SUCCESS);
959 }
960
961 /*
962 * ----------------------------------------------
963 */
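/*
 * Fetch and range-check the global "idn_*" driver properties against
 * the idn_global_props table, caching the overall result so the work
 * is only done once.  Also returns the optional "bind_cpu" property
 * through cpuid.
 */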
964 static idn_gprops_t
965 idn_check_conf(dev_info_t *dip, processorid_t *cpuid)
966 {
967 static idn_gprops_t global_props = IDN_GPROPS_UNCHECKED;
968
969 if (global_props == IDN_GPROPS_UNCHECKED) {
970 int p;
971
972 global_props = IDN_GPROPS_OKAY;
973
974 for (p = 0; idn_global_props[p].p_string; p++) {
975 char *str;
976 int *var;
977 int val, v_min, v_max, v_def;
978
979 str = idn_global_props[p].p_string;
980 var = (int *)idn_global_props[p].p_var;
981 v_min = idn_global_props[p].p_min;
982 v_max = idn_global_props[p].p_max;
983 v_def = idn_global_props[p].p_def;
984 ASSERT(str && var);
985
986 val = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
987 DDI_PROP_DONTPASS |
988 DDI_PROP_NOTPROM,
989 str, v_def);
990 if ((v_min != v_max) &&
991 ((val < v_min) || (val > v_max))) {
992 cmn_err(CE_WARN,
993 "IDN: 105: driver parameter "
994 "(%s) specified (%d) out of "
995 "range [%d - %d]",
996 str, val, v_min, v_max);
997 global_props = IDN_GPROPS_ERROR;
998 } else {
999 *var = val;
1000 }
1001 }
1002 }
1003
1004 *cpuid = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1005 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1006 "bind_cpu", -1);
1007
1008 return (global_props);
1009 }
1010
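/*
 * Sanity-check the interdependent size tunables: NWR versus SMR size,
 * low/high water marks, mailbox area versus slab size, minimum slab
 * count, SMR buffer alignment/power-of-two size, and the (odd)
 * mailbox count.  Returns 0 if consistent, -1 otherwise.
 */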
1011 static int
1012 idn_size_check()
1013 {
1014 int i, cnt;
1015 int rv = 0;
1016 ulong_t mboxareasize;
1017 int max_num_slabs;
1018 procname_t proc = "idn_size_check";
1019
1020 if (IDN_NWR_SIZE == 0)
1021 IDN_NWR_SIZE = IDN_SMR_SIZE;
1022
1023 if (IDN_NWR_SIZE > IDN_SMR_SIZE) {
1024 cmn_err(CE_WARN,
1025 "IDN: 106: idn_nwr_size(%d) > idn_smr_size(%d)"
1026 " - Limiting to %d MB",
1027 IDN_NWR_SIZE, IDN_SMR_SIZE, IDN_SMR_SIZE);
1028 IDN_NWR_SIZE = IDN_SMR_SIZE;
1029 }
1030
1031 if (MB2B(IDN_NWR_SIZE) < IDN_SLAB_SIZE) {
1032 cmn_err(CE_WARN,
1033 "IDN: 107: memory region(%lu) < slab size(%u)",
1034 MB2B(IDN_NWR_SIZE), IDN_SLAB_SIZE);
1035 rv = -1;
1036 }
1037
1038 if (IDN_LOWAT >= IDN_HIWAT) {
1039 cmn_err(CE_WARN,
1040 "IDN: 108: idn_lowat(%d) >= idn_hiwat(%d)",
1041 IDN_LOWAT, IDN_HIWAT);
1042 rv = -1;
1043 }
1044
1045 mboxareasize = (ulong_t)(IDN_MBOXAREA_SIZE + (IDN_SMR_BUFSIZE - 1));
1046 mboxareasize &= ~((ulong_t)IDN_SMR_BUFSIZE - 1);
1047 #ifdef DEBUG
1048 if ((ulong_t)IDN_SLAB_SIZE < mboxareasize) {
1049 PR_DRV("%s: slab size(%d) < mailbox area(%ld)",
1050 proc, IDN_SLAB_SIZE, mboxareasize);
1051 /* not fatal */
1052 }
1053 #endif /* DEBUG */
1054
1055 if ((mboxareasize + (ulong_t)IDN_SLAB_SIZE) > MB2B(IDN_NWR_SIZE)) {
1056 cmn_err(CE_WARN,
1057 "IDN: 109: mailbox area(%lu) + slab size(%u) "
1058 "> nwr region(%lu)",
1059 mboxareasize, IDN_SLAB_SIZE,
1060 MB2B(IDN_NWR_SIZE));
1061 rv = -1;
1062 }
1063
1064 max_num_slabs = (int)((MB2B(IDN_NWR_SIZE) - mboxareasize) /
1065 (ulong_t)IDN_SLAB_SIZE);
1066 if (max_num_slabs < IDN_SLAB_MINTOTAL) {
1067 cmn_err(CE_WARN,
1068 "IDN: 110: maximum number of slabs(%d) < "
1069 "minimum required(%d)",
1070 max_num_slabs, IDN_SLAB_MINTOTAL);
1071 rv = -1;
1072 } else {
1073 IDN_SLAB_MAXPERDOMAIN = max_num_slabs / IDN_SLAB_MINTOTAL;
1074 }
1075
1076 #if 0
1077 if ((IDN_MTU + sizeof (struct ether_header)) > IDN_DATA_SIZE) {
1078 cmn_err(CE_WARN,
1079 "IDN: (IDN_MTU(%d) + ether_header(%d)) "
1080 "> IDN_DATA_SIZE(%lu)",
1081 IDN_MTU, sizeof (struct ether_header),
1082 IDN_DATA_SIZE);
1083 rv = -1;
1084 }
1085 #endif /* 0 */
1086
1087 if (IDN_SMR_BUFSIZE & (IDN_ALIGNSIZE - 1)) {
1088 cmn_err(CE_WARN,
1089 "IDN: 111: idn_smr_bufsize(%d) not on a "
1090 "64 byte boundary", IDN_SMR_BUFSIZE);
1091 rv = -1;
1092 }
1093
1094 for (i = cnt = 0;
1095 (cnt <= 1) && (((ulong_t)1 << i) < MB2B(IDN_NWR_SIZE));
1096 i++)
1097 if ((1 << i) & IDN_SMR_BUFSIZE)
1098 cnt++;
1099 if ((i > 0) && (!cnt || (cnt > 1))) {
1100 cmn_err(CE_WARN,
1101 "IDN: 112: idn_smr_bufsize(%d) not a power of 2",
1102 IDN_SMR_BUFSIZE);
1103 rv = -1;
1104 }
1105
1106 if ((IDN_MBOX_PER_NET & 1) == 0) {
1107 cmn_err(CE_WARN,
1108 "IDN: 113: idn_mbox_per_net(%d) must be an "
1109 "odd number", IDN_MBOX_PER_NET);
1110 rv = -1;
1111 }
1112
1113 if (idn.nchannels > 0)
1114 IDN_WINDOW_EMAX = IDN_WINDOW_MAX +
1115 ((idn.nchannels - 1) * IDN_WINDOW_INCR);
1116
1117 if (IDN_NETSVR_WAIT_MIN > IDN_NETSVR_WAIT_MAX) {
1118 cmn_err(CE_WARN,
1119 "IDN: 115: idn_netsvr_wait_min(%d) cannot be "
1120 "greater than idn_netsvr_wait_max(%d)",
1121 IDN_NETSVR_WAIT_MIN,
1122 IDN_NETSVR_WAIT_MAX);
1123 rv = -1;
1124 }
1125
1126 return (rv);
1127 }
1128
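/*
 * Map the OBP-reserved SMR into the kernel heap arena, record its
 * PROM physical address and size, and derive the local domain ID
 * from that physical address.  Returns immediately if the SMR has
 * already been set up.
 */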
1129 static int
1130 idn_init_smr()
1131 {
1132 uint64_t obp_paddr;
1133 uint64_t obp_size; /* in Bytes */
1134 uint_t smr_size; /* in MBytes */
1135 pgcnt_t npages;
1136 procname_t proc = "idn_init_smr";
1137
1138 if (idn.smr.ready)
1139 return (0);
1140
1141 if (idn_prom_getsmr(&smr_size, &obp_paddr, &obp_size) < 0)
1142 return (-1);
1143
1144 PR_PROTO("%s: smr_size = %d, obp_paddr = 0x%lx, obp_size = 0x%lx\n",
1145 proc, smr_size, obp_paddr, obp_size);
1146
1147 if (IDN_SMR_SIZE)
1148 smr_size = MIN(smr_size, IDN_SMR_SIZE);
1149
1150 npages = btopr(MB2B(smr_size));
1151
1152 idn.smr.prom_paddr = obp_paddr;
1153 idn.smr.prom_size = obp_size;
1154 idn.smr.vaddr = vmem_alloc(heap_arena, ptob(npages), VM_SLEEP);
1155 ASSERT(((ulong_t)idn.smr.vaddr & MMU_PAGEOFFSET) == 0);
1156 idn.smr.locpfn = (pfn_t)(obp_paddr >> MMU_PAGESHIFT);
1157 idn.smr.rempfn = idn.smr.rempfnlim = PFN_INVALID;
1158 IDN_SMR_SIZE = smr_size;
1159
1160 PR_PROTO("%s: smr vaddr = %p\n", proc, (void *)idn.smr.vaddr);
1161
1162 smr_remap(&kas, idn.smr.vaddr, idn.smr.locpfn, IDN_SMR_SIZE);
1163
1164 idn.localid = PADDR_TO_DOMAINID(obp_paddr);
1165
1166 idn.smr.ready = 1;
1167
1168 return (0);
1169 }
1170
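/*
 * Undo idn_init_smr(): unmap the SMR, return its virtual range to the
 * heap arena, and reset the local domain ID and SMR size.
 */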
1171 static void
1172 idn_deinit_smr()
1173 {
1174 pgcnt_t npages;
1175
1176 if (idn.smr.ready == 0)
1177 return;
1178
1179 smr_remap(&kas, idn.smr.vaddr, PFN_INVALID, IDN_SMR_SIZE);
1180
1181 npages = btopr(MB2B(IDN_SMR_SIZE));
1182
1183 vmem_free(heap_arena, idn.smr.vaddr, ptob(npages));
1184
1185 idn.localid = IDN_NIL_DOMID;
1186
1187 IDN_SMR_SIZE = 0;
1188
1189 idn.smr.ready = 0;
1190 }
1191
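/*
 * Callback handed to sgnblk_poll_reference(): records (or clears,
 * when sigbp is NULL) the driver's pointers into the signature
 * block's IDN event area.
 */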
1192 /*ARGSUSED1*/
1193 static void
1194 idn_sigb_setup(cpu_sgnblk_t *sigbp, void *arg)
1195 {
1196 procname_t proc = "idn_sigb_setup";
1197
1198 PR_PROTO("%s: Setting sigb to %p\n", proc, (void *)sigbp);
1199
1200 mutex_enter(&idn.idnsb_mutex);
1201 if (sigbp == NULL) {
1202 idn.idnsb = NULL;
1203 idn.idnsb_eventp = NULL;
1204 mutex_exit(&idn.idnsb_mutex);
1205 return;
1206 }
1207 idn.idnsb_eventp = (idnsb_event_t *)sigbp->sigb_idn;
1208 idn.idnsb = (idnsb_t *)&idn.idnsb_eventp->idn_reserved1;
1209 mutex_exit(&idn.idnsb_mutex);
1210 }
1211
1212 static int
1213 idn_init(dev_info_t *dip)
1214 {
1215 struct hwconfig local_hw;
1216 procname_t proc = "idn_init";
1217
1218
1219 ASSERT(MUTEX_HELD(&idn.siplock));
1220
1221 if (!idn.enabled) {
1222 cmn_err(CE_WARN,
1223 "IDN: 117: IDN not enabled");
1224 return (-1);
1225 }
1226
1227 if (idn.dip != NULL) {
1228 PR_DRV("%s: already initialized (dip = 0x%p)\n",
1229 proc, (void *)idn.dip);
1230 return (0);
1231 }
1232
1233 /*
1234 * Determine our local domain's hardware configuration.
1235 */
1236 if (get_hw_config(&local_hw)) {
1237 cmn_err(CE_WARN,
1238 "IDN: 118: hardware config not appropriate");
1239 return (-1);
1240 }
1241
1242 PR_DRV("%s: locpfn = 0x%lx\n", proc, idn.smr.locpfn);
1243 PR_DRV("%s: rempfn = 0x%lx\n", proc, idn.smr.rempfn);
1244 PR_DRV("%s: smrsize = %d MB\n", proc, IDN_SMR_SIZE);
1245
1246 rw_init(&idn.grwlock, NULL, RW_DEFAULT, NULL);
1247 rw_init(&idn.struprwlock, NULL, RW_DEFAULT, NULL);
1248 mutex_init(&idn.sync.sz_mutex, NULL, MUTEX_DEFAULT, NULL);
1249 mutex_init(&idn.sipwenlock, NULL, MUTEX_DEFAULT, NULL);
1250
1251 /*
1252 * Calculate proper value for idn.bframe_shift.
1253 	 * Kind of hokey as it assumes knowledge of the format
1254 * of the idnparam_t structure.
1255 */
1256 {
1257 int s;
1258
1259 for (s = 0; (1 << s) < IDN_SMR_BUFSIZE_MIN; s++)
1260 ;
1261 idn.bframe_shift = s;
1262 PR_DRV("%s: idn.bframe_shift = %d, minbuf = %d\n",
1263 proc, idn.bframe_shift, IDN_SMR_BUFSIZE_MIN);
1264
1265 ASSERT((uint_t)IDN_OFFSET2BFRAME(MB2B(idn_smr_size)) <
1266 (1 << 24));
1267 }
1268
1269 idn_xmit_monitor_init();
1270
1271 /*
1272 * Initialize the domain op (dopers) stuff.
1273 */
1274 idn_dopers_init();
1275
1276 /*
1277 * Initialize the timer (kmem) cache used for timeout
1278 * structures.
1279 */
1280 idn_timercache_init();
1281
1282 /*
1283 * Initialize the slab waiting areas.
1284 */
1285 (void) smr_slabwaiter_init();
1286
1287 /*
1288 * Initialize retryjob kmem cache.
1289 */
1290 idn_retrytask_init();
1291
1292 idn_init_msg_waittime();
1293 idn_init_msg_retrytime();
1294
1295 /*
1296 	 * Initialize idn_domains[] and local domain information,
1297 	 * including idn_global information.
1298 */
1299 idn_domains_init(&local_hw);
1300
1301 /*
1302 * Start up IDN protocol servers.
1303 */
1304 if (idn_protocol_init(idn_protocol_nservers) <= 0) {
1305 cmn_err(CE_WARN,
1306 "IDN: 119: failed to initialize %d protocol servers",
1307 idn_protocol_nservers);
1308 idn_domains_deinit();
1309 idn_retrytask_deinit();
1310 smr_slabwaiter_deinit();
1311 idn_timercache_deinit();
1312 idn_dopers_deinit();
1313 idn_xmit_monitor_deinit();
1314 mutex_destroy(&idn.sipwenlock);
1315 mutex_destroy(&idn.sync.sz_mutex);
1316 rw_destroy(&idn.grwlock);
1317 rw_destroy(&idn.struprwlock);
1318 return (-1);
1319 }
1320
1321 /*
1322 * Initialize chan_servers array.
1323 */
1324 (void) idn_chanservers_init();
1325
1326 /*
1327 * Need to register the IDN handler with the DMV subsystem.
1328 *
1329 * Need to prevent the IDN driver from being unloaded
1330 	 * once loaded since DMVs may come in at any time.
1331 	 * If the driver is not loaded and the idn_dmv_handler
1332 	 * has been registered with the DMV, the system will crash.
1333 */
1334 (void) idn_init_handler();
1335
1336 idn.dip = dip;
1337 IDN_GLOCK_EXCL();
1338 IDN_GSTATE_TRANSITION(IDNGS_OFFLINE);
1339 IDN_GUNLOCK();
1340
1341 return (0);
1342 }
1343
1344 static int
1345 idn_deinit()
1346 {
1347 procname_t proc = "idn_deinit";
1348
1349 ASSERT(MUTEX_HELD(&idn.siplock));
1350
1351 IDN_GLOCK_EXCL();
1352
1353 if (idn.state != IDNGS_OFFLINE) {
1354 int d;
1355
1356 cmn_err(CE_WARN,
1357 "IDN: 120: cannot deinit while active "
1358 "(state = %s)", idngs_str[idn.state]);
1359
1360 for (d = 0; d < MAX_DOMAINS; d++) {
1361 idn_domain_t *dp;
1362
1363 dp = &idn_domain[d];
1364 if (dp->dcpu < 0)
1365 continue;
1366
1367 cmn_err(CE_CONT,
1368 "IDN: 121: domain %d (CPU %d, "
1369 "name \"%s\", state %s)\n",
1370 d, dp->dcpu, dp->dname,
1371 idnds_str[dp->dstate]);
1372 }
1373 IDN_GUNLOCK();
1374 return (-1);
1375 }
1376
1377 if (idn.dip == NULL) {
1378 PR_DRV("%s: already deinitialized\n", proc);
1379 IDN_GUNLOCK();
1380 return (0);
1381 }
1382
1383 IDN_GSTATE_TRANSITION(IDNGS_IGNORE);
1384
1385 IDN_GUNLOCK();
1386
1387 idn_xmit_monitor_deinit();
1388
1389 idn_deinit_handler();
1390
1391 idn_chanservers_deinit();
1392
1393 idn.nchannels = 0;
1394 ASSERT(idn.chan_servers == NULL);
1395
1396 smr_slabpool_deinit();
1397
1398 idn_protocol_deinit();
1399
1400 idn_domains_deinit();
1401
1402 smr_slabwaiter_deinit();
1403
1404 idn_retrytask_deinit();
1405
1406 idn_timercache_deinit();
1407
1408 idn_dopers_deinit();
1409
1410 ASSERT(idn.localid == IDN_NIL_DOMID);
1411
1412 IDN_SET_MASTERID(IDN_NIL_DOMID);
1413
1414 idn_deinit_smr();
1415
1416 mutex_destroy(&idn.sipwenlock);
1417 mutex_destroy(&idn.sync.sz_mutex);
1418 rw_destroy(&idn.grwlock);
1419 rw_destroy(&idn.struprwlock);
1420
1421 idn.dip = NULL;
1422
1423 return (0);
1424 }
1425
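/*
 * Initialize/tear down the transmit monitor state (lock, timeout id,
 * and the set of channels wanting a transmit kick).  The deinit path
 * cancels any outstanding timeout after dropping the lock.
 */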
1426 static void
1427 idn_xmit_monitor_init()
1428 {
1429 mutex_init(&idn.xmit_lock, NULL, MUTEX_DEFAULT, NULL);
1430 idn.xmit_tid = (timeout_id_t)NULL;
1431 CHANSET_ZERO(idn.xmit_chanset_wanted);
1432 }
1433
1434 static void
1435 idn_xmit_monitor_deinit()
1436 {
1437 timeout_id_t tid;
1438
1439 mutex_enter(&idn.xmit_lock);
1440 CHANSET_ZERO(idn.xmit_chanset_wanted);
1441 if ((tid = idn.xmit_tid) != (timeout_id_t)NULL) {
1442 idn.xmit_tid = (timeout_id_t)NULL;
1443 mutex_exit(&idn.xmit_lock);
1444 (void) untimeout(tid);
1445 } else {
1446 mutex_exit(&idn.xmit_lock);
1447 }
1448 mutex_destroy(&idn.xmit_lock);
1449 }
1450
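/*
 * Convert the per-message-type wait tunables (seconds) into clock
 * ticks for the timeout machinery.
 */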
1451 static void
1452 idn_init_msg_waittime()
1453 {
1454 idn_msg_waittime[IDNP_NULL] = -1;
1455 idn_msg_waittime[IDNP_NEGO] = idn_msgwait_nego * hz;
1456 idn_msg_waittime[IDNP_CFG] = idn_msgwait_cfg * hz;
1457 idn_msg_waittime[IDNP_CON] = idn_msgwait_con * hz;
1458 idn_msg_waittime[IDNP_FIN] = idn_msgwait_fin * hz;
1459 idn_msg_waittime[IDNP_CMD] = idn_msgwait_cmd * hz;
1460 idn_msg_waittime[IDNP_DATA] = idn_msgwait_data * hz;
1461 }
1462
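/*
 * Likewise, convert the retry-frequency tunables (seconds) into clock
 * ticks, indexed by retry type.
 */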
1463 static void
1464 idn_init_msg_retrytime()
1465 {
1466 idn_msg_retrytime[(int)IDNRETRY_NIL] = -1;
1467 idn_msg_retrytime[(int)IDNRETRY_NEGO] = idn_retryfreq_nego * hz;
1468 idn_msg_retrytime[(int)IDNRETRY_CON] = idn_retryfreq_con * hz;
1469 idn_msg_retrytime[(int)IDNRETRY_CONQ] = idn_retryfreq_con * hz;
1470 idn_msg_retrytime[(int)IDNRETRY_FIN] = idn_retryfreq_fin * hz;
1471 idn_msg_retrytime[(int)IDNRETRY_FINQ] = idn_retryfreq_fin * hz;
1472 }
1473
1474 /*
1475 * ----------------------------------------------
1476 */
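/*
 * STREAMS open routine (clone device).  Registers the ndd parameters
 * on first open, allocates and links a per-stream idnstr structure,
 * and sets the queue's maximum packet size and water marks.
 */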
1477 /*ARGSUSED*/
1478 static int
1479 idnopen(register queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *crp)
1480 {
1481 register int err = 0;
1482 int minordev;
1483 struct idnstr *stp, **pstp;
1484 procname_t proc = "idnopen";
1485
1486 ASSERT(sflag != MODOPEN);
1487
1488 IDN_GLOCK_EXCL();
1489
1490 rw_enter(&idn.struprwlock, RW_WRITER);
1491 mutex_enter(&idn.sipwenlock);
1492 pstp = &idn.strup;
1493
1494 if (idn.enabled == 0) {
1495 PR_DRV("%s: Driver disabled (check OBP:idn-smr-size)\n",
1496 proc);
1497 mutex_exit(&idn.sipwenlock);
1498 rw_exit(&idn.struprwlock);
1499 IDN_GUNLOCK();
1500 return (EACCES);
1501 }
1502
1503 if (!idn_ndlist &&
1504 idn_param_register(idn_param_arr, A_CNT(idn_param_arr))) {
1505 PR_DRV("%s: failed to register ndd parameters\n", proc);
1506 mutex_exit(&idn.sipwenlock);
1507 rw_exit(&idn.struprwlock);
1508 IDN_GUNLOCK();
1509 return (ENOMEM);
1510 }
1511 IDN_GUNLOCK();
1512
1513 if (sflag == CLONEOPEN) {
1514 minordev = 0;
1515 for (stp = *pstp; stp; pstp = &stp->ss_nextp, stp = *pstp) {
1516 if (minordev < stp->ss_minor)
1517 break;
1518 minordev++;
1519 }
1520 *devp = makedevice(getmajor(*devp), minordev);
1521 } else {
1522 minordev = getminor(*devp);
1523 }
1524 if (rq->q_ptr)
1525 goto done;
1526
1527 stp = GETSTRUCT(struct idnstr, 1);
1528 stp->ss_rq = rq;
1529 stp->ss_minor = minordev;
1530 rw_init(&stp->ss_rwlock, NULL, RW_DEFAULT, NULL);
1531 /*
1532 * DLPI stuff
1533 */
1534 stp->ss_sip = NULL;
1535 stp->ss_state = DL_UNATTACHED;
1536 stp->ss_sap = 0;
1537 stp->ss_flags = 0;
1538 stp->ss_mccount = 0;
1539 stp->ss_mctab = NULL;
1540
1541 /*
1542 * Link new entry into list of actives.
1543 */
1544 stp->ss_nextp = *pstp;
1545 *pstp = stp;
1546
1547 WR(rq)->q_ptr = rq->q_ptr = (void *)stp;
1548 /*
1549 * Disable automatic enabling of our write service
1550 * procedure. We control this explicitly.
1551 */
1552 noenable(WR(rq));
1553
1554 /*
1555 * Set our STREAMs queue maximum packet size that
1556 * we'll accept and our high/low water marks.
1557 */
1558 (void) strqset(WR(rq), QMAXPSZ, 0, IDN_DATA_SIZE);
1559 (void) strqset(WR(rq), QLOWAT, 0, IDN_LOWAT);
1560 (void) strqset(WR(rq), QHIWAT, 0, IDN_HIWAT);
1561 (void) strqset(rq, QMAXPSZ, 0, IDN_DATA_SIZE);
1562 (void) strqset(rq, QLOWAT, 0, IDN_LOWAT);
1563 (void) strqset(rq, QHIWAT, 0, IDN_HIWAT);
1564
1565 done:
1566 mutex_exit(&idn.sipwenlock);
1567 rw_exit(&idn.struprwlock);
1568
1569 (void) qassociate(rq, -1);
1570 qprocson(rq);
1571
1572 return (err);
1573 }
1574
1575 /*
1576 * ----------------------------------------------
1577 */
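/*
 * STREAMS close routine: detaches any DLPI state, unlinks the
 * per-stream structure from the active list, and frees it.
 */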
1578 /*ARGSUSED1*/
1579 static int
1580 idnclose(queue_t *rq, int flag, cred_t *crp)
1581 {
1582 struct idnstr *stp, **pstp;
1583
1584 ASSERT(rq->q_ptr);
1585
1586 qprocsoff(rq);
1587 /*
1588 * Guaranteed to be single threaded with respect
1589 * to this stream at this point.
1590 */
1591
1592 stp = (struct idnstr *)rq->q_ptr;
1593
1594 if (stp->ss_sip)
1595 idndl_dodetach(stp);
1596
1597 rw_enter(&idn.struprwlock, RW_WRITER);
1598 mutex_enter(&idn.sipwenlock);
1599 pstp = &idn.strup;
1600 for (stp = *pstp; stp; pstp = &stp->ss_nextp, stp = *pstp)
1601 if (stp == (struct idnstr *)rq->q_ptr)
1602 break;
1603 ASSERT(stp);
1604 ASSERT(stp->ss_rq == rq);
1605 *pstp = stp->ss_nextp;
1606
1607 rw_destroy(&stp->ss_rwlock);
1608 FREESTRUCT(stp, struct idnstr, 1);
1609
1610 WR(rq)->q_ptr = rq->q_ptr = NULL;
1611 mutex_exit(&idn.sipwenlock);
1612 rw_exit(&idn.struprwlock);
1613
1614 idn_param_cleanup();
1615 (void) qassociate(rq, -1);
1616
1617 return (0);
1618 }
1619
1620 /*
1621 * ----------------------------------------------
1622 */
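/*
 * Write-side put routine.  ioctls are handled inline; M_DATA is sent
 * directly in the fastpath case or queued for the service routine;
 * M_PROTO/M_PCPROTO are always queued so DLPI processing happens in
 * idnwsrv() rather than in the caller's context.
 */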
1623 static int
1624 idnwput(register queue_t *wq, register mblk_t *mp)
1625 {
1626 register struct idnstr *stp;
1627 struct idn *sip;
1628 procname_t proc = "idnwput";
1629
1630 stp = (struct idnstr *)wq->q_ptr;
1631 sip = stp->ss_sip;
1632
1633 switch (DB_TYPE(mp)) {
1634 case M_IOCTL:
1635 idnioctl(wq, mp);
1636 break;
1637
1638 case M_DATA:
1639 if (((stp->ss_flags & (IDNSFAST|IDNSRAW)) == 0) ||
1640 (stp->ss_state != DL_IDLE) ||
1641 (sip == NULL)) {
1642 PR_DLPI("%s: fl=0x%x, st=0x%x, ret(EPROTO)\n",
1643 proc, stp->ss_flags, stp->ss_state);
1644 merror(wq, mp, EPROTO);
1645
1646 } else if (wq->q_first) {
1647 if (putq(wq, mp) == 0)
1648 freemsg(mp);
1649 /*
1650 * We're only holding the reader lock,
1651 * but that's okay since this field
1652 * is just a soft-flag.
1653 */
1654 sip->si_wantw = 1;
1655 qenable(wq);
1656
1657 } else if (sip->si_flags & IDNPROMISC) {
1658 if (putq(wq, mp) == 0) {
1659 PR_DLPI("%s: putq failed\n", proc);
1660 freemsg(mp);
1661 } else {
1662 PR_DLPI("%s: putq succeeded\n", proc);
1663 }
1664 qenable(wq);
1665
1666 } else {
1667 PR_DLPI("%s: idndl_start(sip=0x%p)\n",
1668 proc, (void *)sip);
1669 rw_enter(&stp->ss_rwlock, RW_READER);
1670 (void) idndl_start(wq, mp, sip);
1671 rw_exit(&stp->ss_rwlock);
1672 }
1673 break;
1674
1675 case M_PROTO:
1676 case M_PCPROTO:
1677 /*
1678 * Break the association between the current thread
1679 * and the thread that calls idndl_proto() to resolve
1680 * the problem of idn_chan_server() threads which
1681 * loop back around to call idndl_proto and try to
1682 * recursively acquire internal locks.
1683 */
1684 if (putq(wq, mp) == 0)
1685 freemsg(mp);
1686 qenable(wq);
1687 break;
1688
1689 case M_FLUSH:
1690 PR_STR("%s: M_FLUSH request (flush = %d)\n",
1691 proc, (int)*mp->b_rptr);
1692 if (*mp->b_rptr & FLUSHW) {
1693 flushq(wq, FLUSHALL);
1694 *mp->b_rptr &= ~FLUSHW;
1695 }
1696 if (*mp->b_rptr & FLUSHR)
1697 qreply(wq, mp);
1698 else
1699 freemsg(mp);
1700 break;
1701
1702 default:
1703 PR_STR("%s: unexpected DB_TYPE 0x%x\n",
1704 proc, DB_TYPE(mp));
1705 freemsg(mp);
1706 break;
1707 }
1708
1709 return (0);
1710 }
1711
1712 /*
1713 * ----------------------------------------------
1714 */
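/*
 * Write-side service routine: drains queued M_DATA through
 * idndl_start() and queued M_PROTO/M_PCPROTO through idndl_proto().
 */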
1715 static int
1716 idnwsrv(queue_t *wq)
1717 {
1718 mblk_t *mp;
1719 int err = 0;
1720 struct idnstr *stp;
1721 struct idn *sip;
1722 procname_t proc = "idnwsrv";
1723
1724 stp = (struct idnstr *)wq->q_ptr;
1725 sip = stp->ss_sip;
1726
1727 while (mp = getq(wq)) {
1728 switch (DB_TYPE(mp)) {
1729 case M_DATA:
1730 if (sip) {
1731 PR_DLPI("%s: idndl_start(sip=0x%p)\n",
1732 proc, (void *)sip);
1733 rw_enter(&stp->ss_rwlock, RW_READER);
1734 err = idndl_start(wq, mp, sip);
1735 rw_exit(&stp->ss_rwlock);
1736 if (err)
1737 goto done;
1738 } else {
1739 PR_DLPI("%s: NO sip to start msg\n", proc);
1740 freemsg(mp);
1741 }
1742 break;
1743
1744 case M_PROTO:
1745 case M_PCPROTO:
1746 idndl_proto(wq, mp);
1747 break;
1748
1749 default:
1750 ASSERT(0);
1751 PR_STR("%s: unexpected db_type (%d)\n",
1752 proc, DB_TYPE(mp));
1753 freemsg(mp);
1754 break;
1755 }
1756 }
1757 done:
1758 return (0);
1759 }
1760
1761 /*
1762 * ----------------------------------------------
1763 */
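/*
 * Read-side put routine.  Under DLPI no M_DATA should arrive here;
 * M_FLUSH and M_ERROR are passed upstream, anything else is dropped.
 */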
1764 static int
1765 idnrput(register queue_t *rq, register mblk_t *mp)
1766 {
1767 register int err = 0;
1768 procname_t proc = "idnrput";
1769
1770 switch (DB_TYPE(mp)) {
1771 case M_DATA:
1772 /*
1773 * Should not reach here with data packets
1774 * if running DLPI.
1775 */
1776 cmn_err(CE_WARN,
1777 "IDN: 123: unexpected M_DATA packets for "
1778 "q_stream 0x%p", (void *)rq->q_stream);
1779 freemsg(mp);
1780 err = ENXIO;
1781 break;
1782
1783 case M_FLUSH:
1784 PR_STR("%s: M_FLUSH request (flush = %d)\n",
1785 proc, (int)*mp->b_rptr);
1786 if (*mp->b_rptr & FLUSHR)
1787 flushq(rq, FLUSHALL);
1788 (void) putnext(rq, mp);
1789 break;
1790
1791 case M_ERROR:
1792 PR_STR("%s: M_ERROR (error = %d) coming through\n",
1793 proc, (int)*mp->b_rptr);
1794 (void) putnext(rq, mp);
1795 break;
1796 default:
1797 PR_STR("%s: unexpected DB_TYPE 0x%x\n",
1798 proc, DB_TYPE(mp));
1799 freemsg(mp);
1800 err = ENXIO;
1801 break;
1802 }
1803
1804 return (err);
1805 }
1806
1807 /*
1808 * ----------------------------------------------
1809 * Not allowed to enqueue messages! Only M_DATA messages
1810 * can be enqueued on the write stream.
1811 * ----------------------------------------------
1812 */
1813 static void
1814 idnioctl(register queue_t *wq, register mblk_t *mp)
1815 {
1816 register struct iocblk *iocp;
1817 register int cmd;
1818 idnop_t *idnop = NULL;
1819 int error = 0;
1820 int argsize;
1821 procname_t proc = "idnioctl";
1822
1823 iocp = (struct iocblk *)mp->b_rptr;
1824 cmd = iocp->ioc_cmd;
1825
1826 /*
1827 	 * Intercept DLPI ioctls.
1828 */
1829 if (VALID_DLPIOP(cmd)) {
1830 PR_STR("%s: DLPI ioctl(%d)\n", proc, cmd);
1831 error = idnioc_dlpi(wq, mp, &argsize);
1832 goto done;
1833 }
1834
1835 /*
1836 * Validate expected arguments.
1837 */
1838 if (!VALID_IDNIOCTL(cmd)) {
1839 PR_STR("%s: invalid cmd (0x%x)\n", proc, cmd);
1840 error = EINVAL;
1841 goto done;
1842
1843 } else if (!VALID_NDOP(cmd)) {
1844 error = miocpullup(mp, sizeof (idnop_t));
1845 if (error != 0) {
1846 PR_STR("%s: idnioc(cmd = 0x%x) miocpullup "
1847 "failed (%d)\n", proc, cmd, error);
1848 goto done;
1849 }
1850 }
1851
1852 argsize = mp->b_cont->b_wptr - mp->b_cont->b_rptr;
1853 idnop = (idnop_t *)mp->b_cont->b_rptr;
1854
1855 switch (cmd) {
1856 case IDNIOC_LINK:
1857 error = idnioc_link(idnop);
1858 break;
1859
1860 case IDNIOC_UNLINK:
1861 error = idnioc_unlink(idnop);
1862 break;
1863
1864 case IDNIOC_MEM_RW:
1865 error = idn_rw_mem(idnop);
1866 break;
1867
1868 case IDNIOC_PING:
1869 error = idn_send_ping(idnop);
1870 break;
1871
1872 case ND_SET:
1873 IDN_GLOCK_EXCL();
1874 if (!nd_getset(wq, idn_ndlist, mp)) {
1875 IDN_GUNLOCK();
1876 error = ENOENT;
1877 break;
1878 }
1879 IDN_GUNLOCK();
1880 qreply(wq, mp);
1881 return;
1882
1883 case ND_GET:
1884 IDN_GLOCK_SHARED();
1885 if (!nd_getset(wq, idn_ndlist, mp)) {
1886 IDN_GUNLOCK();
1887 error = ENOENT;
1888 break;
1889 }
1890 IDN_GUNLOCK();
1891 qreply(wq, mp);
1892 return;
1893
1894 default:
1895 PR_STR("%s: invalid cmd 0x%x\n", proc, cmd);
1896 error = EINVAL;
1897 break;
1898 }
1899
1900 done:
1901 if (error == 0)
1902 miocack(wq, mp, argsize, 0);
1903 else
1904 miocnak(wq, mp, 0, error);
1905 }
1906
1907 /*
1908  * This thread asynchronously services the SSI_LINK/UNLINK
1909  * calls that come in via BBSRAM.  This is necessary
1910 * since we can't process them from within the context of
1911 * the interrupt handler in which idn_sigbhandler() is
1912 * called.
1913 */
1914 static void
1915 idn_sigbhandler_thread(struct sigbintr **sbpp)
1916 {
1917 int d, pri, rv;
1918 struct sigbintr *sbp;
1919 sigbmbox_t *mbp;
1920 idn_fin_t fintype;
1921 idnsb_data_t *sdp;
1922 idnsb_info_t *sfp;
1923 idnsb_error_t *sep;
1924 idn_domain_t *dp;
1925 procname_t proc = "idn_sigbhandler_thread";
1926
1927
1928 sbp = *sbpp;
1929
1930 PR_PROTO("%s: KICKED OFF (sigbintr pointer = 0x%p)\n",
1931 proc, (void *)sbp);
1932
1933 ASSERT(sbp == &idn.sigbintr);
1934
1935 mutex_enter(&idn.sigbintr.sb_mutex);
1936
1937 while (sbp->sb_busy != IDNSIGB_DIE) {
1938 cpu_sgnblk_t *sigbp;
1939
1940 while ((sbp->sb_busy != IDNSIGB_ACTIVE) &&
1941 (sbp->sb_busy != IDNSIGB_DIE)) {
1942 cv_wait(&sbp->sb_cv, &idn.sigbintr.sb_mutex);
1943 PR_PROTO("%s: AWAKENED (busy = %d)\n",
1944 proc, (int)sbp->sb_busy);
1945 }
1946 if (sbp->sb_busy == IDNSIGB_DIE) {
1947 PR_PROTO("%s: DIE REQUESTED\n", proc);
1948 break;
1949 }
1950
1951 if ((sigbp = cpu_sgnblkp[sbp->sb_cpuid]) == NULL) {
1952 cmn_err(CE_WARN,
1953 "IDN: 124: sigblk for CPU ID %d "
1954 "is NULL", sbp->sb_cpuid);
1955 sbp->sb_busy = IDNSIGB_INACTIVE;
1956 continue;
1957 }
1958
1959 mbp = &sigbp->sigb_host_mbox;
1960
1961 if (mbp->flag != SIGB_MBOX_BUSY) {
1962 PR_PROTO("%s: sigblk mbox flag (%d) != BUSY (%d)\n",
1963 proc, mbp->flag, SIGB_MBOX_BUSY);
1964 sbp->sb_busy = IDNSIGB_INACTIVE;
1965 continue;
1966 }
1967 /*
1968 * The sb_busy bit is set and the mailbox flag
1969 * indicates BUSY also, so we effectively have things locked.
1970 		 * So, we can drop the critical sb_mutex, which we want to
1971 		 * do since holding it pushes us to PIL 14 and we
1972 		 * don't want to run at PIL 14 across IDN code.
1973 */
1974 mutex_exit(&idn.sigbintr.sb_mutex);
1975
1976 sdp = (idnsb_data_t *)mbp->data;
1977 sep = (idnsb_error_t *)&sdp->ssb_error;
1978 INIT_IDNKERR(sep);
1979
1980 if (mbp->len != sizeof (idnsb_data_t)) {
1981 PR_PROTO("%s: sigblk mbox length (%d) != "
1982 "expected (%lu)\n", proc, mbp->len,
1983 sizeof (idnsb_data_t));
1984 SET_IDNKERR_ERRNO(sep, EINVAL);
1985 SET_IDNKERR_IDNERR(sep, IDNKERR_DATA_LEN);
1986 SET_IDNKERR_PARAM0(sep, sizeof (idnsb_data_t));
1987
1988 goto sberr;
1989
1990 }
1991 if (idn.enabled == 0) {
1992 #ifdef DEBUG
1993 cmn_err(CE_NOTE,
1994 "IDN: 102: driver disabled "
1995 "- check OBP environment "
1996 "(idn-smr-size)");
1997 #else /* DEBUG */
1998 cmn_err(CE_NOTE,
1999 "!IDN: 102: driver disabled "
2000 "- check OBP environment "
2001 "(idn-smr-size)");
2002 #endif /* DEBUG */
2003 SET_IDNKERR_ERRNO(sep, EACCES);
2004 SET_IDNKERR_IDNERR(sep, IDNKERR_DRV_DISABLED);
2005
2006 goto sberr;
2007
2008 }
2009
2010 switch (mbp->cmd) {
2011
2012 case SSI_LINK:
2013 {
2014 idnsb_link_t slp;
2015
2016 bcopy(&sdp->ssb_link, &slp, sizeof (slp));
2017
2018 if (slp.master_pri < 0) {
2019 pri = IDNVOTE_MINPRI;
2020 } else if (slp.master_pri > 0) {
2021 /*
2022 				 * If I'm already in an IDN network,
2023 * then my vote priority is set to
2024 * the max, otherwise it's one-less.
2025 */
2026 pri = IDNVOTE_MAXPRI;
2027 IDN_GLOCK_SHARED();
2028 if (idn.ndomains <= 1)
2029 pri--;
2030 IDN_GUNLOCK();
2031 } else {
2032 pri = IDNVOTE_DEFPRI;
2033 }
2034
2035 PR_PROTO("%s: SSI_LINK(cpuid = %d, domid = %d, "
2036 "pri = %d (req = %d), t/o = %d)\n",
2037 proc, slp.cpuid, slp.domid, pri,
2038 slp.master_pri, slp.timeout);
2039
2040 rv = idn_link(slp.domid, slp.cpuid, pri,
2041 slp.timeout, sep);
2042 SET_IDNKERR_ERRNO(sep, rv);
2043 (void) idn_info(&sdp->ssb_info);
2044 break;
2045 }
2046
2047 case SSI_UNLINK:
2048 {
2049 idnsb_unlink_t sup;
2050 idn_domain_t *xdp;
2051 domainset_t domset;
2052
2053 bcopy(&sdp->ssb_unlink, &sup, sizeof (sup));
2054
2055 PR_PROTO("%s: SSI_UNLINK(c = %d, d = %d, bs = 0x%x, "
2056 "f = %d, is = 0x%x, t/o = %d)\n",
2057 proc, sup.cpuid, sup.domid, sup.boardset,
2058 sup.force, sup.idnset, sup.timeout);
2059
2060 domset = idn.domset.ds_trans_on |
2061 idn.domset.ds_connected |
2062 idn.domset.ds_trans_off |
2063 idn.domset.ds_awol |
2064 idn.domset.ds_relink;
2065
2066 if (VALID_DOMAINID(sup.domid)) {
2067 dp = &idn_domain[sup.domid];
2068 } else if (VALID_CPUID(sup.cpuid)) {
2069 for (d = 0; d < MAX_DOMAINS; d++) {
2070 xdp = &idn_domain[d];
2071
2072 if ((xdp->dcpu == IDN_NIL_DCPU) &&
2073 !DOMAIN_IN_SET(domset, d))
2074 continue;
2075
2076 if (CPU_IN_SET(xdp->dcpuset,
2077 sup.cpuid))
2078 break;
2079 }
2080 dp = (d == MAX_DOMAINS) ? NULL : xdp;
2081 }
2082 if ((dp == NULL) && sup.boardset) {
2083 for (d = 0; d < MAX_DOMAINS; d++) {
2084 xdp = &idn_domain[d];
2085
2086 if ((xdp->dcpu == IDN_NIL_DCPU) &&
2087 !DOMAIN_IN_SET(domset, d))
2088 continue;
2089
2090 if (xdp->dhw.dh_boardset &
2091 sup.boardset)
2092 break;
2093 }
2094 dp = (d == MAX_DOMAINS) ? NULL : xdp;
2095 }
2096 if (dp == NULL) {
2097 SET_IDNKERR_ERRNO(sep, EINVAL);
2098 SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_DOMAIN);
2099 SET_IDNKERR_PARAM0(sep, sup.domid);
2100 SET_IDNKERR_PARAM1(sep, sup.cpuid);
2101 (void) idn_info(&sdp->ssb_info);
2102 goto sberr;
2103 } else {
2104 sup.domid = dp->domid;
2105 }
2106
2107 switch (sup.force) {
2108 case SSIFORCE_OFF:
2109 fintype = IDNFIN_NORMAL;
2110 break;
2111
2112 case SSIFORCE_SOFT:
2113 fintype = IDNFIN_FORCE_SOFT;
2114 break;
2115
2116 case SSIFORCE_HARD:
2117 fintype = IDNFIN_FORCE_HARD;
2118 break;
2119 default:
2120 SET_IDNKERR_ERRNO(sep, EINVAL);
2121 SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_FORCE);
2122 SET_IDNKERR_PARAM0(sep, sup.force);
2123 (void) idn_info(&sdp->ssb_info);
2124 goto sberr;
2125 }
2126
2127 rv = idn_unlink(sup.domid, sup.idnset, fintype,
2128 IDNFIN_OPT_UNLINK, sup.timeout, sep);
2129 SET_IDNKERR_ERRNO(sep, rv);
2130 (void) idn_info(&sdp->ssb_info);
2131 break;
2132 }
2133
2134 case SSI_INFO:
2135 sfp = &sdp->ssb_info;
2136
2137 PR_PROTO("%s: SSI_INFO\n", proc);
2138
2139 rv = idn_info(sfp);
2140 SET_IDNKERR_ERRNO(sep, rv);
2141 if (rv != 0) {
2142 SET_IDNKERR_IDNERR(sep, IDNKERR_INFO_FAILED);
2143 }
2144 break;
2145
2146 default:
2147 ASSERT(0);
2148 SET_IDNKERR_ERRNO(sep, EINVAL);
2149 SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_CMD);
2150 SET_IDNKERR_PARAM0(sep, mbp->cmd);
2151 break;
2152 }
2153
2154 sberr:
2155
2156 if (GET_IDNKERR_ERRNO(sep) != 0) {
2157 cmn_err(CE_WARN,
2158 #ifdef DEBUG
2159 "IDN: 125: op (%s) failed, returning "
2160 "(%d/0x%x [%d, %d, %d])",
2161 #else /* DEBUG */
2162 "!IDN: 125: op (%s) failed, returning "
2163 "(%d/0x%x [%d, %d, %d])",
2164 #endif /* DEBUG */
2165 (mbp->cmd == SSI_LINK) ? "LINK" :
2166 (mbp->cmd == SSI_UNLINK) ? "UNLINK" :
2167 (mbp->cmd == SSI_INFO) ?
2168 "INFO" : "UNKNOWN",
2169 GET_IDNKERR_ERRNO(sep),
2170 GET_IDNKERR_IDNERR(sep),
2171 GET_IDNKERR_PARAM0(sep),
2172 GET_IDNKERR_PARAM1(sep),
2173 GET_IDNKERR_PARAM2(sep));
2174 }
2175
2176 PR_PROTO("%s: returning errno = %d, idnerr = %d, "
2177 "params = [%d, %d, %d]\n",
2178 proc, GET_IDNKERR_ERRNO(sep), GET_IDNKERR_IDNERR(sep),
2179 GET_IDNKERR_PARAM0(sep), GET_IDNKERR_PARAM1(sep),
2180 GET_IDNKERR_PARAM2(sep));
2181
2182 mutex_enter(&idn.sigbintr.sb_mutex);
2183 ASSERT((sbp->sb_busy == IDNSIGB_ACTIVE) ||
2184 (sbp->sb_busy == IDNSIGB_DIE));
2185 mbp->cmd |= SSI_ACK;
2186 if (sbp->sb_busy == IDNSIGB_ACTIVE)
2187 sbp->sb_busy = IDNSIGB_INACTIVE;
2188 /*
2189 * Set flag which kicks off response to SSP.
2190 */
2191 membar_stst_ldst();
2192 mbp->flag = HOST_TO_CBS;
2193 }
2194
2195 /*
2196 * Wake up the dude that killed us!
2197 */
2198 idn.sigb_threadp = NULL;
2199 cv_signal(&sbp->sb_cv);
2200 mutex_exit(&idn.sigbintr.sb_mutex);
2201 thread_exit();
2202 }
2203
2204 /*
2205 * Create the thread that will service sigb interrupts.
2206 */
2207 static void
2208 idn_sigbhandler_create()
2209 {
2210 struct sigbintr *sbp;
2211
2212 if (idn.sigb_threadp) {
2213 cmn_err(CE_WARN,
2214 "IDN: 126: sigbhandler thread already "
2215 "exists (0x%p)", (void *)idn.sigb_threadp);
2216 return;
2217 }
2218 cv_init(&idn.sigbintr.sb_cv, NULL, CV_DEFAULT, NULL);
2219 sbp = &idn.sigbintr;
2220 sbp->sb_busy = IDNSIGB_INACTIVE;
2221 idn.sigb_threadp = thread_create(NULL, 0,
2222 idn_sigbhandler_thread, &sbp, sizeof (sbp), &p0,
2223 TS_RUN, minclsyspri);
2224 sbp->sb_inum = add_softintr((uint_t)idn_sigbpil,
2225 idn_sigbhandler_wakeup, 0, SOFTINT_ST);
2226 }
2227
2228 static void
2229 idn_sigbhandler_kill()
2230 {
2231 if (idn.sigb_threadp) {
2232 struct sigbintr *sbp;
2233
2234 sbp = &idn.sigbintr;
2235 if (sbp->sb_inum != 0)
2236 (void) rem_softintr(sbp->sb_inum);
2237 sbp->sb_inum = 0;
2238 sbp->sb_busy = IDNSIGB_DIE;
2239 cv_signal(&sbp->sb_cv);
2240 while (idn.sigb_threadp != NULL)
2241 cv_wait(&sbp->sb_cv, &idn.sigbintr.sb_mutex);
2242 sbp->sb_busy = IDNSIGB_INACTIVE;
2243 cv_destroy(&sbp->sb_cv);
2244 }
2245 }
2246
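/*
 * Soft interrupt handler triggered via setsoftint() from
 * idn_sigbhandler().  It runs low enough to take sb_mutex and
 * signal the sigb service thread once a request has been marked
 * STARTED.
 */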
2247 /*ARGSUSED0*/
2248 static uint_t
2249 idn_sigbhandler_wakeup(caddr_t arg1, caddr_t arg2)
2250 {
2251 mutex_enter(&idn.sigbintr.sb_mutex);
2252 if (idn.sigbintr.sb_busy == IDNSIGB_STARTED) {
2253 idn.sigbintr.sb_busy = IDNSIGB_ACTIVE;
2254 cv_signal(&idn.sigbintr.sb_cv);
2255 }
2256 mutex_exit(&idn.sigbintr.sb_mutex);
2257
2258 return (DDI_INTR_CLAIMED);
2259 }
2260
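/*
 * Signature block mailbox handler invoked at high-level interrupt
 * when the SSP posts a request (flag == CBS_TO_HOST).  Validates
 * the command, marks the mailbox busy, and hands the real work off
 * to the sigb service thread via a soft interrupt; failures are
 * reported back to the SSP immediately.
 */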
2261 static void
2262 idn_sigbhandler(processorid_t cpuid, cpu_sgnblk_t *sgnblkp)
2263 {
2264 struct sigbintr *sbp = &idn.sigbintr;
2265 sigbmbox_t *mbp;
2266 idnsb_data_t *sdp;
2267 idnsb_error_t *sep;
2268 uint32_t cmd;
2269 int sigb_lock = 0;
2270
2271 ASSERT(sgnblkp);
2272
2273 mbp = &sgnblkp->sigb_host_mbox;
2274 sdp = (idnsb_data_t *)mbp->data;
2275 sep = &sdp->ssb_error;
2276 cmd = mbp->cmd;
2277
2278 if ((mbp->flag != CBS_TO_HOST) || !VALID_IDNSIGBCMD(cmd)) {
2279 /*
2280 * Not a valid IDN command. Just bail out.
2281 */
2282 return;
2283 }
2284
2285 mbp->flag = SIGB_MBOX_BUSY;
2286 SET_IDNKERR_ERRNO(sep, 0);
2287
2288 if (cmd & SSI_ACK) {
2289 /*
2290 * Hmmm...weird, the ACK bit is set.
2291 */
2292 SET_IDNKERR_ERRNO(sep, EPROTO);
2293 SET_IDNKERR_IDNERR(sep, IDNKERR_INVALID_CMD);
2294 SET_IDNKERR_PARAM0(sep, cmd);
2295 goto sigb_done;
2296 }
2297
2298 if (!mutex_tryenter(&idn.sigbintr.sb_mutex)) {
2299 /*
2300 * Couldn't get the lock. Driver is either
2301 * not quite all the way up or is shutting down
2302 * for some reason. Caller should spin again.
2303 */
2304 cmd |= SSI_ACK;
2305 SET_IDNKERR_ERRNO(sep, EBUSY);
2306 SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_LOCKED);
2307 goto sigb_done;
2308 }
2309 sigb_lock = 1;
2310
2311 if ((idn.sigb_threadp == NULL) ||
2312 (sbp->sb_busy == IDNSIGB_NOTREADY)) {
2313 cmd |= SSI_ACK;
2314 SET_IDNKERR_ERRNO(sep, EAGAIN);
2315 SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_NOTRDY);
2316 goto sigb_done;
2317 }
2318
2319 if (sbp->sb_busy != IDNSIGB_INACTIVE) {
2320 cmd |= SSI_ACK;
2321 SET_IDNKERR_ERRNO(sep, EBUSY);
2322 SET_IDNKERR_IDNERR(sep, IDNKERR_SIGBINTR_BUSY);
2323 goto sigb_done;
2324 }
2325
2326 sbp->sb_cpuid = (uchar_t)cpuid & 0xff;
2327 membar_stst_ldst();
2328 sbp->sb_busy = IDNSIGB_STARTED;
2329 /*
2330 * The sb_busy bit is set and the mailbox flag
2331 * indicates BUSY also, so we effectively have things locked.
2332 * So, we can drop the critical sb_mutex which we want to
2333 * do since it pushes us to PIL 14 while we hold it and we
2334 * don't want to run at PIL 14 across IDN code.
2335 *
2336 * Send interrupt to cause idn_sigbhandler_thread to wakeup.
2337 * We cannot do wakeup (cv_signal) directly from here since
2338 * we're executing from a high-level (14) interrupt.
2339 */
2340 setsoftint(sbp->sb_inum);
2341
2342 sigb_done:
2343
2344 if (GET_IDNKERR_ERRNO(sep) != 0) {
2345 mbp->len = sizeof (idnsb_data_t);
2346 mbp->cmd = cmd;
2347 membar_stst_ldst();
2348 mbp->flag = HOST_TO_CBS;
2349 }
2350
2351 if (sigb_lock)
2352 mutex_exit(&idn.sigbintr.sb_mutex);
2353 }
2354
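/*
 * Fill in a snapshot of the current IDN state for the caller:
 * local/master domain indices and CPUs, the connected and AWOL
 * domain sets, and the boardset of each open domain.
 */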
2355 static int
2356 idn_info(idnsb_info_t *sfp)
2357 {
2358 int count, d;
2359 idn_domain_t *dp;
2360 idnsb_info_t sinfo;
2361 int local_id, master_id;
2362 procname_t proc = "idn_info";
2363
2364 bzero(&sinfo, sizeof (sinfo));
2365 sinfo.master_index = (uchar_t)-1;
2366 sinfo.master_cpuid = (uchar_t)-1;
2367 sinfo.local_index = (uchar_t)-1;
2368 sinfo.local_cpuid = (uchar_t)-1;
2369
2370 IDN_GLOCK_SHARED();
2371
2372 sinfo.idn_state = (uchar_t)idn.state;
2373
2374 switch (idn.state) {
2375 case IDNGS_OFFLINE:
2376 sinfo.idn_active = SSISTATE_INACTIVE;
2377 PR_PROTO("%s: idn_state (%s) = INACTIVE\n",
2378 proc, idngs_str[idn.state]);
2379 break;
2380
2381 case IDNGS_IGNORE:
2382 PR_PROTO("%s: IGNORING IDN_INFO call...\n", proc);
2383 IDN_GUNLOCK();
2384 return (EIO);
2385
2386 default:
2387 sinfo.idn_active = SSISTATE_ACTIVE;
2388 PR_PROTO("%s: idn_state (%s) = ACTIVE\n",
2389 proc, idngs_str[idn.state]);
2390 break;
2391 }
2392 master_id = IDN_GET_MASTERID();
2393 local_id = idn.localid;
2394
2395 /*
2396 * Need to drop idn.grwlock before acquiring domain locks.
2397 */
2398 IDN_GUNLOCK();
2399
2400 IDN_SYNC_LOCK();
2401
2402 sinfo.awol_domset = (ushort_t)idn.domset.ds_awol;
2403 sinfo.conn_domset = (ushort_t)(idn.domset.ds_connected &
2404 ~idn.domset.ds_trans_on);
2405 DOMAINSET_ADD(sinfo.conn_domset, idn.localid);
2406
2407 count = 0;
2408 for (d = 0; d < MAX_DOMAINS; d++) {
2409 dp = &idn_domain[d];
2410
2411 if (dp->dcpu == IDN_NIL_DCPU)
2412 continue;
2413
2414 IDN_DLOCK_SHARED(d);
2415 if ((dp->dcpu == IDN_NIL_DCPU) ||
2416 (dp->dstate == IDNDS_CLOSED)) {
2417 IDN_DUNLOCK(d);
2418 continue;
2419 }
2420
2421 count++;
2422 if (d == local_id) {
2423 sinfo.local_index = (uchar_t)d;
2424 sinfo.local_cpuid = (uchar_t)dp->dcpu;
2425 PR_PROTO("%s: domid %d is LOCAL (cpuid = %d)\n",
2426 proc, d, dp->dcpu);
2427 }
2428 if (d == master_id) {
2429 sinfo.master_index = (uchar_t)d;
2430 sinfo.master_cpuid = (uchar_t)dp->dcpu;
2431 PR_PROTO("%s: domid %d is MASTER (cpuid = %d)\n",
2432 proc, d, dp->dcpu);
2433 }
2434
2435 sinfo.domain_boardset[d] = (ushort_t)dp->dhw.dh_boardset;
2436
2437 IDN_DUNLOCK(d);
2438 }
2439
2440 IDN_SYNC_UNLOCK();
2441
2442 bcopy(&sinfo, sfp, sizeof (*sfp));
2443
2444 PR_PROTO("%s: Found %d domains within IDNnet\n", proc, count);
2445
2446 return (0);
2447 }
2448
2449 /*
2450 * ----------------------------------------------
2451 * ndd param support routines.
2452 * - Borrowed from tcp.
2453 * ----------------------------------------------
2454 */
2455 static void
2456 idn_param_cleanup()
2457 {
2458 IDN_GLOCK_EXCL();
2459 if (!idn.strup && idn_ndlist)
2460 nd_free(&idn_ndlist);
2461 IDN_GUNLOCK();
2462 }
2463
2464 /*ARGSUSED*/
2465 static int
2466 idn_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
2467 {
2468 idnparam_t *idnpa = (idnparam_t *)cp;
2469
2470 /*
2471 * lock grabbed before calling nd_getset.
2472 */
2473 ASSERT(IDN_GLOCK_IS_HELD());
2474
2475 (void) mi_mpprintf(mp, "%ld", idnpa->sp_val);
2476
2477 return (0);
2478 }
2479
2480 /*ARGSUSED*/
2481 static int
2482 idn_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
2483 {
2484 char *end;
2485 ulong_t new_value;
2486 idnparam_t *idnpa = (idnparam_t *)cp;
2487
2488 /*
2489 * lock grabbed before calling nd_getset.
2490 */
2491 ASSERT(IDN_GLOCK_IS_EXCL());
2492
2493 new_value = (ulong_t)mi_strtol(value, &end, 10);
2494
2495 if ((end == value) ||
2496 (new_value < idnpa->sp_min) ||
2497 (new_value > idnpa->sp_max))
2498 return (EINVAL);
2499
2500 if (idn.enabled == 0) {
2501 #ifdef DEBUG
2502 cmn_err(CE_NOTE,
2503 "IDN: 102: driver disabled "
2504 "- check OBP environment "
2505 "(idn-smr-size)");
2506 #else /* DEBUG */
2507 cmn_err(CE_NOTE,
2508 "!IDN: 102: driver disabled "
2509 "- check OBP environment "
2510 "(idn-smr-size)");
2511 #endif /* DEBUG */
2512 return (EACCES);
2513 }
2514
2515 idnpa->sp_val = new_value;
2516
2517 return (0);
2518 }
2519
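/*
 * Load the IDN tunables and report nodes into the ndd list.
 * Parameters named in idn_param_debug_only[] are not advertised,
 * and those in idn_param_read_only[] get no set function.  Returns
 * 0 on success or -1 if any nd_load() fails, in which case the
 * entire list is freed.
 */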
2520 static int
2521 idn_param_register(register idnparam_t *idnpa, int count)
2522 {
2523 ASSERT(IDN_GLOCK_IS_EXCL());
2524
2525 for (; count > 0; count--, idnpa++) {
2526 if (idnpa->sp_name && idnpa->sp_name[0]) {
2527 register int i;
2528 ndsetf_t set_func;
2529 char *p;
2530 /*
2531 * Don't advertise parameters that are intended for DEBUG use only.
2532 */
2533 for (i = 0; idn_param_debug_only[i]; i++) {
2534 p = idn_param_debug_only[i];
2535 if (strcmp(idnpa->sp_name, p) == 0)
2536 break;
2537 }
2538 if (idn_param_debug_only[i])
2539 continue;
2540
2541 /*
2542 * Do not register a "set" function for
2543 * read-only parameters.
2544 */
2545 for (i = 0; idn_param_read_only[i]; i++) {
2546 p = idn_param_read_only[i];
2547 if (strcmp(idnpa->sp_name, p) == 0)
2548 break;
2549 }
2550 if (idn_param_read_only[i])
2551 set_func = NULL;
2552 else
2553 set_func = idn_param_set;
2554
2555 if (!nd_load(&idn_ndlist, idnpa->sp_name,
2556 idn_param_get, set_func,
2557 (caddr_t)idnpa)) {
2558 nd_free(&idn_ndlist);
2559 return (-1);
2560 }
2561 }
2562 }
2563 if (!nd_load(&idn_ndlist, "idn_slabpool", idn_slabpool_report,
2564 NULL, NULL)) {
2565 nd_free(&idn_ndlist);
2566 return (-1);
2567 }
2568 if (!nd_load(&idn_ndlist, "idn_buffers", idn_buffer_report,
2569 NULL, NULL)) {
2570 nd_free(&idn_ndlist);
2571 return (-1);
2572 }
2573 if (!nd_load(&idn_ndlist, "idn_mboxtbl", idn_mboxtbl_report,
2574 NULL, MBXTBL_PART_REPORT)) {
2575 nd_free(&idn_ndlist);
2576 return (-1);
2577 }
2578 if (!nd_load(&idn_ndlist, "idn_mboxtbl_all", idn_mboxtbl_report,
2579 NULL, MBXTBL_FULL_REPORT)) {
2580 nd_free(&idn_ndlist);
2581 return (-1);
2582 }
2583 if (!nd_load(&idn_ndlist, "idn_mainmbox", idn_mainmbox_report,
2584 NULL, NULL)) {
2585 nd_free(&idn_ndlist);
2586 return (-1);
2587 }
2588 if (!nd_load(&idn_ndlist, "idn_global", idn_global_report,
2589 NULL, NULL)) {
2590 nd_free(&idn_ndlist);
2591 return (-1);
2592 }
2593 if (!nd_load(&idn_ndlist, "idn_domain", idn_domain_report,
2594 NULL, (caddr_t)0)) {
2595 nd_free(&idn_ndlist);
2596 return (-1);
2597 }
2598 if (!nd_load(&idn_ndlist, "idn_domain_all", idn_domain_report,
2599 NULL, (caddr_t)1)) {
2600 nd_free(&idn_ndlist);
2601 return (-1);
2602 }
2603 if (!nd_load(&idn_ndlist, "idn_bind_net", idn_get_net_binding,
2604 idn_set_net_binding, NULL)) {
2605 nd_free(&idn_ndlist);
2606 return (-1);
2607 }
2608
2609 return (0);
2610 }
2611
2612 /*ARGSUSED*/
2613 static int
2614 idn_set_net_binding(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
2615 {
2616 char *end, *cpup;
2617 long net;
2618 processorid_t cpuid;
2619
2620 /*
2621 * lock grabbed before calling nd_getset.
2622 */
2623 ASSERT(IDN_GLOCK_IS_EXCL());
2624
2625 if ((cpup = strchr(value, '=')) == NULL)
2626 return (EINVAL);
2627
2628 *cpup++ = '\0';
2629
2630 net = mi_strtol(value, &end, 10);
2631 if ((end == value) || (net < 0) || (net >= IDN_MAX_NETS) ||
2632 !CHAN_IN_SET(idn.chanset, net))
2633 return (EINVAL);
2634
2635 cpuid = (processorid_t)mi_strtol(cpup, &end, 10);
2636 if ((end == cpup) || ((cpuid != -1) &&
2637 (!VALID_CPUID(cpuid) ||
2638 !CPU_IN_SET(cpu_ready_set, cpuid))))
2639 return (EINVAL);
2640
2641 idn_chanserver_bind(net, cpuid);
2642
2643 return (0);
2644 }
2645
2646 /*ARGSUSED*/
2647 static int
2648 idn_get_net_binding(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
2649 {
2650 int c;
2651
2652 /*
2653 * lock grabbed before calling nd_getset.
2654 */
2655 ASSERT(IDN_GLOCK_IS_HELD());
2656
2657 (void) mi_mpprintf(mp,
2658 "IDN network interfaces/channels active = %d",
2659 idn.nchannels);
2660
2661 if (idn.nchannels == 0)
2662 return (0);
2663
2664 (void) mi_mpprintf(mp, "Net Cpu");
2665
2666 for (c = 0; c < IDN_MAX_NETS; c++) {
2667 int bc;
2668 idn_chansvr_t *csp;
2669
2670 if (!CHAN_IN_SET(idn.chanset, c))
2671 continue;
2672
2673 csp = &idn.chan_servers[c];
2674
2675 if ((bc = csp->ch_bound_cpuid) == -1)
2676 bc = csp->ch_bound_cpuid_pending;
2677
2678 if (c < 10)
2679 (void) mi_mpprintf(mp, " %d      %d", c, bc);
2680 else
2681 (void) mi_mpprintf(mp, " %d     %d", c, bc);
2682 }
2683
2684 return (0);
2685 }
2686
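/*
 * Backend for the link ioctl.  The caller's master preference
 * (negative, zero, positive) is mapped to a vote priority before
 * initiating the link to the requested domain/CPU.
 */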
2687 static int
2688 idnioc_link(idnop_t *idnop)
2689 {
2690 int rv;
2691 int pri;
2692 idnsb_error_t err;
2693 procname_t proc = "idnioc_link";
2694
2695 if (idnop->link.master < 0)
2696 pri = IDNVOTE_MINPRI;
2697 else if (idnop->link.master > 0)
2698 pri = IDNVOTE_MAXPRI;
2699 else
2700 pri = IDNVOTE_DEFPRI;
2701
2702 PR_DRV("%s: domid = %d, cpuid = %d, pri = %d\n",
2703 proc, idnop->link.domid, idnop->link.cpuid, pri);
2704
2705 rv = idn_link(idnop->link.domid, idnop->link.cpuid,
2706 pri, idnop->link.wait, &err);
2707
2708 return (rv);
2709 }
2710
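/*
 * Backend for the unlink ioctl.  Resolves the target domain from
 * the given domid and/or cpuid, maps the force argument to a fin
 * type, and initiates the unlink.  Silently succeeds if no matching
 * open domain is found.
 */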
2711 static int
2712 idnioc_unlink(idnop_t *idnop)
2713 {
2714 int d, cpuid, domid, rv;
2715 boardset_t idnset;
2716 idn_fin_t fintype;
2717 idn_domain_t *dp, *xdp;
2718 idnsb_error_t err;
2719 procname_t proc = "idnioc_unlink";
2720
2721 PR_DRV("%s: domid = %d, cpuid = %d, force = %d\n",
2722 proc, idnop->unlink.domid, idnop->unlink.cpuid,
2723 idnop->unlink.force);
2724
2725 idnset = BOARDSET_ALL;
2726 domid = idnop->unlink.domid;
2727 cpuid = idnop->unlink.cpuid;
2728 dp = NULL;
2729
2730 if (domid == IDN_NIL_DOMID)
2731 domid = idn.localid;
2732
2733 if (VALID_DOMAINID(domid)) {
2734 dp = &idn_domain[domid];
2735 if (VALID_CPUID(cpuid) && (dp->dcpu != IDN_NIL_DCPU) &&
2736 !CPU_IN_SET(dp->dcpuset, cpuid)) {
2737 PR_PROTO("%s: ERROR: invalid cpuid "
2738 "(%d) for domain (%d) [cset = 0x%x.%x]\n",
2739 proc, cpuid, domid,
2740 UPPER32_CPUMASK(dp->dcpuset),
2741 LOWER32_CPUMASK(dp->dcpuset));
2742 dp = NULL;
2743 }
2744 } else if (VALID_CPUID(cpuid)) {
2745 for (d = 0; d < MAX_DOMAINS; d++) {
2746 xdp = &idn_domain[d];
2747
2748 if (xdp->dcpu == IDN_NIL_DCPU)
2749 continue;
2750
2751 if (CPU_IN_SET(xdp->dcpuset, cpuid))
2752 break;
2753 }
2754 dp = (d == MAX_DOMAINS) ? NULL : xdp;
2755 }
2756
2757 if ((dp == NULL) || (dp->dcpu == IDN_NIL_DCPU))
2758 return (0);
2759
2760 domid = dp->domid;
2761
2762 switch (idnop->unlink.force) {
2763 case SSIFORCE_OFF:
2764 fintype = IDNFIN_NORMAL;
2765 break;
2766
2767 case SSIFORCE_SOFT:
2768 fintype = IDNFIN_FORCE_SOFT;
2769 break;
2770
2771 case SSIFORCE_HARD:
2772 fintype = IDNFIN_FORCE_HARD;
2773 break;
2774 default:
2775 PR_PROTO("%s: invalid force parameter \"%d\"",
2776 proc, idnop->unlink.force);
2777 return (EINVAL);
2778 }
2779
2780 rv = idn_unlink(domid, idnset, fintype, IDNFIN_OPT_UNLINK,
2781 idnop->unlink.wait, &err);
2782
2783 return (rv);
2784 }
2785
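/*
 * Diagnostic aid: fake a "ping" by sending a DATA/ACK exchange to
 * the given domain, temporarily retargeting dp->dcpu if a specific
 * CPU was requested.
 */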
2786 static int
2787 idn_send_ping(idnop_t *idnop)
2788 {
2789 int domid = idnop->ping.domid;
2790 int cpuid = idnop->ping.cpuid;
2791 int ocpuid;
2792 idn_domain_t *dp;
2793 idn_msgtype_t mt;
2794 procname_t proc = "idn_send_ping";
2795
2796 if ((domid == IDN_NIL_DOMID) && (cpuid == IDN_NIL_DCPU)) {
2797 cmn_err(CE_WARN,
2798 "IDN: %s: no valid domain ID or CPU ID given",
2799 proc);
2800 return (EINVAL);
2801 }
2802 if (domid == IDN_NIL_DOMID)
2803 domid = MAX_DOMAINS - 1;
2804
2805 dp = &idn_domain[domid];
2806 IDN_DLOCK_EXCL(domid);
2807 if ((dp->dcpu == IDN_NIL_DCPU) && (cpuid == IDN_NIL_DCPU)) {
2808 cmn_err(CE_WARN,
2809 "IDN: %s: no valid target CPU specified",
2810 proc);
2811 IDN_DUNLOCK(domid);
2812 return (EINVAL);
2813 }
2814 if (cpuid == IDN_NIL_DCPU)
2815 cpuid = dp->dcpu;
2816
2817 ocpuid = dp->dcpu;
2818 dp->dcpu = cpuid;
2819
2820 /*
2821 * XXX - Need a special PING IDN command.
2822 */
2823 mt.mt_mtype = IDNP_DATA | IDNP_ACK;
2824 mt.mt_atype = 0;
2825
2826 (void) IDNXDC(domid, &mt, 0x100, 0x200, 0x300, 0x400);
2827
2828 dp->dcpu = ocpuid;
2829 IDN_DUNLOCK(domid);
2830
2831 return (0);
2832 }
2833
2834 /*
2835 * ----------------------------------------------
2836 */
2837 static void
2838 idn_dopers_init()
2839 {
2840 int i;
2841 dop_waitlist_t *dwl;
2842
2843 if (idn.dopers)
2844 return;
2845
2846 idn.dopers = GETSTRUCT(struct dopers, 1);
2847
2848 bzero(idn.dopers, sizeof (struct dopers));
2849
2850 dwl = &idn.dopers->_dop_wcache[0];
2851 for (i = 0; i < (IDNOP_CACHE_SIZE-1); i++)
2852 dwl[i].dw_next = &dwl[i+1];
2853 dwl[i].dw_next = NULL;
2854
2855 idn.dopers->dop_freelist = &idn.dopers->_dop_wcache[0];
2856 idn.dopers->dop_waitcount = 0;
2857 idn.dopers->dop_domset = 0;
2858 idn.dopers->dop_waitlist = NULL;
2859
2860 cv_init(&idn.dopers->dop_cv, NULL, CV_DEFAULT, NULL);
2861 mutex_init(&idn.dopers->dop_mutex, NULL, MUTEX_DEFAULT, NULL);
2862 }
2863
2864 static void
2865 idn_dopers_deinit()
2866 {
2867 dop_waitlist_t *dwl, *next_dwl;
2868
2869
2870 if (idn.dopers == NULL)
2871 return;
2872
2873 for (dwl = idn.dopers->dop_waitlist; dwl; dwl = next_dwl) {
2874 next_dwl = dwl->dw_next;
2875 if (!IDNOP_IN_CACHE(dwl))
2876 FREESTRUCT(dwl, dop_waitlist_t, 1);
2877 }
2878
2879 cv_destroy(&idn.dopers->dop_cv);
2880 mutex_destroy(&idn.dopers->dop_mutex);
2881
2882 FREESTRUCT(idn.dopers, struct dopers, 1);
2883 idn.dopers = NULL;
2884 }
2885
2886 /*
2887 * Reset the dop_errset field in preparation for an
2888 * IDN operation attempt. This is only called from
2889 * idn_link() and idn_unlink().
2890 */
2891 void *
2892 idn_init_op(idn_opflag_t opflag, domainset_t domset, idnsb_error_t *sep)
2893 {
2894 dop_waitlist_t *dwl;
2895 /*
2896 * Clear any outstanding error ops in preparation
2897 * for an IDN (link/unlink) operation.
2898 */
2899 mutex_enter(&idn.dopers->dop_mutex);
2900 if ((dwl = idn.dopers->dop_freelist) == NULL) {
2901 dwl = GETSTRUCT(dop_waitlist_t, 1);
2902 } else {
2903 idn.dopers->dop_freelist = dwl->dw_next;
2904 bzero(dwl, sizeof (*dwl));
2905 }
2906 dwl->dw_op = opflag;
2907 dwl->dw_reqset = domset;
2908 dwl->dw_idnerr = sep;
2909 dwl->dw_next = idn.dopers->dop_waitlist;
2910
2911 idn.dopers->dop_waitlist = dwl;
2912 idn.dopers->dop_waitcount++;
2913 idn.dopers->dop_domset |= domset;
2914 mutex_exit(&idn.dopers->dop_mutex);
2915
2916 return (dwl);
2917 }
2918
2919 /*
2920 * Anybody waiting on an opflag operation for any one
2921 * of the domains in domset needs to be updated to
2922 * additionally wait for new domains in domset.
2923 * This is used, for example, when needing to connect
2924 * to more domains than known at the time of the
2925 * original request.
2926 */
2927 void
2928 idn_add_op(idn_opflag_t opflag, domainset_t domset)
2929 {
2930 dop_waitlist_t *dwl;
2931
2932 mutex_enter(&idn.dopers->dop_mutex);
2933 if ((idn.dopers->dop_waitcount == 0) ||
2934 ((idn.dopers->dop_domset & domset) == 0)) {
2935 mutex_exit(&idn.dopers->dop_mutex);
2936 return;
2937 }
2938 for (dwl = idn.dopers->dop_waitlist; dwl; dwl = dwl->dw_next)
2939 if ((dwl->dw_op == opflag) && (dwl->dw_reqset & domset))
2940 dwl->dw_reqset |= domset;
2941 mutex_exit(&idn.dopers->dop_mutex);
2942 }
2943
2944 /*
2945 * Mechanism to wakeup any potential users which may be waiting
2946 * for a link/unlink operation to complete. If an error occurred
2947 * don't update dop_errset unless there was no previous error.
2948 */
2949 void
2950 idn_update_op(idn_opflag_t opflag, domainset_t domset, idnsb_error_t *sep)
2951 {
2952 int do_wakeup = 0;
2953 dop_waitlist_t *dw;
2954 procname_t proc = "idn_update_op";
2955
2956 mutex_enter(&idn.dopers->dop_mutex);
2957 /*
2958 * If there are no waiters, or nobody is waiting for
2959 * the particular domainset in question, then
2960 * just bail.
2961 */
2962 if ((idn.dopers->dop_waitcount == 0) ||
2963 ((idn.dopers->dop_domset & domset) == 0)) {
2964 mutex_exit(&idn.dopers->dop_mutex);
2965 PR_PROTO("%s: NO waiters exist (domset=0x%x)\n",
2966 proc, domset);
2967 return;
2968 }
2969
2970 for (dw = idn.dopers->dop_waitlist; dw; dw = dw->dw_next) {
2971 int d;
2972 domainset_t dset, rset;
2973
2974 if ((dset = dw->dw_reqset & domset) == 0)
2975 continue;
2976
2977 if (opflag == IDNOP_ERROR) {
2978 dw->dw_errset |= dset;
2979 if (sep) {
2980 for (d = 0; d < MAX_DOMAINS; d++) {
2981 if (!DOMAIN_IN_SET(dset, d))
2982 continue;
2983
2984 dw->dw_errors[d] =
2985 (short)GET_IDNKERR_ERRNO(sep);
2986 }
2987 bcopy(sep, dw->dw_idnerr, sizeof (*sep));
2988 }
2989 } else if (opflag == dw->dw_op) {
2990 dw->dw_domset |= dset;
2991 }
2992
2993 /*
2994 * Check if all the domains are spoken for that
2995 * a particular waiter may have been waiting for.
2996 * If there's at least one, we'll need to broadcast.
2997 */
2998 rset = (dw->dw_errset | dw->dw_domset) & dw->dw_reqset;
2999 if (rset == dw->dw_reqset)
3000 do_wakeup++;
3001 }
3002
3003 PR_PROTO("%s: found %d waiters ready for wakeup\n", proc, do_wakeup);
3004
3005 if (do_wakeup > 0)
3006 cv_broadcast(&idn.dopers->dop_cv);
3007
3008 mutex_exit(&idn.dopers->dop_mutex);
3009 }
3010
3011 void
3012 idn_deinit_op(void *cookie)
3013 {
3014 domainset_t domset;
3015 dop_waitlist_t *hw, *tw;
3016 dop_waitlist_t *dwl = (dop_waitlist_t *)cookie;
3017
3018 mutex_enter(&idn.dopers->dop_mutex);
3019
3020 ASSERT(idn.dopers->dop_waitlist);
3021
3022 if (dwl == idn.dopers->dop_waitlist) {
3023 idn.dopers->dop_waitlist = dwl->dw_next;
3024 if (IDNOP_IN_CACHE(dwl)) {
3025 dwl->dw_next = idn.dopers->dop_freelist;
3026 idn.dopers->dop_freelist = dwl;
3027 } else {
3028 FREESTRUCT(dwl, dop_waitlist_t, 1);
3029 }
3030 } else {
3031 for (tw = idn.dopers->dop_waitlist, hw = tw->dw_next;
3032 hw;
3033 tw = hw, hw = hw->dw_next) {
3034 if (dwl == hw)
3035 break;
3036 }
3037 ASSERT(hw);
3038
3039 tw->dw_next = hw->dw_next;
3040 }
3041
3042 /*
3043 * Recompute domainset for which waiters might be waiting.
3044 * It's possible there may be other waiters waiting for
3045 * the same domainset that the current waiter that's leaving
3046 * may have been waiting for, so we can't simply delete
3047 * the leaving waiter's domainset from dop_domset.
3048 */
3049 for (hw = idn.dopers->dop_waitlist, domset = 0; hw; hw = hw->dw_next)
3050 domset |= hw->dw_reqset;
3051
3052 idn.dopers->dop_waitcount--;
3053 idn.dopers->dop_domset = domset;
3054
3055 mutex_exit(&idn.dopers->dop_mutex);
3056 }
3057
3058 /*
3059 * Wait until the specified operation succeeds or fails with
3060 * respect to the given domains. Note the function terminates
3061 * if at least one error occurs.
3062 * This process is necessary since link/unlink operations occur
3063 * asynchronously and we need some way of waiting to find out
3064 * if it indeed completed.
3065 * Timeout value is received indirectly from the SSP and
3066 * represents seconds.
3067 */
3068 int
3069 idn_wait_op(void *cookie, domainset_t *domsetp, int wait_timeout)
3070 {
3071 int d, rv, err = 0;
3072 dop_waitlist_t *dwl;
3073
3074
3075 dwl = (dop_waitlist_t *)cookie;
3076
3077 ASSERT(wait_timeout > 0);
3078 ASSERT((dwl->dw_op == IDNOP_CONNECTED) ||
3079 (dwl->dw_op == IDNOP_DISCONNECTED));
3080
3081 mutex_enter(&idn.dopers->dop_mutex);
3082
3083 while (((dwl->dw_domset | dwl->dw_errset) != dwl->dw_reqset) && !err) {
3084 rv = cv_reltimedwait_sig(&idn.dopers->dop_cv,
3085 &idn.dopers->dop_mutex, (wait_timeout * hz), TR_CLOCK_TICK);
3086
3087 if ((dwl->dw_domset | dwl->dw_errset) == dwl->dw_reqset)
3088 break;
3089
3090 switch (rv) {
3091 case -1:
3092 /*
3093 * timed out
3094 */
3095 cmn_err(CE_WARN,
3096 "!IDN: 129: %s operation timed out",
3097 (dwl->dw_op == IDNOP_CONNECTED) ? "LINK" :
3098 (dwl->dw_op == IDNOP_DISCONNECTED) ? "UNLINK" :
3099 "UNKNOWN");
3100 /*FALLTHROUGH*/
3101 case 0:
3102 /*
3103 * signal, e.g. kill(2)
3104 */
3105 err = 1;
3106 break;
3107
3108 default:
3109 break;
3110 }
3111 }
3112
3113 if (dwl->dw_domset == dwl->dw_reqset) {
3114 rv = 0;
3115 } else {
3116 /*
3117 * Op failed for some domains or we were awakened.
3118 */
3119 for (d = rv = 0; (d < MAX_DOMAINS) && !rv; d++)
3120 rv = dwl->dw_errors[d];
3121 }
3122 *domsetp = dwl->dw_domset;
3123
3124 mutex_exit(&idn.dopers->dop_mutex);
3125
3126 idn_deinit_op(cookie);
3127
3128 return (rv);
3129 }
3130
3131 /*
3132 * --------------------------------------------------
3133 * Return any valid (& ready) cpuid for the given board based on
3134 * the given cpuset.
3135 * --------------------------------------------------
3136 */
3137 int
3138 board_to_ready_cpu(int board, cpuset_t cpuset)
3139 {
3140 int base_cpuid;
3141 int ncpu_board = MAX_CPU_PER_BRD;
3142
3143 board *= ncpu_board;
3144 for (base_cpuid = board;
3145 base_cpuid < (board + ncpu_board);
3146 base_cpuid++)
3147 if (CPU_IN_SET(cpuset, base_cpuid))
3148 return (base_cpuid);
3149
3150 return (-1);
3151 }
3152
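/*
 * Return a (CLOSED) idn_domain[] entry to its pristine state.
 * Caller must hold the domain lock and the global lock exclusively.
 */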
3153 void
3154 idn_domain_resetentry(idn_domain_t *dp)
3155 {
3156 register int i;
3157 procname_t proc = "idn_domain_resetentry";
3158
3159 ASSERT(dp);
3160 ASSERT(dp->dstate == IDNDS_CLOSED);
3161 ASSERT(IDN_DLOCK_IS_EXCL(dp->domid));
3162 ASSERT(IDN_GLOCK_IS_EXCL());
3163
3164 ASSERT(dp->domid == (dp - &idn_domain[0]));
3165
3166 IDN_FSTATE_TRANSITION(dp, IDNFIN_OFF);
3167 dp->dname[0] = '\0';
3168 dp->dnetid = (ushort_t)-1;
3169 dp->dmtu = 0;
3170 dp->dbufsize = 0;
3171 dp->dslabsize = 0;
3172 dp->dnwrsize = 0;
3173 dp->dncpus = 0;
3174 dp->dcpuindex = 0;
3175 CPUSET_ZERO(dp->dcpuset);
3176 dp->dcpu = dp->dcpu_last = dp->dcpu_save = IDN_NIL_DCPU;
3177 dp->dvote.ticket = 0;
3178 dp->dslab = NULL;
3179 dp->dslab_state = DSLAB_STATE_UNKNOWN;
3180 dp->dnslabs = 0;
3181 dp->dio = 0;
3182 dp->dioerr = 0;
3183 lock_clear(&dp->diowanted);
3184 bzero(&dp->dhw, sizeof (dp->dhw));
3185 dp->dxp = NULL;
3186 IDN_XSTATE_TRANSITION(dp, IDNXS_NIL);
3187 dp->dsync.s_cmd = IDNSYNC_NIL;
3188 dp->dfin_sync = IDNFIN_SYNC_OFF;
3189 IDN_RESET_COOKIES(dp->domid);
3190 dp->dcookie_err = 0;
3191 bzero(&dp->dawol, sizeof (dp->dawol));
3192 dp->dtmp = -1;
3193
3194 if (dp->dtimerq.tq_queue != NULL) {
3195 PR_PROTO("%s: WARNING: MSG timerq not empty (count = %d)\n",
3196 proc, dp->dtimerq.tq_count);
3197 IDN_MSGTIMER_STOP(dp->domid, 0, 0);
3198 }
3199
3200 for (i = 0; i < NCPU; i++)
3201 dp->dcpumap[i] = (uchar_t)-1;
3202 }
3203
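/*
 * Prepare the domain entry for a new connection: assign the target
 * CPU, vote ticket, boardset and main mailboxes.  Returns 0 on
 * success, 1 if the domain is already open, or -1 on error (bad
 * domid/cpuid or an unusable local hardware configuration).
 */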
3204 int
3205 idn_open_domain(int domid, int cpuid, uint_t ticket)
3206 {
3207 int c, new_cpuid;
3208 idn_domain_t *dp, *ldp;
3209 procname_t proc = "idn_open_domain";
3210
3211 ASSERT(IDN_SYNC_IS_LOCKED());
3212 ASSERT(IDN_DLOCK_IS_EXCL(domid));
3213
3214 if (!VALID_DOMAINID(domid)) {
3215 PR_PROTO("%s: INVALID domainid (%d) "
3216 "[cpuid = %d, ticket = 0x%x]\n",
3217 proc, domid, cpuid, ticket);
3218 return (-1);
3219 }
3220
3221 dp = &idn_domain[domid];
3222 ldp = &idn_domain[idn.localid];
3223
3224 if (dp->dcpu >= 0) {
3225 PR_PROTO("%s:%d: domain already OPEN (state = %s)\n",
3226 proc, domid, idnds_str[dp->dstate]);
3227 return (1);
3228 }
3229
3230 if (DOMAIN_IN_SET(idn.domset.ds_relink, domid)) {
3231 if (dp->dcpu_save == IDN_NIL_DCPU)
3232 new_cpuid = cpuid;
3233 else
3234 new_cpuid = dp->dcpu_save;
3235 } else {
3236 new_cpuid = cpuid;
3237 }
3238
3239 if (new_cpuid == IDN_NIL_DCPU) {
3240 PR_PROTO("%s:%d: WARNING: invalid cpuid (%d) specified\n",
3241 proc, domid, new_cpuid);
3242 return (-1);
3243 }
3244
3245 IDN_GLOCK_EXCL();
3246
3247 idn_domain_resetentry(dp);
3248
3249 PR_STATE("%s:%d: requested cpuid %d, assigning cpuid %d\n",
3250 proc, domid, cpuid, new_cpuid);
3251
3252 idn_assign_cookie(domid);
3253
3254 dp->dcpu = dp->dcpu_save = new_cpuid;
3255 dp->dvote.ticket = ticket;
3256 CPUSET_ADD(dp->dcpuset, new_cpuid);
3257 dp->dncpus = 1;
3258 for (c = 0; c < NCPU; c++)
3259 dp->dcpumap[c] = (uchar_t)new_cpuid;
3260 dp->dhw.dh_nboards = 1;
3261 dp->dhw.dh_boardset = BOARDSET(CPUID_TO_BOARDID(new_cpuid));
3262
3263 if (domid != idn.localid)
3264 IDN_DLOCK_EXCL(idn.localid);
3265
3266 if (idn.ndomains == 1) {
3267 struct hwconfig local_hw;
3268
3269 /*
3270 * We're attempting to connect to our first domain.
3271 * Recheck our local hardware configuration before
3272 * we go any further in case it changed due to a DR,
3273 * and update any structs dependent on this.
3274 * ASSUMPTION:
3275 * IDN is unlinked before performing any DRs.
3276 */
3277 PR_PROTO("%s: RECHECKING local HW config.\n", proc);
3278 if (get_hw_config(&local_hw)) {
3279 dp->dcpu = IDN_NIL_DCPU;
3280 cmn_err(CE_WARN,
3281 "IDN: 118: hardware config not appropriate");
3282 if (domid != idn.localid)
3283 IDN_DUNLOCK(idn.localid);
3284 IDN_GUNLOCK();
3285 return (-1);
3286 }
3287 (void) update_local_hw_config(ldp, &local_hw);
3288 }
3289
3290 idn.ndomains++;
3291
3292 if (domid != idn.localid)
3293 IDN_DUNLOCK(idn.localid);
3294 IDN_GUNLOCK();
3295
3296 IDN_MBOX_LOCK(domid);
3297 dp->dmbox.m_tbl = NULL;
3298
3299 if (domid != idn.localid) {
3300 dp->dmbox.m_send = idn_mainmbox_init(domid,
3301 IDNMMBOX_TYPE_SEND);
3302 dp->dmbox.m_recv = idn_mainmbox_init(domid,
3303 IDNMMBOX_TYPE_RECV);
3304 } else {
3305 /*
3306 * The local domain does not need send/recv
3307 * mailboxes in its idn_domain[] entry.
3308 */
3309 dp->dmbox.m_send = NULL;
3310 dp->dmbox.m_recv = NULL;
3311 }
3312 IDN_MBOX_UNLOCK(domid);
3313
3314 PR_PROTO("%s:%d: new domain (cpu = %d, vote = 0x%x)\n",
3315 proc, domid, dp->dcpu, dp->dvote.ticket);
3316
3317 return (0);
3318 }
3319
3320 /*
3321 * The local domain never "closes" itself unless the driver
3322 * is doing an idndetach. It will be reopened during idnattach
3323 * when idn_domains_init is called.
3324 */
3325 void
3326 idn_close_domain(int domid)
3327 {
3328 uint_t token;
3329 idn_domain_t *dp;
3330 procname_t proc = "idn_close_domain";
3331
3332 ASSERT(IDN_SYNC_IS_LOCKED());
3333 ASSERT(IDN_DLOCK_IS_EXCL(domid));
3334
3335 dp = &idn_domain[domid];
3336
3337 ASSERT(dp->dstate == IDNDS_CLOSED);
3338
3339 if (dp->dcpu == IDN_NIL_DCPU) {
3340 PR_PROTO("%s:%d: DOMAIN ALREADY CLOSED!\n",
3341 proc, domid);
3342 return;
3343 }
3344
3345 token = IDN_RETRY_TOKEN(domid, IDN_RETRY_TYPEALL);
3346
3347 (void) idn_retry_terminate(token);
3348
3349 DOMAINSET_DEL(idn.domset.ds_trans_on, domid);
3350 DOMAINSET_DEL(idn.domset.ds_ready_on, domid);
3351 DOMAINSET_DEL(idn.domset.ds_connected, domid);
3352 DOMAINSET_DEL(idn.domset.ds_trans_off, domid);
3353 DOMAINSET_DEL(idn.domset.ds_ready_off, domid);
3354 DOMAINSET_DEL(idn.domset.ds_hwlinked, domid);
3355 DOMAINSET_DEL(idn.domset.ds_flush, domid);
3356
3357 idn_sync_exit(domid, IDNSYNC_CONNECT);
3358 idn_sync_exit(domid, IDNSYNC_DISCONNECT);
3359
3360 IDN_GLOCK_EXCL();
3361
3362 if (DOMAIN_IN_SET(idn.domset.ds_awol, domid))
3363 idn_clear_awol(domid);
3364
3365 idn.ndomains--;
3366
3367 IDN_GUNLOCK();
3368
3369 IDN_MBOX_LOCK(domid);
3370 dp->dmbox.m_tbl = NULL;
3371
3372 if (dp->dmbox.m_send) {
3373 idn_mainmbox_deinit(domid, dp->dmbox.m_send);
3374 dp->dmbox.m_send = NULL;
3375 }
3376
3377 if (dp->dmbox.m_recv) {
3378 idn_mainmbox_deinit(domid, dp->dmbox.m_recv);
3379 dp->dmbox.m_recv = NULL;
3380 }
3381
3382 IDN_MBOX_UNLOCK(domid);
3383
3384 cmn_err(CE_NOTE,
3385 "!IDN: 142: link (domain %d, CPU %d) disconnected",
3386 dp->domid, dp->dcpu);
3387
3388 dp->dcpu = IDN_NIL_DCPU; /* ultimate demise */
3389
3390 IDN_RESET_COOKIES(domid);
3391
3392 ASSERT(dp->dio <= 0);
3393 ASSERT(dp->dioerr == 0);
3394 ASSERT(dp->dslab == NULL);
3395 ASSERT(dp->dnslabs == 0);
3396
3397 IDN_GKSTAT_GLOBAL_EVENT(gk_unlinks, gk_unlink_last);
3398 }
3399
3400
3401 /*
3402 * -----------------------------------------------------------------------
3403 */
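/*
 * One-time initialization of the idn_domain[] array and of the
 * local domain's entry (name, hardware config, vote ticket, cpu
 * map).  The local domain is left in the CONNECTED state.
 */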
3404 static void
3405 idn_domains_init(struct hwconfig *local_hw)
3406 {
3407 register int i, d;
3408 idn_domain_t *ldp;
3409 uchar_t *cpumap;
3410
3411 ASSERT(local_hw != NULL);
3412
3413 cpumap = GETSTRUCT(uchar_t, NCPU * MAX_DOMAINS);
3414
3415 for (d = 0; d < MAX_DOMAINS; d++) {
3416 register idn_domain_t *dp;
3417
3418 dp = &idn_domain[d];
3419
3420 dp->domid = d;
3421
3422 rw_init(&dp->drwlock, NULL, RW_DEFAULT, NULL);
3423
3424 IDN_TIMERQ_INIT(&dp->dtimerq);
3425
3426 dp->dstate = IDNDS_CLOSED;
3427
3428 mutex_init(&dp->dmbox.m_mutex, NULL, MUTEX_DEFAULT, NULL);
3429
3430 dp->dcpumap = cpumap;
3431
3432 rw_init(&dp->dslab_rwlock, NULL, RW_DEFAULT, NULL);
3433
3434 IDN_DLOCK_EXCL(d);
3435 IDN_GLOCK_EXCL();
3436
3437 idn_domain_resetentry(dp);
3438
3439 IDN_GUNLOCK();
3440
3441 IDNSB_DOMAIN_UPDATE(dp);
3442
3443 IDN_DUNLOCK(d);
3444
3445 cpumap += NCPU;
3446 }
3447
3448 IDN_SYNC_LOCK();
3449
3450 /*
3451 * Update local domain information.
3452 */
3453 ASSERT(idn.smr.locpfn);
3454 ASSERT(local_hw->dh_nboards && local_hw->dh_boardset);
3455
3456 idn.ndomains = 0; /* note that open_domain will get us to 1 */
3457
3458 IDN_DLOCK_EXCL(idn.localid);
3459 d = idn_open_domain(idn.localid, (int)CPU->cpu_id, 0);
3460 ASSERT(d == 0);
3461 IDN_GLOCK_EXCL();
3462 IDN_SET_MASTERID(IDN_NIL_DOMID);
3463 IDN_SET_NEW_MASTERID(IDN_NIL_DOMID);
3464
3465 ldp = &idn_domain[idn.localid];
3466
3467 (void) strncpy(ldp->dname, utsname.nodename, MAXDNAME - 1);
3468 ldp->dname[MAXDNAME-1] = '\0';
3469 bcopy(local_hw, &ldp->dhw, sizeof (ldp->dhw));
3470 ASSERT(idn.ndomains == 1);
3471 ASSERT((ldp->dhw.dh_nboards > 0) &&
3472 (ldp->dhw.dh_nboards <= MAX_BOARDS));
3473 ldp->dnetid = IDN_DOMID2NETID(ldp->domid);
3474 ldp->dmtu = IDN_MTU;
3475 ldp->dbufsize = IDN_SMR_BUFSIZE;
3476 ldp->dslabsize = (short)IDN_SLAB_BUFCOUNT;
3477 ldp->dnwrsize = (short)IDN_NWR_SIZE;
3478 ldp->dcpuset = cpu_ready_set;
3479 ldp->dncpus = (short)ncpus;
3480 ldp->dvote.ticket = IDNVOTE_INITIAL_TICKET;
3481 ldp->dvote.v.master = 0;
3482 ldp->dvote.v.nmembrds = ldp->dhw.dh_nmcadr - 1;
3483 ldp->dvote.v.ncpus = (int)ldp->dncpus - 1;
3484 ldp->dvote.v.board = CPUID_TO_BOARDID(ldp->dcpu);
3485 i = -1;
3486 for (d = 0; d < NCPU; d++) {
3487 BUMP_INDEX(ldp->dcpuset, i);
3488 ldp->dcpumap[d] = (uchar_t)i;
3489 }
3490
3491 CPUSET_ZERO(idn.dc_cpuset);
3492 CPUSET_OR(idn.dc_cpuset, ldp->dcpuset);
3493 idn.dc_boardset = ldp->dhw.dh_boardset;
3494
3495 /*
3496 * Setting the state for ourselves is only relevant
3497 * for loopback performance testing. Anyway, it
3498 * makes sense that we always have an established
3499 * connection with ourself regardless of IDN :-o
3500 */
3501 IDN_DSTATE_TRANSITION(ldp, IDNDS_CONNECTED);
3502
3503 IDN_GUNLOCK();
3504 IDN_DUNLOCK(idn.localid);
3505 IDN_SYNC_UNLOCK();
3506 }
3507
3508 static void
3509 idn_domains_deinit()
3510 {
3511 register int d;
3512
3513 IDN_SYNC_LOCK();
3514 IDN_DLOCK_EXCL(idn.localid);
3515 IDN_DSTATE_TRANSITION(&idn_domain[idn.localid], IDNDS_CLOSED);
3516 idn_close_domain(idn.localid);
3517 IDN_DUNLOCK(idn.localid);
3518 IDN_SYNC_UNLOCK();
3519 idn.localid = IDN_NIL_DOMID;
3520
3521 FREESTRUCT(idn_domain[0].dcpumap, uchar_t, NCPU * MAX_DOMAINS);
3522
3523 for (d = 0; d < MAX_DOMAINS; d++) {
3524 idn_domain_t *dp;
3525
3526 dp = &idn_domain[d];
3527
3528 rw_destroy(&dp->dslab_rwlock);
3529 mutex_destroy(&dp->dmbox.m_mutex);
3530 rw_destroy(&dp->drwlock);
3531 IDN_TIMERQ_DEINIT(&dp->dtimerq);
3532 dp->dcpumap = NULL;
3533 }
3534 }
3535
3536 /*
3537 * -----------------------------------------------------------------------
3538 */
3539 static void
3540 idn_retrytask_init()
3541 {
3542 ASSERT(idn.retryqueue.rq_cache == NULL);
3543
3544 mutex_init(&idn.retryqueue.rq_mutex, NULL, MUTEX_DEFAULT, NULL);
3545 idn.retryqueue.rq_cache = kmem_cache_create("idn_retryjob_cache",
3546 sizeof (idn_retry_job_t),
3547 0, NULL, NULL, NULL,
3548 NULL, NULL, 0);
3549 }
3550
3551 static void
3552 idn_retrytask_deinit()
3553 {
3554 if (idn.retryqueue.rq_cache == NULL)
3555 return;
3556
3557 kmem_cache_destroy(idn.retryqueue.rq_cache);
3558 mutex_destroy(&idn.retryqueue.rq_mutex);
3559
3560 bzero(&idn.retryqueue, sizeof (idn.retryqueue));
3561 }
3562
3563 /*
3564 * -----------------------------------------------------------------------
3565 */
3566 static void
3567 idn_timercache_init()
3568 {
3569 ASSERT(idn.timer_cache == NULL);
3570
3571 idn.timer_cache = kmem_cache_create("idn_timer_cache",
3572 sizeof (idn_timer_t),
3573 0, NULL, NULL, NULL,
3574 NULL, NULL, 0);
3575 }
3576
3577 static void
3578 idn_timercache_deinit()
3579 {
3580 if (idn.timer_cache == NULL)
3581 return;
3582
3583 kmem_cache_destroy(idn.timer_cache);
3584 idn.timer_cache = NULL;
3585 }
3586
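/*
 * Allocate a zeroed timer entry from the timer cache, linked to
 * itself as a singleton circular list.
 */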
3587 idn_timer_t *
3588 idn_timer_alloc()
3589 {
3590 idn_timer_t *tp;
3591
3592 tp = kmem_cache_alloc(idn.timer_cache, KM_SLEEP);
3593 bzero(tp, sizeof (*tp));
3594 tp->t_forw = tp->t_back = tp;
3595
3596 return (tp);
3597 }
3598
3599 void
3600 idn_timer_free(idn_timer_t *tp)
3601 {
3602 if (tp == NULL)
3603 return;
3604 kmem_cache_free(idn.timer_cache, tp);
3605 }
3606
3607 void
3608 idn_timerq_init(idn_timerq_t *tq)
3609 {
3610 mutex_init(&tq->tq_mutex, NULL, MUTEX_DEFAULT, NULL);
3611 tq->tq_count = 0;
3612 tq->tq_queue = NULL;
3613 }
3614
3615 void
3616 idn_timerq_deinit(idn_timerq_t *tq)
3617 {
3618 ASSERT(tq->tq_queue == NULL);
3619 mutex_destroy(&tq->tq_mutex);
3620 }
3621
3622 /*
3623 * Dequeue all the timers of the given subtype from the
3624 * given timerQ. If subtype is 0, then dequeue all the
3625 * timers.
3626 */
3627 idn_timer_t *
3628 idn_timer_get(idn_timerq_t *tq, int type, ushort_t tcookie)
3629 {
3630 register idn_timer_t *tp, *tphead;
3631
3632 ASSERT(IDN_TIMERQ_IS_LOCKED(tq));
3633
3634 if ((tp = tq->tq_queue) == NULL)
3635 return (NULL);
3636
3637 if (!type) {
3638 tq->tq_queue = NULL;
3639 tq->tq_count = 0;
3640 tphead = tp;
3641 } else {
3642 int count;
3643 idn_timer_t *tpnext;
3644
3645 tphead = NULL;
3646 count = tq->tq_count;
3647 do {
3648 tpnext = tp->t_forw;
3649 if ((tp->t_type == type) &&
3650 (!tcookie || (tp->t_cookie == tcookie))) {
3651 tp->t_forw->t_back = tp->t_back;
3652 tp->t_back->t_forw = tp->t_forw;
3653 if (tphead == NULL) {
3654 tp->t_forw = tp->t_back = tp;
3655 } else {
3656 tp->t_forw = tphead;
3657 tp->t_back = tphead->t_back;
3658 tp->t_back->t_forw = tp;
3659 tphead->t_back = tp;
3660 }
3661 tphead = tp;
3662 if (--(tq->tq_count) == 0)
3663 tq->tq_queue = NULL;
3664 else if (tq->tq_queue == tp)
3665 tq->tq_queue = tpnext;
3666 }
3667 tp = tpnext;
3668 } while (--count > 0);
3669 }
3670
3671 if (tphead) {
3672 tphead->t_back->t_forw = NULL;
3673
3674 for (tp = tphead; tp; tp = tp->t_forw)
3675 tp->t_onq = 0;
3676 }
3677
3678 return (tphead);
3679 }
3680
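/*
 * Arm a timer on the given timer queue.  If the caller did not
 * preassign a cookie, a unique non-zero 8-bit cookie is built from
 * the timer type (low nibble) and a per-queue rolling counter (high
 * nibble).  Any existing timers of the same type/cookie are stopped
 * first.  Returns the cookie.
 */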
3681 ushort_t
3682 idn_timer_start(idn_timerq_t *tq, idn_timer_t *tp, clock_t tval)
3683 {
3684 idn_timer_t *otp;
3685 ushort_t tcookie;
3686 procname_t proc = "idn_timer_start";
3687 STRING(str);
3688
3689 ASSERT(tq && tp && (tval > 0));
3690 ASSERT((tp->t_forw == tp) && (tp->t_back == tp));
3691 ASSERT(tp->t_type != 0);
3692
3693 IDN_TIMERQ_LOCK(tq);
3694 /*
3695 * Assign a unique non-zero 8-bit cookie to this timer
3696 * if the caller hasn't already preassigned one.
3697 */
3698 while ((tcookie = tp->t_cookie) == 0) {
3699 tp->t_cookie = (tp->t_type & 0xf) |
3700 ((++tq->tq_cookie & 0xf) << 4);
3701 /*
3702 * Calculated cookie must never conflict
3703 * with the public timer cookie.
3704 */
3705 ASSERT(tp->t_cookie != IDN_TIMER_PUBLIC_COOKIE);
3706 }
3707
3708 /*
3709 * First have to remove old timers of the
3710 * same type and cookie, and get rid of them.
3711 */
3712 otp = idn_timer_get(tq, tp->t_type, tcookie);
3713
3714 tq->tq_count++;
3715
3716 if (tq->tq_queue == NULL) {
3717 tq->tq_queue = tp;
3718 ASSERT((tp->t_forw == tp) && (tp->t_back == tp));
3719 } else {
3720 /*
3721 * Put me at the end of the list.
3722 */
3723 tp->t_forw = tq->tq_queue;
3724 tp->t_back = tq->tq_queue->t_back;
3725 tp->t_back->t_forw = tp;
3726 tp->t_forw->t_back = tp;
3727 }
3728
3729 tp->t_onq = 1;
3730 tp->t_q = tq;
3731 tp->t_id = timeout(idn_timer_expired, (caddr_t)tp, tval);
3732
3733
3734 INUM2STR(tp->t_type, str);
3735 PR_TIMER("%s: started %s timer (domain = %d, cookie = 0x%x)\n",
3736 proc, str, tp->t_domid, tcookie);
3737
3738 IDN_TIMERQ_UNLOCK(tq);
3739
3740 if (otp)
3741 (void) idn_timer_stopall(otp);
3742
3743 return (tcookie);
3744 }
3745
3746 /*
3747 * Stop all timers of the given subtype.
3748 * If subtype is 0, then stop all timers
3749 * in this timerQ.
3750 */
3751 void
3752 idn_timer_stop(idn_timerq_t *tq, int type, ushort_t tcookie)
3753 {
3754 idn_timer_t *tphead;
3755 procname_t proc = "idn_timer_stop";
3756 STRING(str);
3757
3758 ASSERT(tq);
3759
3760 INUM2STR(type, str);
3761
3762 IDN_TIMERQ_LOCK(tq);
3763
3764 if (tq->tq_count == 0) {
3765 PR_TIMER("%s: found no %s timers (count=0)\n", proc, str);
3766 IDN_TIMERQ_UNLOCK(tq);
3767 return;
3768 }
3769 tphead = idn_timer_get(tq, type, tcookie);
3770 #ifdef DEBUG
3771 if (tphead == NULL)
3772 PR_TIMER("%s: found no %s (cookie = 0x%x) "
3773 "timers (count=%d)!!\n",
3774 proc, str, tcookie, tq->tq_count);
3775 #endif /* DEBUG */
3776 IDN_TIMERQ_UNLOCK(tq);
3777
3778 if (tphead)
3779 (void) idn_timer_stopall(tphead);
3780 }
3781
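/*
 * Untimeout and free a (detached) list of timers, e.g. as returned
 * by idn_timer_get().  Returns the number of timers processed;
 * those whose timeout had already fired are counted as non-active.
 */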
3782 int
3783 idn_timer_stopall(idn_timer_t *tp)
3784 {
3785 int count = 0;
3786 int nonactive;
3787 uint_t type;
3788 idn_timer_t *ntp;
3789 procname_t proc = "idn_timer_stopall";
3790 STRING(str);
3791
3792 nonactive = 0;
3793
3794 if (tp) {
3795 /*
3796 * Circle should have been broken.
3797 */
3798 ASSERT(tp->t_back->t_forw == NULL);
3799 type = tp->t_type;
3800 INUM2STR(type, str);
3801 }
3802
3803 for (; tp; tp = ntp) {
3804 ntp = tp->t_forw;
3805 count++;
3806 ASSERT(tp->t_id != (timeout_id_t)0);
3807 if (untimeout(tp->t_id) < 0) {
3808 nonactive++;
3809 PR_TIMER("%s: bad %s untimeout (domain=%d)\n",
3810 proc, str, tp->t_domid);
3811 } else {
3812 PR_TIMER("%s: good %s untimeout (domain=%d)\n",
3813 proc, str, tp->t_domid);
3814 }
3815 /*
3816 * There are two possible outcomes from
3817 * the untimeout(). Each ultimately results
3818 * in us having to free the timeout structure.
3819 *
3820 * 1. We successfully aborted a timeout call.
3821 *
3822 * 2. We failed to find the given timer; it
3823 * probably just fired off.
3824 */
3825 idn_timer_free(tp);
3826 }
3827 PR_TIMER("%s: stopped %d of %d %s timers\n",
3828 proc, count - nonactive, count, str);
3829
3830 return (count);
3831 }
3832
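/*
 * Remove a timer from its queue, if still queued, without touching
 * the underlying timeout.  Caller must hold the timer queue lock.
 */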
3833 void
3834 idn_timer_dequeue(idn_timerq_t *tq, idn_timer_t *tp)
3835 {
3836 ASSERT(tq && tp);
3837 ASSERT(IDN_TIMERQ_IS_LOCKED(tq));
3838
3839 ASSERT(tp->t_q == tq);
3840
3841 if (tp->t_onq == 0) {
3842 /*
3843 * We've already been dequeued.
3844 */
3845 ASSERT(tp == tp->t_forw);
3846 ASSERT(tp == tp->t_back);
3847 } else {
3848 /*
3849 * We're still in the queue, get out.
3850 */
3851 if (tq->tq_queue == tp)
3852 tq->tq_queue = tp->t_forw;
3853 tp->t_forw->t_back = tp->t_back;
3854 tp->t_back->t_forw = tp->t_forw;
3855 tp->t_onq = 0;
3856 if (--(tq->tq_count) == 0) {
3857 ASSERT(tq->tq_queue == tp);
3858 tq->tq_queue = NULL;
3859 }
3860 tp->t_forw = tp->t_back = tp;
3861 }
3862 }
3863
3864 /*
3865 * -----------------------------------------------------------------------
3866 */
3867 /*ARGSUSED*/
3868 static int
3869 idn_slabpool_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
3870 {
3871 register int p, nfree;
3872 char dsetstr[128];
3873
3874 ASSERT(IDN_GLOCK_IS_HELD());
3875
3876 if (idn.slabpool == NULL) {
3877 (void) mi_mpprintf(mp,
3878 "IDN slabpool not initialized (masterid = %d)",
3879 IDN_GET_MASTERID());
3880 return (0);
3881 }
3882
3883 for (p = nfree = 0; p < idn.slabpool->npools; p++)
3884 nfree += idn.slabpool->pool[p].nfree;
3885
3886 (void) mi_mpprintf(mp,
3887 "IDN slabpool (ntotal_slabs = %d, nalloc = %d, "
3888 "npools = %d)",
3889 idn.slabpool->ntotslabs,
3890 idn.slabpool->ntotslabs - nfree,
3891 idn.slabpool->npools);
3892
3893 (void) mi_mpprintf(mp, "pool nslabs nfree domains");
3894
3895 for (p = 0; p < idn.slabpool->npools; p++) {
3896 register int d, s;
3897 uint_t domset;
3898
3899 domset = 0;
3900 for (s = 0; s < idn.slabpool->pool[p].nslabs; s++) {
3901 short dd;
3902
3903 dd = idn.slabpool->pool[p].sarray[s].sl_domid;
3904 if (dd != (short)IDN_NIL_DOMID)
3905 DOMAINSET_ADD(domset, dd);
3906 }
3907 dsetstr[0] = '\0';
3908 if (domset) {
3909 for (d = 0; d < MAX_DOMAINS; d++) {
3910 if (!DOMAIN_IN_SET(domset, d))
3911 continue;
3912
3913 if (dsetstr[0] == '\0')
3914 (void) sprintf(dsetstr, "%d", d);
3915 else
3916 (void) sprintf(dsetstr + strlen(dsetstr),
3917 " %d", d);
3918 }
3919 }
3920
3921 if (p < 10)
3922 (void) mi_mpprintf(mp, " %d      %d      %d     %s",
3923 p, idn.slabpool->pool[p].nslabs,
3924 idn.slabpool->pool[p].nfree,
3925 dsetstr);
3926 else
3927 (void) mi_mpprintf(mp, " %d     %d      %d     %s",
3928 p, idn.slabpool->pool[p].nslabs,
3929 idn.slabpool->pool[p].nfree,
3930 dsetstr);
3931 }
3932 return (0);
3933 }
3934
3935 /*ARGSUSED*/
3936 static int
3937 idn_buffer_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
3938 {
3939 smr_slab_t *sp;
3940 register int d, cnt;
3941 int bufcount[MAX_DOMAINS];
3942 int spl;
3943
3944 ASSERT(IDN_GLOCK_IS_HELD());
3945
3946 if (idn.localid == IDN_NIL_DOMID) {
3947 (void) mi_mpprintf(mp, "IDN not initialized (localid = %d)",
3948 idn.localid);
3949 return (0);
3950 }
3951
3952 (void) mi_mpprintf(mp, "Local domain has %d slabs allocated.",
3953 idn_domain[idn.localid].dnslabs);
3954
3955 DSLAB_LOCK_SHARED(idn.localid);
3956 if ((sp = idn_domain[idn.localid].dslab) == NULL) {
3957 DSLAB_UNLOCK(idn.localid);
3958 return (0);
3959 }
3960
3961 bzero(bufcount, sizeof (bufcount));
3962 cnt = 0;
3963
3964 spl = splhi();
3965 for (; sp; sp = sp->sl_next) {
3966 smr_slabbuf_t *bp;
3967
3968 while (!lock_try(&sp->sl_lock))
3969 ;
3970 for (bp = sp->sl_inuse; bp; bp = bp->sb_next) {
3971 bufcount[bp->sb_domid]++;
3972 cnt++;
3973 }
3974 lock_clear(&sp->sl_lock);
3975 }
3976 splx(spl);
3977
3978 DSLAB_UNLOCK(idn.localid);
3979
3980 (void) mi_mpprintf(mp, "Local domain has %d buffers outstanding.", cnt);
3981 if (cnt == 0)
3982 return (0);
3983
3984 (void) mi_mpprintf(mp, "Domain nbufs");
3985 for (d = 0; d < MAX_DOMAINS; d++)
3986 if (bufcount[d]) {
3987 if (d < 10)
3988 (void) mi_mpprintf(mp, " %d      %d",
3989 d, bufcount[d]);
3990 else
3991 (void) mi_mpprintf(mp, " %d     %d",
3992 d, bufcount[d]);
3993 }
3994
3995 return (0);
3996 }
3997
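/*
 * Return a string of (w + s - W) spaces.  Helper for the HEXSPACE/
 * DECSPACE macros below, which pad a number of actual width W out
 * to a field width of w plus s spaces in the ndd reports.
 */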
3998 static const char *
3999 _get_spaces(int w, int s, int W)
4000 {
4001 static const char *const _spaces[] = {
4002 "", /* 0 */
4003 " ", /* 1 */
4004 "  ", /* 2 */
4005 "   ", /* 3 */
4006 "    ", /* 4 */
4007 "     ", /* 5 */
4008 "      ", /* 6 */
4009 "       ", /* 7 */
4010 "        ", /* 8 */
4011 "         ", /* 9 */
4012 "          ", /* 10 */
4013 "           ", /* 11 */
4014 "            ", /* 12 */
4015 "             ", /* 13 */
4016 "              ", /* 14 */
4017 "               ", /* 15 */
4018 "                ", /* 16 */
4019 "                 ", /* 17 */
4020 "                  ", /* 18 */
4021 "                   ", /* 19 */
4022 };
4023 return (_spaces[w+s-W]);
4024 }
4025
4026 #define _SSS(X, W, w, s) \
4027 (((w) >= (W)) && (X)) ? _get_spaces((w), (s), (W))
4028
4029 static const char *
4030 _hexspace(uint64_t v, int sz, int width, int padding)
4031 {
4032 int maxnbl = 16;
4033 int diff;
4034 uchar_t *np;
4035
4036 diff = sizeof (uint64_t) - sz;
4037 np = (uchar_t *)&v + diff;
4038 maxnbl -= diff << 1;
4039 while (sz-- > 0) {
4040 if ((*np & 0xf0) && (width >= maxnbl))
4041 return (_get_spaces(width, padding, maxnbl));
4042 maxnbl--;
4043 if ((*np & 0x0f) && (width >= maxnbl))
4044 return (_get_spaces(width, padding, maxnbl));
4045 maxnbl--;
4046 np++;
4047 }
4048 return (_get_spaces(width, padding, 1));
4049 }
4050
4051 #define HEXSPACE(v, t, w, s) _hexspace((uint64_t)(v), sizeof (t), (w), (s))
4052
4053 #define DECSPACE(n, w, s) \
4054 (_SSS((uint_t)(n) >= 10000000, 8, (w), (s)) : \
4055 _SSS((uint_t)(n) >= 1000000, 7, (w), (s)) : \
4056 _SSS((uint_t)(n) >= 100000, 6, (w), (s)) : \
4057 _SSS((uint_t)(n) >= 10000, 5, (w), (s)) : \
4058 _SSS((uint_t)(n) >= 1000, 4, (w), (s)) : \
4059 _SSS((uint_t)(n) >= 100, 3, (w), (s)) : \
4060 _SSS((uint_t)(n) >= 10, 2, (w), (s)) : \
4061 _get_spaces((w), (s), 1))
4062
4063 #define DECSPACE16(n, w, s) \
4064 (_SSS((n) >= 10000, 5, (w), (s)) : \
4065 _SSS((n) >= 1000, 4, (w), (s)) : \
4066 _SSS((n) >= 100, 3, (w), (s)) : \
4067 _SSS((n) >= 10, 2, (w), (s)) : \
4068 _get_spaces((w), (s), 1))
4069
4070 #define MBXINFO(mtp) \
4071 (void *)&mtp->mt_header, \
4072 HEXSPACE(&mtp->mt_header, &mtp->mt_header, 16, 2), \
4073 mtp->mt_header.mh_svr_ready_ptr, \
4074 HEXSPACE(mtp->mt_header.mh_svr_ready_ptr, \
4075 mtp->mt_header.mh_svr_ready_ptr, 8, 1), \
4076 mtp->mt_header.mh_svr_active_ptr, \
4077 HEXSPACE(mtp->mt_header.mh_svr_active_ptr, \
4078 mtp->mt_header.mh_svr_active_ptr, 8, 2), \
4079 *(ushort_t *)(IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_ready_ptr)), \
4080 DECSPACE16(*(ushort_t *) \
4081 (IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_ready_ptr)), \
4082 1, 1), \
4083 *(ushort_t *)(IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_active_ptr)), \
4084 DECSPACE16(*(ushort_t *) \
4085 (IDN_OFFSET2ADDR(mtp->mt_header.mh_svr_active_ptr)), \
4086 1, 5), \
4087 mtp->mt_header.mh_cookie, \
4088 HEXSPACE(mtp->mt_header.mh_cookie, \
4089 mtp->mt_header.mh_cookie, 8, 2), \
4090 (void *)&mtp->mt_queue[0], \
4091 HEXSPACE(&mtp->mt_queue[0], &mtp->mt_queue[0], 16, 2)
4092
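/*
 * ndd report: dump the SMR mailbox table for every attached
 * channel, one row per (domain, subdomain) pair, flagging any
 * entries with outstanding (busy) messages.
 */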
4093 /*ARGSUSED*/
4094 static int
4095 idn_mboxtbl_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
4096 {
4097 register int c, n, domid, subdomid;
4098 register idn_mboxtbl_t *mtp;
4099 register idn_mboxmsg_t *msp;
4100 idn_mboxtbl_t *map, *mtbasep;
4101
4102
4103 ASSERT((cp == MBXTBL_PART_REPORT) || (cp == MBXTBL_FULL_REPORT));
4104
4105 if (IDN_GLOCK_TRY_SHARED() == 0) {
4106 (void) mi_mpprintf(mp, "Local domain busy, try again.");
4107 return (0);
4108 }
4109
4110 if ((map = idn.mboxarea) == NULL) {
4111 (void) mi_mpprintf(mp,
4112 "WARNING: Local domain is not master, "
4113 "ASSUMING idn.smr.vaddr.");
4114 map = (idn_mboxtbl_t *)idn.smr.vaddr;
4115 }
4116
4117 if (map) {
4118 (void) mi_mpprintf(mp, "Mailbox Area starts @ 0x%p",
4119 (void *)map);
4120 } else {
4121 (void) mi_mpprintf(mp, "Mailbox Area not found.");
4122 goto repdone;
4123 }
4124
4125 if (!idn.nchannels) {
4126 (void) mi_mpprintf(mp, "No OPEN channels found");
4127 goto repdone;
4128 }
4129
4130 for (c = 0; c < IDN_MAX_NETS; c++) {
4131
4132 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]);
4133 if (!IDN_CHANNEL_IS_ATTACHED(&idn.chan_servers[c])) {
4134 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
4135 continue;
4136 }
4137
4138 (void) mi_mpprintf(mp,
4139 "Channel %d ---------------------------"
4140 "--------------------------"
4141 "-----------------------------", c);
4142 (void) mi_mpprintf(mp,
4143 " Domain Header "
4144 "Ready/Active Ptrs "
4145 "rdy/actv cookie Queue "
4146 "busy");
4147
4148 for (domid = 0; domid < MAX_DOMAINS; domid++) {
4149 register int busy_count;
4150
4151 if ((cp == MBXTBL_PART_REPORT) &&
4152 (idn_domain[domid].dcpu == IDN_NIL_DCPU))
4153 continue;
4154
4155 mtbasep = IDN_MBOXAREA_BASE(map, domid);
4156
4157 for (subdomid = 0; subdomid < MAX_DOMAINS;
4158 subdomid++) {
4159 mtp = IDN_MBOXTBL_PTR(mtbasep, subdomid);
4160 mtp = IDN_MBOXTBL_PTR_CHAN(mtp, c);
4161
4162 if (subdomid == domid) {
4163 if (subdomid == 0)
4164 (void) mi_mpprintf(mp,
4165 " %x.%x-%d%s%s",
4166 domid, subdomid, c,
4167 /*CONSTCOND*/
4168 DECSPACE(c, 2, 2),
4169 "-- unused --");
4170 else
4171 (void) mi_mpprintf(mp,
4172 " .%x-%d%s%s",
4173 subdomid, c,
4174 /*CONSTCOND*/
4175 DECSPACE(c, 2, 2),
4176 "-- unused --");
4177 continue;
4178 }
4179 busy_count = 0;
4180 msp = &mtp->mt_queue[0];
4181 for (n = 0; n < IDN_MMBOX_NUMENTRIES; n++) {
4182 if (msp[n].ms_owner)
4183 busy_count++;
4184 }
4185 if (subdomid == 0) {
4186 (void) mi_mpprintf(mp,
4187 " %x.%x-%d%s%p%s%x%s/ %x%s"
4188 "%d%s/ %d%s%x%s%p%s%d%s",
4189 domid, subdomid, c,
4190 /*CONSTCOND*/
4191 DECSPACE(c, 2, 2),
4192 MBXINFO(mtp), busy_count,
4193 busy_count ? " <<<<<":"");
4194 } else {
4195 (void) mi_mpprintf(mp,
4196 " .%x-%d%s%p%s%x%s/ %x%s"
4197 "%d%s/ %d%s%x%s%p%s%d%s",
4198 subdomid, c,
4199 /*CONSTCOND*/
4200 DECSPACE(c, 2, 2),
4201 MBXINFO(mtp), busy_count,
4202 busy_count ? " <<<<<":"");
4203 }
4204 }
4205 }
4206 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
4207 }
4208
4209 repdone:
4210 IDN_GUNLOCK();
4211
4212 return (0);
4213 }
4214
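/*
 * Helper for idn_mainmbox_report(): print one line per channel for
 * the given domain's send or receive main mailbox array.
 */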
4215 /*ARGSUSED*/
4216 static void
4217 idn_mainmbox_domain_report(queue_t *wq, mblk_t *mp, int domid,
4218 idn_mainmbox_t *mmp, char *mbxtype)
4219 {
4220 register int c;
4221
4222 if (mmp == NULL) {
4223 (void) mi_mpprintf(mp, " %x.%s -- none --", domid, mbxtype);
4224 return;
4225 }
4226
4227 for (c = 0; c < IDN_MAX_NETS; mmp++, c++) {
4228 int mm_count;
4229
4230 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[c]);
4231 if (IDN_CHANNEL_IS_DETACHED(&idn.chan_servers[c])) {
4232 (void) mi_mpprintf(mp, " %x.%s %u -- not open --",
4233 domid, mbxtype, (int)mmp->mm_channel);
4234 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
4235 continue;
4236 }
4237
4238 mm_count = ((mmp->mm_count < 0) ? 0 : mmp->mm_count) / 1000;
4239
4240 (void) mi_mpprintf(mp, " %x.%s %d%s%d%s%d%s%p%s%p%s%p%s%d/%d",
4241 domid, mbxtype,
4242 (int)mmp->mm_channel,
4243 /*CONSTCOND*/
4244 DECSPACE((int)mmp->mm_channel, 5, 2),
4245 mm_count, DECSPACE(mm_count, 8, 2),
4246 mmp->mm_dropped,
4247 DECSPACE(mmp->mm_dropped, 8, 2),
4248 (void *)mmp->mm_smr_mboxp,
4249 HEXSPACE(mmp->mm_smr_mboxp,
4250 mmp->mm_smr_mboxp, 16, 2),
4251 (void *)mmp->mm_smr_readyp,
4252 HEXSPACE(mmp->mm_smr_readyp,
4253 mmp->mm_smr_readyp, 16, 2),
4254 (void *)mmp->mm_smr_activep,
4255 HEXSPACE(mmp->mm_smr_activep,
4256 mmp->mm_smr_activep, 16, 2),
4257 mmp->mm_qiget, mmp->mm_qiput);
4258 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[c]);
4259 }
4260 }
4261
4262 /*ARGSUSED2*/
4263 static int
4264 idn_mainmbox_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
4265 {
4266 int domid;
4267 int header = 0;
4268
4269 /*
4270 * Domain 0 never has a send/recv mainmbox so
4271 * don't bother printing him.
4272 */
4273 for (domid = 1; domid < MAX_DOMAINS; domid++) {
4274 idn_domain_t *dp;
4275
4276 dp = &idn_domain[domid];
4277
4278 if (dp->dcpu == IDN_NIL_DCPU)
4279 continue;
4280 IDN_DLOCK_SHARED(domid);
4281 if (dp->dcpu == IDN_NIL_DCPU) {
4282 IDN_DUNLOCK(domid);
4283 continue;
4284 }
4285 if (!header) {
4286 (void) mi_mpprintf(mp,
4287 "Domain Chan PktCntK "
4288 "PktDrop SMRMbox "
4289 "ReadyPtr "
4290 "ActvPtr Miget/Miput");
4291 header = 1;
4292 }
4293
4294 mutex_enter(&dp->dmbox.m_mutex);
4295 idn_mainmbox_domain_report(wq, mp, domid,
4296 idn_domain[domid].dmbox.m_send,
4297 "snd");
4298 idn_mainmbox_domain_report(wq, mp, domid,
4299 idn_domain[domid].dmbox.m_recv,
4300 "rcv");
4301 mutex_exit(&dp->dmbox.m_mutex);
4302
4303 IDN_DUNLOCK(domid);
4304
4305 (void) mi_mpprintf(mp,
4306 " ---------------------------------------"
4307 "------------------------"
4308 "----------------------------");
4309 }
4310
4311 if (!header)
4312 (void) mi_mpprintf(mp, "No ACTIVE domain connections exist");
4313
4314 return (0);
4315 }
4316
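/*
 * ndd report: overall IDN state -- SMR addresses and sizes, the
 * local/master identity, channel counts, and the various domain
 * sets (connected, pending links/unlinks, retries, hitlist,
 * relink).
 */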
4317 /*ARGSUSED*/
4318 static int
4319 idn_global_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
4320 {
4321 int i, nactive, masterid, nretry;
4322 uint_t locpfn_upper, locpfn_lower,
4323 rempfn_upper, rempfn_lower;
4324 uint_t marea_upper, marea_lower,
4325 iarea_upper, iarea_lower;
4326 char alt_dbuffer[64];
4327 idn_retry_job_t *rp;
4328 domainset_t retryset;
4329 domainset_t connected;
4330 idn_synczone_t *zp;
4331 idn_syncop_t *sp;
4332 idn_domain_t *dp;
4333 char *dbp, *dbuffer;
4334
4335 if (IDN_SYNC_TRYLOCK() == 0) {
4336 (void) mi_mpprintf(mp, "Sync lock busy, try again.");
4337 return (0);
4338 }
4339
4340 if (IDN_GLOCK_TRY_SHARED() == 0) {
4341 (void) mi_mpprintf(mp, "Local domain busy, try again.");
4342 IDN_SYNC_UNLOCK();
4343 return (0);
4344 }
4345 if ((dbp = dbuffer = ALLOC_DISPSTRING()) == NULL)
4346 dbp = alt_dbuffer;
4347
4348 (void) mi_mpprintf(mp, "IDN\n Global State = %s (%d)",
4349 idngs_str[idn.state], idn.state);
4350
4351 (void) mi_mpprintf(mp, "SMR");
4352 (void) mi_mpprintf(mp, " vaddr ");
4353 (void) mi_mpprintf(mp, " 0x%p", (void *)idn.smr.vaddr);
4354
4355 (void) mi_mpprintf(mp, " paddr-local paddr-remote");
4356 masterid = IDN_GET_MASTERID();
4357 locpfn_upper = (uint_t)(idn.smr.locpfn >> (32 - PAGESHIFT));
4358 locpfn_lower = (uint_t)(idn.smr.locpfn << PAGESHIFT);
4359 if (idn.smr.rempfn == PFN_INVALID) {
4360 rempfn_upper = rempfn_lower = 0;
4361 } else {
4362 rempfn_upper = (uint_t)(idn.smr.rempfn >> (32 - PAGESHIFT));
4363 rempfn_lower = (uint_t)(idn.smr.rempfn << PAGESHIFT);
4364 }
4365 (void) mi_mpprintf(mp, " 0x%x.%x%s0x%x.%x",
4366 locpfn_upper, locpfn_lower,
4367 HEXSPACE(locpfn_lower, locpfn_lower, 8,
4368 (locpfn_upper < 0x10) ? 4 : 3),
4369 rempfn_upper, rempfn_lower);
4370
4371 (void) mi_mpprintf(mp, " SMR length = %d MBytes", IDN_SMR_SIZE);
4372 (void) mi_mpprintf(mp, " SMR bufsize = %d Bytes", IDN_SMR_BUFSIZE);
4373 (void) mi_mpprintf(mp, " NWR length = %d MBytes", IDN_NWR_SIZE);
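/*
 * The NWR is divided between the mailbox area and the
 * remaining I/O buffer area; both byte counts are shown
 * below as 32-bit upper/lower halves.
 */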
4374 marea_upper = (uint_t)((uint64_t)IDN_MBOXAREA_SIZE >> 32);
4375 marea_lower = (uint_t)((uint64_t)IDN_MBOXAREA_SIZE & 0xffffffff);
4376 iarea_upper = (uint_t)((uint64_t)(MB2B(IDN_NWR_SIZE) -
4377 (size_t)IDN_MBOXAREA_SIZE) >> 32);
4378 iarea_lower = (uint_t)((MB2B(IDN_NWR_SIZE) -
4379 (size_t)IDN_MBOXAREA_SIZE) & 0xffffffff);
4380 (void) mi_mpprintf(mp,
4381 " [ mbox area = 0x%x.%x Bytes, "
4382 "iobuf area = 0x%x.%x Bytes ]",
4383 marea_upper, marea_lower, iarea_upper, iarea_lower);
4384
4385 (void) mi_mpprintf(mp,
4386 "\nIDNnet (local domain [id:%d] [name:%s] is %s)",
4387 idn.localid,
4388 idn_domain[idn.localid].dname,
4389 (masterid == IDN_NIL_DOMID) ? "IDLE" :
4390 (idn.localid == masterid) ? "MASTER" :
4391 "SLAVE");
4392 nactive = 0;
4393 for (i = 0; i < IDN_MAX_NETS; i++) {
4394 IDN_CHAN_LOCK_GLOBAL(&idn.chan_servers[i]);
4395 if (IDN_CHANNEL_IS_ACTIVE(&idn.chan_servers[i]))
4396 nactive++;
4397 IDN_CHAN_UNLOCK_GLOBAL(&idn.chan_servers[i]);
4398 }
4399 (void) mi_mpprintf(mp, " I/O Networks: (Open = %d, "
4400 "Active = %d, Max = %d)",
4401 idn.nchannels, nactive, IDN_MAX_NETS);
4402 (void) mi_mpprintf(mp, " Number of Domains = %d", idn.ndomains);
4403 (void) mi_mpprintf(mp, " Number of AWOLs = %d", idn.nawols);
4404 /*
4405 * During connect, domains can be in ds_connected while
4406 * still in ds_trans_on. Only once they leave ds_trans_on
4407 * are they really connected.
4408 */
4409 connected = idn.domset.ds_connected & ~idn.domset.ds_trans_on;
4410 DOMAINSET_ADD(connected, idn.localid);
4411 domainset2str(connected, dbp);
4412 (void) mi_mpprintf(mp, " Connected Domains = %s", dbp);
4413 domainset2str(idn.domset.ds_trans_on, dbp);
4414 (void) mi_mpprintf(mp, " Pending Domain Links = %s",
4415 idn.domset.ds_trans_on ? dbp : "<>");
4416 domainset2str(idn.domset.ds_trans_off, dbp);
4417 (void) mi_mpprintf(mp, " Pending Domain Unlinks = %s",
4418 idn.domset.ds_trans_off ? dbp : "<>");
4419 mutex_enter(&idn.retryqueue.rq_mutex);
4420 nretry = idn.retryqueue.rq_count;
4421 retryset = 0;
4422 for (i = 0, rp = idn.retryqueue.rq_jobs; i < nretry; i++,
4423 rp = rp->rj_next) {
4424 int domid;
4425
4426 domid = IDN_RETRY_TOKEN2DOMID(rp->rj_token);
4427 if (VALID_DOMAINID(domid)) {
4428 DOMAINSET_ADD(retryset, domid);
4429 }
4430 }
4431 mutex_exit(&idn.retryqueue.rq_mutex);
4432 domainset2str(retryset, dbp);
4433 (void) mi_mpprintf(mp, " Retry Jobs:Domains = %d:%s",
4434 nretry, retryset ? dbp : "<>");
4435 domainset2str(idn.domset.ds_hitlist, dbp);
4436 (void) mi_mpprintf(mp, " Hitlist Domains = %s",
4437 idn.domset.ds_hitlist ? dbp : "<>");
4438 domainset2str(idn.domset.ds_relink, dbp);
4439 (void) mi_mpprintf(mp, " Reconfig Domains = %s",
4440 idn.domset.ds_relink ? dbp : "<>");
4441 if (idn.domset.ds_relink)
4442 (void) mi_mpprintf(mp, " new master id = %d",
4443 IDN_GET_NEW_MASTERID());
4444 if (masterid == IDN_NIL_DOMID) {
4445 (void) mi_mpprintf(mp, " Master Domain: no master");
4446 } else {
4447 idn_domain_t *mdp;
4448
4449 mdp = &idn_domain[masterid];
4450
4451 (void) mi_mpprintf(mp,
4452 " Master Domain (id:name/brds - state):");
4453
4454 if (strlen(mdp->dname) > 0)
4455 (void) strcpy(dbp, mdp->dname);
4456 else
4457 boardset2str(mdp->dhw.dh_boardset, dbp);
4458 if (masterid < 10)
4459 (void) mi_mpprintf(mp, " %d: %s - %s",
4460 masterid, dbp,
4461 idnds_str[mdp->dstate]);
4462 else
4463 (void) mi_mpprintf(mp, " %d: %s - %s",
4464 masterid, dbp,
4465 idnds_str[mdp->dstate]);
4466 }
4467 if (idn.ndomains <= 1) {
4468 (void) mi_mpprintf(mp, " Slave Domains: none");
4469 } else {
4470 int d;
4471
4472 (void) mi_mpprintf(mp,
4473 " Slave Domains (id:name/brds - state):");
4474 for (d = 0; d < MAX_DOMAINS; d++) {
4475 dp = &idn_domain[d];
4476
4477 if ((dp->dcpu == IDN_NIL_DCPU) || (d == masterid))
4478 continue;
4479
4480 if (strlen(dp->dname) > 0)
4481 (void) strcpy(dbp, dp->dname);
4482 else
4483 boardset2str(dp->dhw.dh_boardset, dbp);
4484 if (d < 10)
4485 (void) mi_mpprintf(mp, " %d: %s - %s",
4486 d, dbp,
4487 idnds_str[dp->dstate]);
4488 else
4489 (void) mi_mpprintf(mp, " %d: %s - %s",
4490 d, dbp,
4491 idnds_str[dp->dstate]);
4492 }
4493 }
4494
4495 if (idn.nawols == 0) {
4496 (void) mi_mpprintf(mp, " AWOL Domains: none");
4497 } else {
4498 int d;
4499
4500 (void) mi_mpprintf(mp, " AWOL Domains (id:name/brds):");
4501 for (d = 0; d < MAX_DOMAINS; d++) {
4502 dp = &idn_domain[d];
4503
4504 if (!DOMAIN_IN_SET(idn.domset.ds_awol, d) ||
4505 (dp->dcpu == IDN_NIL_DCPU))
4506 continue;
4507
4508 if (strlen(dp->dname) > 0)
4509 (void) strcpy(dbp, dp->dname);
4510 else
4511 boardset2str(dp->dhw.dh_boardset, dbp);
4512 if (d < 10)
4513 (void) mi_mpprintf(mp, " %d: %s",
4514 d, dbp);
4515 else
4516 (void) mi_mpprintf(mp, " %d: %s",
4517 d, dbp);
4518 }
4519 }
4520
4521 /*CONSTCOND*/
4522 i = IDN_SYNC_GETZONE(IDNSYNC_CONNECT);
4523 zp = &idn.sync.sz_zone[i];
4524 if (zp->sc_cnt == 0) {
4525 (void) mi_mpprintf(mp, " Sync Zone (con): [empty]");
4526 } else {
4527 (void) mi_mpprintf(mp, " Sync Zone (con): [%d domains]",
4528 zp->sc_cnt);
4529 sp = zp->sc_op;
4530 for (i = 0; (i < zp->sc_cnt) && sp; i++) {
4531 (void) mi_mpprintf(mp,
4532 " "
4533 "%x: x_set =%s0x%x, r_set =%s0x%x",
4534 sp->s_domid,
4535 HEXSPACE(sp->s_set_exp,
4536 sp->s_set_exp, 4, 1),
4537 sp->s_set_exp,
4538 HEXSPACE(sp->s_set_rdy,
4539 sp->s_set_rdy, 4, 1),
4540 sp->s_set_rdy);
4541 sp = sp->s_next;
4542 }
4543 }
4544 /*CONSTCOND*/
4545 i = IDN_SYNC_GETZONE(IDNSYNC_DISCONNECT);
4546 zp = &idn.sync.sz_zone[i];
4547 if (zp->sc_cnt == 0) {
4548 (void) mi_mpprintf(mp, " Sync Zone (dis): [empty]");
4549 } else {
4550 (void) mi_mpprintf(mp, " Sync Zone (dis): [%d domains]",
4551 zp->sc_cnt);
4552 sp = zp->sc_op;
4553 for (i = 0; (i < zp->sc_cnt) && sp; i++) {
4554 (void) mi_mpprintf(mp,
4555 " "
4556 "%x: x_set =%s0x%x, r_set =%s0x%x",
4557 sp->s_domid,
4558 HEXSPACE(sp->s_set_exp,
4559 sp->s_set_exp, 4, 1),
4560 sp->s_set_exp,
4561 HEXSPACE(sp->s_set_rdy,
4562 sp->s_set_rdy, 4, 1),
4563 sp->s_set_rdy);
4564 sp = sp->s_next;
4565 }
4566 }
4567
4568 IDN_GUNLOCK();
4569 IDN_SYNC_UNLOCK();
4570
4571 if (dbuffer) {
4572 FREE_DISPSTRING(dbuffer);
4573 }
4574
4575 return (0);
4576 }
4577
4578 /*ARGSUSED*/
4579 static int
4580 idn_domain_report(queue_t *wq, mblk_t *mp, caddr_t cp, cred_t *cr)
4581 {
4582 int d, nchan;
4583 uint_t domset;
4584 idn_chanset_t chanset;
4585 idn_domain_t *dp;
4586 uint_t pset_upper, pset_lower;
4587 char *dbuffer, *dbp;
4588 char alt_dbuffer[64];
4589
4590
4591 if (IDN_SYNC_TRYLOCK() == 0) {
4592 (void) mi_mpprintf(mp, "Sync lock busy, try again.");
4593 return (0);
4594 }
4595
4596 if (IDN_GLOCK_TRY_SHARED() == 0) {
4597 (void) mi_mpprintf(mp, "Local domain busy, try again.");
4598 IDN_SYNC_UNLOCK();
4599 return (0);
4600 }
4601
4602 if ((dbp = dbuffer = ALLOC_DISPSTRING()) == NULL)
4603 dbp = alt_dbuffer;
4604
4605 if (cp == NULL)
4606 domset = DOMAINSET(idn.localid);
4607 else
4608 domset = DOMAINSET_ALL;
4609
4610 for (d = 0; d < MAX_DOMAINS; d++) {
4611
4612 if (DOMAIN_IN_SET(domset, d) == 0)
4613 continue;
4614
4615 dp = &idn_domain[d];
4616
4617 if (dp->dcpu == IDN_NIL_DCPU)
4618 continue;
4619
4620 if (IDN_DLOCK_TRY_SHARED(d) == 0) {
4621 if (d < 10)
4622 (void) mi_mpprintf(mp,
4623 "Domain %d (0x%p) busy...",
4624 d, (void *)dp);
4625 else
4626 (void) mi_mpprintf(mp,
4627 "Domain %d (0x%p) busy...",
4628 d, (void *)dp);
4629 continue;
4630 }
4631 if (dp->dcpu == IDN_NIL_DCPU) {
4632 IDN_DUNLOCK(d);
4633 continue;
4634 }
4635 if (d < 10)
4636 (void) mi_mpprintf(mp, "%sDomain %d (0x%p)",
4637 (d && (idn.ndomains > 1)) ? "\n" : "",
4638 d, (void *)dp);
4639 else
4640 (void) mi_mpprintf(mp, "%sDomain %d (0x%p)",
4641 (d && (idn.ndomains > 1)) ? "\n" : "",
4642 d, (void *)dp);
4643
4644 if (d == idn.localid)
4645 (void) mi_mpprintf(mp, " (local) State = %s (%d)",
4646 idnds_str[dp->dstate], dp->dstate);
4647 else
4648 (void) mi_mpprintf(mp, " State = %s (%d)",
4649 idnds_str[dp->dstate], dp->dstate);
4650 (void) mi_mpprintf(mp, " Name = %s, Netid = %d",
4651 (strlen(dp->dname) > 0) ? dp->dname : "<>",
4652 (int)dp->dnetid);
4653
4654 CHANSET_ZERO(chanset);
4655 nchan = idn_domain_is_registered(d, -1, &chanset);
4656 if (dbuffer)
4657 mask2str(chanset, dbp, 32);
4658 else
4659 (void) sprintf(dbp, "0x%x", chanset);
4660 (void) mi_mpprintf(mp, " Nchans = %d, Chanset = %s",
4661 nchan, nchan ? dbp : "<>");
4662 pset_upper = UPPER32_CPUMASK(dp->dcpuset);
4663 pset_lower = LOWER32_CPUMASK(dp->dcpuset);
4664 if (dbuffer)
4665 boardset2str(dp->dhw.dh_boardset, dbp);
4666 else
4667 (void) sprintf(dbp, "0x%x", dp->dhw.dh_boardset);
4668
4669 (void) mi_mpprintf(mp, " Nboards = %d, Brdset = %s",
4670 dp->dhw.dh_nboards,
4671 dp->dhw.dh_nboards ? dbp : "<>");
4672 (void) sprintf(dbp, "0x%x.%x", pset_upper, pset_lower);
4673 (void) mi_mpprintf(mp, " Ncpus = %d, Cpuset = %s",
4674 dp->dncpus, dp->dncpus ? dbp : "<>");
4675 (void) mi_mpprintf(mp, " Nmcadr = %d",
4676 dp->dhw.dh_nmcadr);
4677 (void) mi_mpprintf(mp,
4678 " MsgTimer = %s (cnt = %d)",
4679 (dp->dtimerq.tq_count > 0)
4680 ? "active" : "idle",
4681 dp->dtimerq.tq_count);
4682 (void) mi_mpprintf(mp, " Dcpu = %d "
4683 "(lastcpu = %d, cpuindex = %d)",
4684 dp->dcpu, dp->dcpu_last, dp->dcpuindex);
4685 (void) mi_mpprintf(mp, " Dio = %d "
4686 "(ioerr = %d, iochk = %d, iowanted = %d)",
4687 dp->dio, dp->dioerr, dp->diocheck ? 1 : 0,
4688 dp->diowanted ? 1 : 0);
4689 if (dp->dsync.s_cmd == IDNSYNC_NIL) {
4690 (void) mi_mpprintf(mp, " Dsync = %s",
4691 idnsync_str[IDNSYNC_NIL]);
4692 } else {
4693 (void) mi_mpprintf(mp,
4694 " Dsync = %s "
4695 "(x_set = 0x%x, r_set = 0x%x)",
4696 idnsync_str[dp->dsync.s_cmd],
4697 (uint_t)dp->dsync.s_set_exp,
4698 (uint_t)dp->dsync.s_set_rdy);
4699 }
4700 (void) mi_mpprintf(mp, " Dvote = 0x%x",
4701 dp->dvote.ticket);
4702 (void) mi_mpprintf(mp, " Dfin = %s (Sync = %s)",
4703 idnfin_str[dp->dfin],
4704 (dp->dfin_sync == IDNFIN_SYNC_OFF) ? "OFF" :
4705 (dp->dfin_sync == IDNFIN_SYNC_YES) ? "YES" :
4706 "NO");
4707 (void) mi_mpprintf(mp, " Dcookie_err = %s (cnt = %d)",
4708 dp->dcookie_err ? "YES" : "NO",
4709 dp->dcookie_errcnt);
4710 IDN_DUNLOCK(d);
4711 }
4712
4713 IDN_GUNLOCK();
4714
4715 if (dbuffer) {
4716 FREE_DISPSTRING(dbuffer);
4717 }
4718
4719 IDN_SYNC_UNLOCK();
4720
4721 return (0);
4722 }
4723
4724 #define SNOOP_ENTRIES 2048 /* power of 2 */
4725
4726 struct snoop_buffer {
4727 /* 0 */ char io;
4728 /* 1 */ char board;
4729 /* 2 */ char trans[14];
4730
4731 /* 10 */ uint_t xargs[4];
4732 } *snoop_data, snoop_buffer[SNOOP_ENTRIES+1];
4733
4734
4735 int snoop_index;
4736 kmutex_t snoop_mutex;
4737 static char _bd2hexascii[] = {
4738 '0', '1', '2', '3', '4', '5', '6', '7',
4739 '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
4740 };
4741
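/*
 * SNOOP_IDN logs one record per transaction into a circular
 * trace buffer of SNOOP_ENTRIES (a power of 2) entries.
 * snoop_data is lazily set to the 16-byte-aligned start of
 * snoop_buffer, and the index wraps via the
 * (SNOOP_ENTRIES - 1) mask.
 */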
4742 #define SNOOP_IDN(in, tr, bd, arg1, arg2, arg3, arg4) \
4743 { \
4744 if (idn_snoop) { \
4745 mutex_enter(&snoop_mutex); \
4746 if (snoop_data == NULL) { \
4747 snoop_data = (struct snoop_buffer *) \
4748 (((uintptr_t)snoop_buffer + 0xf) & \
4749 ~0xf); \
4750 } \
4751 snoop_data[snoop_index].io = ((in) == 0) ? 'o' : 'i'; \
4752 snoop_data[snoop_index].board = \
4753 ((bd) == -1) ? 'X' : _bd2hexascii[bd]; \
4754 (void) strncpy(snoop_data[snoop_index].trans, (tr), 14); \
4755 snoop_data[snoop_index].xargs[0] = (arg1); \
4756 snoop_data[snoop_index].xargs[1] = (arg2); \
4757 snoop_data[snoop_index].xargs[2] = (arg3); \
4758 snoop_data[snoop_index].xargs[3] = (arg4); \
4759 snoop_index++; \
4760 snoop_index &= SNOOP_ENTRIES - 1; \
4761 mutex_exit(&snoop_mutex); \
4762 } \
4763 }
4764
4765 /*
4766 * Allocate the circular buffers to be used for
4767 * DMV interrupt processing.
4768 */
4769 static int
4770 idn_init_handler()
4771 {
4772 int i, c;
4773 size_t len;
4774 idn_dmv_msg_t *basep, *ivp;
4775 uint32_t ivp_offset;
4776 procname_t proc = "idn_init_handler";
4777
4778 if (idn.intr.dmv_data != NULL) {
4779 cmn_err(CE_WARN,
4780 "IDN: 130: IDN DMV handler already initialized");
4781 return (-1);
4782 }
4783
4784 /*
4785 * This memory will be touched by the low-level
4786 * DMV trap handler for IDN.
4787 */
4788 len = sizeof (idn_dmv_data_t);
4789 len = roundup(len, sizeof (uint64_t));
4790 len += NCPU * idn_dmv_pending_max * sizeof (idn_dmv_msg_t);
4791 len = roundup(len, PAGESIZE);
4792
4793 PR_PROTO("%s: sizeof (idn_dmv_data_t) = %lu\n",
4794 proc, sizeof (idn_dmv_data_t));
4795 PR_PROTO("%s: allocating %lu bytes for dmv data area\n", proc, len);
4796
4797 idn.intr.dmv_data_len = len;
4798 idn.intr.dmv_data = kmem_zalloc(len, KM_SLEEP);
4799
4800 PR_PROTO("%s: DMV data area = %p\n", proc, (void *)idn.intr.dmv_data);
4801
4802 idn_dmv_data = (idn_dmv_data_t *)idn.intr.dmv_data;
4803 basep = (idn_dmv_msg_t *)roundup((size_t)idn.intr.dmv_data +
4804 sizeof (idn_dmv_data_t),
4805 sizeof (uint64_t));
4806 idn_dmv_data->idn_dmv_qbase = (uint64_t)basep;
4807
4808 ivp = basep;
4809 ivp_offset = 0;
4810 /*
4811 * The buffer queues are allocated per-cpu.
4812 */
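/*
 * Each cpu gets idn_dmv_pending_max entries linked into a
 * ring: iv_next holds the byte offset (from idn_dmv_qbase)
 * of the next entry, with the last entry pointing back to
 * the first. iv_ready starts out held (lock_set) so that
 * idn_handler() treats every entry as "not ready" until the
 * low-level DMV handler marks it ready, presumably by
 * clearing that lock after filling the entry.
 */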
4813 for (c = 0, ivp = basep; c < NCPU; ivp++, c++) {
4814 idn_dmv_data->idn_dmv_cpu[c].idn_dmv_current = ivp_offset;
4815 idn_iv_queue[c] = ivp;
4816 ivp_offset += sizeof (idn_dmv_msg_t);
4817 for (i = 1; i < idn_dmv_pending_max; ivp++, i++) {
4818 ivp->iv_next = ivp_offset;
4819 ivp->iv_ready = 0;
4820 lock_set(&ivp->iv_ready);
4821 ivp_offset += sizeof (idn_dmv_msg_t);
4822 }
4823 ivp->iv_next = idn_dmv_data->idn_dmv_cpu[c].idn_dmv_current;
4824 ivp->iv_ready = 0;
4825 lock_set(&ivp->iv_ready);
4826 }
4827
4828 idn.intr.dmv_inum = STARFIRE_DMV_IDN_BASE;
4829 idn.intr.soft_inum = add_softintr((uint_t)idn_pil, idn_handler, 0,
4830 SOFTINT_ST);
4831 idn_dmv_data->idn_soft_inum = idn.intr.soft_inum;
4832 /*
4833 * Make sure everything is out there before
4834 * we effectively set it free for use.
4835 */
4836 membar_stld_stst();
4837
4838 if (dmv_add_intr(idn.intr.dmv_inum, idn_dmv_handler,
4839 (caddr_t)idn_dmv_data)) {
4840 idn_deinit_handler();
4841 cmn_err(CE_WARN, "IDN: 132: failed to add IDN DMV handler");
4842 return (-1);
4843 }
4844
4845 return (0);
4846 }
4847
4848 static void
4849 idn_deinit_handler()
4850 {
4851 if (idn.intr.dmv_data == NULL)
4852 return;
4853
4854 (void) dmv_rem_intr(idn.intr.dmv_inum);
4855 (void) rem_softintr(idn.intr.soft_inum);
4856 kmem_free(idn.intr.dmv_data, idn.intr.dmv_data_len);
4857 idn.intr.dmv_data = NULL;
4858 }
4859
4860 /*
4861 * High-level (soft interrupt) handler for DMV interrupts
4862 */
4863 /*ARGSUSED0*/
4864 static uint_t
4865 idn_handler(caddr_t unused, caddr_t unused2)
4866 {
4867 #ifdef DEBUG
4868 int count = 0;
4869 #endif /* DEBUG */
4870 int cpuid = (int)CPU->cpu_id;
4871 ushort_t mtype, atype;
4872 idn_dmv_msg_t *xp, *xplimit;
4873 procname_t proc = "idn_handler";
4874
4875 ASSERT(getpil() >= idn_pil);
4876 flush_windows();
4877
4878 /*
4879 * Clear the synchronization flag to indicate that
4880 * processing has started. As long as idn_dmv_active
4881 * is non-zero, idn_dmv_handler will queue work without
4882 * initiating a soft interrupt. Since we clear it
4883 * first thing at most one pil-interrupt for IDN will
4884 * first thing, at most one pil-interrupt for IDN will
4885 * want to clear this flag at the end because it leaves
4886 * a window where an interrupt could get lost (unless it's
4887 * pushed by a subsequent interrupt). The objective in
4888 * doing this is to prevent exhausting a cpu's intr_vec
4889 * structures with interrupts of the same pil level.
4890 */
4891 lock_clear(&idn_dmv_data->idn_dmv_cpu[cpuid].idn_dmv_active);
4892
4893 xp = idn_iv_queue[cpuid];
4894 xplimit = xp + idn_dmv_pending_max;
4895 xp += idn_intr_index[cpuid];
4896 /*
4897 * As long as there's stuff that's READY in the
4898 * queue, keep processing.
4899 */
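/*
 * lock_try(iv_ready) succeeds only for entries the DMV
 * handler has marked ready, and acquiring it re-marks the
 * slot not-ready so each entry is consumed exactly once.
 * iv_inuse stays held while the message is copied out and
 * is cleared below to hand the slot back to the producer.
 */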
4900 while (lock_try(&xp->iv_ready)) {
4901
4902 ASSERT(lock_try(&xp->iv_inuse) == 0);
4903
4904 mtype = (ushort_t)xp->iv_mtype;
4905 mtype &= IDNP_MSGTYPE_MASK | IDNP_ACKNACK_MASK;
4906 atype = (ushort_t)xp->iv_atype;
4907
4908 if (((int)xp->iv_ver == idn.version) && mtype) {
4909 idn_protojob_t *jp;
4910 #ifdef DEBUG
4911 STRING(mstr);
4912 STRING(astr);
4913
4914 INUM2STR(mtype, mstr);
4915 if ((mtype & IDNP_MSGTYPE_MASK) == 0) {
4916 INUM2STR(atype, astr);
4917 (void) strcat(mstr, "/");
4918 (void) strcat(mstr, astr);
4919 }
4920
4921 count++;
4922
4923 PR_XDC("%s:%d:%d RECV: scpu = %d, msg = 0x%x(%s)\n",
4924 proc, (int)xp->iv_domid, count,
4925 (int)xp->iv_cpuid, mtype, mstr);
4926 PR_XDC("%s:%d:%d R-DATA: a0 = 0x%x, a1 = 0x%x\n",
4927 proc, (int)xp->iv_domid, count,
4928 xp->iv_xargs0, xp->iv_xargs1);
4929 PR_XDC("%s:%d:%d R-DATA: a2 = 0x%x, a3 = 0x%x\n",
4930 proc, (int)xp->iv_domid, count,
4931 xp->iv_xargs2, xp->iv_xargs3);
4932 #endif /* DEBUG */
4933
4934 if (mtype == IDNP_DATA) {
4935 jp = NULL;
4936 /*
4937 * The only time we receive pure
4938 * data messages at this level is
4939 * to wake up the channel server.
4940 * Since this is often an urgent
4941 * request we'll do it from here
4942 * instead of waiting for a proto
4943 * server to do it.
4944 */
4945 idn_signal_data_server((int)xp->iv_domid,
4946 (ushort_t)xp->iv_xargs0);
4947 } else {
4948 jp = idn_protojob_alloc(KM_NOSLEEP);
4949 /*
4950 * If the allocation fails, just drop
4951 * the message and get on with life.
4952 * If memory pressure is this great then
4953 * dropping this message is probably
4954 * the least of our worries!
4955 */
4956 if (jp) {
4957 jp->j_msg.m_domid = (int)xp->iv_domid;
4958 jp->j_msg.m_cpuid = (int)xp->iv_cpuid;
4959 jp->j_msg.m_msgtype = mtype;
4960 jp->j_msg.m_acktype = atype;
4961 jp->j_msg.m_cookie = xp->iv_cookie;
4962 SET_XARGS(jp->j_msg.m_xargs,
4963 xp->iv_xargs0, xp->iv_xargs1,
4964 xp->iv_xargs2, xp->iv_xargs3);
4965 }
4966
4967 }
4968 membar_ldst_stst();
4969
4970 lock_clear(&xp->iv_inuse);
4971
4972 if (jp)
4973 idn_protojob_submit(jp->j_msg.m_domid, jp);
4974 } else {
4975 membar_ldst_stst();
4976 IDN_GKSTAT_INC(gk_dropped_intrs);
4977 lock_clear(&xp->iv_inuse);
4978 }
4979
4980 if (++xp == xplimit)
4981 xp = idn_iv_queue[cpuid];
4982 }
4983
4984 idn_intr_index[cpuid] = xp - idn_iv_queue[cpuid];
4985
4986 return (DDI_INTR_CLAIMED);
4987 }
4988
4989 void
4990 idn_awol_event_set(boardset_t boardset)
4991 {
4992 idnsb_event_t *sbp;
4993 procname_t proc = "idn_awol_event_set";
4994
4995 ASSERT(IDN_GLOCK_IS_EXCL());
4996
4997 mutex_enter(&idn.idnsb_mutex);
4998 sbp = idn.idnsb_eventp;
4999 if (sbp == NULL) {
5000 cmn_err(CE_WARN, "IDN: 133: sigblock event area missing");
5001 cmn_err(CE_CONT,
5002 "IDN: 134: unable to mark boardset (0x%x) AWOL\n",
5003 boardset);
5004 mutex_exit(&idn.idnsb_mutex);
5005 return;
5006 }
5007
5008 if (boardset == 0) {
5009 PR_PROTO("%s: AWOL BOARDSET is 0, NO EVENT <<<<<<<<<<<<<<<\n",
5010 proc);
5011 mutex_exit(&idn.idnsb_mutex);
5012 return;
5013 } else {
5014 PR_PROTO("%s: MARKING BOARDSET (0x%x) AWOL\n", proc, boardset);
5015 }
5016 SSIEVENT_ADD(sbp, SSIEVENT_AWOL, boardset);
5017 mutex_exit(&idn.idnsb_mutex);
5018 }
5019
5020 void
5021 idn_awol_event_clear(boardset_t boardset)
5022 {
5023 idnsb_event_t *sbp;
5024 procname_t proc = "idn_awol_event_clear";
5025
5026 ASSERT(IDN_GLOCK_IS_EXCL());
5027
5028 mutex_enter(&idn.idnsb_mutex);
5029 sbp = idn.idnsb_eventp;
5030 if (sbp == NULL) {
5031 cmn_err(CE_WARN, "IDN: 133: sigblock event area missing");
5032 cmn_err(CE_CONT,
5033 "IDN: 134: unable to mark boardset (0x%x) AWOL\n",
5034 boardset);
5035 mutex_exit(&idn.idnsb_mutex);
5036 return;
5037 }
5038
5039 if (boardset == 0) {
5040 PR_PROTO("%s: AWOL BOARDSET is 0, NO EVENT <<<<<<<<<<<<<<<\n",
5041 proc);
5042 mutex_exit(&idn.idnsb_mutex);
5043 return;
5044 } else {
5045 PR_PROTO("%s: CLEARING BOARDSET (0x%x) AWOL\n", proc, boardset);
5046 }
5047 SSIEVENT_DEL(sbp, SSIEVENT_AWOL, boardset);
5048 mutex_exit(&idn.idnsb_mutex);
5049 }
5050
5051 static void
5052 idn_gkstat_init()
5053 {
5054 struct kstat *ksp;
5055 struct idn_gkstat_named *sgkp;
5056
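/*
 * Note: the lower-case "kstat" symbol is presumably never
 * defined, so the non-persistent kstat_create() variant in
 * the #else branch below is the one normally compiled.
 */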
5057 #ifdef kstat
5058 if ((ksp = kstat_create(IDNNAME, ddi_get_instance(idn.dip),
5059 IDNNAME, "net", KSTAT_TYPE_NAMED,
5060 sizeof (struct idn_gkstat_named) / sizeof (kstat_named_t),
5061 KSTAT_FLAG_PERSISTENT)) == NULL) {
5062 #else
5063 if ((ksp = kstat_create(IDNNAME, ddi_get_instance(idn.dip),
5064 IDNNAME, "net", KSTAT_TYPE_NAMED,
5065 sizeof (struct idn_gkstat_named) /
5066 sizeof (kstat_named_t), 0)) == NULL) {
5067 #endif /* kstat */
5068 cmn_err(CE_CONT, "IDN: 135: %s: %s\n",
5069 IDNNAME, "kstat_create failed");
5070 return;
5071 }
5072
5073 idn.ksp = ksp;
5074 sgkp = (struct idn_gkstat_named *)(ksp->ks_data);
5075 kstat_named_init(&sgkp->sk_curtime, "curtime",
5076 KSTAT_DATA_ULONG);
5077 kstat_named_init(&sgkp->sk_reconfigs, "reconfigs",
5078 KSTAT_DATA_ULONG);
5079 kstat_named_init(&sgkp->sk_reconfig_last, "reconfig_last",
5080 KSTAT_DATA_ULONG);
5081 kstat_named_init(&sgkp->sk_reaps, "reaps",
5082 KSTAT_DATA_ULONG);
5083 kstat_named_init(&sgkp->sk_reap_last, "reap_last",
5084 KSTAT_DATA_ULONG);
5085 kstat_named_init(&sgkp->sk_links, "links",
5086 KSTAT_DATA_ULONG);
5087 kstat_named_init(&sgkp->sk_link_last, "link_last",
5088 KSTAT_DATA_ULONG);
5089 kstat_named_init(&sgkp->sk_unlinks, "unlinks",
5090 KSTAT_DATA_ULONG);
5091 kstat_named_init(&sgkp->sk_unlink_last, "unlink_last",
5092 KSTAT_DATA_ULONG);
5093 kstat_named_init(&sgkp->sk_buffail, "buf_fail",
5094 KSTAT_DATA_ULONG);
5095 kstat_named_init(&sgkp->sk_buffail_last, "buf_fail_last",
5096 KSTAT_DATA_ULONG);
5097 kstat_named_init(&sgkp->sk_slabfail, "slab_fail",
5098 KSTAT_DATA_ULONG);
5099 kstat_named_init(&sgkp->sk_slabfail_last, "slab_fail_last",
5100 KSTAT_DATA_ULONG);
5103 kstat_named_init(&sgkp->sk_reap_count, "reap_count",
5104 KSTAT_DATA_ULONG);
5105 kstat_named_init(&sgkp->sk_dropped_intrs, "dropped_intrs",
5106 KSTAT_DATA_ULONG);
5107 ksp->ks_update = idn_gkstat_update;
5108 ksp->ks_private = (void *)NULL;
5109 kstat_install(ksp);
5110 }
5111
5112 static void
5113 idn_gkstat_deinit()
5114 {
5115 if (idn.ksp)
5116 kstat_delete(idn.ksp);
5117 idn.ksp = NULL;
5118 }
5119
5120 static int
5121 idn_gkstat_update(kstat_t *ksp, int rw)
5122 {
5123 struct idn_gkstat_named *sgkp;
5124
5125 sgkp = (struct idn_gkstat_named *)ksp->ks_data;
5126
5127 if (rw == KSTAT_WRITE) {
5128 sg_kstat.gk_reconfigs = sgkp->sk_reconfigs.value.ul;
5129 sg_kstat.gk_reconfig_last = sgkp->sk_reconfig_last.value.ul;
5130 sg_kstat.gk_reaps = sgkp->sk_reaps.value.ul;
5131 sg_kstat.gk_reap_last = sgkp->sk_reap_last.value.ul;
5132 sg_kstat.gk_links = sgkp->sk_links.value.ul;
5133 sg_kstat.gk_link_last = sgkp->sk_link_last.value.ul;
5134 sg_kstat.gk_unlinks = sgkp->sk_unlinks.value.ul;
5135 sg_kstat.gk_unlink_last = sgkp->sk_unlink_last.value.ul;
5136 sg_kstat.gk_buffail = sgkp->sk_buffail.value.ul;
5137 sg_kstat.gk_buffail_last = sgkp->sk_buffail_last.value.ul;
5138 sg_kstat.gk_slabfail = sgkp->sk_slabfail.value.ul;
5139 sg_kstat.gk_slabfail_last = sgkp->sk_slabfail_last.value.ul;
5140 sg_kstat.gk_reap_count = sgkp->sk_reap_count.value.ul;
5141 sg_kstat.gk_dropped_intrs = sgkp->sk_dropped_intrs.value.ul;
5142 } else {
5143 sgkp->sk_curtime.value.ul = ddi_get_lbolt();
5144 sgkp->sk_reconfigs.value.ul = sg_kstat.gk_reconfigs;
5145 sgkp->sk_reconfig_last.value.ul = sg_kstat.gk_reconfig_last;
5146 sgkp->sk_reaps.value.ul = sg_kstat.gk_reaps;
5147 sgkp->sk_reap_last.value.ul = sg_kstat.gk_reap_last;
5148 sgkp->sk_links.value.ul = sg_kstat.gk_links;
5149 sgkp->sk_link_last.value.ul = sg_kstat.gk_link_last;
5150 sgkp->sk_unlinks.value.ul = sg_kstat.gk_unlinks;
5151 sgkp->sk_unlink_last.value.ul = sg_kstat.gk_unlink_last;
5152 sgkp->sk_buffail.value.ul = sg_kstat.gk_buffail;
5153 sgkp->sk_buffail_last.value.ul = sg_kstat.gk_buffail_last;
5154 sgkp->sk_slabfail.value.ul = sg_kstat.gk_slabfail;
5155 sgkp->sk_slabfail_last.value.ul = sg_kstat.gk_slabfail_last;
5156 sgkp->sk_reap_count.value.ul = sg_kstat.gk_reap_count;
5157 sgkp->sk_dropped_intrs.value.ul = sg_kstat.gk_dropped_intrs;
5158 }
5159
5160 return (0);
5161 }
5162
5163 #ifdef DEBUG
5164 #define RW_HISTORY 100
5165 static uint_t rw_history[NCPU][RW_HISTORY];
5166 static int rw_index[NCPU];
5167 #endif /* DEBUG */
5168
5169 static int
5170 idn_rw_mem(idnop_t *idnop)
5171 {
5172 uint_t lo_off, hi_off;
5173 int rw, blksize, num;
5174 int cpuid;
5175 register int n, idx;
5176 char *ibuf, *obuf;
5177 char *smraddr;
5178 struct seg *segp;
5179 ulong_t randx;
5180 kmutex_t slock;
5181 kcondvar_t scv;
5182 static int orig_gstate = IDNGS_IGNORE;
5183 extern struct seg ktextseg;
5184
5185 #define RANDOM_INIT() (randx = ddi_get_lbolt())
5186 #define RANDOM(a, b) \
5187 (((a) >= (b)) ? \
5188 (a) : (((randx = randx * 1103515245L + 12345) % ((b)-(a))) + (a)))
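/*
 * RANDOM() is a simple linear congruential generator (the
 * classic 1103515245/12345 constants) seeded from lbolt; it
 * returns a pseudo-random value in [a, b) and is used only
 * to pick test offsets within the SMR.
 */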
5189
5190 RANDOM_INIT();
5191
5192 lo_off = idnop->rwmem.lo_off;
5193 hi_off = idnop->rwmem.hi_off;
5194 blksize = idnop->rwmem.blksize;
5195 num = idnop->rwmem.num;
5196 rw = idnop->rwmem.rw; /* 0 = rd, 1 = wr, 2 = rd/wr */
5197
5198 if (((hi_off > (uint_t)MB2B(IDN_SMR_SIZE)) || (lo_off >= hi_off) ||
5199 (blksize <= 0) || (blksize > (hi_off - lo_off)) || (num <= 0)) &&
5200 (idnop->rwmem.goawol == -1)) {
5201 return (EINVAL);
5202 }
5203
5204 if (idnop->rwmem.goawol && (orig_gstate == IDNGS_IGNORE)) {
5205 IDN_GLOCK_EXCL();
5206 cmn_err(CE_WARN, "IDN: Local domain going into IGNORE MODE!!");
5207 orig_gstate = idn.state;
5208 IDN_GSTATE_TRANSITION(IDNGS_IGNORE);
5209 IDN_GUNLOCK();
5210
5211 } else if (!idnop->rwmem.goawol && (orig_gstate != IDNGS_IGNORE)) {
5212 IDN_GLOCK_EXCL();
5213 cmn_err(CE_WARN,
5214 "IDN: Local domain restoring original state %s(%d)",
5215 idngs_str[orig_gstate], (int)orig_gstate);
5216 IDN_GSTATE_TRANSITION(orig_gstate);
5217 orig_gstate = IDNGS_IGNORE;
5218 IDN_GUNLOCK();
5219 }
5220 /*
5221 * Just requested AWOL.
5222 */
5223 if (num == 0)
5224 return (0);
5225 /*
5226 * Default READ only.
5227 */
5228 ibuf = (char *)kmem_alloc(blksize, KM_SLEEP);
5229 if (rw == 1) {
5230 /*
5231 * WRITE only.
5232 */
5233 obuf = ibuf;
5234 ibuf = NULL;
5235 } else if (rw == 2) {
5236 /*
5237 * READ/WRITE.
5238 */
5239 obuf = (char *)kmem_alloc(blksize, KM_SLEEP);
5240 for (segp = &ktextseg; segp; segp = AS_SEGNEXT(&kas, segp)) {
5241 if (segp->s_size >= blksize)
5242 break;
5243 }
5244 if (segp == NULL) {
5245 cmn_err(CE_WARN,
5246 "IDN: blksize (%d) too large", blksize);
kmem_free(ibuf, blksize);
kmem_free(obuf, blksize);
5247 return (EINVAL);
5248 }
5249 bcopy(segp->s_base, obuf, blksize);
5250 }
5251
5252 mutex_init(&slock, NULL, MUTEX_DEFAULT, NULL);
5253 cv_init(&scv, NULL, CV_DEFAULT, NULL);
5254
5255 cmn_err(CE_NOTE,
5256 "IDN: starting %s of %d blocks of %d bytes each...",
5257 (rw == 1) ? "W-ONLY" : (rw == 2) ? "RW" : "R-ONLY",
5258 num, blksize);
5259
5260 for (n = 0; n < num; n++) {
5261 uint_t rpos;
5262
5263 if ((hi_off - lo_off) > blksize)
5264 rpos = RANDOM(lo_off, (hi_off - blksize));
5265 else
5266 rpos = lo_off;
5267 smraddr = IDN_OFFSET2ADDR(rpos);
5268
5269 cpuid = (int)CPU->cpu_id;
5270 #ifdef DEBUG
5271 idx = rw_index[cpuid]++ % RW_HISTORY;
5272 rw_history[cpuid][idx] = rpos;
5273 #endif /* DEBUG */
5274
5275 switch (rw) {
5276 case 0:
5277 bcopy(smraddr, ibuf, blksize);
5278 break;
5279 case 1:
5280 bcopy(obuf, smraddr, blksize);
5281 break;
5282 case 2:
5283 if (n & 1)
5284 bcopy(obuf, smraddr, blksize);
5285 else
5286 bcopy(smraddr, ibuf, blksize);
5287 break;
5288 default:
5289 break;
5290 }
5291 if (!(n % 1000)) {
5292 int rv;
5293
5294 mutex_enter(&slock);
5295 rv = cv_reltimedwait_sig(&scv, &slock, hz,
5296 TR_CLOCK_TICK);
5297 mutex_exit(&slock);
5298 if (rv == 0)
5299 break;
5300 }
5301 }
5302
5303 cv_destroy(&scv);
5304 mutex_destroy(&slock);
5305
5306 if (ibuf)
5307 kmem_free(ibuf, blksize);
5308 if (obuf)
5309 kmem_free(obuf, blksize);
5310
5311 return (0);
5312 }
5313
5314 void
5315 inum2str(uint_t inum, char str[])
5316 {
5317 uint_t acknack;
5318
5319 str[0] = '\0';
5320
5321 acknack = (inum & IDNP_ACKNACK_MASK);
5322 inum &= ~IDNP_ACKNACK_MASK;
5323
5324 if (!inum && !acknack) {
5325 (void) strcpy(str, idnm_str[0]);
5326 return;
5327 }
5328
5329 if (inum == 0) {
5330 (void) strcpy(str, (acknack & IDNP_ACK) ? "ack" : "nack");
5331 } else {
5332 if (inum < IDN_NUM_MSGTYPES)
5333 (void) strcpy(str, idnm_str[inum]);
5334 else
5335 (void) sprintf(str, "0x%x?", inum);
5336 if (acknack) {
5337 if (acknack & IDNP_ACK)
5338 (void) strcat(str, "+ack");
5339 else
5340 (void) strcat(str, "+nack");
5341 }
5342 }
5343 }
5344
5345 boardset_t
5346 cpuset2boardset(cpuset_t portset)
5347 {
5348 register int c;
5349 register boardset_t bset;
5350
5351 bset = 0;
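/*
 * Each Starfire board contributes four cpu slots, so once
 * any cpu of a board is found in the set, record the board
 * and skip ahead to the next group of four ((c + 4) & ~3).
 */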
5352 for (c = 0; c < NCPU; )
5353 if (CPU_IN_SET(portset, c)) {
5354 BOARDSET_ADD(bset, CPUID_TO_BOARDID(c));
5355 c = (c + 4) & ~3;
5356 } else {
5357 c++;
5358 }
5359
5360 return (bset);
5361 }
5362
5363 void
5364 cpuset2str(cpuset_t cset, char buffer[])
5365 {
5366 register int c, n;
5367
5368 buffer[0] = '\0';
5369 for (c = n = 0; c < NCPU; c++) {
5370 if (!CPU_IN_SET(cset, c))
5371 continue;
5372 #ifdef DEBUG
5373 if (strlen(buffer) >= _DSTRLEN) {
5374 PR_PROTO("************* WARNING WARNING WARNING\n");
5375 PR_PROTO("cpuset2str(cpu = %d) buffer "
5376 "OVERFLOW <<<<<<\n", c);
5377 PR_PROTO("*******************************\n");
5378 (void) sprintf(&buffer[_DSTRLEN-6], "*OVER");
5379 return;
5380 }
5381 #endif /* DEBUG */
5382 if (n == 0)
5383 (void) sprintf(buffer, "%d", c);
5384 else
5385 (void) sprintf(buffer + strlen(buffer), ", %d", c);
5386 n++;
5387 }
5388 }
5389
5390 void
5391 domainset2str(domainset_t dset, char buffer[])
5392 {
5393 /*
5394 * Since domainset_t and boardset_t are the
5395 * same (max = MAX_DOMAINS = MAX_BOARDS) we
5396 * can just overload boardset2str().
5397 */
5398 mask2str((uint_t)dset, buffer, MAX_DOMAINS);
5399 }
5400
5401 void
5402 boardset2str(boardset_t bset, char buffer[])
5403 {
5404 mask2str((uint_t)bset, buffer, MAX_BOARDS);
5405 }
5406
5407 void
5408 mask2str(uint_t mask, char buffer[], int maxnum)
5409 {
5410 int n, i;
5411
5412 buffer[0] = '\0';
5413 for (i = n = 0; i < maxnum; i++) {
5414 if ((mask & (1 << i)) == 0)
5415 continue;
5416 if (n == 0)
5417 (void) sprintf(buffer, "%d", i);
5418 else
5419 (void) sprintf(buffer + strlen(buffer), ", %d", i);
5420 n++;
5421 }
5422 }
5423
5424 int
5425 idnxdc(int domid, idn_msgtype_t *mtp,
5426 uint_t arg1, uint_t arg2,
5427 uint_t arg3, uint_t arg4)
5428 {
5429 int rv, cpuid, tcpuid;
5430 uint_t cookie;
5431 uint64_t pdata;
5432 uint64_t dmv_word0, dmv_word1, dmv_word2;
5433 idn_domain_t *dp = &idn_domain[domid];
5434 extern kmutex_t xc_sys_mutex;
5435 extern int xc_spl_enter[];
5436 procname_t proc = "idnxdc";
5437
5438
5439 if (idn_snoop) {
5440 int bd;
5441 STRING(str);
5442 STRING(mstr);
5443 STRING(astr);
5444
5445 INUM2STR(mtp->mt_mtype, mstr);
5446 if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == 0) {
5447 INUM2STR(arg1, astr);
5448 (void) sprintf(str, "%s/%s", mstr, astr);
5449 } else {
5450 (void) strcpy(str, mstr);
5451 }
5452 if (dp->dcpu == IDN_NIL_DCPU)
5453 bd = -1;
5454 else
5455 bd = CPUID_TO_BOARDID(dp->dcpu);
5456 SNOOP_IDN(0, str, bd, arg1, arg2, arg3, arg4);
5457 }
5458
5459 /*
5460 * For NEGO messages we send the remote domain the cookie we
5461 * expect it to use in subsequent messages that it sends
5462 * to us (dcookie_recv).
5463 * For other messages, we must use the cookie that the
5464 * remote domain assigned to us for sending (dcookie_send).
5465 */
5466 if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == IDNP_NEGO)
5467 cookie = IDN_MAKE_COOKIE(dp->dcookie_recv, mtp->mt_cookie);
5468 else
5469 cookie = IDN_MAKE_COOKIE(dp->dcookie_send, mtp->mt_cookie);
5470
5471 pdata = IDN_MAKE_PDATA(mtp->mt_mtype, mtp->mt_atype, cookie);
5472
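/*
 * The cross-call payload is three 64-bit words: word0 carries
 * the IDN DMV interrupt number plus the packed message/ack
 * type and cookie, while words 1 and 2 carry the four 32-bit
 * arguments, two per word.
 */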
5473 dmv_word0 = DMV_MAKE_DMV(idn.intr.dmv_inum, pdata);
5474 dmv_word1 = ((uint64_t)arg1 << 32) | (uint64_t)arg2;
5475 dmv_word2 = ((uint64_t)arg3 << 32) | (uint64_t)arg4;
5476
5477 ASSERT((dp->dcpu != IDN_NIL_DCPU) ||
5478 (dp->dcpu_last != IDN_NIL_DCPU));
5479
5480 tcpuid = (dp->dcpu == IDN_NIL_DCPU) ?
5481 dp->dcpu_last : dp->dcpu;
5482
5483 if (tcpuid == IDN_NIL_DCPU) {
5484 PR_PROTO("%s:%d: cpu/cpu_last == NIL_DCPU\n",
5485 proc, domid);
5486 return (-1);
5487 }
5488
5489 mutex_enter(&xc_sys_mutex);
5490 cpuid = (int)CPU->cpu_id;
5491 xc_spl_enter[cpuid] = 1;
5492
5493 idnxf_init_mondo(dmv_word0, dmv_word1, dmv_word2);
5494
5495 rv = idnxf_send_mondo(STARFIRE_UPAID2HWMID(tcpuid));
5496
5497 xc_spl_enter[cpuid] = 0;
5498 mutex_exit(&xc_sys_mutex);
5499
5500 return (rv);
5501 }
5502
5503 void
5504 idnxdc_broadcast(domainset_t domset, idn_msgtype_t *mtp,
5505 uint_t arg1, uint_t arg2,
5506 uint_t arg3, uint_t arg4)
5507 {
5508 int d;
5509
5510 for (d = 0; d < MAX_DOMAINS; d++) {
5511 idn_domain_t *dp;
5512
5513 if (!DOMAIN_IN_SET(domset, d))
5514 continue;
5515
5516 dp = &idn_domain[d];
5517 if (dp->dcpu == IDN_NIL_DCPU)
5518 continue;
5519
5520 (void) IDNXDC(d, mtp, arg1, arg2, arg3, arg4);
5521 }
5522 }
5523
5524 #define PROM_SMRSIZE 0x1
5525 #define PROM_SMRADDR 0x2
5526 #define PROM_SMRPROPS (PROM_SMRSIZE | PROM_SMRADDR)
5527 /*
5528 * Locate the idn-smr-size property to determine the size of the SMR
5529 * region for the SSI. Its value inherently enables/disables SSI capability.
5530 */
5531 static int
5532 idn_prom_getsmr(uint_t *smrsz, uint64_t *paddrp, uint64_t *sizep)
5533 {
5534 pnode_t nodeid;
5535 int found = 0;
5536 int len;
5537 uint_t smrsize = 0;
5538 uint64_t obpaddr, obpsize;
5539 struct smraddr {
5540 uint32_t hi_addr;
5541 uint32_t lo_addr;
5542 uint32_t hi_size;
5543 uint32_t lo_size;
5544 } smraddr;
5545 procname_t proc = "idn_prom_getsmr";
5546
5547 bzero(&smraddr, sizeof (smraddr));
5548 /*
5549 * idn-smr-size is a property of the "memory" node and
5550 * is defined in megabytes.
5551 */
5552 nodeid = prom_finddevice("/memory");
5553
5554 if (nodeid != OBP_NONODE) {
5555 len = prom_getproplen(nodeid, IDN_PROP_SMRSIZE);
5556 if (len == sizeof (smrsize)) {
5557 (void) prom_getprop(nodeid, IDN_PROP_SMRSIZE,
5558 (caddr_t)&smrsize);
5559 found |= PROM_SMRSIZE;
5560 }
5561 len = prom_getproplen(nodeid, IDN_PROP_SMRADDR);
5562 if (len == sizeof (smraddr)) {
5563 (void) prom_getprop(nodeid, IDN_PROP_SMRADDR,
5564 (caddr_t)&smraddr);
5565 found |= PROM_SMRADDR;
5566 }
5567 }
5568
5569 if (found != PROM_SMRPROPS) {
5570 if ((found & PROM_SMRSIZE) == 0)
5571 cmn_err(CE_WARN,
5572 "IDN: 136: \"%s\" property not found, "
5573 "disabling IDN",
5574 IDN_PROP_SMRSIZE);
5575 if (smrsize && ((found & PROM_SMRADDR) == 0))
5576 cmn_err(CE_WARN,
5577 "IDN: 136: \"%s\" property not found, "
5578 "disabling IDN",
5579 IDN_PROP_SMRADDR);
5580 return (-1);
5581 }
5582
5583 if (smrsize == 0) {
5584 PR_SMR("%s: IDN DISABLED (idn_smr_size = 0)\n", proc);
5585 cmn_err(CE_NOTE, "!IDN: 137: SMR size is 0, disabling IDN");
5586
5587 } else if (smrsize > IDN_SMR_MAXSIZE) {
5588 PR_SMR("%s: IDN DISABLED (idn_smr_size too big %d > %d MB)\n",
5589 proc, smrsize, IDN_SMR_MAXSIZE);
5590 cmn_err(CE_WARN,
5591 "!IDN: 138: SMR size (%dMB) is too big (max = %dMB), "
5592 "disabling IDN",
5593 smrsize, IDN_SMR_MAXSIZE);
5594 smrsize = 0;
5595 } else {
5596 *smrsz = smrsize;
5597 found &= ~PROM_SMRSIZE;
5598 }
5599
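/*
 * OBP supplies the SMR base address and size as hi/lo 32-bit
 * pairs; recombine them into 64-bit values and verify that
 * the region is non-zero, large enough for the requested SMR
 * size, and aligned on an IDN_SMR_ALIGN boundary.
 */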
5600 obpaddr = ((uint64_t)smraddr.hi_addr << 32) |
5601 (uint64_t)smraddr.lo_addr;
5602 obpsize = ((uint64_t)smraddr.hi_size << 32) |
5603 (uint64_t)smraddr.lo_size;
5604
5605 if (obpsize == 0) {
5606 if (smrsize > 0) {
5607 cmn_err(CE_WARN, "!IDN: 139: OBP region for "
5608 "SMR is 0 length");
5609 }
5610 } else if (obpsize < (uint64_t)MB2B(smrsize)) {
5611 cmn_err(CE_WARN,
5612 "!IDN: 140: OBP region (%ld B) smaller "
5613 "than requested size (%ld B)",
5614 obpsize, MB2B(smrsize));
5615 } else if ((obpaddr & ((uint64_t)IDN_SMR_ALIGN - 1)) != 0) {
5616 cmn_err(CE_WARN,
5617 "!IDN: 141: OBP region (0x%lx) not on (0x%x) "
5618 "boundary", obpaddr, IDN_SMR_ALIGN);
5619 } else {
5620 *sizep = obpsize;
5621 *paddrp = obpaddr;
5622 found &= ~PROM_SMRADDR;
5623 }
5624
5625 return (found ? -1 : 0);
5626 }
5627
5628 void
5629 idn_init_autolink()
5630 {
5631 idnsb_event_t *sbp;
5632 procname_t proc = "idn_init_autolink";
5633
5634 mutex_enter(&idn.idnsb_mutex);
5635 if ((sbp = idn.idnsb_eventp) == NULL) {
5636 PR_PROTO("%s: IDN private sigb (event) area is NULL\n", proc);
5637 mutex_exit(&idn.idnsb_mutex);
5638 return;
5639 }
5640
5641 PR_PROTO("%s: marking domain IDN ready.\n", proc);
5642
5643 bzero(sbp, sizeof (*sbp));
5644
5645 sbp->idn_version = (uchar_t)idn.version;
5646 SSIEVENT_SET(sbp, SSIEVENT_BOOT, 0);
5647 (void) strncpy(sbp->idn_cookie_str, SSIEVENT_COOKIE,
5648 SSIEVENT_COOKIE_LEN);
5649 mutex_exit(&idn.idnsb_mutex);
5650 }
5651
5652 void
5653 idn_deinit_autolink()
5654 {
5655 idnsb_event_t *sbp;
5656 procname_t proc = "idn_deinit_autolink";
5657
5658 mutex_enter(&idn.idnsb_mutex);
5659 if ((sbp = idn.idnsb_eventp) == NULL) {
5660 PR_PROTO("%s: IDN private sigb (event) area is NULL\n", proc);
5661 mutex_exit(&idn.idnsb_mutex);
5662 return;
5663 }
5664
5665 PR_PROTO("%s: marking domain IDN unavailable.\n", proc);
5666
5667 sbp->idn_version = (uchar_t)idn.version;
5668 SSIEVENT_CLEAR(sbp, SSIEVENT_BOOT, 0);
5669 (void) strncpy(sbp->idn_cookie_str, SSIEVENT_COOKIE,
5670 SSIEVENT_COOKIE_LEN);
5671 mutex_exit(&idn.idnsb_mutex);
5672 }
5673
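/*
 * Build a cpuset from two 32-bit halves: "lower" covers cpus
 * 0-31 and "upper" covers cpus 32-63.
 */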
5674 void
5675 _make64cpumask(cpuset_t *csetp, uint_t upper, uint_t lower)
5676 {
5677 int c;
5678
5679 CPUSET_ZERO(*csetp);
5680
5681 for (c = 0; c < 32; c++) {
5682 if (lower & (1 << c)) {
5683 CPUSET_ADD(*csetp, c);
5684 }
5685 if (upper & (1 << c)) {
5686 CPUSET_ADD(*csetp, c + 32);
5687 }
5688 }
5689 }
5690
5691 uint_t
5692 _lower32cpumask(cpuset_t cset)
5693 {
5694 int c;
5695 uint_t set = 0;
5696
5697 for (c = 0; c < 32; c++)
5698 if (CPU_IN_SET(cset, c))
5699 set |= 1 << c;
5700
5701 return (set);
5702 }
5703
5704 uint_t
5705 _upper32cpumask(cpuset_t cset)
5706 {
5707 int c;
5708 uint_t set = 0;
5709
5710 for (c = 32; c < NCPU; c++)
5711 if (CPU_IN_SET(cset, c))
5712 set |= 1 << (c - 32);
5713
5714 return (set);
5715 }
5716
5717 #ifdef DEBUG
5718 int
5719 debug_idnxdc(char *f, int domid, idn_msgtype_t *mtp,
5720 uint_t a1, uint_t a2, uint_t a3, uint_t a4)
5721 {
5722 idn_domain_t *dp = &idn_domain[domid];
5723 int rv, cpuid, bd;
5724 static int xx = 0;
5725 STRING(str);
5726 STRING(mstr);
5727 STRING(astr);
5728
5729 xx++;
5730 INUM2STR(mtp->mt_mtype, mstr);
5731 if ((mtp->mt_mtype & IDNP_MSGTYPE_MASK) == 0) {
5732 INUM2STR(a1, astr);
5733 (void) sprintf(str, "%s/%s", mstr, astr);
5734 } else {
5735 (void) strcpy(str, mstr);
5736 }
5737
5738 if ((cpuid = dp->dcpu) == IDN_NIL_DCPU)
5739 bd = -1;
5740 else
5741 bd = CPUID_TO_BOARDID(cpuid);
5742
5743 SNOOP_IDN(0, str, bd, a1, a2, a3, a4);
5744
5745 PR_XDC("%s:%d:%d SENT: scpu = %d, msg = 0x%x(%s)\n",
5746 f, domid, xx, cpuid, mtp->mt_mtype, str);
5747 PR_XDC("%s:%d:%d S-DATA: a1 = 0x%x, a2 = 0x%x\n",
5748 f, domid, xx, a1, a2);
5749 PR_XDC("%s:%d:%d S-DATA: a3 = 0x%x, a4 = 0x%x\n",
5750 f, domid, xx, a3, a4);
5751
5752 rv = idnxdc(domid, mtp, a1, a2, a3, a4);
5753 if (rv != 0) {
5754 PR_XDC("%s:%d:%d: WARNING: idnxdc(cpu %d) FAILED\n",
5755 f, domid, xx, cpuid);
5756 }
5757
5758 return (rv);
5759 }
5760
5761 caddr_t
5762 _idn_getstruct(char *structname, int size)
5763 {
5764 caddr_t ptr;
5765 procname_t proc = "GETSTRUCT";
5766
5767 ptr = kmem_zalloc(size, KM_SLEEP);
5768
5769 PR_ALLOC("%s: ptr 0x%p, struct(%s), size = %d\n",
5770 proc, (void *)ptr, structname, size);
5771
5772 return (ptr);
5773 }
5774
5775 void
5776 _idn_freestruct(caddr_t ptr, char *structname, int size)
5777 {
5778 procname_t proc = "FREESTRUCT";
5779
5780 PR_ALLOC("%s: ptr 0x%p, struct(%s), size = %d\n",
5781 proc, (void *)ptr, structname, size);
5782
5783 ASSERT(ptr != NULL);
5784 kmem_free(ptr, size);
5785 }
5786 #endif /* DEBUG */
5787