1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * DESCRIPTION 30 * 31 * ttymux - Multiplexer driver for multiplexing termio compliant streams onto 32 * a single upper stream. 33 * 34 * ADD2FRONT macro can be used to specify the order in which a console 35 * device is put in the queue of multiplexed physical serial devices, 36 * during the association and disassociation of a console interface. 37 * When this macro is defined, the device is placed in front of the queue, 38 * otherwise by default it is placed at the end. 39 * Console I/O happens to each of the physical devices in the order of 40 * their position in this queue. 41 */ 42 43 #include <sys/types.h> 44 #include <sys/file.h> 45 #include <sys/stream.h> 46 #include <sys/strsubr.h> 47 #include <sys/strlog.h> 48 #include <sys/strsun.h> 49 #include <sys/modctl.h> 50 #include <sys/debug.h> 51 #include <sys/kbio.h> 52 #include <sys/devops.h> 53 #include <sys/errno.h> 54 #include <sys/stat.h> 55 #include <sys/kmem.h> 56 #include <sys/ddi.h> 57 #include <sys/consdev.h> 58 #include <sys/tty.h> 59 #include <sys/ptyvar.h> 60 #include <sys/termio.h> 61 #include <sys/fcntl.h> 62 #include <sys/mkdev.h> 63 #include <sys/ser_sync.h> 64 #include <sys/esunddi.h> 65 #include <sys/policy.h> 66 67 #include <sys/ttymux.h> 68 #include "ttymux_impl.h" 69 70 /* 71 * Extern declarations 72 */ 73 extern mblk_t *mkiocb(uint_t); 74 extern int nulldev(); 75 extern uintptr_t space_fetch(char *key); 76 77 extern int sm_ioctl_cmd(sm_uqi_t *, mblk_t *); 78 extern int ttymux_abort_ioctl(mblk_t *); 79 extern int ttymux_device_fini(sm_lqi_t *); 80 extern int ttymux_device_init(sm_lqi_t *); 81 82 /* 83 * Exported interfaces 84 */ 85 int sm_disassociate(int, sm_lqi_t *, ulong_t); 86 int sm_associate(int, sm_lqi_t *, ulong_t, uint_t, char *); 87 88 /* 89 * Variables defined here and visible only internally 90 */ 91 sm_ss_t *sm_ssp = 0; 92 static int sm_instance = 0; 93 static int smctlunit; 94 95 static uint_t sm_default_trflag = 0; 96 uint_t sm_max_units = 6; 97 uint_t sm_minor_cnt = 0; 98 static uint_t sm_refuse_opens = 0; 99 100 /* 101 * Local definitions. 102 */ 103 104 /* force these flags to be unset on console devices */ 105 static ulong_t sm_cmask = (ulong_t)(CRTSXOFF|CRTSCTS); 106 107 /* 108 * SECTION 109 * Implementation Section: 110 */ 111 void 112 sm_debug(char *msg, ...) 
{
	va_list args;
	char buf[256];
	int sz;

	va_start(args, msg);
	sz = vsnprintf(buf, sizeof (buf), msg, args);
	va_end(args);

	if (sz < 0)
		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1,
		    SL_TRACE, "vsnprintf parse error\n");
	else if (sz > sizeof (buf)) {
		char *b;
		size_t len = sz + 1;

		b = kmem_alloc(len, KM_SLEEP);
		va_start(args, msg);
		sz = vsnprintf(b, len, msg, args);
		va_end(args);
		if (sz > 0)
			(void) strlog(ddi_driver_major(sm_ssp->sm_dip),
			    sm_instance, 1, SL_TRACE, b);
		kmem_free(b, len);
	} else {

		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance,
		    1, SL_TRACE, buf);
	}
}

void
sm_log(char *msg, ...)
{
	va_list args;
	char buf[128];
	int sz;

	va_start(args, msg);
	sz = vsnprintf(buf, sizeof (buf), msg, args);
	va_end(args);

	if (sz < 0)
		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1,
		    SL_TRACE, "vsnprintf parse error\n");
	else if (sz > sizeof (buf)) {
		char *b;
		size_t len = sz + 1;

		b = kmem_alloc(len, KM_SLEEP);
		va_start(args, msg);
		sz = vsnprintf(b, len, msg, args);
		va_end(args);
		if (sz > 0)
			(void) strlog(ddi_driver_major(sm_ssp->sm_dip),
			    sm_instance, 1, SL_NOTE, b);
		kmem_free(b, len);
	} else {

		(void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance,
		    1, SL_NOTE, buf);
	}
}

/*
 * Should only be called if the caller can guarantee that the vnode
 * and/or the stream won't disappear while finding the dip.
 * This routine is only called during an I_PLINK request so it's safe.
 * The routine obtains the dev_t for a linked se stream.
 */
static void
sm_setdip(queue_t *q, sm_lqi_t *lqi)
{
	lqi->sm_dev = q && STREAM(q) ? STREAM(q)->sd_vnode->v_rdev : NODEV;
}

/*
 * Called from driver close, state change reports and I_PUNLINK ioctl.
 * A lower stream has been unlinked - clean up the state associated with it.
 */
void
sm_lqifree(sm_lqi_t *lqi)
{
	int mu_owned;
	sm_lqi_t **pplqi;

	ASSERT(mutex_owned(lqi->sm_umutex));
	ASSERT(SM_RQ(lqi) != 0);

	/*
	 * Clear all state associated with this lower queue except
	 * the identity of the queues themselves and the link id which
	 * can only be cleared by issuing a streams I_PUNLINK ioctl.
	 *
	 * The association of a lower queue is a two step process:
	 * 1. initialise the lower q data structure on I_PLINK
	 * 2. associate an upper q with the lower q on SM_CMD_ASSOCIATE.
	 *
	 * If step 2 has occurred then
	 * remove this lower queue info from the logical unit.
	 */
	if (lqi->sm_uqi) {
		sm_dbg('Y', ("lqifree unit %d, ", lqi->sm_uqi->sm_lunit));
		if ((mu_owned = mutex_owned(lqi->sm_uqi->sm_umutex)) == 0)
			LOCK_UNIT(lqi->sm_uqi);

		pplqi = &lqi->sm_uqi->sm_lqs;
		while (*pplqi != lqi) {
			ASSERT(*pplqi);
			pplqi = &((*pplqi)->sm_nlqi);
		}
		*pplqi = lqi->sm_nlqi;
		lqi->sm_uqi->sm_nlqs--;

		if (mu_owned == 0)
			UNLOCK_UNIT(lqi->sm_uqi);

		lqi->sm_uqi = 0;
	}
}

/*
 * Given a q return the associated lower queue data structure or NULL.
 * Return the data locked.
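 * If no queue matches, the first free entry (one with sm_linkid == 0) is
 * returned instead, or NULL if none is free; any entry returned is still
 * locked and the caller must release it with UNLOCK_UNIT().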
237 */ 238 static sm_lqi_t * 239 get_lqi_byq(queue_t *q) 240 { 241 int i; 242 sm_lqi_t *lqi, *flqi = 0; 243 244 for (i = 0; i < MAX_LQS; i++) { 245 lqi = &sm_ssp->sm_lqs[i]; 246 LOCK_UNIT(lqi); 247 if (flqi == 0 && lqi->sm_linkid == 0) /* assumes muxids != 0 */ 248 flqi = lqi; 249 else if (SM_RQ(lqi) == q || SM_WQ(lqi) == q) { 250 if (flqi) 251 UNLOCK_UNIT(flqi); 252 return (lqi); 253 } 254 else 255 UNLOCK_UNIT(lqi); 256 } 257 return (flqi); 258 } 259 260 /* 261 * Given a streams link identifier return the associated lower queue data 262 * structure or NULL. 263 */ 264 sm_lqi_t * 265 get_lqi_byid(int linkid) 266 { 267 int i; 268 sm_lqi_t *lqi; 269 270 if (linkid == 0) 271 return (NULL); 272 for (i = 0; i < MAX_LQS; i++) { 273 lqi = &sm_ssp->sm_lqs[i]; 274 if (lqi->sm_linkid == linkid) 275 return (lqi); 276 } 277 return (NULL); 278 } 279 280 /* 281 * Given a dev_t for a lower stream return the associated lower queue data 282 * structure or NULL. 283 */ 284 sm_lqi_t * 285 get_lqi_bydevt(dev_t dev) 286 { 287 int i; 288 sm_lqi_t *lqi; 289 290 if (dev == NODEV) 291 return (NULL); 292 293 for (i = 0; i < MAX_LQS; i++) { 294 lqi = &sm_ssp->sm_lqs[i]; 295 if (lqi->sm_dev == dev) 296 return (lqi); 297 } 298 return (NULL); 299 } 300 301 /* 302 * Determine whether the input flag is set on at least 303 * howmany queues. 304 */ 305 static int 306 sm_is_flag_set(sm_uqi_t *uqi, uint_t flag, uint_t howmany) 307 { 308 sm_lqi_t *lqi; 309 310 if (howmany == 0) 311 return (0); 312 313 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 314 if (lqi->sm_flags & flag) 315 if (--howmany == 0) 316 return (1); 317 } 318 return (0); 319 } 320 321 /* 322 * How many usable queues are associated with a given upper stream 323 */ 324 static int 325 sm_uwq_error(sm_uqi_t *uqi) 326 { 327 return (sm_is_flag_set(uqi, (WERROR_MODE|HANGUP_MODE), uqi->sm_nlqs)); 328 } 329 330 /* 331 * How many of the queues associated with a given upper stream 332 * - do not - have the given flags set. 333 */ 334 static int 335 sm_q_count(sm_uqi_t *uqi, uint_t flag) 336 { 337 sm_lqi_t *lqi; 338 int count = 0; 339 340 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 341 if ((lqi->sm_flags & flag) == 0) 342 count++; 343 } 344 return (count); 345 } 346 347 /* 348 * How many of the queues associated with a given upper stream 349 * - do not - have the given flags set. 350 */ 351 static int 352 sm_qs_without(sm_uqi_t *uqi, uint_t flag, uint_t ioflag) 353 { 354 sm_lqi_t *lqi; 355 int count = 0; 356 357 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 358 if ((lqi->sm_flags & flag) == 0 && 359 (lqi->sm_ioflag & ioflag) == 0) 360 count++; 361 } 362 return (count); 363 } 364 365 /* 366 * How many usable queues are associated with a given upper stream 367 */ 368 static int 369 sm_good_qs(sm_uqi_t *uqi) 370 { 371 return (sm_q_count(uqi, (WERROR_MODE|HANGUP_MODE))); 372 } 373 374 static int 375 sm_cnt_oqs(sm_uqi_t *uqi) 376 { 377 return (sm_qs_without(uqi, (WERROR_MODE|HANGUP_MODE), 378 (uint_t)FOROUTPUT)); 379 } 380 381 /* 382 * Send an ioctl downstream and remember that it was sent so that 383 * its response can be caught on the way back up. 
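 * Runs directly or as a qbufcall callback (id held in sm_bid).  At most one
 * ioctl - TCSETS, TIOCGSOFTCAR, TIOCMGET or TCGETS - is issued per call,
 * chosen from the WANT_* flags, and its ioc_id is saved in sm_piocid so the
 * reply can be recognised when it arrives back in sm_sendup().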
 */
static void
sm_issue_ioctl(void *arg)
{
	sm_lqi_t *lqi = arg;
	uint_t cmdflag = 0;
	queue_t *q = SM_WQ(lqi);
	int iocmd, size;

	LOCK_UNIT(lqi);

	lqi->sm_bid = 0;
	if ((lqi->sm_flags & (WERROR_MODE|HANGUP_MODE)) == 0 &&
	    (lqi->sm_flags & (WANT_CDSTAT|WANT_TCSET))) {
		mblk_t *pioc;

		if (lqi->sm_flags & WANT_TCSET) {
			lqi->sm_flags &= ~WANT_TCSET;
			iocmd = TCSETS;
			cmdflag = WANT_TCSET;
		} else if (lqi->sm_flags & WANT_SC) {
			lqi->sm_flags &= ~WANT_SC;
			iocmd = TIOCGSOFTCAR;
			cmdflag = WANT_SC;
		} else if (lqi->sm_flags & WANT_CD) {
			lqi->sm_flags &= ~WANT_CD;
			iocmd = TIOCMGET;
		} else if (lqi->sm_flags & WANT_CL) {
			lqi->sm_flags &= ~WANT_CL;
			iocmd = TCGETS;
			cmdflag = WANT_CL;
		} else {
			UNLOCK_UNIT(lqi);
			return;
		}

		if (pioc = mkiocb(iocmd)) {
			if (cmdflag == WANT_TCSET) {
				pioc->b_cont =
				    sm_allocb(sizeof (struct termios),
				    BPRI_MED);
				if (pioc->b_cont == 0) {
					freemsg(pioc);
					pioc = 0;
				} else {
					struct termios *tc = (struct termios *)
					    pioc->b_cont->b_wptr;

					bzero((caddr_t)tc,
					    sizeof (struct termios));
					tc->c_cflag = lqi->sm_ttycommon->
					    t_cflag;
					pioc->b_cont->b_rptr =
					    pioc->b_cont->b_wptr;
					pioc->b_cont->b_wptr +=
					    sizeof (struct termios);
				}
				size = sizeof (struct iocblk) +
				    sizeof (struct termios);
			}
			else
				size = sizeof (struct iocblk);
		}
		else
			size = sizeof (struct iocblk);

		if (pioc != 0) {

			lqi->sm_piocid = ((struct iocblk *)pioc->b_rptr)->
			    ioc_id;
			lqi->sm_flags |= SM_IOCPENDING;

			/* lqi->sm_flags |= cmdflag; */
			UNLOCK_UNIT(lqi);
			(void) putq(q, pioc);
		} else {
			UNLOCK_UNIT(lqi);
			lqi->sm_bid = qbufcall(WR(q), size, BPRI_MED,
			    sm_issue_ioctl, lqi);
		}
	}
	else
		UNLOCK_UNIT(lqi);
}

/*
 * Associate one of the driver's minor nodes with a serial device.
 */
int
sm_associate(int unit, sm_lqi_t *plqi, ulong_t tag, uint_t ioflag, char *dp)
{
	sm_uqi_t *uqi;
	int rval = 0;

	sm_dbg('Y', ("sm_associate(%d, %d, %d): ",
	    (plqi) ? plqi->sm_linkid : 0, unit, ioflag));
	/*
	 * Check the data is valid.
	 * Associate a lower queue with a logical unit.
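	 * The caller-supplied tag is remembered in the lqi so that only the
	 * entity that performed the association can later undo it (see
	 * sm_disassociate).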
483 */ 484 485 if (unit < 0 || unit >= NLUNITS || plqi == 0 || 486 (uqi = get_uqi(sm_ssp, unit)) == 0) { 487 sm_dbg('@', (" invalid: lqi=0x%p lui=0x%p:", plqi, uqi)); 488 rval = EINVAL; 489 } else { 490 if ((ioflag & FORIO) == 0) 491 ioflag = FORIO; 492 493 LOCK_UNIT(plqi); 494 495 if (plqi->sm_uqi) { 496 if (plqi->sm_uqi->sm_lunit == unit) { 497 if ((ioflag & (uint_t)FORIO) != 0) 498 plqi->sm_ioflag = 499 (ioflag & (uint_t)FORIO); 500 rval = 0; 501 } else { 502 sm_dbg('@', ("already associated with unit %d:", 503 plqi->sm_uqi->sm_lunit)); 504 rval = EINVAL; 505 } 506 } else { 507 508 LOCK_UNIT(uqi); 509 510 if ((ioflag & (uint_t)FORIO) != 0) 511 plqi->sm_ioflag = (ioflag & (uint_t)FORIO); 512 513 plqi->sm_ttycommon->t_cflag = uqi->sm_ttycommon-> 514 t_cflag; 515 plqi->sm_ttycommon->t_flags = uqi->sm_ttycommon-> 516 t_flags; 517 plqi->sm_uqi = uqi; 518 plqi->sm_mbits = 0; 519 plqi->sm_tag = tag; 520 521 if (*dp == '/') 522 (void) strncpy(plqi->sm_path, dp, MAXPATHLEN); 523 else 524 *(plqi->sm_path) = '\0'; 525 526 plqi->sm_flags |= WANT_TCSET; 527 #ifdef ADD2FRONT 528 plqi->sm_nlqi = uqi->sm_lqs; 529 uqi->sm_lqs = plqi; 530 #else 531 plqi->sm_nlqi = 0; 532 if (uqi->sm_lqs) { 533 sm_lqi_t *lq; 534 for (lq = uqi->sm_lqs; lq->sm_nlqi; 535 lq = lq->sm_nlqi) { 536 } 537 lq->sm_nlqi = plqi; 538 } else 539 uqi->sm_lqs = plqi; 540 #endif 541 uqi->sm_nlqs++; 542 543 (void) ttymux_device_init(plqi); 544 545 UNLOCK_UNIT(uqi); 546 rval = 0; 547 /* 548 * Everything looks good so it's now ok to enable lower 549 * queue processing. 550 * Note the lower queue should be enabled as soon as 551 * I_PLINK returns (used in sm_get_ttymodes etc). 552 * Schedule ioctls to obtain the terminal settings. 553 */ 554 555 if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_waitq) 556 plqi->sm_uqflags |= SM_UQVALID; 557 558 qenable(SM_RQ(plqi)); 559 if (plqi->sm_flags & (WANT_CDSTAT|WANT_TCSET)) { 560 /* 561 * Bypass the lower half of the driver (hence 562 * no qwriter) and apply the current termio 563 * settings on the lower stream. 564 */ 565 UNLOCK_UNIT(plqi); 566 if (plqi->sm_bid) { 567 qunbufcall(SM_WQ(plqi), plqi->sm_bid); 568 plqi->sm_bid = 0; 569 } 570 /* 571 * Only set cflags on the lower q if we know 572 * the settings on any other lower queue. 573 */ 574 sm_issue_ioctl(plqi); 575 LOCK_UNIT(plqi); 576 577 } 578 } 579 580 UNLOCK_UNIT(plqi); 581 } 582 sm_dbg('Y', ("sm_associate: rval=%d.\n", rval)); 583 return (rval); 584 } 585 586 /* 587 * Break an association between one of the driver's minor nodes and 588 * a serial device. 589 */ 590 int 591 sm_disassociate(int unit, sm_lqi_t *plqi, ulong_t tag) 592 { 593 sm_uqi_t *uqi; 594 int rval = 0; 595 596 sm_dbg('Y', ("sm_disassociate: link %d, unit %d: ", 597 (plqi) ? plqi->sm_linkid : 0, unit)); 598 /* 599 * Check the data is valid. 600 * Disassociate a lower queue with a logical unit. 
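	 * EPERM is returned if the supplied tag does not match the tag that
	 * was used when the queue was associated.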
601 */ 602 if (unit < 0 || unit >= NLUNITS || plqi == 0 || 603 (uqi = get_uqi(sm_ssp, unit)) == 0) { 604 sm_dbg('@', ("invalid: lqi=0x%p lui=0x%p", plqi, uqi)); 605 rval = EINVAL; 606 } else { 607 LOCK_UNIT(plqi); 608 609 if (plqi->sm_uqi == NULL) { 610 sm_dbg('@', ("unit not associated")); 611 rval = EINVAL; 612 } else if (plqi->sm_uqi->sm_lunit != unit) { 613 sm_dbg('@', ("unit and linkid not related", 614 plqi->sm_uqi->sm_lunit)); 615 rval = EINVAL; 616 } else if (plqi->sm_tag != tag) { 617 sm_dbg('@', 618 ("Invalid tag for TTYMUX_DISASSOC ioctl\n")); 619 rval = EPERM; 620 } else { 621 sm_dbg('Y', ("disassociating ")); 622 623 (void) ttymux_device_fini(plqi); 624 625 /* 626 * Indicate that carrier status is no 627 * longer required and that the upper 628 * queue should not be used by plqi 629 */ 630 plqi->sm_flags &= ~(WANT_CDSTAT|WANT_TCSET); 631 plqi->sm_uqflags &= ~(SM_UQVALID|SM_OBPCNDEV); 632 plqi->sm_ioflag = 0u; 633 634 sm_lqifree(plqi); 635 rval = 0; 636 } 637 UNLOCK_UNIT(plqi); 638 } 639 sm_dbg('Y', (" rval=%d.\n", rval)); 640 return (rval); 641 642 } 643 644 /* 645 * Streams helper routines; 646 */ 647 648 /* 649 * Schedule a qbufcall for an upper queue. 650 * Must be called within the perimiter of the parameter q. 651 * fn must reenable the q. 652 * Called: 653 * whenever a message must be placed on multiple queues and allocb fails; 654 */ 655 static void 656 sm_sched_uqcb(queue_t *q, int memreq, int pri, void (*fn)()) 657 { 658 sm_uqi_t *uqi = q->q_ptr; 659 660 if (uqi->sm_ttybid != 0) 661 qunbufcall(q, uqi->sm_ttybid); 662 663 noenable(q); 664 665 uqi->sm_ttybid = qbufcall(q, memreq, pri, fn, uqi); 666 } 667 668 /* 669 * qbufcall routine to restart the queues when memory is available. 670 */ 671 static void 672 sm_reenable_q(sm_uqi_t *uqi) 673 { 674 queue_t *wq = SM_WQ(uqi); 675 676 if ((uqi->sm_flags & SM_STOPPED) == 0) { 677 enableok(wq); 678 qenable(wq); 679 } 680 } 681 682 /* 683 * Place a message on the write queue of each stream associated with 684 * the given upper stream. 685 */ 686 static void 687 sm_senddown(sm_uqi_t *uqi) 688 { 689 sm_lqi_t *lqi; 690 691 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 692 if (lqi->sm_mp != 0) { 693 putnext(SM_WQ(lqi), lqi->sm_mp); 694 lqi->sm_mp = 0; 695 } 696 } 697 } 698 699 /* 700 * For each lower device that should receive a write message duplicate 701 * the message block. 702 */ 703 static int 704 sm_dupmsg(sm_uqi_t *uqi, mblk_t *mp) 705 { 706 sm_lqi_t *lqi; 707 mblk_t *origmp = mp; 708 709 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 710 lqi->sm_mp = 0; 711 if (lqi->sm_flags & WERROR_MODE) { 712 continue; 713 } 714 if ((lqi->sm_ioflag & (uint_t)FOROUTPUT) == 0) { 715 if (DB_TYPE(mp) == M_DATA) 716 continue; 717 } 718 if (lqi->sm_nlqi == 0) { 719 lqi->sm_mp = mp; 720 origmp = NULL; 721 } else if ((lqi->sm_mp = sm_copymsg(mp)) == 0) { 722 sm_lqi_t *flqi; 723 724 for (flqi = uqi->sm_lqs; flqi != lqi; 725 flqi = flqi->sm_nlqi) { 726 if (lqi->sm_mp) { 727 /* must have been sm_copymsg */ 728 sm_freemsg(lqi->sm_mp); 729 lqi->sm_mp = 0; 730 } 731 } 732 return (sm_cnt_oqs(uqi) * msgdsize(mp)); 733 } 734 } 735 if (origmp != NULL) 736 freemsg(origmp); 737 return (0); 738 } 739 740 /* 741 * Return 1 if all associated lower devices have room for another message 742 * otherwise return 0. 
743 */ 744 static int 745 sm_cansenddown(sm_uqi_t *uqi) 746 { 747 748 register sm_lqi_t *lqi; 749 750 if (uqi->sm_lqs == 0) 751 return (0); 752 753 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 754 if ((lqi->sm_flags & WERROR_MODE) == 0 && 755 canputnext(SM_WQ(lqi)) == 0) 756 return (0); 757 } 758 return (1); 759 } 760 761 /* 762 * Put a message down all associated lower queues. 763 * Return 1 if the q function was called. 764 */ 765 static int 766 sm_putqs(queue_t *q, mblk_t *mp, int (*qfn)()) 767 { 768 register sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr; 769 register int memreq; 770 int pri = (DB_TYPE(mp) < QPCTL) ? BPRI_MED : BPRI_HI; 771 int rval = 0; 772 773 if (uqi->sm_lqs == 0 || (uqi->sm_flags & WERROR_MODE)) { 774 775 sm_dbg('Q', ("sm_putqs: freeing (0x%p 0x%p).\n", uqi->sm_lqs, 776 uqi->sm_flags)); 777 freemsg(mp); 778 } else if (pri != BPRI_HI && sm_cansenddown(uqi) == 0) { 779 /* a lower q is flow controlled */ 780 (void) qfn(q, mp); 781 rval = 1; 782 } else if ((memreq = sm_dupmsg(uqi, mp)) == 0) { 783 784 sm_senddown(uqi); 785 786 } else { 787 sm_log("sm_putqs: msg 0x%x - can't alloc %d bytes (pri %d).\n", 788 DB_TYPE(mp), memreq, pri); 789 sm_sched_uqcb(q, memreq, pri, sm_reenable_q); 790 791 (void) qfn(q, mp); 792 rval = 1; 793 794 } 795 796 return (rval); 797 } 798 799 /* 800 * Service a streams link and unlink requests. 801 */ 802 static void 803 sm_link_req(queue_t *wq, mblk_t *mp) 804 { 805 struct linkblk *linkp; 806 int rval; 807 int cmd; 808 sm_lqi_t *plqi; 809 810 ASSERT(DB_TYPE(mp) == M_IOCTL); 811 812 cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 813 switch (cmd) { 814 815 case I_LINK: 816 case I_PLINK: 817 sm_dbg('G', ("sm_link_req: M_IOCTL %x (I_PLINK).\n", cmd)); 818 819 linkp = (struct linkblk *)mp->b_cont->b_rptr; 820 821 /* 822 * 1. Sanity check the link block. 823 * 2. Validate that the queue is not already linked 824 * (and resources available). 825 * 3. Validate that the lower queue is not associated with 826 * a logical unit. 827 * 4. Remember that this lower queue is linked to the driver. 
828 */ 829 if ((linkp == NULL) || (MBLKL(mp) < sizeof (*linkp)) || 830 linkp->l_qbot == NULL) { 831 sm_dbg('I', ("sm_link_req: invalid link block.\n")); 832 rval = EINVAL; 833 } else if ((plqi = get_lqi_byq(linkp->l_qbot)) == 0) { 834 sm_dbg('I', ("sm_link_req: out of resources.\n")); 835 rval = EBUSY; /* out of resources */ 836 } else if (plqi->sm_uqi) { 837 UNLOCK_UNIT(plqi); /* was aquired by get_lqi_byq */ 838 sm_dbg('I', ("sm_link_req: already associated.\n")); 839 rval = EBUSY; /* already linked */ 840 } else { 841 SM_WQ(plqi) = linkp->l_qbot; 842 SM_RQ(plqi) = OTHERQ(linkp->l_qbot); 843 844 linkp->l_qbot->q_ptr = 845 OTHERQ(linkp->l_qbot)->q_ptr = plqi; 846 plqi->sm_linkid = linkp->l_index; 847 UNLOCK_UNIT(plqi); /* was aquired by get_lqi_byq */ 848 849 sm_dbg('H', ("sm_link_req: linkid = %d.\n", 850 linkp->l_index)); 851 852 sm_setdip(linkp->l_qbot, plqi); 853 plqi->sm_ttycommon->t_flags = 0; 854 plqi->sm_ttycommon->t_cflag = 0; 855 plqi->sm_mbits = 0; 856 (void) ttymux_device_init(plqi); 857 rval = 0; 858 } 859 860 break; 861 862 case I_UNLINK: 863 case I_PUNLINK: 864 sm_dbg('G', ("sm_link_req: M_IOCTL (I_PUNLINK).\n")); 865 866 linkp = (struct linkblk *)mp->b_cont->b_rptr; 867 868 if ((linkp == NULL) || 869 (MBLKL(mp) < sizeof (*linkp)) || 870 linkp->l_qbot == NULL) { 871 rval = EINVAL; 872 } else if ((plqi = get_lqi_byid(linkp->l_index)) == 0) { 873 rval = EINVAL; 874 } else { 875 sm_uqi_t *uqi; 876 int werrmode; 877 878 /* 879 * Mark the lower q as invalid. 880 */ 881 sm_dbg('G', ("I_PUNLINK: freeing link %d\n", 882 linkp->l_index)); 883 884 if (plqi->sm_bid) { 885 qunbufcall(SM_RQ(plqi), plqi->sm_bid); 886 plqi->sm_bid = 0; 887 } 888 if (plqi->sm_ttybid) { 889 qunbufcall(SM_RQ(plqi), plqi->sm_ttybid); 890 plqi->sm_ttybid = 0; 891 } 892 893 uqi = plqi->sm_uqi; 894 895 896 (void) ttymux_device_fini(plqi); 897 898 if (uqi) 899 (void) sm_disassociate(uqi->sm_lunit, 900 plqi, plqi->sm_tag); 901 902 LOCK_UNIT(plqi); 903 904 plqi->sm_piocid = 0; 905 906 werrmode = (plqi->sm_flags & (WERROR_MODE|HANGUP_MODE)) 907 ? 1 : 0; 908 909 plqi->sm_mbits = 0; 910 plqi->sm_flags = 0; 911 912 ttycommon_close(plqi->sm_ttycommon); 913 /* SM_RQ(plqi) = SM_WQ(plqi) = 0; */ 914 plqi->sm_ttycommon->t_flags = 0; 915 plqi->sm_ttycommon->t_cflag = 0; 916 plqi->sm_ttycommon->t_iflag = 0; 917 plqi->sm_linkid = 0; 918 plqi->sm_dev = NODEV; 919 plqi->sm_hadkadbchar = 0; 920 plqi->sm_nachar = sm_ssp->sm_abs; 921 922 UNLOCK_UNIT(plqi); 923 if (uqi && 924 werrmode && 925 (uqi->sm_flags & FULLY_OPEN) && 926 sm_uwq_error(uqi) && 927 putnextctl(SM_RQ(uqi), M_HANGUP) == 0) { 928 sm_log("sm_link_req: putnextctl(M_HANGUP)" 929 " failed.\n"); 930 } 931 932 rval = 0; 933 } 934 935 break; 936 default: 937 rval = EINVAL; 938 } 939 if (rval != 0) 940 miocnak(wq, mp, 0, rval); 941 else 942 miocack(wq, mp, 0, 0); 943 } 944 945 static int 946 sm_getiocinfo(mblk_t *mp, struct sm_iocinfo *info) 947 { 948 switch (DB_TYPE(mp)) { 949 case M_COPYOUT: 950 info->sm_id = ((struct copyreq *)mp->b_rptr)->cq_id; 951 info->sm_cmd = ((struct copyreq *)mp->b_rptr)->cq_cmd; 952 info->sm_data = (((struct copyreq *)mp->b_rptr)->cq_size && 953 mp->b_cont) ? 
(void *)mp->b_cont->b_rptr : 0; 954 break; 955 case M_COPYIN: 956 info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id; 957 info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd; 958 info->sm_data = 0; 959 break; 960 case M_IOCACK: 961 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 962 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 963 /* the se driver has bug so we cannot use ioc_count */ 964 info->sm_data = (((struct iocblk *)mp->b_rptr)-> 965 ioc_error == 0 && mp->b_cont) ? 966 (void *)mp->b_cont->b_rptr : 0; 967 break; 968 case M_IOCNAK: 969 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 970 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 971 info->sm_data = 0; 972 break; 973 case M_IOCDATA: 974 info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id; 975 info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd; 976 info->sm_data = (((struct copyresp *)mp->b_rptr)-> 977 cp_rval == 0 && mp->b_cont) ? 978 (void *)mp->b_cont->b_rptr : 0; 979 break; 980 case M_IOCTL: 981 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 982 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 983 info->sm_data = 0; 984 break; 985 default: 986 return (EINVAL); 987 } 988 return (0); 989 } 990 991 /* 992 * Record the termio settings that have been set on the upper stream 993 */ 994 static int 995 sm_update_ttyinfo(mblk_t *mp, sm_uqi_t *uqi) 996 { 997 int err; 998 struct sm_iocinfo info; 999 1000 if ((err = sm_getiocinfo(mp, &info)) != 0) 1001 return (err); 1002 1003 switch (info.sm_cmd) { 1004 case TIOCSPPS: 1005 case TIOCGPPS: 1006 case TIOCGPPSEV: 1007 return (ENOTSUP); 1008 case TIOCGWINSZ: 1009 case TIOCSWINSZ: 1010 break; 1011 case TCSBRK: 1012 case TIOCSBRK: 1013 case TIOCCBRK: 1014 break; 1015 case TCSETSF: 1016 uqi->sm_flags |= FLUSHR_PEND; 1017 sm_dbg('I', ("TCSETSF: FLUSH is pending\n")); 1018 /*FALLTHROUGH*/ 1019 case TCSETSW: 1020 case TCSETS: 1021 case TCGETS: 1022 if (info.sm_data != 0) { 1023 ((struct termios *)info.sm_data)->c_cflag &= 1024 (tcflag_t)(~uqi->sm_cmask); 1025 uqi->sm_ttycommon->t_cflag = 1026 ((struct termios *)info.sm_data)->c_cflag; 1027 } 1028 break; 1029 case TCSETAF: 1030 sm_dbg('I', ("TCSETAF: FLUSH is pending\n")); 1031 uqi->sm_flags |= FLUSHR_PEND; 1032 /*FALLTHROUGH*/ 1033 case TCSETAW: 1034 case TCSETA: 1035 case TCGETA: 1036 if (info.sm_data != 0) { 1037 ((struct termio *)info.sm_data)->c_cflag &= 1038 (tcflag_t)(~uqi->sm_cmask); 1039 uqi->sm_ttycommon->t_cflag = 1040 (tcflag_t)((struct termio *)info.sm_data)->c_cflag; 1041 } 1042 break; 1043 case TIOCSSOFTCAR: 1044 case TIOCGSOFTCAR: 1045 if (info.sm_data != 0) { 1046 if (*(int *)info.sm_data == 1) 1047 uqi->sm_ttycommon->t_flags |= TS_SOFTCAR; 1048 else 1049 uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR; 1050 } 1051 break; 1052 case TIOCMSET: 1053 case TIOCMGET: 1054 if (info.sm_data != 0) 1055 uqi->sm_mbits = *(int *)info.sm_data; 1056 break; 1057 case TIOCMBIS: 1058 if (info.sm_data != 0) 1059 uqi->sm_mbits |= *(int *)info.sm_data; 1060 break; 1061 case TIOCMBIC: 1062 if (info.sm_data != 0) 1063 uqi->sm_mbits &= ~(*(int *)info.sm_data); 1064 break; 1065 default: 1066 return (EINVAL); 1067 /* NOTREACHED */ 1068 } /* end switch cmd */ 1069 1070 if ((uqi->sm_mbits & TIOCM_CD) || 1071 (uqi->sm_ttycommon->t_flags & TS_SOFTCAR) || 1072 (uqi->sm_ttycommon->t_cflag & CLOCAL)) 1073 uqi->sm_flags |= SM_CARON; 1074 else 1075 uqi->sm_flags &= ~SM_CARON; 1076 1077 return (0); 1078 } 1079 1080 /* 1081 * SECTION 1082 * STREAM's interface to the OS. 1083 * Routines directly callable from the OS. 
1084 */ 1085 1086 /* 1087 * Processes high priority messages comming from modules above the 1088 * multiplexor. 1089 * Return 1 if the queue was disabled. 1090 */ 1091 static int 1092 sm_hp_uwput(queue_t *wq, mblk_t *mp) 1093 { 1094 sm_uqi_t *uqi = (sm_uqi_t *)(wq->q_ptr); 1095 int rval = 0; 1096 sm_lqi_t *plqi; 1097 int msgtype = DB_TYPE(mp); 1098 1099 switch (msgtype) { 1100 1101 case M_FLUSH: 1102 /* 1103 * How to flush the bottom half: 1104 * putctl1(SM_WQ(plqi), *mp->b_rptr) 1105 * will work on the bottom half but if FLUSHR is set 1106 * when is the right time to flush the upper read queue. 1107 * 1108 * Could set uqi->sm_flags & WANT_FLUSH but then what happens 1109 * if FLUSHR is set and the driver sends up a FLUSHR 1110 * before it handles the current FLUSHR request 1111 * (if only there was an id for the message that could 1112 * be matched when it returns back from the drivers. 1113 * 1114 * Thus I'm going by the book - the bottom half acts like 1115 * a stream head and turns around FLUSHW back down to 1116 * the driver (see lrput). The upper half acts like a 1117 * driver and turns around FLUSHR: 1118 */ 1119 1120 sm_dbg('I', ("sm_hp_uwput: FLUSH request 0x%x\n", *mp->b_rptr)); 1121 /* flush the upper write queue */ 1122 if (*mp->b_rptr & FLUSHW) 1123 flushq(wq, FLUSHDATA); 1124 1125 /* 1126 * flush each associated lower write queue 1127 * and pass down the driver (ignore the FLUSHR and deal with 1128 * it when it comes back up the read side. 1129 */ 1130 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1131 if ((plqi->sm_flags & WERROR_MODE) == 0 && 1132 SM_WQ(plqi)) { 1133 sm_dbg('I', ("flush lq 0x%p\n", SM_WQ(plqi))); 1134 if (*mp->b_rptr & FLUSHW) 1135 flushq(SM_WQ(plqi), FLUSHDATA); 1136 (void) putnextctl1(SM_WQ(plqi), M_FLUSH, 1137 *mp->b_rptr); 1138 } 1139 } 1140 break; 1141 1142 case M_STARTI: 1143 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1144 plqi->sm_flags &= ~SM_ISTOPPED; 1145 if ((plqi->sm_flags & WERROR_MODE) == 0) 1146 (void) putnextctl(SM_WQ(plqi), msgtype); 1147 } 1148 break; 1149 1150 case M_STOPI: 1151 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1152 plqi->sm_flags |= SM_ISTOPPED; 1153 if ((plqi->sm_flags & WERROR_MODE) == 0) 1154 (void) putnextctl(SM_WQ(plqi), msgtype); 1155 } 1156 break; 1157 1158 case M_STOP: /* must never be queued */ 1159 uqi->sm_flags |= SM_STOPPED; 1160 noenable(wq); 1161 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) 1162 if ((plqi->sm_flags & WERROR_MODE) == 0) 1163 (void) putnextctl(SM_WQ(plqi), msgtype); 1164 1165 rval = 1; 1166 break; 1167 1168 case M_START: /* never be queued */ 1169 uqi->sm_flags &= ~SM_STOPPED; 1170 enableok(wq); 1171 qenable(wq); 1172 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) 1173 if ((plqi->sm_flags & WERROR_MODE) == 0) 1174 (void) putnextctl(SM_WQ(plqi), msgtype); 1175 1176 break; 1177 1178 case M_PCSIG: 1179 case M_COPYOUT: 1180 case M_COPYIN: 1181 case M_IOCACK: 1182 case M_IOCNAK: 1183 /* Wrong direction for message */ 1184 break; 1185 case M_READ: 1186 break; 1187 case M_PCPROTO: 1188 case M_PCRSE: 1189 default: 1190 sm_dbg('I', ("sm_hp_uwput: default case %d.\n", msgtype)); 1191 break; 1192 } /* end switch on high pri message type */ 1193 1194 freemsg(mp); 1195 return (rval); 1196 } 1197 1198 static int 1199 sm_default_uwioctl(queue_t *wq, mblk_t *mp, int (*qfn)()) 1200 { 1201 int err; 1202 struct iocblk *iobp; 1203 sm_uqi_t *uqi; 1204 1205 uqi = (sm_uqi_t *)(wq->q_ptr); 1206 iobp = (struct iocblk *)mp->b_rptr; 1207 1208 switch 
(iobp->ioc_cmd) { 1209 case TIOCEXCL: 1210 case TIOCNXCL: 1211 case TIOCSTI: 1212 /* 1213 * The three ioctl types we support do not require any 1214 * additional allocation and should not return a pending 1215 * ioctl state. For this reason it is safe for us to ignore 1216 * the return value from ttycommon_ioctl(). 1217 * Additionally, we translate any error response from 1218 * ttycommon_ioctl() into EINVAL. 1219 */ 1220 (void) ttycommon_ioctl(uqi->sm_ttycommon, wq, mp, &err); 1221 if (err < 0) 1222 miocnak(wq, mp, 0, EINVAL); 1223 else 1224 miocack(wq, mp, 0, 0); 1225 return (0); 1226 default: 1227 break; 1228 } 1229 if ((err = sm_update_ttyinfo(mp, uqi)) != 0) { 1230 miocnak(wq, mp, 0, err); 1231 return (0); 1232 } 1233 1234 /* 1235 * If uqi->sm_siocdata.sm_iocid just overwrite it since the stream 1236 * head will have timed it out 1237 */ 1238 uqi->sm_siocdata.sm_iocid = iobp->ioc_id; 1239 uqi->sm_siocdata.sm_acked = 0; 1240 uqi->sm_siocdata.sm_nacks = sm_good_qs(uqi); 1241 uqi->sm_siocdata.sm_acnt = 0; 1242 uqi->sm_siocdata.sm_policy = uqi->sm_policy; 1243 uqi->sm_siocdata.sm_flags = 0; 1244 sm_dbg('Z', (" want %d acks for id %d.\n", 1245 uqi->sm_siocdata.sm_nacks, iobp->ioc_id)); 1246 1247 return (sm_putqs(wq, mp, qfn)); 1248 } 1249 1250 /* 1251 * 1252 * sm_uwput - put function for an upper STREAM write. 1253 */ 1254 static int 1255 sm_uwput(queue_t *wq, mblk_t *mp) 1256 { 1257 sm_uqi_t *uqi; 1258 uchar_t msgtype; 1259 int cmd; 1260 struct iocblk *iobp; 1261 1262 uqi = (sm_uqi_t *)(wq->q_ptr); 1263 msgtype = DB_TYPE(mp); 1264 1265 ASSERT(uqi != 0 && sm_ssp != 0); 1266 1267 if (msgtype >= QPCTL && msgtype != M_IOCDATA) { 1268 (void) sm_hp_uwput(wq, mp); 1269 return (0); 1270 } 1271 1272 switch (DB_TYPE(mp)) { 1273 case M_DATA: 1274 case M_DELAY: 1275 case M_BREAK: 1276 default: 1277 (void) sm_putqs(wq, mp, putq); 1278 break; 1279 1280 case M_CTL: 1281 if (((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_CANONQUERY) { 1282 (void) putnextctl1(OTHERQ(wq), M_CTL, MC_NOCANON); 1283 } 1284 freemsg(mp); 1285 break; 1286 case M_IOCDATA: /* not handled as high pri because may need to putbq */ 1287 sm_dbg('M', ("sm_uwput(M_IOCDATA)\n")); 1288 /*FALLTHROUGH*/ 1289 case M_IOCTL: 1290 cmd = (msgtype == M_IOCDATA) ? 1291 ((struct copyresp *)mp->b_rptr)->cp_cmd : 1292 ((struct iocblk *)mp->b_rptr)->ioc_cmd; 1293 1294 iobp = (struct iocblk *)mp->b_rptr; 1295 iobp->ioc_rval = 0; 1296 1297 sm_dbg('M', ("sm_uwput(M_IOCTL:%d)\n", cmd)); 1298 1299 switch (cmd) { 1300 1301 case CONSGETABORTENABLE: 1302 iobp->ioc_error = ttymux_abort_ioctl(mp); 1303 DB_TYPE(mp) = iobp->ioc_error ? M_IOCNAK : M_IOCACK; 1304 qreply(wq, mp); 1305 break; 1306 case CONSSETABORTENABLE: 1307 iobp->ioc_error = 1308 secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0 ? 1309 EPERM : ttymux_abort_ioctl(mp); 1310 DB_TYPE(mp) = iobp->ioc_error ? 
M_IOCNAK : M_IOCACK; 1311 qreply(wq, mp); 1312 break; 1313 case TTYMUX_SETABORT: 1314 if (secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0) { 1315 iobp->ioc_error = EPERM; 1316 DB_TYPE(mp) = M_IOCNAK; 1317 qreply(wq, mp); 1318 break; 1319 } 1320 /*FALLTHROUGH*/ 1321 case TTYMUX_GETABORT: 1322 case TTYMUX_GETABORTSTR: 1323 case TTYMUX_ASSOC: 1324 case TTYMUX_DISASSOC: 1325 case TTYMUX_SETCTL: 1326 case TTYMUX_GETLINK: 1327 case TTYMUX_CONSDEV: 1328 case TTYMUX_GETCTL: 1329 case TTYMUX_LIST: 1330 (void) sm_ioctl_cmd(uqi, mp); 1331 qreply(wq, mp); 1332 break; 1333 case I_LINK: 1334 case I_PLINK: 1335 case I_UNLINK: 1336 case I_PUNLINK: 1337 qwriter(wq, mp, sm_link_req, PERIM_OUTER); 1338 break; 1339 case TCSETSW: 1340 case TCSETSF: 1341 case TCSETAW: 1342 case TCSETAF: 1343 case TCSBRK: 1344 if (wq->q_first) { 1345 sm_dbg('A', ("sm_uwput: TCSET-> on srv q.\n")); 1346 /* keep message order intact */ 1347 (void) putq(wq, mp); 1348 break; 1349 } 1350 /*FALLTHROUGH*/ 1351 default: 1352 (void) sm_default_uwioctl(wq, mp, putq); 1353 break; 1354 } 1355 1356 break; /* M_IOCTL */ 1357 1358 } /* end switch on message type */ 1359 1360 return (0); 1361 } 1362 1363 /* 1364 * sm_uwsrv - service function for an upper STREAM write. 1365 * 'sm_uwsrv' takes a q parameter. The q parameter specifies the queue 1366 * which is to be serviced. This function reads the messages which are on 1367 * this service queue and passes them to the appropriate lower driver queue. 1368 */ 1369 static int 1370 sm_uwsrv(queue_t *q) 1371 { 1372 mblk_t *mp; 1373 sm_uqi_t *uqi = (sm_uqi_t *)(q->q_ptr); 1374 int msgtype; 1375 1376 ASSERT(q == SM_WQ(uqi)); 1377 1378 /* 1379 * Empty the queue unless explicitly stopped. 1380 */ 1381 while (mp = getq(q)) { 1382 msgtype = DB_TYPE(mp); 1383 1384 if (msgtype >= QPCTL && msgtype != M_IOCDATA) 1385 if (sm_hp_uwput(q, mp)) { 1386 sm_dbg('T', ("sm_uwsrv: flowcontrolled.\n")); 1387 break; /* indicates that the is disabled */ 1388 } 1389 else 1390 continue; 1391 1392 if (uqi->sm_flags & SM_STOPPED) { 1393 (void) putbq(q, mp); 1394 sm_dbg('T', ("sm_uwsrv: SM_STOPPED.\n")); 1395 break; 1396 } 1397 1398 /* 1399 * Read any ttycommon data that may 1400 * change (TS_SOFTCAR, CREAD, etc.). 1401 */ 1402 switch (DB_TYPE(mp)) { 1403 case M_IOCTL: 1404 case M_IOCDATA: 1405 if (sm_default_uwioctl(q, mp, putbq)) 1406 return (0); 1407 break; 1408 1409 default: 1410 if (sm_putqs(q, mp, putbq)) 1411 return (0); 1412 } 1413 } 1414 return (0); 1415 } 1416 1417 /* 1418 * Lower write side service routine used for backenabling upstream 1419 * flow control. 1420 */ 1421 static int 1422 sm_lwsrv(queue_t *q) 1423 { 1424 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1425 queue_t *uwq; 1426 1427 LOCK_UNIT(lqi); 1428 if (lqi->sm_uqflags & SM_UQVALID) { 1429 /* 1430 * It's safe to lock uqi since lwsrv runs asynchronously 1431 * with the upper write routines so this cannot be an 1432 * upper half thread. While holding the lqi lock and 1433 * if SM_UQVALID is set we are guaranteed that 1434 * lqi->sm_uqi will be valid. 1435 */ 1436 sm_dbg('I', ("sm_lwsrv: re-enabling upper queue.\n")); 1437 1438 uwq = SM_WQ(lqi->sm_uqi); 1439 UNLOCK_UNIT(lqi); 1440 qenable(uwq); 1441 } else { 1442 UNLOCK_UNIT(lqi); 1443 } 1444 return (0); 1445 } 1446 1447 /* 1448 * Upper read queue ioctl response handler for messages 1449 * passed from the lower half of the driver. 
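 * The response is matched against the two outstanding ioctl records
 * (sm_piocdata for internally generated ioctls, sm_siocdata for those from
 * the stream head); unmatched or duplicate responses are freed rather than
 * sent upstream.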
 */
static int
sm_uriocack(queue_t *rq, mblk_t *mp)
{
	sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr;
	int err, flag;
	sm_iocdata_t *iodp;
	struct sm_iocinfo info;

	if ((err = sm_getiocinfo(mp, &info)) != 0) {
		sm_dbg('I', ("Unknown ioctl response\n"));
		return (err);
	}

	if (info.sm_id == uqi->sm_piocdata.sm_iocid) {
		iodp = &uqi->sm_piocdata;
	} else if (info.sm_id == uqi->sm_siocdata.sm_iocid) {
		iodp = &uqi->sm_siocdata;
	} else {
		sm_log("Unexpected ioctl response\n");
		sm_dbg('I', ("Unexpected ioctl response (id %d)\n",
		    info.sm_id));

		/*
		 * If the response is sent up it will result in
		 * duplicate ioctl responses. The ioctl has probably been
		 * timed out by the stream head so dispose of the response
		 * (since it has arrived too late).
		 */
		goto out;
	}

	flag = SM_COPYIN;

	switch (DB_TYPE(mp)) {
	case M_COPYOUT:
		flag = SM_COPYOUT;
		/*FALLTHRU*/
	case M_COPYIN:
		if (iodp->sm_flags & flag)
			goto out;
		iodp->sm_flags |= flag;

		break;
	case M_IOCACK:
		iodp->sm_ackcnt += 1;
		iodp->sm_acnt += 1;
		if (iodp->sm_policy == FIRSTACK) {
			if (iodp->sm_acnt == iodp->sm_nacks)
				iodp->sm_iocid = 0;
			if (iodp->sm_acnt == 1)
				iodp->sm_acked = 1;
			else
				goto out;
		} else {
			if (iodp->sm_acnt == iodp->sm_nacks) {
				iodp->sm_iocid = 0;
				iodp->sm_acked = 1;
			} else
				goto out;
		}
		break;
	case M_IOCNAK:
		iodp->sm_nakcnt += 1;
		iodp->sm_acnt += 1;
		if (iodp->sm_acnt == iodp->sm_nacks) {
			iodp->sm_iocid = 0;
			if (iodp->sm_acked == 0) {
				iodp->sm_acked = 1;
				break;
			}
		}
		goto out;
	default:
		goto out;
	}

	/*
	 * Merge the tty settings of each of the associated lower streams.
	 */
	if (info.sm_data)
		(void) sm_update_ttyinfo(mp, uqi);

	if (iodp == &uqi->sm_piocdata) {
		if (iodp->sm_iocid == 0) {
			uqi->sm_flags &= ~SM_IOCPENDING;
		}
	} else {
		sm_dbg('I', ("sm_uriocack: forwarding response for %d.\n",
		    info.sm_id));
		putnext(rq, mp);
		return (0);
	}
out:
	sm_dbg('I', ("sm_uriocack: freeing response for %d.\n", info.sm_id));
	freemsg(mp);
	return (0);
}

/*
 * Transfer a message from the lower read side of the multiplexer onto
 * the associated upper stream.
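 * Returns 1 if the upper stream is flow controlled and the message was not
 * consumed (the caller re-queues it), otherwise 0.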
 */
static int
sm_ursendup(queue_t *q, mblk_t *mp)
{
	sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr;

	if (!canputnext(q) && DB_TYPE(mp) < QPCTL) {
		sm_dbg('I', ("sm_ursendup: flow controlled.\n"));
		return (1);
	}

	switch (DB_TYPE(mp)) {
	case M_COPYIN:
	case M_COPYOUT:
	case M_IOCACK:
	case M_IOCNAK:
		(void) sm_uriocack(q, mp);
		break;
	case M_HANGUP:
		if (sm_uwq_error(uqi)) {
			/* there are no usable lower q's */
			uqi->sm_flags &= ~SM_CARON;
			putnext(q, mp);
		} else {
			/* there are still usable q's - don't send up */
			freemsg(mp);
		}
		break;
	case M_ERROR:
		if (sm_uwq_error(uqi)) {
			/* there are no usable lower q's */
			uqi->sm_flags &= ~SM_CARON;
			putnext(q, mp);
		} else if (*mp->b_rptr == NOERROR) {
			/* the error has cleared */
			uqi->sm_flags &= ~ERROR_MODE;
			putnext(q, mp);
		} else {
			/* there are still usable q's - don't send up */
			freemsg(mp);
		}
		break;
	case M_FLUSH:
		flushq(q, FLUSHDATA);
		putnext(q, mp);	/* time to use FLUSHR_PEND flag */
		break;
	case M_CTL:
		/* wrong direction - must have come from sm_close */
		uqi->sm_flags |= SM_CLOSE;
		sm_dbg('I', ("sm_ursrv: had SM_CLOSE.\n"));
		freemsg(mp);
		break;
	case M_UNHANGUP:
		/* just pass them all up - they're harmless */
		uqi->sm_flags |= SM_CARON;
		/* FALLTHROUGH */
	default:
		putnext(q, mp);
		break;
	}

	return (0);
}

/*
 * sm_urput - put function for a lower STREAM read.
 */
static int
sm_urput(queue_t *q, mblk_t *mp)
{
	if (sm_ursendup(q, mp) != 0)
		(void) putq(q, mp);

	return (0);
}

/*
 * Upper read side service routine.
 * Read side needs to be fast so only check for duplicate M_IOCTL acks.
 */
static int
sm_ursrv(queue_t *q)
{
	sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr;
	mblk_t *mp;
	int flags = uqi->sm_flags;

	while ((mp = getq(q))) {
		if (sm_ursendup(q, mp) != 0) {
			sm_dbg('I', ("sm_ursrv: flow controlled.\n"));
			(void) putbq(q, mp);
			uqi->sm_flags |= WANT_RENB;
			break;
		}
	}

	/*
	 * If the q service was called because it was no longer
	 * flow controlled then enable each of the driver queues.
	 */
	if ((flags & WANT_RENB) && !(uqi->sm_flags & WANT_RENB)) {
		sm_lqi_t *lqi;
		queue_t *drq;	/* read q of linked driver */

		uqi->sm_flags &= ~WANT_RENB;
		for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
			drq = SM_RQ(lqi)->q_next;
			if (drq && drq->q_first != 0)
				qenable(drq);
		}
	}

	return (0);
}

/*
 * Check a message sent from a linked device for abort requests and
 * for flow control.
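 * Returns 1 if the message was consumed here (abort processed, data
 * discarded or message queued because of flow control); returns 0 if the
 * caller should deliver it with sm_sendup().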
1670 */ 1671 static int 1672 sm_lrmsg_check(queue_t *q, mblk_t *mp) 1673 { 1674 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1675 1676 switch (DB_TYPE(mp)) { 1677 case M_DATA: 1678 LOCK_UNIT(lqi); 1679 /* 1680 * check for abort - only allow abort on I/O consoles 1681 * known to OBP - 1682 * fix it when we do polled io 1683 */ 1684 if ((lqi->sm_ioflag & (uint_t)FORINPUT) == 0) { 1685 freemsg(mp); 1686 UNLOCK_UNIT(lqi); 1687 return (1); 1688 } 1689 if ((lqi->sm_uqflags & SM_OBPCNDEV) && 1690 lqi->sm_ctrla_abort_on && 1691 abort_enable == KIOCABORTALTERNATE) { 1692 1693 uchar_t *rxc; 1694 boolean_t aborted = B_FALSE; 1695 1696 for (rxc = mp->b_rptr; 1697 rxc != mp->b_wptr; 1698 rxc++) 1699 1700 if (*rxc == *lqi->sm_nachar) { 1701 lqi->sm_nachar++; 1702 if (*lqi->sm_nachar == '\0') { 1703 abort_sequence_enter( 1704 (char *)NULL); 1705 lqi->sm_nachar = sm_ssp->sm_abs; 1706 aborted = B_TRUE; 1707 } 1708 } else 1709 lqi->sm_nachar = (*rxc == *sm_ssp-> 1710 sm_abs) ? 1711 sm_ssp-> 1712 sm_abs + 1 : 1713 sm_ssp->sm_abs; 1714 1715 if (aborted) { 1716 freemsg(mp); 1717 UNLOCK_UNIT(lqi); 1718 return (1); 1719 } 1720 } 1721 UNLOCK_UNIT(lqi); 1722 break; 1723 case M_BREAK: /* we'll eventually see this as a flush */ 1724 LOCK_UNIT(lqi); 1725 /* 1726 * Only allow abort on OBP devices. When polled I/O is 1727 * supported allow abort on any console device. 1728 * Parity errors are reported upstream as breaks so 1729 * ensure that there is no data in the message before 1730 * deciding whether to abort. 1731 */ 1732 if ((lqi->sm_uqflags & SM_OBPCNDEV) && /* console stream */ 1733 (mp->b_wptr - mp->b_rptr == 0 && 1734 msgdsize(mp) == 0)) { /* not due to parity */ 1735 1736 if (lqi->sm_break_abort_on && 1737 abort_enable != KIOCABORTALTERNATE) 1738 abort_sequence_enter((char *)NULL); 1739 1740 freemsg(mp); 1741 UNLOCK_UNIT(lqi); 1742 return (1); 1743 } else { 1744 UNLOCK_UNIT(lqi); 1745 } 1746 break; 1747 default: 1748 break; 1749 } 1750 1751 if (DB_TYPE(mp) >= QPCTL) 1752 return (0); 1753 1754 LOCK_UNIT(lqi); /* lock out the upper half */ 1755 if ((lqi->sm_uqflags & SM_UQVALID) && SM_RQ(lqi->sm_uqi)) { 1756 UNLOCK_UNIT(lqi); 1757 if (!canput(SM_RQ(lqi->sm_uqi))) { 1758 sm_dbg('I', ("sm_lrmsg_check: flow controlled.\n")); 1759 (void) putq(q, mp); 1760 return (1); 1761 } 1762 } else { 1763 UNLOCK_UNIT(lqi); 1764 } 1765 1766 return (0); 1767 } 1768 1769 /* 1770 * sm_sendup - deliver a message to the upper read side of the multiplexer 1771 */ 1772 static int 1773 sm_sendup(queue_t *q, mblk_t *mp) 1774 { 1775 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1776 1777 if (sm_ssp == NULL) { 1778 freemsg(mp); 1779 return (0); 1780 } 1781 1782 /* 1783 * Check for CD status change messages from driver. 1784 * (Remark: this is an se driver thread running at soft interupt 1785 * priority and the waiters are in user context). 1786 */ 1787 switch (DB_TYPE(mp)) { 1788 case M_DATA: 1789 case M_BREAK: /* we'll eventually see this as a flush */ 1790 break; 1791 1792 /* high priority messages */ 1793 case M_IOCACK: 1794 case M_IOCNAK: 1795 if ((lqi->sm_flags & SM_IOCPENDING) && lqi->sm_piocid == 1796 ((struct iocblk *)mp->b_rptr)->ioc_id) { 1797 freemsg(mp); 1798 lqi->sm_flags &= ~SM_IOCPENDING; 1799 sm_issue_ioctl(lqi); 1800 return (0); 1801 } 1802 break; 1803 case M_UNHANGUP: 1804 /* 1805 * If the driver can send an M_UNHANGUP it must be able to 1806 * accept messages from above (ie clear WERROR_MODE if set). 
1807 */ 1808 sm_dbg('E', ("lrput: M_UNHANGUP\n")); 1809 lqi->sm_mbits |= TIOCM_CD; 1810 lqi->sm_flags &= ~(WERROR_MODE|HANGUP_MODE); 1811 1812 break; 1813 1814 case M_HANGUP: 1815 sm_dbg('E', ("lrput: MHANGUP\n")); 1816 lqi->sm_mbits &= ~TIOCM_CD; 1817 lqi->sm_flags |= (WERROR_MODE|HANGUP_MODE); 1818 break; 1819 1820 case M_ERROR: 1821 1822 sm_dbg('E', ("lrput: MERROR\n")); 1823 /* 1824 * Tell the driver to flush rd/wr queue if its read/write error. 1825 * if its a read/write error flush rq/wq (type in first bytes). 1826 */ 1827 if ((mp->b_wptr - mp->b_rptr) == 2) { 1828 uchar_t rw = 0; 1829 1830 if (*mp->b_rptr == NOERROR) { 1831 /* not in error anymore */ 1832 lqi->sm_flags &= ~ERROR_MODE; 1833 lqi->sm_flags |= WANT_CD; 1834 } else { 1835 if (*mp->b_rptr != 0) { 1836 /* read error */ 1837 rw |= FLUSHR; 1838 lqi->sm_flags |= RERROR_MODE; 1839 } 1840 mp->b_rptr++; 1841 if (*mp->b_rptr != 0) { 1842 /* write error */ 1843 rw |= FLUSHW; 1844 lqi->sm_flags |= WERROR_MODE; 1845 } 1846 1847 mp->b_rptr--; 1848 /* has next driver done qprocsoff */ 1849 if (rw && OTHERQ(q)->q_next != NULL) { 1850 (void) putnextctl1(OTHERQ(q), M_FLUSH, 1851 rw); 1852 } 1853 } 1854 } else if (*mp->b_rptr != 0 && OTHERQ(q)->q_next != NULL) { 1855 sm_dbg('E', ("lrput: old style MERROR (?)\n")); 1856 1857 lqi->sm_flags |= (RERROR_MODE | WERROR_MODE); 1858 (void) putnextctl1(OTHERQ(q), M_FLUSH, FLUSHRW); 1859 } 1860 break; 1861 1862 case M_PCSIG: 1863 case M_SIG: 1864 break; 1865 case M_COPYOUT: 1866 case M_COPYIN: 1867 break; 1868 case M_FLUSH: 1869 /* flush the read queue and pass on up */ 1870 flushq(q, FLUSHDATA); 1871 break; 1872 default: 1873 break; 1874 } 1875 1876 LOCK_UNIT(lqi); /* lock out the upper half */ 1877 if (lqi->sm_uqflags & SM_UQVALID && SM_RQ(lqi->sm_uqi)) { 1878 UNLOCK_UNIT(lqi); 1879 (void) putq(SM_RQ(lqi->sm_uqi), mp); 1880 return (0); 1881 } else { 1882 sm_dbg('I', ("sm_sendup: uq not valid\n")); 1883 freemsg(mp); 1884 } 1885 UNLOCK_UNIT(lqi); 1886 1887 return (0); 1888 } 1889 1890 /* 1891 * sm_lrput - put function for a lower STREAM read. 1892 */ 1893 static int 1894 sm_lrput(queue_t *q, mblk_t *mp) 1895 { 1896 if (sm_lrmsg_check(q, mp) == 0) 1897 (void) sm_sendup(q, mp); 1898 return (0); 1899 } 1900 1901 /* 1902 * sm_lrsrv - service function for the lower read STREAM. 1903 */ 1904 static int 1905 sm_lrsrv(queue_t *q) 1906 { 1907 mblk_t *mp; 1908 1909 sm_dbg('I', ("sm_lrsrv: not controlled.\n")); 1910 while (mp = getq(q)) 1911 (void) sm_sendup(q, mp); 1912 1913 return (0); 1914 } 1915 1916 /* 1917 * Check whether a thread is allowed to open the requested device. 1918 */ 1919 static int 1920 sm_ok_to_open(sm_uqi_t *uqi, int protocol, cred_t *credp, int *abort_waiters) 1921 { 1922 int rval = 0; 1923 int proto; 1924 1925 *abort_waiters = 0; 1926 1927 switch (protocol) { 1928 case ASYNC_DEVICE: /* Standard async protocol */ 1929 if ((uqi->sm_protocol == NULL_PROTOCOL) || 1930 (uqi->sm_protocol == ASYN_PROTOCOL)) { 1931 /* 1932 * Lock out other incompatible protocol requests. 
			 */
			proto = ASYN_PROTOCOL;
			rval = 0;
		} else
			rval = EBUSY;
		break;

	case OUTLINE:	/* Outdial protocol */
		if ((uqi->sm_protocol == NULL_PROTOCOL) ||
		    (uqi->sm_protocol == OUTD_PROTOCOL)) {
			proto = OUTD_PROTOCOL;
			rval = 0;
		} else if (uqi->sm_protocol == ASYN_PROTOCOL) {
			/*
			 * check for dialout request on a line that is already
			 * open for dial in:
			 * kick off any thread that is waiting to fully open
			 */
			if (uqi->sm_flags & FULLY_OPEN)
				rval = EBUSY;
			else {
				proto = OUTD_PROTOCOL;
				*abort_waiters = 1;
			}
		} else
			rval = EBUSY;
		break;
	default:
		rval = ENOTSUP;
	}

	if (rval == 0 &&
	    (uqi->sm_ttycommon->t_flags & TS_XCLUDE) &&
	    secpolicy_excl_open(credp) != 0) {

		if (uqi->sm_flags & FULLY_OPEN) {
			rval = EBUSY;	/* exclusive device already open */
		} else {
			/* NB TS_XCLUDE can't be set during open so NOTREACHED */
			/* force any waiters to yield TS_XCLUDE */
			*abort_waiters = 1;
		}
	}

	if (rval == 0)
		uqi->sm_protocol = proto;

	sm_dbg('A', ("ok_to_open (0x%p, %d) proto=%d rval %d (wabort=%d)",
	    uqi, protocol, uqi->sm_protocol, rval, *abort_waiters));

	return (rval);
}

/* wait for memory to become available whilst performing a qwait */
/*ARGSUSED*/
static void dummy_callback(void *arg)
{}

/* ARGSUSED */
static int
sm_dump_msg(queue_t *q, mblk_t *mp)
{
	freemsg(mp);
	return (0);
}

/*
 * Wait for a message to arrive - must be called with exclusive
 * access at the outer perimeter.
 */
static int
sm_qwait_sig(sm_uqi_t *uqi, queue_t *q)
{
	int err;

	sm_dbg('C', ("sm_qwait_sig: waiting.\n"));

	uqi->sm_waitq = q;
	uqi->sm_nwaiters++;	/* required by the close routine */
	err = qwait_sig(q);
	if (--uqi->sm_nwaiters == 0)
		uqi->sm_waitq = 0;

	if (err == 0)
		err = EINTR;
	else if (q->q_ptr == 0)	/* can happen if there are multiple waiters */
		err = -1;
	else if (uqi->sm_flags & SM_CLOSE) {
		uqi->sm_flags &= ~SM_CLOSE;
		err = 1;	/* a different protocol has closed its stream */
	}
	else
		err = 0;	/* was worth waiting for */

	sm_dbg('C', ("sm_qwait_sig: rval %d\n", err));
	return (err);
}

/*
 * Defer the opening of one of the driver's devices until the state of each
 * associated lower stream is known.
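 * Waits (via sm_qwait_sig) until at least one lower queue is usable and
 * carrier is seen (SM_CARON), sending TIOCGSOFTCAR, TIOCMGET and TCGETS
 * down the lower streams to discover their current state.  Returns zero
 * once carrier is present and non-zero otherwise.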
2034 */ 2035 static int 2036 sm_defer_open(sm_uqi_t *uqi, queue_t *q) 2037 { 2038 uint_t cmdflags = WANT_CDSTAT; 2039 int err, nqs; 2040 2041 while ((nqs = sm_good_qs(uqi)) == 0) { 2042 sm_dbg('C', ("sm_defer_open: no good qs\n")); 2043 if (err = sm_qwait_sig(uqi, q)) 2044 return (err); 2045 } 2046 2047 while ((uqi->sm_flags & SM_CARON) == 0) { 2048 int iocmd; 2049 mblk_t *pioc; 2050 2051 sm_dbg('C', ("sm_defer_open: flags 0x%x cmdflags 0x%x\n", 2052 uqi->sm_flags, cmdflags)); 2053 if (cmdflags == 0) { 2054 if (err = sm_qwait_sig(uqi, q)) 2055 return (err); 2056 continue; /* waiting for an M_UNHANGUP */ 2057 } else if (cmdflags & WANT_SC) { 2058 cmdflags &= ~WANT_SC; 2059 iocmd = TIOCGSOFTCAR; 2060 } else if (cmdflags & WANT_CD) { 2061 cmdflags &= ~WANT_CD; 2062 iocmd = TIOCMGET; 2063 } else if (cmdflags & WANT_CL) { 2064 cmdflags &= ~WANT_CL; 2065 iocmd = TCGETS; 2066 } 2067 2068 if (uqi->sm_piocdata.sm_iocid == 0) { 2069 while ((pioc = mkiocb(iocmd)) == 0) { 2070 bufcall_id_t id = 2071 qbufcall(q, sizeof (struct iocblk), 2072 BPRI_MED, dummy_callback, 0); 2073 if (err = sm_qwait_sig(uqi, q)) { 2074 /* wait for the bufcall */ 2075 qunbufcall(q, id); 2076 return (err); 2077 } 2078 qunbufcall(q, id); 2079 } 2080 2081 uqi->sm_flags |= SM_IOCPENDING; 2082 2083 uqi->sm_piocdata.sm_iocid = 2084 ((struct iocblk *)pioc->b_rptr)->ioc_id; 2085 uqi->sm_piocdata.sm_acked = 0; 2086 uqi->sm_piocdata.sm_nacks = nqs; 2087 uqi->sm_piocdata.sm_acnt = 0; 2088 uqi->sm_piocdata.sm_ackcnt = uqi-> 2089 sm_piocdata.sm_nakcnt = 0; 2090 uqi->sm_piocdata.sm_policy = uqi->sm_policy; 2091 uqi->sm_piocdata.sm_flags = SM_INTERNALIOC; 2092 if (sm_putqs(WR(q), pioc, sm_dump_msg) != 0) { 2093 uqi->sm_piocdata.sm_iocid = 0; 2094 sm_log("sm_defer_open: bad putqs\n"); 2095 return (-1); 2096 } 2097 } 2098 2099 sm_dbg('C', ("sm_defer_open: flags 0x%x\n", uqi->sm_flags)); 2100 while ((uqi->sm_flags & SM_CARON) == 0 && 2101 (uqi->sm_flags & SM_IOCPENDING) != 0) 2102 if (err = sm_qwait_sig(uqi, q)) 2103 return (err); 2104 2105 sm_dbg('C', ("defer_open: uq flags 0x%x.\n", uqi->sm_flags)); 2106 } 2107 sm_dbg('C', ("defer_open: return 0.\n")); 2108 return (0); 2109 } 2110 2111 static int 2112 sm_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp) 2113 { 2114 int ftstat; 2115 int unit; 2116 int protocol; 2117 sm_uqi_t *uqi; 2118 int abort_waiters; 2119 2120 if (sm_ssp == NULL) 2121 return (ENXIO); 2122 /* 2123 * sflag = 0 => streams device. 
2124 */ 2125 if (sflag != 0 || DEV_TO_UNIT(*devp) >= NLUNITS) { 2126 sm_dbg('C', ("open: sflag=%d or bad dev_t.\n", sflag)); 2127 return (ENXIO); 2128 } 2129 2130 unit = DEV_TO_UNIT(*devp); 2131 protocol = DEV_TO_PROTOBITS(*devp); 2132 2133 uqi = get_uqi(sm_ssp, unit); 2134 2135 sm_dbg('C', ("open(0x%p, %d, 0x%x) :- unit=%d, proto=%d, uqi=0x%p\n", 2136 rq, *devp, flag, unit, protocol, uqi)); 2137 2138 if (uqi == 0) 2139 return (ENXIO); 2140 2141 if (sm_refuse_opens && unit > smctlunit && uqi->sm_nlqs == 0) 2142 return (ENXIO); 2143 2144 if (uqi->sm_flags & EXCL_OPEN && (flag & FEXCL)) { 2145 return (EBUSY); /* device in use */ 2146 } 2147 2148 if ((flag & FEXCL)) { 2149 if (secpolicy_excl_open(credp) != 0) 2150 return (EPERM); 2151 2152 if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_nwaiters > 0) 2153 return (EBUSY); /* device in use */ 2154 2155 uqi->sm_flags |= EXCL_OPEN; 2156 } 2157 2158 if (uqi->sm_protocol == NULL_PROTOCOL) { 2159 struct termios *termiosp; 2160 int len; 2161 2162 if (ddi_getlongprop(DDI_DEV_T_ANY, ddi_root_node(), 2163 DDI_PROP_NOTPROM, "ttymodes", (caddr_t)&termiosp, &len) 2164 == DDI_PROP_SUCCESS && 2165 (len == sizeof (struct termios))) { 2166 2167 sm_dbg('C', ("open: c_cflag=0x%x\n", 2168 termiosp->c_cflag)); 2169 2170 uqi->sm_ttycommon->t_iflag = termiosp->c_iflag; 2171 uqi->sm_ttycommon->t_cflag = termiosp->c_cflag; 2172 uqi->sm_ttycommon->t_stopc = termiosp->c_cc[VSTOP]; 2173 uqi->sm_ttycommon->t_startc = termiosp->c_cc[VSTART]; 2174 2175 /* 2176 * IGNBRK,BRKINT,INPCK,IXON,IXANY,IXOFF - drivers 2177 * PARMRK,IGNPAR,ISTRIP - how to report parity 2178 * INLCR,IGNCR,ICRNL,IUCLC - ldterm (sophisticated I/O) 2179 * IXON, IXANY, IXOFF - flow control input 2180 * CBAUD,CSIZE,CS5-8,CSTOPB,PARENB,PARODD,HUPCL, 2181 * RCV1EN,XMT1EN,LOBLK,XCLUDE,CRTSXOFF,CRTSCTS, 2182 * CIBAUD,PAREXT,CBAUDEXT,CIBAUDEXT,CREAD,CLOCAL 2183 */ 2184 2185 kmem_free(termiosp, len); 2186 } 2187 else 2188 bzero((caddr_t)uqi->sm_ttycommon, 2189 sizeof (uqi->sm_ttycommon)); 2190 2191 if (*devp == rconsdev) { 2192 uqi->sm_cmask = sm_cmask; 2193 uqi->sm_ttycommon->t_flags |= TS_SOFTCAR; 2194 } else { 2195 uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR; 2196 } 2197 2198 /* 2199 * Clear the default CLOCAL and TS_SOFTCAR flags since 2200 * they must correspond to the settings on the real devices. 2201 */ 2202 2203 uqi->sm_ttycommon->t_cflag &= ~(uqi->sm_cmask|CLOCAL); 2204 uqi->sm_mbits = 0; 2205 uqi->sm_policy = FIRSTACK; 2206 if (unit == 0 && sm_ssp->sm_ms == 0) 2207 sm_ssp->sm_ms = (sm_mux_state_t *) 2208 space_fetch(TTYMUXPTR); 2209 if (sm_ssp->sm_ms) { 2210 if (sm_ssp->sm_ms->sm_cons_stdin.sm_dev == *devp || 2211 sm_ssp->sm_ms->sm_cons_stdout.sm_dev == *devp) 2212 sm_ssp->sm_lconsole = uqi; 2213 } 2214 } 2215 2216 /* 2217 * Does this thread need to wait? 
2218 */ 2219 2220 sm_dbg('C', ("sm_open: %d %d 0x%p 0x%x\n", 2221 !(flag & (FNDELAY|FNONBLOCK)), !(protocol == OUTLINE), uqi->sm_lqs, 2222 uqi->sm_flags)); 2223 2224 tryopen: 2225 2226 abort_waiters = 0; 2227 if (ftstat = sm_ok_to_open(uqi, protocol, credp, &abort_waiters)) { 2228 sm_dbg('C', ("open failed stat=%d.\n", ftstat)); 2229 2230 if ((uqi->sm_flags & FULLY_OPEN) == 0 && uqi->sm_nwaiters == 0) 2231 uqi->sm_protocol = NULL_PROTOCOL; 2232 if (flag & FEXCL) 2233 uqi->sm_flags &= ~EXCL_OPEN; 2234 return (ftstat); 2235 } 2236 2237 if (abort_waiters) { 2238 uqi->sm_dev = *devp; 2239 /* different device wants to use the unit */ 2240 SM_RQ(uqi) = rq; 2241 SM_WQ(uqi) = WR(rq); 2242 } 2243 if (rq->q_ptr == 0) { 2244 sm_lqi_t *lqi; 2245 2246 uqi->sm_dev = *devp; 2247 rq->q_ptr = WR(rq)->q_ptr = uqi; 2248 SM_RQ(uqi) = rq; 2249 SM_WQ(uqi) = WR(rq); 2250 qprocson(rq); 2251 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 2252 LOCK_UNIT(lqi); 2253 lqi->sm_uqflags |= SM_UQVALID; 2254 UNLOCK_UNIT(lqi); 2255 } 2256 2257 sm_dbg('C', ("sm_open: SM_UQVALID set on lqs.\n")); 2258 } 2259 2260 if (*devp != rconsdev && BLOCKING(uqi, protocol, flag)) { 2261 2262 uqi->sm_flags |= WANT_CDSTAT; 2263 2264 do { 2265 /* 2266 * Wait for notifications of changes in the CLOCAL 2267 * and TS_SOFTCAR flags and a TIOCM_CD flag of a 2268 * TIOCMGET request (come in on the write side queue). 2269 */ 2270 2271 if ((ftstat = sm_defer_open(uqi, rq)) != EINTR) { 2272 if (ftstat) { 2273 goto tryopen; 2274 } else { 2275 continue; 2276 } 2277 } 2278 2279 if (uqi->sm_nwaiters == 0) { /* clean up */ 2280 /* 2281 * only opens on an asynchronous 2282 * protocols reach here so checking 2283 * nwaiters == 0 is sufficient to 2284 * ensure that no other thread 2285 * is waiting on this logical unit 2286 */ 2287 if ((uqi->sm_flags & FULLY_OPEN) == 0) { 2288 2289 sm_lqi_t *lqi; 2290 2291 uqi->sm_dev = NODEV; 2292 sm_dbg('C', ("sm_open FULLY_OPEN=0\n")); 2293 for (lqi = uqi->sm_lqs; lqi != 0; 2294 lqi = lqi->sm_nlqi) { 2295 LOCK_UNIT(lqi); 2296 lqi->sm_uqflags &= ~SM_UQVALID; 2297 UNLOCK_UNIT(lqi); 2298 } 2299 2300 qprocsoff(rq); 2301 rq->q_ptr = WR(rq)->q_ptr = 0; 2302 SM_RQ(uqi) = 0; 2303 SM_WQ(uqi) = 0; 2304 } 2305 } 2306 if ((uqi->sm_flags & FULLY_OPEN) == 0 && 2307 uqi->sm_nwaiters == 0) 2308 uqi->sm_protocol = NULL_PROTOCOL; 2309 if (flag & FEXCL) 2310 uqi->sm_flags &= ~EXCL_OPEN; 2311 sm_dbg('C', ("sm_open: done (ret %d).\n", ftstat)); 2312 return (ftstat); 2313 } while (BLOCKING(uqi, protocol, flag)); 2314 } 2315 2316 uqi->sm_flags |= FULLY_OPEN; 2317 2318 sm_dbg('C', ("sm_open done (ret %d).\n", ftstat)); 2319 return (ftstat); 2320 } 2321 2322 /* 2323 * Multiplexer device close routine. 2324 */ 2325 /*ARGSUSED*/ 2326 static int 2327 sm_close(queue_t *rq, int flag, cred_t *credp) 2328 { 2329 sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr; 2330 sm_lqi_t *lqi; 2331 2332 if (sm_ssp == NULL) 2333 return (ENXIO); 2334 2335 if (uqi == NULL) { 2336 sm_dbg('C', ("close: WARN:- q 0x%p already closed.\n", rq)); 2337 return (ENXIO); 2338 } 2339 2340 sm_dbg('C', ("close: uqi=0x%p unit=%d q=0x%p)\n", uqi, uqi->sm_lunit, 2341 rq)); 2342 2343 if (SM_RQ(uqi) != rq) 2344 sm_dbg('C', ("sm_close: rq != current uqi queue\n")); 2345 2346 if (uqi->sm_ttybid) { 2347 qunbufcall(SM_RQ(uqi), uqi->sm_ttybid); 2348 uqi->sm_ttybid = 0; 2349 } 2350 2351 /* 2352 * Tell all the linked queues that the upper queue has gone 2353 * Note close will never get called on a stream while there is a 2354 * thread blocked trying to open the same stream. 
/*
 * Multiplexer device close routine.
 */
/*ARGSUSED*/
static int
sm_close(queue_t *rq, int flag, cred_t *credp)
{
	sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr;
	sm_lqi_t *lqi;

	if (sm_ssp == NULL)
		return (ENXIO);

	if (uqi == NULL) {
		sm_dbg('C', ("close: WARN:- q 0x%p already closed.\n", rq));
		return (ENXIO);
	}

	sm_dbg('C', ("close: uqi=0x%p unit=%d q=0x%p)\n", uqi, uqi->sm_lunit,
	    rq));

	if (SM_RQ(uqi) != rq)
		sm_dbg('C', ("sm_close: rq != current uqi queue\n"));

	if (uqi->sm_ttybid) {
		qunbufcall(SM_RQ(uqi), uqi->sm_ttybid);
		uqi->sm_ttybid = 0;
	}

	/*
	 * Tell all the linked queues that the upper queue has gone.
	 * Note close will never get called on a stream while there is a
	 * thread blocked trying to open the same stream.
	 * If there is a blocked open on a different stream but on
	 * the same logical unit it will reset the lower queue flags.
	 */
	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
		LOCK_UNIT(lqi);
		lqi->sm_uqflags &= ~SM_UQVALID;
		UNLOCK_UNIT(lqi);
	}

	/*
	 * Turn off the STREAMS queue processing for this queue.
	 */
	qprocsoff(rq);

	/*
	 * Similarly we will never get here if there is a thread trying to
	 * open this stream.
	 */
	LOCK_UNIT(uqi);
	if (uqi->sm_waitq == 0)
		uqi->sm_flags = (uqi->sm_flags & SM_OBPCNDEV) ? SM_OBPCNDEV :
		    0U;

	uqi->sm_dev = NODEV;
	uqi->sm_protocol = NULL_PROTOCOL;
	ttycommon_close(uqi->sm_ttycommon);	/* frees any pending ioctl */

	uqi->sm_ttycommon->t_cflag = 0;
	uqi->sm_ttycommon->t_flags = 0;

	/*
	 * Reset the queue pointers to NULL.
	 * If a thread is qwaiting in the open routine it will recheck
	 * the q_ptr.
	 */
	rq->q_ptr = NULL;
	WR(rq)->q_ptr = NULL;
	UNLOCK_UNIT(uqi);

	if (sm_ssp->sm_lconsole == uqi) {
		/* this will never be the outdial device closing */
		sm_ssp->sm_lconsole = 0;
	}

	/*
	 * If there is another thread waiting for this close then unblock
	 * the thread by putting a message on its read queue.
	 */
	if (uqi->sm_waitq) {
		sm_dbg('C', ("close(0x%p): doing putctl on 0x%p\n",
		    rq, uqi->sm_waitq));
		if (rq == uqi->sm_waitq)
			sm_log("close: waitq and closeq are same q\n");
		(void) putctl(uqi->sm_waitq, M_CTL);
	}

	uqi->sm_flags &= ~(EXCL_OPEN | FULLY_OPEN);
	sm_dbg('C', ("close: returning ok.\n"));
	return (0);
}

/*
 * Initialise the software abort sequence for use when one of the
 * driver's nodes provides the system console.
 */
static void
sm_set_abort()
{
	/* default sequence: CR ~ CTRL-B, NUL-terminated for strcpy() below */
	char ds[4] = { '\r', '~', CNTRL('b'), '\0' };
	char as[SM_MAX_ABSLEN];
	int len = SM_MAX_ABSLEN;

	if (ddi_prop_op(DDI_DEV_T_ANY, sm_ssp->sm_dip, PROP_LEN_AND_VAL_BUF, 0,
	    "abort-str", as, &len) != DDI_PROP_SUCCESS ||
	    (len = strlen(as)) < SM_MIN_ABSLEN) {
		(void) strcpy(as, ds);
		len = strlen(as);
	} else {
		char *s;
		int i;

		/* translate caret notation ("^x") into control characters */
		for (s = as, i = 0; i < len-1; i++, s++) {
			if (as[i] == '^' && as[i+1] >= 'a' && as[i+1] <= 'z') {
				*s = as[i+1] - 'a' + 1;
				i++;
			} else {
				*s = as[i];
			}
		}
		*s++ = as[i];
		*s = '\0';
		len = strlen(as);
	}

	if (len < SM_MIN_ABSLEN)
		(void) strcpy(sm_ssp->sm_abs, ds);
	else
		(void) strcpy(sm_ssp->sm_abs, as);
}
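/*
 * Illustrative sketch (not compiled into the driver): the caret-escape
 * translation that sm_set_abort() applies to the "abort-str" property.
 * For example, "^b" in the property maps to CTRL-B (0x02), matching the
 * driver's default sequence of CR, '~', CTRL-B. The helper name below is
 * hypothetical.
 *
 *	static void
 *	decode_caret(const char *in, char *out)
 *	{
 *		while (*in != '\0') {
 *			if (in[0] == '^' && in[1] >= 'a' && in[1] <= 'z') {
 *				*out++ = in[1] - 'a' + 1;
 *				in += 2;
 *			} else {
 *				*out++ = *in++;
 *			}
 *		}
 *		*out = '\0';
 *	}
 */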
/*
 * sm_attach - initialisation routine per driver instance.
 */
static int
sm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int unit;
	char name[32];
	sm_uqi_t *uqi;
	sm_lqi_t *lqip;

	/*
	 * Is this an attach?
	 */
	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	/*
	 * Validate the instance number (sm is a single instance driver).
	 */
	if (sm_ssp) {	/* only one instance allowed */
		return (DDI_FAILURE);
	}

	sm_instance = ddi_get_instance(dip);

	/*
	 * Create the default minor node which will become the console
	 * (create it with three different names):
	 *	con, which appears in the /dev filesystem;
	 *	input, which matches the prom /multiplexer:input node;
	 *	output, which matches the prom /multiplexer:output node.
	 * Also create a minor node for control operations.
	 */
	if (ddi_create_minor_node(dip, "con", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "input", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "output", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "ctl", S_IFCHR, 1,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {

		cmn_err(CE_WARN, "sm_attach: create minors failed.\n");
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	smctlunit = 1;

	/*
	 * Allocate private state for this instance.
	 */
	sm_ssp = (sm_ss_t *)kmem_zalloc(sizeof (sm_ss_t), KM_SLEEP);

	/*
	 * Initialise per instance data.
	 */
	sm_ssp->sm_dip = dip;

	/*
	 * Get required debug level.
	 */
	sm_ssp->sm_trflag = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-trlv", sm_default_trflag);

	sm_max_units = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-max-units", sm_max_units);
	sm_minor_cnt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-minor-cnt", 0);

	sm_refuse_opens = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-refuse-opens", sm_refuse_opens);

	sm_ssp->sm_ctrla_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-ctrla-abort-on", 1);
	sm_ssp->sm_break_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-break-abort-on", 0);

	sm_set_abort();

	sm_ssp->sm_lqs = (sm_lqi_t *)kmem_zalloc(sizeof (sm_lqi_t) * MAX_LQS,
	    KM_SLEEP);
	sm_ssp->sm_uqs = (sm_uqi_t *)kmem_zalloc(sizeof (sm_uqi_t) * NLUNITS,
	    KM_SLEEP);

	/*
	 * Create the additional serial minor nodes (sma, smb, ... and the
	 * corresponding ",cu" outdial nodes) requested via "sm-minor-cnt".
	 */
	for (unit = 2; unit < NLUNITS && unit < sm_minor_cnt + 2; unit++) {

		if (snprintf(name, sizeof (name), "sm%c", 'a' + unit - 2) >
		    sizeof (name)) {
			cmn_err(CE_WARN,
			    "sm_attach: create device for unit %d failed.\n",
			    unit);
		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
		    unit, DDI_NT_SERIAL, NULL) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		if (snprintf(name, sizeof (name), "sm%c,cu", 'a' + unit - 2) >
		    sizeof (name)) {
			cmn_err(CE_WARN,
			    "sm_attach: create cu device for unit %d failed.\n",
			    unit);
			continue;
		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
		    unit|OUTLINE, DDI_NT_SERIAL_DO, NULL) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}
	}
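
	/*
	 * Initialise the per-unit state: one sm_uqi_t for each logical
	 * (upper) unit and one sm_lqi_t for each potential linked lower
	 * stream.
	 */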
	for (unit = 0; unit < NLUNITS; unit++) {

		uqi = get_uqi(sm_ssp, unit);
		uqi->sm_lqs = 0;
		uqi->sm_dev = NODEV;
		uqi->sm_nlqs = 0;
		uqi->sm_lunit = unit;
		uqi->sm_protocol = NULL_PROTOCOL;
		mutex_init(uqi->sm_umutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(uqi->sm_ucv, NULL, CV_DRIVER, NULL);
		mutex_init(&uqi->sm_ttycommon->t_excl, NULL,
		    MUTEX_DRIVER, NULL);
	}

	for (unit = 0; unit < MAX_LQS; unit++) {
		lqip = get_lqi(sm_ssp, unit);
		lqip->sm_unit = unit;
		lqip->sm_hadkadbchar = 0;
		lqip->sm_nachar = sm_ssp->sm_abs;
		lqip->sm_ioflag = FORIO;
		lqip->sm_ctrla_abort_on = sm_ssp->sm_ctrla_abort_on;
		lqip->sm_break_abort_on = sm_ssp->sm_break_abort_on;
		mutex_init(lqip->sm_umutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(lqip->sm_ucv, NULL, CV_DRIVER, NULL);
		mutex_init(&lqip->sm_ttycommon->t_excl, NULL,
		    MUTEX_DRIVER, NULL);
	}

	return (DDI_SUCCESS);
}

/*
 * sm_detach - detach routine per driver instance.
 */
static int
sm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sm_uqi_t *lu;
	sm_lqi_t *pu;
	int unit;

	/*
	 * Is this a detach request? (sm is a single instance driver).
	 */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (sm_ssp == NULL)
		return (DDI_FAILURE);

	sm_dbg('V', ("detach ..."));

	/*
	 * Check that all the upper and lower queues are closed.
	 */
	for (unit = 0; unit < NLUNITS; unit++) {
		lu = &sm_ssp->sm_uqs[unit];
		if (lu && lu->sm_protocol != NULL_PROTOCOL) {
			sm_dbg('V', ("detach: upper unit still open.\n"));
			return (DDI_FAILURE);
		}
	}
	for (unit = 0; unit < MAX_LQS; unit++) {
		pu = &sm_ssp->sm_lqs[unit];
		if (pu && pu->sm_linkid != 0) {
			sm_dbg('V', ("detach: lower unit still linked (%d)\n",
			    pu->sm_linkid));
			return (DDI_FAILURE);
		}
	}

	for (unit = 0; unit < NLUNITS; unit++) {
		lu = &sm_ssp->sm_uqs[unit];
		mutex_destroy(lu->sm_umutex);
		cv_destroy(lu->sm_ucv);
		mutex_destroy(&lu->sm_ttycommon->t_excl);
	}
	for (unit = 0; unit < MAX_LQS; unit++) {
		pu = &sm_ssp->sm_lqs[unit];
		mutex_destroy(pu->sm_umutex);
		cv_destroy(pu->sm_ucv);
		mutex_destroy(&pu->sm_ttycommon->t_excl);
	}

	/*
	 * Tidy up per instance state.
	 */
	kmem_free(sm_ssp->sm_lqs, sizeof (sm_lqi_t) * MAX_LQS);
	kmem_free(sm_ssp->sm_uqs, sizeof (sm_uqi_t) * NLUNITS);
	kmem_free(sm_ssp, sizeof (sm_ss_t));

	sm_ssp = 0;

	/*
	 * Remove all of the devices created in attach.
	 */
	ddi_remove_minor_node(dip, NULL);

	return (DDI_SUCCESS);
}

/*
 * SECTION
 * Driver interface to the OS.
 */

/*
 * The driver is responsible for managing the mapping between file system
 * device types (major/minor pairs) and the corresponding driver instance
 * or device information pointer (dip).
 * sm_info - return the instance or dip corresponding to the dev_t.
 */
/*ARGSUSED*/
static int
sm_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int res = DDI_SUCCESS;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (sm_ssp == NULL)
			res = DDI_FAILURE;
		else
			*result = (void *)sm_ssp->sm_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;	/* single instance driver */
		break;

	default:
		res = DDI_FAILURE;
		break;
	}

	return (res);
}

/*
 * End of driver implementation
 */

/*
 * Loadable module interface to the kernel
 */

/*
 * First, the STREAMS-specific interface.
 */

/*
 * Solaris driver/STREAM initialisation structures.
 */
static struct module_info uinfo =
{
	SM_MOD_ID,
	TTYMUX_DRVNAME,
	0,		/* min packet size */
	INFPSZ,		/* max packet size */
	2048,		/* high water mark */
	256		/* low water mark */
};
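
/*
 * The upper queues use conventional high/low water marks (2048/256 bytes)
 * so that normal STREAMS flow control applies to data queued on the
 * multiplexed upper stream.
 */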
/*
 * Use zero water marks because the lower queues are used only for flow
 * control.
 */
static struct module_info linfo =
{
	SM_MOD_ID,
	TTYMUX_DRVNAME,
	0,		/* min packet size */
	INFPSZ,		/* max packet size */
	0,		/* high water mark */
	0		/* low water mark */
};


/*
 * Solaris upper read STREAM initialisation structure.
 */
static struct qinit urinit =
{
	sm_urput,	/* put */
	sm_ursrv,	/* service */
	sm_open,	/* open */
	sm_close,	/* close */
	NULL,		/* admin */
	&uinfo,		/* module info */
	NULL		/* stats */
};

/*
 * Solaris upper write STREAM initialisation structure.
 */
static struct qinit uwinit =
{
	sm_uwput,
	sm_uwsrv,
	NULL,
	NULL,
	NULL,
	&uinfo,
	NULL
};

/*
 * Solaris lower read STREAM initialisation structure.
 */
static struct qinit lrinit =
{
	sm_lrput,
	sm_lrsrv,
	NULL,
	NULL,
	NULL,
	&linfo,
	NULL
};

/*
 * Solaris lower write STREAM initialisation structure.
 */
static struct qinit lwinit =
{
	putq,
	sm_lwsrv,
	NULL,
	NULL,
	NULL,
	&linfo,
	NULL
};

/*
 * Multiplexing STREAM structure.
 */
struct streamtab sm_streamtab =
{
	&urinit,
	&uwinit,
	&lrinit,
	&lwinit
};

/*
 * Driver operations structure (struct cb_ops) and
 * driver dynamic loading functions (struct dev_ops).
 */

/*
 * Fold the STREAMS interface to the kernel into the driver interface
 * to the OS.
 */
DDI_DEFINE_STREAM_OPS(sm_ops,
	nulldev, nulldev,
	sm_attach, sm_detach, nodev,
	sm_info, (D_NEW | D_MTQPAIR | D_MTOUTPERIM | D_MTOCEXCL | D_MP),
	&sm_streamtab);

/*
 * Driver module information.
 */
extern struct mod_ops mod_driverops;

static struct modldrv modldrv =
{
	&mod_driverops,
	"serial mux driver %I%",
	&sm_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * Define the body of our interface to the OS.
 */

/*
 * _init is called by Solaris to initialise any driver-specific state
 * and to install the driver.
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

/*
 * _info - return this driver's interface to the kernel.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini - the OS is finished with the services provided by the driver.
 * Remove ourselves and then remove any remaining footprint.
 */
int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
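
/*
 * Illustrative sketch (user level, not part of the driver): linking a
 * lower serial stream beneath the multiplexer with the standard STREAMS
 * I_PLINK ioctl; the returned link id is what a later I_PUNLINK would
 * use. The device paths below are assumptions for illustration only.
 *
 *	#include <fcntl.h>
 *	#include <stropts.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mux, ser, linkid;
 *
 *		mux = open("/devices/multiplexer:ctl", O_RDWR);
 *		ser = open("/dev/term/a", O_RDWR | O_NOCTTY | O_NONBLOCK);
 *		if (mux < 0 || ser < 0) {
 *			perror("open");
 *			return (1);
 *		}
 *		linkid = ioctl(mux, I_PLINK, ser);
 *		if (linkid < 0)
 *			perror("I_PLINK");
 *		else
 *			(void) printf("linked, link id %d\n", linkid);
 *		return (0);
 *	}
 */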