1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2001-2003 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * DESCRIPTION 31 * 32 * ttymux - Multiplexer driver for multiplexing termio compliant streams onto 33 * a single upper stream. 34 * 35 * ADD2FRONT macro can be used to specify the order in which a console 36 * device is put in the queue of multiplexed physical serial devices, 37 * during the association and disassociation of a console interface. 38 * When this macro is defined, the device is placed in front of the queue, 39 * otherwise by default it is placed at the end. 40 * Console I/O happens to each of the physical devices in the order of 41 * their position in this queue. 42 */ 43 44 #include <sys/types.h> 45 #include <sys/file.h> 46 #include <sys/stream.h> 47 #include <sys/strsubr.h> 48 #include <sys/strlog.h> 49 #include <sys/strsun.h> 50 #include <sys/modctl.h> 51 #include <sys/debug.h> 52 #include <sys/kbio.h> 53 #include <sys/devops.h> 54 #include <sys/errno.h> 55 #include <sys/stat.h> 56 #include <sys/kmem.h> 57 #include <sys/ddi.h> 58 #include <sys/consdev.h> 59 #include <sys/tty.h> 60 #include <sys/ptyvar.h> 61 #include <sys/termio.h> 62 #include <sys/fcntl.h> 63 #include <sys/mkdev.h> 64 #include <sys/ser_sync.h> 65 #include <sys/esunddi.h> 66 #include <sys/policy.h> 67 68 #include <sys/ttymux.h> 69 #include "ttymux_impl.h" 70 71 /* 72 * Extern declarations 73 */ 74 extern mblk_t *mkiocb(uint_t); 75 extern int nulldev(); 76 extern uintptr_t space_fetch(char *key); 77 78 extern int sm_ioctl_cmd(sm_uqi_t *, mblk_t *); 79 extern int ttymux_abort_ioctl(mblk_t *); 80 extern int ttymux_device_fini(sm_lqi_t *); 81 extern int ttymux_device_init(sm_lqi_t *); 82 83 /* 84 * Exported interfaces 85 */ 86 int sm_disassociate(int, sm_lqi_t *, ulong_t); 87 int sm_associate(int, sm_lqi_t *, ulong_t, uint_t, char *); 88 89 /* 90 * Variables defined here and visible only internally 91 */ 92 sm_ss_t *sm_ssp = 0; 93 static int sm_instance = 0; 94 static int smctlunit; 95 96 static uint_t sm_default_trflag = 0; 97 uint_t sm_max_units = 6; 98 uint_t sm_minor_cnt = 0; 99 static uint_t sm_refuse_opens = 0; 100 101 /* 102 * Local definitions. 103 */ 104 105 /* force these flags to be unset on console devices */ 106 static ulong_t sm_cmask = (ulong_t)(CRTSXOFF|CRTSCTS); 107 108 /* 109 * SECTION 110 * Implementation Section: 111 */ 112 void 113 sm_debug(char *msg, ...) 
114 { 115 va_list args; 116 char buf[256]; 117 int sz; 118 119 va_start(args, msg); 120 sz = vsnprintf(buf, sizeof (buf), msg, args); 121 va_end(args); 122 123 if (sz < 0) 124 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1, 125 SL_TRACE, "vsnprintf parse error\n"); 126 else if (sz > sizeof (buf)) { 127 char *b; 128 size_t len = sz + 1; 129 130 b = kmem_alloc(len, KM_SLEEP); 131 va_start(args, msg); 132 sz = vsnprintf(b, len, msg, args); 133 va_end(args); 134 if (sz > 0) 135 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), 136 sm_instance, 1, SL_TRACE, b); 137 kmem_free(b, len); 138 } else { 139 140 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 141 1, SL_TRACE, buf); 142 } 143 } 144 145 void 146 sm_log(char *msg, ...) 147 { 148 va_list args; 149 char buf[128]; 150 int sz; 151 152 va_start(args, msg); 153 sz = vsnprintf(buf, sizeof (buf), msg, args); 154 va_end(args); 155 156 if (sz < 0) 157 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 1, 158 SL_TRACE, "vsnprintf parse error\n"); 159 else if (sz > sizeof (buf)) { 160 char *b; 161 size_t len = sz + 1; 162 163 b = kmem_alloc(len, KM_SLEEP); 164 va_start(args, msg); 165 sz = vsnprintf(b, len, msg, args); 166 va_end(args); 167 if (sz > 0) 168 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), 169 sm_instance, 1, SL_NOTE, b); 170 kmem_free(b, len); 171 } else { 172 173 (void) strlog(ddi_driver_major(sm_ssp->sm_dip), sm_instance, 174 1, SL_NOTE, buf); 175 } 176 } 177 178 /* 179 * Should only be called if the caller can guarantee that the vnode 180 * and/or the stream won't disappear while finding the dip. 181 * This routine is only called during an I_PLINK request so it's safe. 182 * The routine obtains the dev_t for a linked se stream. 183 */ 184 static void 185 sm_setdip(queue_t *q, sm_lqi_t *lqi) 186 { 187 lqi->sm_dev = q && STREAM(q) ? STREAM(q)->sd_vnode->v_rdev : NODEV; 188 } 189 190 /* 191 * Called from driver close, state change reports and I_PUNLINK ioctl. 192 * A lower stream has been unlinked - clean up the state associated with it. 193 */ 194 void 195 sm_lqifree(sm_lqi_t *lqi) 196 { 197 int mu_owned; 198 sm_lqi_t **pplqi; 199 200 ASSERT(mutex_owned(lqi->sm_umutex)); 201 ASSERT(SM_RQ(lqi) != 0); 202 203 /* 204 * Clear all state associated with this lower queue except 205 * the identity of the queues themselves and the link id which 206 * can only be cleared by issuing a streams I_PUNLINK ioctl. 207 * 208 * The association of a lower queue is a two step process: 209 * 1. initialise the lower q data structure on I_PLINK 210 * 2. associate an upper q with the lower q on SM_CMD_ASSOCIATE. 211 * 212 * If step 2 has ocurred then 213 * remove this lower queue info from the logical unit. 214 */ 215 if (lqi->sm_uqi) { 216 sm_dbg('Y', ("lqifree unit %d, ", lqi->sm_uqi->sm_lunit)); 217 if ((mu_owned = mutex_owned(lqi->sm_uqi->sm_umutex)) == 0) 218 LOCK_UNIT(lqi->sm_uqi); 219 220 pplqi = &lqi->sm_uqi->sm_lqs; 221 while (*pplqi != lqi) { 222 ASSERT(*pplqi); 223 pplqi = &((*pplqi)->sm_nlqi); 224 } 225 *pplqi = lqi->sm_nlqi; 226 lqi->sm_uqi->sm_nlqs--; 227 228 if (mu_owned == 0) 229 UNLOCK_UNIT(lqi->sm_uqi); 230 231 lqi->sm_uqi = 0; 232 } 233 } 234 235 /* 236 * Given a q return the associated lower queue data structure or NULL. 237 * Return the data locked. 
238 */ 239 static sm_lqi_t * 240 get_lqi_byq(queue_t *q) 241 { 242 int i; 243 sm_lqi_t *lqi, *flqi = 0; 244 245 for (i = 0; i < MAX_LQS; i++) { 246 lqi = &sm_ssp->sm_lqs[i]; 247 LOCK_UNIT(lqi); 248 if (flqi == 0 && lqi->sm_linkid == 0) /* assumes muxids != 0 */ 249 flqi = lqi; 250 else if (SM_RQ(lqi) == q || SM_WQ(lqi) == q) { 251 if (flqi) 252 UNLOCK_UNIT(flqi); 253 return (lqi); 254 } 255 else 256 UNLOCK_UNIT(lqi); 257 } 258 return (flqi); 259 } 260 261 /* 262 * Given a streams link identifier return the associated lower queue data 263 * structure or NULL. 264 */ 265 sm_lqi_t * 266 get_lqi_byid(int linkid) 267 { 268 int i; 269 sm_lqi_t *lqi; 270 271 if (linkid == 0) 272 return (NULL); 273 for (i = 0; i < MAX_LQS; i++) { 274 lqi = &sm_ssp->sm_lqs[i]; 275 if (lqi->sm_linkid == linkid) 276 return (lqi); 277 } 278 return (NULL); 279 } 280 281 /* 282 * Given a dev_t for a lower stream return the associated lower queue data 283 * structure or NULL. 284 */ 285 sm_lqi_t * 286 get_lqi_bydevt(dev_t dev) 287 { 288 int i; 289 sm_lqi_t *lqi; 290 291 if (dev == NODEV) 292 return (NULL); 293 294 for (i = 0; i < MAX_LQS; i++) { 295 lqi = &sm_ssp->sm_lqs[i]; 296 if (lqi->sm_dev == dev) 297 return (lqi); 298 } 299 return (NULL); 300 } 301 302 /* 303 * Determine whether the input flag is set on at least 304 * howmany queues. 305 */ 306 static int 307 sm_is_flag_set(sm_uqi_t *uqi, uint_t flag, uint_t howmany) 308 { 309 sm_lqi_t *lqi; 310 311 if (howmany == 0) 312 return (0); 313 314 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 315 if (lqi->sm_flags & flag) 316 if (--howmany == 0) 317 return (1); 318 } 319 return (0); 320 } 321 322 /* 323 * How many usable queues are associated with a given upper stream 324 */ 325 static int 326 sm_uwq_error(sm_uqi_t *uqi) 327 { 328 return (sm_is_flag_set(uqi, (WERROR_MODE|HANGUP_MODE), uqi->sm_nlqs)); 329 } 330 331 /* 332 * How many of the queues associated with a given upper stream 333 * - do not - have the given flags set. 334 */ 335 static int 336 sm_q_count(sm_uqi_t *uqi, uint_t flag) 337 { 338 sm_lqi_t *lqi; 339 int count = 0; 340 341 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 342 if ((lqi->sm_flags & flag) == 0) 343 count++; 344 } 345 return (count); 346 } 347 348 /* 349 * How many of the queues associated with a given upper stream 350 * - do not - have the given flags set. 351 */ 352 static int 353 sm_qs_without(sm_uqi_t *uqi, uint_t flag, uint_t ioflag) 354 { 355 sm_lqi_t *lqi; 356 int count = 0; 357 358 for (lqi = uqi->sm_lqs; lqi; lqi = lqi->sm_nlqi) { 359 if ((lqi->sm_flags & flag) == 0 && 360 (lqi->sm_ioflag & ioflag) == 0) 361 count++; 362 } 363 return (count); 364 } 365 366 /* 367 * How many usable queues are associated with a given upper stream 368 */ 369 static int 370 sm_good_qs(sm_uqi_t *uqi) 371 { 372 return (sm_q_count(uqi, (WERROR_MODE|HANGUP_MODE))); 373 } 374 375 static int 376 sm_cnt_oqs(sm_uqi_t *uqi) 377 { 378 return (sm_qs_without(uqi, (WERROR_MODE|HANGUP_MODE), 379 (uint_t)FOROUTPUT)); 380 } 381 382 /* 383 * Send an ioctl downstream and remember that it was sent so that 384 * its response can be caught on the way back up. 
385 */ 386 static void 387 sm_issue_ioctl(void *arg) 388 { 389 sm_lqi_t *lqi = arg; 390 uint_t cmdflag = 0; 391 queue_t *q = SM_WQ(lqi); 392 int iocmd, size; 393 394 LOCK_UNIT(lqi); 395 396 lqi->sm_bid = 0; 397 if ((lqi->sm_flags & (WERROR_MODE|HANGUP_MODE)) == 0 && 398 (lqi->sm_flags & (WANT_CDSTAT|WANT_TCSET))) { 399 mblk_t *pioc; 400 401 if (lqi->sm_flags & WANT_TCSET) { 402 lqi->sm_flags &= ~WANT_TCSET; 403 iocmd = TCSETS; 404 cmdflag = WANT_TCSET; 405 } else if (lqi->sm_flags & WANT_SC) { 406 lqi->sm_flags &= ~WANT_SC; 407 iocmd = TIOCGSOFTCAR; 408 cmdflag = WANT_SC; 409 } else if (lqi->sm_flags & WANT_CD) { 410 lqi->sm_flags &= ~WANT_CD; 411 iocmd = TIOCMGET; 412 } else if (lqi->sm_flags & WANT_CL) { 413 lqi->sm_flags &= ~WANT_CL; 414 iocmd = TCGETS; 415 cmdflag = WANT_CL; 416 } else { 417 UNLOCK_UNIT(lqi); 418 return; 419 } 420 421 if (pioc = mkiocb(iocmd)) { 422 if (cmdflag == WANT_TCSET) { 423 pioc->b_cont = 424 sm_allocb(sizeof (struct termios), 425 BPRI_MED); 426 if (pioc->b_cont == 0) { 427 freemsg(pioc); 428 pioc = 0; 429 } else { 430 struct termios *tc = (struct termios *) 431 pioc->b_cont->b_wptr; 432 433 bzero((caddr_t)tc, 434 sizeof (struct termios)); 435 tc->c_cflag = lqi->sm_ttycommon-> 436 t_cflag; 437 pioc->b_cont->b_rptr = 438 pioc->b_cont->b_wptr; 439 pioc->b_cont->b_wptr += 440 sizeof (struct termios); 441 } 442 size = sizeof (struct iocblk) + 443 sizeof (struct termios); 444 } 445 else 446 size = sizeof (struct iocblk); 447 } 448 else 449 size = sizeof (struct iocblk); 450 451 if (pioc != 0) { 452 453 lqi->sm_piocid = ((struct iocblk *)pioc->b_rptr)-> 454 ioc_id; 455 lqi->sm_flags |= SM_IOCPENDING; 456 457 /* lqi->sm_flags |= cmdflag; */ 458 UNLOCK_UNIT(lqi); 459 (void) putq(q, pioc); 460 } else { 461 UNLOCK_UNIT(lqi); 462 lqi->sm_bid = qbufcall(WR(q), size, BPRI_MED, 463 sm_issue_ioctl, lqi); 464 } 465 } 466 else 467 UNLOCK_UNIT(lqi); 468 } 469 470 /* 471 * Associate one of the drivers minor nodes with a serial device. 472 */ 473 int 474 sm_associate(int unit, sm_lqi_t *plqi, ulong_t tag, uint_t ioflag, char *dp) 475 { 476 sm_uqi_t *uqi; 477 int rval = 0; 478 479 sm_dbg('Y', ("sm_associate(%d, %d, %d): ", 480 (plqi) ? plqi->sm_linkid : 0, unit, ioflag)); 481 /* 482 * Check the data is valid. 483 * Associate a lower queue with a logical unit. 
484 */ 485 486 if (unit < 0 || unit >= NLUNITS || plqi == 0 || 487 (uqi = get_uqi(sm_ssp, unit)) == 0) { 488 sm_dbg('@', (" invalid: lqi=0x%p lui=0x%p:", plqi, uqi)); 489 rval = EINVAL; 490 } else { 491 if ((ioflag & FORIO) == 0) 492 ioflag = FORIO; 493 494 LOCK_UNIT(plqi); 495 496 if (plqi->sm_uqi) { 497 if (plqi->sm_uqi->sm_lunit == unit) { 498 if ((ioflag & (uint_t)FORIO) != 0) 499 plqi->sm_ioflag = 500 (ioflag & (uint_t)FORIO); 501 rval = 0; 502 } else { 503 sm_dbg('@', ("already associated with unit %d:", 504 plqi->sm_uqi->sm_lunit)); 505 rval = EINVAL; 506 } 507 } else { 508 509 LOCK_UNIT(uqi); 510 511 if ((ioflag & (uint_t)FORIO) != 0) 512 plqi->sm_ioflag = (ioflag & (uint_t)FORIO); 513 514 plqi->sm_ttycommon->t_cflag = uqi->sm_ttycommon-> 515 t_cflag; 516 plqi->sm_ttycommon->t_flags = uqi->sm_ttycommon-> 517 t_flags; 518 plqi->sm_uqi = uqi; 519 plqi->sm_mbits = 0; 520 plqi->sm_tag = tag; 521 522 if (*dp == '/') 523 (void) strncpy(plqi->sm_path, dp, MAXPATHLEN); 524 else 525 *(plqi->sm_path) = '\0'; 526 527 plqi->sm_flags |= WANT_TCSET; 528 #ifdef ADD2FRONT 529 plqi->sm_nlqi = uqi->sm_lqs; 530 uqi->sm_lqs = plqi; 531 #else 532 plqi->sm_nlqi = 0; 533 if (uqi->sm_lqs) { 534 sm_lqi_t *lq; 535 for (lq = uqi->sm_lqs; lq->sm_nlqi; 536 lq = lq->sm_nlqi) { 537 } 538 lq->sm_nlqi = plqi; 539 } else 540 uqi->sm_lqs = plqi; 541 #endif 542 uqi->sm_nlqs++; 543 544 (void) ttymux_device_init(plqi); 545 546 UNLOCK_UNIT(uqi); 547 rval = 0; 548 /* 549 * Everything looks good so it's now ok to enable lower 550 * queue processing. 551 * Note the lower queue should be enabled as soon as 552 * I_PLINK returns (used in sm_get_ttymodes etc). 553 * Schedule ioctls to obtain the terminal settings. 554 */ 555 556 if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_waitq) 557 plqi->sm_uqflags |= SM_UQVALID; 558 559 qenable(SM_RQ(plqi)); 560 if (plqi->sm_flags & (WANT_CDSTAT|WANT_TCSET)) { 561 /* 562 * Bypass the lower half of the driver (hence 563 * no qwriter) and apply the current termio 564 * settings on the lower stream. 565 */ 566 UNLOCK_UNIT(plqi); 567 if (plqi->sm_bid) { 568 qunbufcall(SM_WQ(plqi), plqi->sm_bid); 569 plqi->sm_bid = 0; 570 } 571 /* 572 * Only set cflags on the lower q if we know 573 * the settings on any other lower queue. 574 */ 575 sm_issue_ioctl(plqi); 576 LOCK_UNIT(plqi); 577 578 } 579 } 580 581 UNLOCK_UNIT(plqi); 582 } 583 sm_dbg('Y', ("sm_associate: rval=%d.\n", rval)); 584 return (rval); 585 } 586 587 /* 588 * Break an association between one of the driver's minor nodes and 589 * a serial device. 590 */ 591 int 592 sm_disassociate(int unit, sm_lqi_t *plqi, ulong_t tag) 593 { 594 sm_uqi_t *uqi; 595 int rval = 0; 596 597 sm_dbg('Y', ("sm_disassociate: link %d, unit %d: ", 598 (plqi) ? plqi->sm_linkid : 0, unit)); 599 /* 600 * Check the data is valid. 601 * Disassociate a lower queue with a logical unit. 
602 */ 603 if (unit < 0 || unit >= NLUNITS || plqi == 0 || 604 (uqi = get_uqi(sm_ssp, unit)) == 0) { 605 sm_dbg('@', ("invalid: lqi=0x%p lui=0x%p", plqi, uqi)); 606 rval = EINVAL; 607 } else { 608 LOCK_UNIT(plqi); 609 610 if (plqi->sm_uqi == NULL) { 611 sm_dbg('@', ("unit not associated")); 612 rval = EINVAL; 613 } else if (plqi->sm_uqi->sm_lunit != unit) { 614 sm_dbg('@', ("unit and linkid not related", 615 plqi->sm_uqi->sm_lunit)); 616 rval = EINVAL; 617 } else if (plqi->sm_tag != tag) { 618 sm_dbg('@', 619 ("Invalid tag for TTYMUX_DISASSOC ioctl\n")); 620 rval = EPERM; 621 } else { 622 sm_dbg('Y', ("disassociating ")); 623 624 (void) ttymux_device_fini(plqi); 625 626 /* 627 * Indicate that carrier status is no 628 * longer required and that the upper 629 * queue should not be used by plqi 630 */ 631 plqi->sm_flags &= ~(WANT_CDSTAT|WANT_TCSET); 632 plqi->sm_uqflags &= ~(SM_UQVALID|SM_OBPCNDEV); 633 plqi->sm_ioflag = 0u; 634 635 sm_lqifree(plqi); 636 rval = 0; 637 } 638 UNLOCK_UNIT(plqi); 639 } 640 sm_dbg('Y', (" rval=%d.\n", rval)); 641 return (rval); 642 643 } 644 645 /* 646 * Streams helper routines; 647 */ 648 649 /* 650 * Schedule a qbufcall for an upper queue. 651 * Must be called within the perimiter of the parameter q. 652 * fn must reenable the q. 653 * Called: 654 * whenever a message must be placed on multiple queues and allocb fails; 655 */ 656 static void 657 sm_sched_uqcb(queue_t *q, int memreq, int pri, void (*fn)()) 658 { 659 sm_uqi_t *uqi = q->q_ptr; 660 661 if (uqi->sm_ttybid != 0) 662 qunbufcall(q, uqi->sm_ttybid); 663 664 noenable(q); 665 666 uqi->sm_ttybid = qbufcall(q, memreq, pri, fn, uqi); 667 } 668 669 /* 670 * qbufcall routine to restart the queues when memory is available. 671 */ 672 static void 673 sm_reenable_q(sm_uqi_t *uqi) 674 { 675 queue_t *wq = SM_WQ(uqi); 676 677 if ((uqi->sm_flags & SM_STOPPED) == 0) { 678 enableok(wq); 679 qenable(wq); 680 } 681 } 682 683 /* 684 * Place a message on the write queue of each stream associated with 685 * the given upper stream. 686 */ 687 static void 688 sm_senddown(sm_uqi_t *uqi) 689 { 690 sm_lqi_t *lqi; 691 692 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 693 if (lqi->sm_mp != 0) { 694 putnext(SM_WQ(lqi), lqi->sm_mp); 695 lqi->sm_mp = 0; 696 } 697 } 698 } 699 700 /* 701 * For each lower device that should receive a write message duplicate 702 * the message block. 703 */ 704 static int 705 sm_dupmsg(sm_uqi_t *uqi, mblk_t *mp) 706 { 707 sm_lqi_t *lqi; 708 mblk_t *origmp = mp; 709 710 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 711 lqi->sm_mp = 0; 712 if (lqi->sm_flags & WERROR_MODE) { 713 continue; 714 } 715 if ((lqi->sm_ioflag & (uint_t)FOROUTPUT) == 0) { 716 if (DB_TYPE(mp) == M_DATA) 717 continue; 718 } 719 if (lqi->sm_nlqi == 0) { 720 lqi->sm_mp = mp; 721 origmp = NULL; 722 } else if ((lqi->sm_mp = sm_copymsg(mp)) == 0) { 723 sm_lqi_t *flqi; 724 725 for (flqi = uqi->sm_lqs; flqi != lqi; 726 flqi = flqi->sm_nlqi) { 727 if (lqi->sm_mp) { 728 /* must have been sm_copymsg */ 729 sm_freemsg(lqi->sm_mp); 730 lqi->sm_mp = 0; 731 } 732 } 733 return (sm_cnt_oqs(uqi) * msgdsize(mp)); 734 } 735 } 736 if (origmp != NULL) 737 freemsg(origmp); 738 return (0); 739 } 740 741 /* 742 * Return 1 if all associated lower devices have room for another message 743 * otherwise return 0. 
744 */ 745 static int 746 sm_cansenddown(sm_uqi_t *uqi) 747 { 748 749 register sm_lqi_t *lqi; 750 751 if (uqi->sm_lqs == 0) 752 return (0); 753 754 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 755 if ((lqi->sm_flags & WERROR_MODE) == 0 && 756 canputnext(SM_WQ(lqi)) == 0) 757 return (0); 758 } 759 return (1); 760 } 761 762 /* 763 * Put a message down all associated lower queues. 764 * Return 1 if the q function was called. 765 */ 766 static int 767 sm_putqs(queue_t *q, mblk_t *mp, int (*qfn)()) 768 { 769 register sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr; 770 register int memreq; 771 int pri = (DB_TYPE(mp) < QPCTL) ? BPRI_MED : BPRI_HI; 772 int rval = 0; 773 774 if (uqi->sm_lqs == 0 || (uqi->sm_flags & WERROR_MODE)) { 775 776 sm_dbg('Q', ("sm_putqs: freeing (0x%p 0x%p).\n", uqi->sm_lqs, 777 uqi->sm_flags)); 778 freemsg(mp); 779 } else if (pri != BPRI_HI && sm_cansenddown(uqi) == 0) { 780 /* a lower q is flow controlled */ 781 (void) qfn(q, mp); 782 rval = 1; 783 } else if ((memreq = sm_dupmsg(uqi, mp)) == 0) { 784 785 sm_senddown(uqi); 786 787 } else { 788 sm_log("sm_putqs: msg 0x%x - can't alloc %d bytes (pri %d).\n", 789 DB_TYPE(mp), memreq, pri); 790 sm_sched_uqcb(q, memreq, pri, sm_reenable_q); 791 792 (void) qfn(q, mp); 793 rval = 1; 794 795 } 796 797 return (rval); 798 } 799 800 /* 801 * sm_reply - send an ioctl reply back up the queue. 802 */ 803 static void 804 sm_reply(queue_t *q, mblk_t *mp, uchar_t type, int error) 805 { 806 struct iocblk *iocbp; 807 808 iocbp = (struct iocblk *)mp->b_rptr; 809 810 DB_TYPE(mp) = type; 811 iocbp->ioc_count = 0; 812 iocbp->ioc_error = error; 813 qreply(q, mp); 814 } 815 816 /* 817 * Service a streams link and unlink requests. 818 */ 819 static void 820 sm_link_req(queue_t *wq, mblk_t *mp) 821 { 822 struct linkblk *linkp; 823 int rval; 824 int cmd; 825 sm_lqi_t *plqi; 826 827 ASSERT(DB_TYPE(mp) == M_IOCTL); 828 829 cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 830 switch (cmd) { 831 832 case I_LINK: 833 case I_PLINK: 834 sm_dbg('G', ("sm_link_req: M_IOCTL %x (I_PLINK).\n", cmd)); 835 836 linkp = (struct linkblk *)mp->b_cont->b_rptr; 837 838 /* 839 * 1. Sanity check the link block. 840 * 2. Validate that the queue is not already linked 841 * (and resources available). 842 * 3. Validate that the lower queue is not associated with 843 * a logical unit. 844 * 4. Remember that this lower queue is linked to the driver. 
845 */ 846 if ((linkp == NULL) || (MBLKL(mp) < sizeof (*linkp)) || 847 linkp->l_qbot == NULL) { 848 sm_dbg('I', ("sm_link_req: invalid link block.\n")); 849 rval = EINVAL; 850 } else if ((plqi = get_lqi_byq(linkp->l_qbot)) == 0) { 851 sm_dbg('I', ("sm_link_req: out of resources.\n")); 852 rval = EBUSY; /* out of resources */ 853 } else if (plqi->sm_uqi) { 854 UNLOCK_UNIT(plqi); /* was aquired by get_lqi_byq */ 855 sm_dbg('I', ("sm_link_req: already associated.\n")); 856 rval = EBUSY; /* already linked */ 857 } else { 858 SM_WQ(plqi) = linkp->l_qbot; 859 SM_RQ(plqi) = OTHERQ(linkp->l_qbot); 860 861 linkp->l_qbot->q_ptr = 862 OTHERQ(linkp->l_qbot)->q_ptr = plqi; 863 plqi->sm_linkid = linkp->l_index; 864 UNLOCK_UNIT(plqi); /* was aquired by get_lqi_byq */ 865 866 sm_dbg('H', ("sm_link_req: linkid = %d.\n", 867 linkp->l_index)); 868 869 sm_setdip(linkp->l_qbot, plqi); 870 plqi->sm_ttycommon->t_flags = 0; 871 plqi->sm_ttycommon->t_cflag = 0; 872 plqi->sm_mbits = 0; 873 (void) ttymux_device_init(plqi); 874 rval = 0; 875 } 876 877 break; 878 879 case I_UNLINK: 880 case I_PUNLINK: 881 sm_dbg('G', ("sm_link_req: M_IOCTL (I_PUNLINK).\n")); 882 883 linkp = (struct linkblk *)mp->b_cont->b_rptr; 884 885 if ((linkp == NULL) || 886 (MBLKL(mp) < sizeof (*linkp)) || 887 linkp->l_qbot == NULL) { 888 rval = EINVAL; 889 } else if ((plqi = get_lqi_byid(linkp->l_index)) == 0) { 890 rval = EINVAL; 891 } else { 892 sm_uqi_t *uqi; 893 int werrmode; 894 895 /* 896 * Mark the lower q as invalid. 897 */ 898 sm_dbg('G', ("I_PUNLINK: freeing link %d\n", 899 linkp->l_index)); 900 901 if (plqi->sm_bid) { 902 qunbufcall(SM_RQ(plqi), plqi->sm_bid); 903 plqi->sm_bid = 0; 904 } 905 if (plqi->sm_ttybid) { 906 qunbufcall(SM_RQ(plqi), plqi->sm_ttybid); 907 plqi->sm_ttybid = 0; 908 } 909 910 uqi = plqi->sm_uqi; 911 912 913 (void) ttymux_device_fini(plqi); 914 915 if (uqi) 916 (void) sm_disassociate(uqi->sm_lunit, 917 plqi, plqi->sm_tag); 918 919 LOCK_UNIT(plqi); 920 921 plqi->sm_piocid = 0; 922 923 werrmode = (plqi->sm_flags & (WERROR_MODE|HANGUP_MODE)) 924 ? 1 : 0; 925 926 plqi->sm_mbits = 0; 927 plqi->sm_flags = 0; 928 929 ttycommon_close(plqi->sm_ttycommon); 930 /* SM_RQ(plqi) = SM_WQ(plqi) = 0; */ 931 plqi->sm_ttycommon->t_flags = 0; 932 plqi->sm_ttycommon->t_cflag = 0; 933 plqi->sm_ttycommon->t_iflag = 0; 934 plqi->sm_linkid = 0; 935 plqi->sm_dev = NODEV; 936 plqi->sm_hadkadbchar = 0; 937 plqi->sm_nachar = sm_ssp->sm_abs; 938 939 UNLOCK_UNIT(plqi); 940 if (uqi && 941 werrmode && 942 (uqi->sm_flags & FULLY_OPEN) && 943 sm_uwq_error(uqi) && 944 putnextctl(SM_RQ(uqi), M_HANGUP) == 0) { 945 sm_log("sm_link_req: putnextctl(M_HANGUP)" 946 " failed.\n"); 947 } 948 949 rval = 0; 950 } 951 952 break; 953 default: 954 rval = EINVAL; 955 } 956 sm_reply(wq, mp, (rval) ? M_IOCNAK : M_IOCACK, rval); 957 } 958 959 static int 960 sm_getiocinfo(mblk_t *mp, struct sm_iocinfo *info) 961 { 962 switch (DB_TYPE(mp)) { 963 case M_COPYOUT: 964 info->sm_id = ((struct copyreq *)mp->b_rptr)->cq_id; 965 info->sm_cmd = ((struct copyreq *)mp->b_rptr)->cq_cmd; 966 info->sm_data = (((struct copyreq *)mp->b_rptr)->cq_size && 967 mp->b_cont) ? 
(void *)mp->b_cont->b_rptr : 0; 968 break; 969 case M_COPYIN: 970 info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id; 971 info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd; 972 info->sm_data = 0; 973 break; 974 case M_IOCACK: 975 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 976 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 977 /* the se driver has bug so we cannot use ioc_count */ 978 info->sm_data = (((struct iocblk *)mp->b_rptr)-> 979 ioc_error == 0 && mp->b_cont) ? 980 (void *)mp->b_cont->b_rptr : 0; 981 break; 982 case M_IOCNAK: 983 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 984 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 985 info->sm_data = 0; 986 break; 987 case M_IOCDATA: 988 info->sm_id = ((struct copyresp *)mp->b_rptr)->cp_id; 989 info->sm_cmd = ((struct copyresp *)mp->b_rptr)->cp_cmd; 990 info->sm_data = (((struct copyresp *)mp->b_rptr)-> 991 cp_rval == 0 && mp->b_cont) ? 992 (void *)mp->b_cont->b_rptr : 0; 993 break; 994 case M_IOCTL: 995 info->sm_id = ((struct iocblk *)mp->b_rptr)->ioc_id; 996 info->sm_cmd = ((struct iocblk *)mp->b_rptr)->ioc_cmd; 997 info->sm_data = 0; 998 break; 999 default: 1000 return (EINVAL); 1001 } 1002 return (0); 1003 } 1004 1005 /* 1006 * Record the termio settings that have been set on the upper stream 1007 */ 1008 static int 1009 sm_update_ttyinfo(mblk_t *mp, sm_uqi_t *uqi) 1010 { 1011 int err; 1012 struct sm_iocinfo info; 1013 1014 if ((err = sm_getiocinfo(mp, &info)) != 0) 1015 return (err); 1016 1017 switch (info.sm_cmd) { 1018 case TIOCSPPS: 1019 case TIOCGPPS: 1020 case TIOCGPPSEV: 1021 return (ENOTSUP); 1022 case TIOCGWINSZ: 1023 case TIOCSWINSZ: 1024 break; 1025 case TCSBRK: 1026 case TIOCSBRK: 1027 case TIOCCBRK: 1028 break; 1029 case TCSETSF: 1030 uqi->sm_flags |= FLUSHR_PEND; 1031 sm_dbg('I', ("TCSETSF: FLUSH is pending\n")); 1032 /*FALLTHROUGH*/ 1033 case TCSETSW: 1034 case TCSETS: 1035 case TCGETS: 1036 if (info.sm_data != 0) { 1037 ((struct termios *)info.sm_data)->c_cflag &= 1038 (tcflag_t)(~uqi->sm_cmask); 1039 uqi->sm_ttycommon->t_cflag = 1040 ((struct termios *)info.sm_data)->c_cflag; 1041 } 1042 break; 1043 case TCSETAF: 1044 sm_dbg('I', ("TCSETAF: FLUSH is pending\n")); 1045 uqi->sm_flags |= FLUSHR_PEND; 1046 /*FALLTHROUGH*/ 1047 case TCSETAW: 1048 case TCSETA: 1049 case TCGETA: 1050 if (info.sm_data != 0) { 1051 ((struct termio *)info.sm_data)->c_cflag &= 1052 (tcflag_t)(~uqi->sm_cmask); 1053 uqi->sm_ttycommon->t_cflag = 1054 (tcflag_t)((struct termio *)info.sm_data)->c_cflag; 1055 } 1056 break; 1057 case TIOCSSOFTCAR: 1058 case TIOCGSOFTCAR: 1059 if (info.sm_data != 0) { 1060 if (*(int *)info.sm_data == 1) 1061 uqi->sm_ttycommon->t_flags |= TS_SOFTCAR; 1062 else 1063 uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR; 1064 } 1065 break; 1066 case TIOCMSET: 1067 case TIOCMGET: 1068 if (info.sm_data != 0) 1069 uqi->sm_mbits = *(int *)info.sm_data; 1070 break; 1071 case TIOCMBIS: 1072 if (info.sm_data != 0) 1073 uqi->sm_mbits |= *(int *)info.sm_data; 1074 break; 1075 case TIOCMBIC: 1076 if (info.sm_data != 0) 1077 uqi->sm_mbits &= ~(*(int *)info.sm_data); 1078 break; 1079 default: 1080 return (EINVAL); 1081 /* NOTREACHED */ 1082 } /* end switch cmd */ 1083 1084 if ((uqi->sm_mbits & TIOCM_CD) || 1085 (uqi->sm_ttycommon->t_flags & TS_SOFTCAR) || 1086 (uqi->sm_ttycommon->t_cflag & CLOCAL)) 1087 uqi->sm_flags |= SM_CARON; 1088 else 1089 uqi->sm_flags &= ~SM_CARON; 1090 1091 return (0); 1092 } 1093 1094 /* 1095 * SECTION 1096 * STREAM's interface to the OS. 
1097 * Routines directly callable from the OS. 1098 */ 1099 1100 /* 1101 * Processes high priority messages comming from modules above the 1102 * multiplexor. 1103 * Return 1 if the queue was disabled. 1104 */ 1105 static int 1106 sm_hp_uwput(queue_t *wq, mblk_t *mp) 1107 { 1108 sm_uqi_t *uqi = (sm_uqi_t *)(wq->q_ptr); 1109 int rval = 0; 1110 sm_lqi_t *plqi; 1111 int msgtype = DB_TYPE(mp); 1112 1113 switch (msgtype) { 1114 1115 case M_FLUSH: 1116 /* 1117 * How to flush the bottom half: 1118 * putctl1(SM_WQ(plqi), *mp->b_rptr) 1119 * will work on the bottom half but if FLUSHR is set 1120 * when is the right time to flush the upper read queue. 1121 * 1122 * Could set uqi->sm_flags & WANT_FLUSH but then what happens 1123 * if FLUSHR is set and the driver sends up a FLUSHR 1124 * before it handles the current FLUSHR request 1125 * (if only there was an id for the message that could 1126 * be matched when it returns back from the drivers. 1127 * 1128 * Thus I'm going by the book - the bottom half acts like 1129 * a stream head and turns around FLUSHW back down to 1130 * the driver (see lrput). The upper half acts like a 1131 * driver and turns around FLUSHR: 1132 */ 1133 1134 sm_dbg('I', ("sm_hp_uwput: FLUSH request 0x%x\n", *mp->b_rptr)); 1135 /* flush the upper write queue */ 1136 if (*mp->b_rptr & FLUSHW) 1137 flushq(wq, FLUSHDATA); 1138 1139 /* 1140 * flush each associated lower write queue 1141 * and pass down the driver (ignore the FLUSHR and deal with 1142 * it when it comes back up the read side. 1143 */ 1144 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1145 if ((plqi->sm_flags & WERROR_MODE) == 0 && 1146 SM_WQ(plqi)) { 1147 sm_dbg('I', ("flush lq 0x%p\n", SM_WQ(plqi))); 1148 if (*mp->b_rptr & FLUSHW) 1149 flushq(SM_WQ(plqi), FLUSHDATA); 1150 (void) putnextctl1(SM_WQ(plqi), M_FLUSH, 1151 *mp->b_rptr); 1152 } 1153 } 1154 break; 1155 1156 case M_STARTI: 1157 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1158 plqi->sm_flags &= ~SM_ISTOPPED; 1159 if ((plqi->sm_flags & WERROR_MODE) == 0) 1160 (void) putnextctl(SM_WQ(plqi), msgtype); 1161 } 1162 break; 1163 1164 case M_STOPI: 1165 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) { 1166 plqi->sm_flags |= SM_ISTOPPED; 1167 if ((plqi->sm_flags & WERROR_MODE) == 0) 1168 (void) putnextctl(SM_WQ(plqi), msgtype); 1169 } 1170 break; 1171 1172 case M_STOP: /* must never be queued */ 1173 uqi->sm_flags |= SM_STOPPED; 1174 noenable(wq); 1175 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) 1176 if ((plqi->sm_flags & WERROR_MODE) == 0) 1177 (void) putnextctl(SM_WQ(plqi), msgtype); 1178 1179 rval = 1; 1180 break; 1181 1182 case M_START: /* never be queued */ 1183 uqi->sm_flags &= ~SM_STOPPED; 1184 enableok(wq); 1185 qenable(wq); 1186 for (plqi = uqi->sm_lqs; plqi != 0; plqi = plqi->sm_nlqi) 1187 if ((plqi->sm_flags & WERROR_MODE) == 0) 1188 (void) putnextctl(SM_WQ(plqi), msgtype); 1189 1190 break; 1191 1192 case M_PCSIG: 1193 case M_COPYOUT: 1194 case M_COPYIN: 1195 case M_IOCACK: 1196 case M_IOCNAK: 1197 /* Wrong direction for message */ 1198 break; 1199 case M_READ: 1200 break; 1201 case M_PCPROTO: 1202 case M_PCRSE: 1203 default: 1204 sm_dbg('I', ("sm_hp_uwput: default case %d.\n", msgtype)); 1205 break; 1206 } /* end switch on high pri message type */ 1207 1208 freemsg(mp); 1209 return (rval); 1210 } 1211 1212 static int 1213 sm_default_uwioctl(queue_t *wq, mblk_t *mp, int (*qfn)()) 1214 { 1215 int err; 1216 struct iocblk *iobp; 1217 sm_uqi_t *uqi; 1218 1219 uqi = (sm_uqi_t *)(wq->q_ptr); 1220 iobp = 
(struct iocblk *)mp->b_rptr; 1221 1222 switch (iobp->ioc_cmd) { 1223 case TIOCEXCL: 1224 case TIOCNXCL: 1225 case TIOCSTI: 1226 (void) ttycommon_ioctl(uqi->sm_ttycommon, wq, mp, &err); 1227 sm_reply(wq, mp, err ? M_IOCACK : M_IOCNAK, err); 1228 return (0); 1229 default: 1230 break; 1231 } 1232 err = sm_update_ttyinfo(mp, uqi); 1233 if (err) { 1234 iobp->ioc_error = err; 1235 mp->b_datap->db_type = M_IOCNAK; 1236 qreply(wq, mp); 1237 return (0); 1238 } 1239 1240 /* 1241 * If uqi->sm_siocdata.sm_iocid just overwrite it since the stream 1242 * head will have timed it out 1243 */ 1244 uqi->sm_siocdata.sm_iocid = iobp->ioc_id; 1245 uqi->sm_siocdata.sm_acked = 0; 1246 uqi->sm_siocdata.sm_nacks = sm_good_qs(uqi); 1247 uqi->sm_siocdata.sm_acnt = 0; 1248 uqi->sm_siocdata.sm_policy = uqi->sm_policy; 1249 uqi->sm_siocdata.sm_flags = 0; 1250 sm_dbg('Z', (" want %d acks for id %d.\n", 1251 uqi->sm_siocdata.sm_nacks, iobp->ioc_id)); 1252 1253 return (sm_putqs(wq, mp, qfn)); 1254 } 1255 1256 /* 1257 * 1258 * sm_uwput - put function for an upper STREAM write. 1259 */ 1260 static int 1261 sm_uwput(queue_t *wq, mblk_t *mp) 1262 { 1263 sm_uqi_t *uqi; 1264 uchar_t msgtype; 1265 int cmd; 1266 struct iocblk *iobp; 1267 1268 uqi = (sm_uqi_t *)(wq->q_ptr); 1269 msgtype = DB_TYPE(mp); 1270 1271 ASSERT(uqi != 0 && sm_ssp != 0); 1272 1273 if (msgtype >= QPCTL && msgtype != M_IOCDATA) { 1274 (void) sm_hp_uwput(wq, mp); 1275 return (0); 1276 } 1277 1278 switch (DB_TYPE(mp)) { 1279 case M_DATA: 1280 case M_DELAY: 1281 case M_BREAK: 1282 default: 1283 (void) sm_putqs(wq, mp, putq); 1284 break; 1285 1286 case M_CTL: 1287 if (((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_CANONQUERY) { 1288 (void) putnextctl1(OTHERQ(wq), M_CTL, MC_NOCANON); 1289 } 1290 freemsg(mp); 1291 break; 1292 case M_IOCDATA: /* not handled as high pri because may need to putbq */ 1293 sm_dbg('M', ("sm_uwput(M_IOCDATA)\n")); 1294 /*FALLTHROUGH*/ 1295 case M_IOCTL: 1296 cmd = (msgtype == M_IOCDATA) ? 1297 ((struct copyresp *)mp->b_rptr)->cp_cmd : 1298 ((struct iocblk *)mp->b_rptr)->ioc_cmd; 1299 1300 iobp = (struct iocblk *)mp->b_rptr; 1301 iobp->ioc_rval = 0; 1302 1303 sm_dbg('M', ("sm_uwput(M_IOCTL:%d)\n", cmd)); 1304 1305 switch (cmd) { 1306 1307 case CONSGETABORTENABLE: 1308 iobp->ioc_error = ttymux_abort_ioctl(mp); 1309 DB_TYPE(mp) = iobp->ioc_error ? M_IOCNAK : M_IOCACK; 1310 qreply(wq, mp); 1311 break; 1312 case CONSSETABORTENABLE: 1313 iobp->ioc_error = 1314 secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0 ? 1315 EPERM : ttymux_abort_ioctl(mp); 1316 DB_TYPE(mp) = iobp->ioc_error ? 
M_IOCNAK : M_IOCACK; 1317 qreply(wq, mp); 1318 break; 1319 case TTYMUX_SETABORT: 1320 if (secpolicy_sys_config(iobp->ioc_cr, B_FALSE) != 0) { 1321 iobp->ioc_error = EPERM; 1322 DB_TYPE(mp) = M_IOCNAK; 1323 qreply(wq, mp); 1324 break; 1325 } 1326 /*FALLTHROUGH*/ 1327 case TTYMUX_GETABORT: 1328 case TTYMUX_GETABORTSTR: 1329 case TTYMUX_ASSOC: 1330 case TTYMUX_DISASSOC: 1331 case TTYMUX_SETCTL: 1332 case TTYMUX_GETLINK: 1333 case TTYMUX_CONSDEV: 1334 case TTYMUX_GETCTL: 1335 case TTYMUX_LIST: 1336 (void) sm_ioctl_cmd(uqi, mp); 1337 qreply(wq, mp); 1338 break; 1339 case I_LINK: 1340 case I_PLINK: 1341 case I_UNLINK: 1342 case I_PUNLINK: 1343 qwriter(wq, mp, sm_link_req, PERIM_OUTER); 1344 break; 1345 case TCSETSW: 1346 case TCSETSF: 1347 case TCSETAW: 1348 case TCSETAF: 1349 case TCSBRK: 1350 if (wq->q_first) { 1351 sm_dbg('A', ("sm_uwput: TCSET-> on srv q.\n")); 1352 /* keep message order intact */ 1353 (void) putq(wq, mp); 1354 break; 1355 } 1356 /*FALLTHROUGH*/ 1357 default: 1358 (void) sm_default_uwioctl(wq, mp, putq); 1359 break; 1360 } 1361 1362 break; /* M_IOCTL */ 1363 1364 } /* end switch on message type */ 1365 1366 return (0); 1367 } 1368 1369 /* 1370 * sm_uwsrv - service function for an upper STREAM write. 1371 * 'sm_uwsrv' takes a q parameter. The q parameter specifies the queue 1372 * which is to be serviced. This function reads the messages which are on 1373 * this service queue and passes them to the appropriate lower driver queue. 1374 */ 1375 static int 1376 sm_uwsrv(queue_t *q) 1377 { 1378 mblk_t *mp; 1379 sm_uqi_t *uqi = (sm_uqi_t *)(q->q_ptr); 1380 int msgtype; 1381 1382 ASSERT(q == SM_WQ(uqi)); 1383 1384 /* 1385 * Empty the queue unless explicitly stopped. 1386 */ 1387 while (mp = getq(q)) { 1388 msgtype = DB_TYPE(mp); 1389 1390 if (msgtype >= QPCTL && msgtype != M_IOCDATA) 1391 if (sm_hp_uwput(q, mp)) { 1392 sm_dbg('T', ("sm_uwsrv: flowcontrolled.\n")); 1393 break; /* indicates that the is disabled */ 1394 } 1395 else 1396 continue; 1397 1398 if (uqi->sm_flags & SM_STOPPED) { 1399 (void) putbq(q, mp); 1400 sm_dbg('T', ("sm_uwsrv: SM_STOPPED.\n")); 1401 break; 1402 } 1403 1404 /* 1405 * Read any ttycommon data that may 1406 * change (TS_SOFTCAR, CREAD, etc.). 1407 */ 1408 switch (DB_TYPE(mp)) { 1409 case M_IOCTL: 1410 case M_IOCDATA: 1411 if (sm_default_uwioctl(q, mp, putbq)) 1412 return (0); 1413 break; 1414 1415 default: 1416 if (sm_putqs(q, mp, putbq)) 1417 return (0); 1418 } 1419 } 1420 return (0); 1421 } 1422 1423 /* 1424 * Lower write side service routine used for backenabling upstream 1425 * flow control. 1426 */ 1427 static int 1428 sm_lwsrv(queue_t *q) 1429 { 1430 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1431 queue_t *uwq; 1432 1433 LOCK_UNIT(lqi); 1434 if (lqi->sm_uqflags & SM_UQVALID) { 1435 /* 1436 * It's safe to lock uqi since lwsrv runs asynchronously 1437 * with the upper write routines so this cannot be an 1438 * upper half thread. While holding the lqi lock and 1439 * if SM_UQVALID is set we are guaranteed that 1440 * lqi->sm_uqi will be valid. 1441 */ 1442 sm_dbg('I', ("sm_lwsrv: re-enabling upper queue.\n")); 1443 1444 uwq = SM_WQ(lqi->sm_uqi); 1445 UNLOCK_UNIT(lqi); 1446 qenable(uwq); 1447 } else { 1448 UNLOCK_UNIT(lqi); 1449 } 1450 return (0); 1451 } 1452 1453 /* 1454 * Upper read queue ioctl response handler for messages 1455 * passed from the lower half of the driver. 
1456 */ 1457 static int 1458 sm_uriocack(queue_t *rq, mblk_t *mp) 1459 { 1460 sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr; 1461 int err, flag; 1462 sm_iocdata_t *iodp; 1463 struct sm_iocinfo info; 1464 1465 if ((err = sm_getiocinfo(mp, &info)) != 0) { 1466 sm_dbg('I', ("Unknown ioctl response\n")); 1467 return (err); 1468 } 1469 1470 if (info.sm_id == uqi->sm_piocdata.sm_iocid) { 1471 iodp = &uqi->sm_piocdata; 1472 } else if (info.sm_id == uqi->sm_siocdata.sm_iocid) { 1473 iodp = &uqi->sm_siocdata; 1474 } else { 1475 sm_log("Unexpected ioctl response\n"); 1476 sm_dbg('I', ("Unexpected ioctl response (id %d)\n", 1477 info.sm_id)); 1478 1479 /* 1480 * If the response is sent up it will result in 1481 * duplicate ioctl responses. The ioctl has probably been 1482 * timed out by the stream head so dispose of the response 1483 * (since it has arrived too late. 1484 */ 1485 goto out; 1486 } 1487 1488 flag = SM_COPYIN; 1489 1490 switch (DB_TYPE(mp)) { 1491 case M_COPYOUT: 1492 flag = SM_COPYOUT; 1493 /*FALLTHRU*/ 1494 case M_COPYIN: 1495 if (iodp->sm_flags & flag) 1496 goto out; 1497 iodp->sm_flags |= flag; 1498 1499 break; 1500 case M_IOCACK: 1501 iodp->sm_ackcnt += 1; 1502 iodp->sm_acnt += 1; 1503 if (iodp->sm_policy == FIRSTACK) { 1504 if (iodp->sm_acnt == iodp->sm_nacks) 1505 iodp->sm_iocid = 0; 1506 if (iodp->sm_acnt == 1) 1507 iodp->sm_acked = 1; 1508 else 1509 goto out; 1510 } else { 1511 if (iodp->sm_acnt == iodp->sm_nacks) { 1512 iodp->sm_iocid = 0; 1513 iodp->sm_acked = 1; 1514 } else 1515 goto out; 1516 } 1517 break; 1518 case M_IOCNAK: 1519 iodp->sm_nakcnt += 1; 1520 iodp->sm_acnt += 1; 1521 if (iodp->sm_acnt == iodp->sm_nacks) { 1522 iodp->sm_iocid = 0; 1523 if (iodp->sm_acked == 0) { 1524 iodp->sm_acked = 1; 1525 break; 1526 } 1527 } 1528 goto out; 1529 default: 1530 goto out; 1531 } 1532 1533 /* 1534 * Merge the tty settings each of the associated lower streams. 1535 */ 1536 if (info.sm_data) 1537 (void) sm_update_ttyinfo(mp, uqi); 1538 1539 if (iodp == &uqi->sm_piocdata) { 1540 if (iodp->sm_iocid == 0) { 1541 uqi->sm_flags &= ~SM_IOCPENDING; 1542 } 1543 } else { 1544 sm_dbg('I', ("sm_uriocack: forwarding response for %d.\n", 1545 info.sm_id)); 1546 putnext(rq, mp); 1547 return (0); 1548 } 1549 out: 1550 sm_dbg('I', ("sm_uriocack: freeing response for %d.\n", info.sm_id)); 1551 freemsg(mp); 1552 return (0); 1553 } 1554 1555 /* 1556 * Transfer a message from the lower read side of the multiplexer onto 1557 * the associated upper stream. 
1558 */ 1559 static int 1560 sm_ursendup(queue_t *q, mblk_t *mp) 1561 { 1562 sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr; 1563 1564 if (!canputnext(q) && DB_TYPE(mp) < QPCTL) { 1565 sm_dbg('I', ("sm_ursendup: flow controlled.\n")); 1566 return (1); 1567 } 1568 1569 switch (DB_TYPE(mp)) { 1570 case M_COPYIN: 1571 case M_COPYOUT: 1572 case M_IOCACK: 1573 case M_IOCNAK: 1574 (void) sm_uriocack(q, mp); 1575 break; 1576 case M_HANGUP: 1577 if (sm_uwq_error(uqi)) { 1578 /* there are no usable lower q's */ 1579 uqi->sm_flags &= ~SM_CARON; 1580 putnext(q, mp); 1581 } else { 1582 /* there are still usable q's - don't send up */ 1583 freemsg(mp); 1584 } 1585 break; 1586 case M_ERROR: 1587 if (sm_uwq_error(uqi)) { 1588 /* there are no usable lower q's */ 1589 uqi->sm_flags &= ~SM_CARON; 1590 putnext(q, mp); 1591 } else if (*mp->b_rptr == NOERROR) { 1592 /* the error has cleared */ 1593 uqi->sm_flags &= ~ERROR_MODE; 1594 putnext(q, mp); 1595 } else { 1596 /* there are still usable q's - don't send up */ 1597 freemsg(mp); 1598 } 1599 break; 1600 case M_FLUSH: 1601 flushq(q, FLUSHDATA); 1602 putnext(q, mp); /* time to use FLUSHR_PEND flag */ 1603 break; 1604 case M_CTL: 1605 /* wrong direction - must have come from sm_close */ 1606 uqi->sm_flags |= SM_CLOSE; 1607 sm_dbg('I', ("sm_ursrv: had SM_CLOSE.\n")); 1608 freemsg(mp); 1609 break; 1610 case M_UNHANGUP: 1611 /* just pass them all up - they're harmless */ 1612 uqi->sm_flags |= SM_CARON; 1613 /* FALLTHROUGH */ 1614 default: 1615 putnext(q, mp); 1616 break; 1617 } 1618 1619 return (0); 1620 } 1621 1622 /* 1623 * sm_urput - put function for a lower STREAM read. 1624 */ 1625 static int 1626 sm_urput(queue_t *q, mblk_t *mp) 1627 { 1628 if (sm_ursendup(q, mp) != 0) 1629 (void) putq(q, mp); 1630 1631 return (0); 1632 } 1633 1634 /* 1635 * Upper read side service routine. 1636 * Read side needs to be fast so only check for duplicate M_IOCTL acks. 1637 */ 1638 static int 1639 sm_ursrv(queue_t *q) 1640 { 1641 sm_uqi_t *uqi = (sm_uqi_t *)q->q_ptr; 1642 mblk_t *mp; 1643 int flags = uqi->sm_flags; 1644 1645 while ((mp = getq(q))) { 1646 if (sm_ursendup(q, mp) != 0) { 1647 sm_dbg('I', ("sm_ursrv: flow controlled.\n")); 1648 (void) putbq(q, mp); 1649 uqi->sm_flags |= WANT_RENB; 1650 break; 1651 } 1652 } 1653 1654 /* 1655 * If the q service was called because it was no longer 1656 * flow controled then enable each of the driver queues. 1657 */ 1658 if ((flags & WANT_RENB) && !(uqi->sm_flags & WANT_RENB)) { 1659 sm_lqi_t *lqi; 1660 queue_t *drq; /* read q of linked driver */ 1661 1662 uqi->sm_flags &= ~WANT_RENB; 1663 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 1664 drq = SM_RQ(lqi)->q_next; 1665 if (drq && drq->q_first != 0) 1666 qenable(drq); 1667 } 1668 } 1669 1670 return (0); 1671 } 1672 1673 /* 1674 * Check a message sent from a linked device for abort requests and 1675 * for flow control. 
1676 */ 1677 static int 1678 sm_lrmsg_check(queue_t *q, mblk_t *mp) 1679 { 1680 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1681 1682 switch (DB_TYPE(mp)) { 1683 case M_DATA: 1684 LOCK_UNIT(lqi); 1685 /* 1686 * check for abort - only allow abort on I/O consoles 1687 * known to OBP - 1688 * fix it when we do polled io 1689 */ 1690 if ((lqi->sm_ioflag & (uint_t)FORINPUT) == 0) { 1691 freemsg(mp); 1692 UNLOCK_UNIT(lqi); 1693 return (1); 1694 } 1695 if ((lqi->sm_uqflags & SM_OBPCNDEV) && 1696 lqi->sm_ctrla_abort_on && 1697 abort_enable == KIOCABORTALTERNATE) { 1698 1699 uchar_t *rxc; 1700 boolean_t aborted = B_FALSE; 1701 1702 for (rxc = mp->b_rptr; 1703 rxc != mp->b_wptr; 1704 rxc++) 1705 1706 if (*rxc == *lqi->sm_nachar) { 1707 lqi->sm_nachar++; 1708 if (*lqi->sm_nachar == '\0') { 1709 abort_sequence_enter( 1710 (char *)NULL); 1711 lqi->sm_nachar = sm_ssp->sm_abs; 1712 aborted = B_TRUE; 1713 } 1714 } else 1715 lqi->sm_nachar = (*rxc == *sm_ssp-> 1716 sm_abs) ? 1717 sm_ssp-> 1718 sm_abs + 1 : 1719 sm_ssp->sm_abs; 1720 1721 if (aborted) { 1722 freemsg(mp); 1723 UNLOCK_UNIT(lqi); 1724 return (1); 1725 } 1726 } 1727 UNLOCK_UNIT(lqi); 1728 break; 1729 case M_BREAK: /* we'll eventually see this as a flush */ 1730 LOCK_UNIT(lqi); 1731 /* 1732 * Only allow abort on OBP devices. When polled I/O is 1733 * supported allow abort on any console device. 1734 * Parity errors are reported upstream as breaks so 1735 * ensure that there is no data in the message before 1736 * deciding whether to abort. 1737 */ 1738 if ((lqi->sm_uqflags & SM_OBPCNDEV) && /* console stream */ 1739 (mp->b_wptr - mp->b_rptr == 0 && 1740 msgdsize(mp) == 0)) { /* not due to parity */ 1741 1742 if (lqi->sm_break_abort_on && 1743 abort_enable != KIOCABORTALTERNATE) 1744 abort_sequence_enter((char *)NULL); 1745 1746 freemsg(mp); 1747 UNLOCK_UNIT(lqi); 1748 return (1); 1749 } else { 1750 UNLOCK_UNIT(lqi); 1751 } 1752 break; 1753 default: 1754 break; 1755 } 1756 1757 if (DB_TYPE(mp) >= QPCTL) 1758 return (0); 1759 1760 LOCK_UNIT(lqi); /* lock out the upper half */ 1761 if ((lqi->sm_uqflags & SM_UQVALID) && SM_RQ(lqi->sm_uqi)) { 1762 UNLOCK_UNIT(lqi); 1763 if (!canput(SM_RQ(lqi->sm_uqi))) { 1764 sm_dbg('I', ("sm_lrmsg_check: flow controlled.\n")); 1765 (void) putq(q, mp); 1766 return (1); 1767 } 1768 } else { 1769 UNLOCK_UNIT(lqi); 1770 } 1771 1772 return (0); 1773 } 1774 1775 /* 1776 * sm_sendup - deliver a message to the upper read side of the multiplexer 1777 */ 1778 static int 1779 sm_sendup(queue_t *q, mblk_t *mp) 1780 { 1781 sm_lqi_t *lqi = (sm_lqi_t *)q->q_ptr; 1782 1783 if (sm_ssp == NULL) { 1784 freemsg(mp); 1785 return (0); 1786 } 1787 1788 /* 1789 * Check for CD status change messages from driver. 1790 * (Remark: this is an se driver thread running at soft interupt 1791 * priority and the waiters are in user context). 1792 */ 1793 switch (DB_TYPE(mp)) { 1794 case M_DATA: 1795 case M_BREAK: /* we'll eventually see this as a flush */ 1796 break; 1797 1798 /* high priority messages */ 1799 case M_IOCACK: 1800 case M_IOCNAK: 1801 if ((lqi->sm_flags & SM_IOCPENDING) && lqi->sm_piocid == 1802 ((struct iocblk *)mp->b_rptr)->ioc_id) { 1803 freemsg(mp); 1804 lqi->sm_flags &= ~SM_IOCPENDING; 1805 sm_issue_ioctl(lqi); 1806 return (0); 1807 } 1808 break; 1809 case M_UNHANGUP: 1810 /* 1811 * If the driver can send an M_UNHANGUP it must be able to 1812 * accept messages from above (ie clear WERROR_MODE if set). 
1813 */ 1814 sm_dbg('E', ("lrput: M_UNHANGUP\n")); 1815 lqi->sm_mbits |= TIOCM_CD; 1816 lqi->sm_flags &= ~(WERROR_MODE|HANGUP_MODE); 1817 1818 break; 1819 1820 case M_HANGUP: 1821 sm_dbg('E', ("lrput: MHANGUP\n")); 1822 lqi->sm_mbits &= ~TIOCM_CD; 1823 lqi->sm_flags |= (WERROR_MODE|HANGUP_MODE); 1824 break; 1825 1826 case M_ERROR: 1827 1828 sm_dbg('E', ("lrput: MERROR\n")); 1829 /* 1830 * Tell the driver to flush rd/wr queue if its read/write error. 1831 * if its a read/write error flush rq/wq (type in first bytes). 1832 */ 1833 if ((mp->b_wptr - mp->b_rptr) == 2) { 1834 uchar_t rw = 0; 1835 1836 if (*mp->b_rptr == NOERROR) { 1837 /* not in error anymore */ 1838 lqi->sm_flags &= ~ERROR_MODE; 1839 lqi->sm_flags |= WANT_CD; 1840 } else { 1841 if (*mp->b_rptr != 0) { 1842 /* read error */ 1843 rw |= FLUSHR; 1844 lqi->sm_flags |= RERROR_MODE; 1845 } 1846 mp->b_rptr++; 1847 if (*mp->b_rptr != 0) { 1848 /* write error */ 1849 rw |= FLUSHW; 1850 lqi->sm_flags |= WERROR_MODE; 1851 } 1852 1853 mp->b_rptr--; 1854 /* has next driver done qprocsoff */ 1855 if (rw && OTHERQ(q)->q_next != NULL) { 1856 (void) putnextctl1(OTHERQ(q), M_FLUSH, 1857 rw); 1858 } 1859 } 1860 } else if (*mp->b_rptr != 0 && OTHERQ(q)->q_next != NULL) { 1861 sm_dbg('E', ("lrput: old style MERROR (?)\n")); 1862 1863 lqi->sm_flags |= (RERROR_MODE | WERROR_MODE); 1864 (void) putnextctl1(OTHERQ(q), M_FLUSH, FLUSHRW); 1865 } 1866 break; 1867 1868 case M_PCSIG: 1869 case M_SIG: 1870 break; 1871 case M_COPYOUT: 1872 case M_COPYIN: 1873 break; 1874 case M_FLUSH: 1875 /* flush the read queue and pass on up */ 1876 flushq(q, FLUSHDATA); 1877 break; 1878 default: 1879 break; 1880 } 1881 1882 LOCK_UNIT(lqi); /* lock out the upper half */ 1883 if (lqi->sm_uqflags & SM_UQVALID && SM_RQ(lqi->sm_uqi)) { 1884 UNLOCK_UNIT(lqi); 1885 (void) putq(SM_RQ(lqi->sm_uqi), mp); 1886 return (0); 1887 } else { 1888 sm_dbg('I', ("sm_sendup: uq not valid\n")); 1889 freemsg(mp); 1890 } 1891 UNLOCK_UNIT(lqi); 1892 1893 return (0); 1894 } 1895 1896 /* 1897 * sm_lrput - put function for a lower STREAM read. 1898 */ 1899 static int 1900 sm_lrput(queue_t *q, mblk_t *mp) 1901 { 1902 if (sm_lrmsg_check(q, mp) == 0) 1903 (void) sm_sendup(q, mp); 1904 return (0); 1905 } 1906 1907 /* 1908 * sm_lrsrv - service function for the lower read STREAM. 1909 */ 1910 static int 1911 sm_lrsrv(queue_t *q) 1912 { 1913 mblk_t *mp; 1914 1915 sm_dbg('I', ("sm_lrsrv: not controlled.\n")); 1916 while (mp = getq(q)) 1917 (void) sm_sendup(q, mp); 1918 1919 return (0); 1920 } 1921 1922 /* 1923 * Check whether a thread is allowed to open the requested device. 1924 */ 1925 static int 1926 sm_ok_to_open(sm_uqi_t *uqi, int protocol, cred_t *credp, int *abort_waiters) 1927 { 1928 int rval = 0; 1929 int proto; 1930 1931 *abort_waiters = 0; 1932 1933 switch (protocol) { 1934 case ASYNC_DEVICE: /* Standard async protocol */ 1935 if ((uqi->sm_protocol == NULL_PROTOCOL) || 1936 (uqi->sm_protocol == ASYN_PROTOCOL)) { 1937 /* 1938 * Lock out other incompatible protocol requests. 
1939 */ 1940 proto = ASYN_PROTOCOL; 1941 rval = 0; 1942 } else 1943 rval = EBUSY; 1944 break; 1945 1946 case OUTLINE: /* Outdial protocol */ 1947 if ((uqi->sm_protocol == NULL_PROTOCOL) || 1948 (uqi->sm_protocol == OUTD_PROTOCOL)) { 1949 proto = OUTD_PROTOCOL; 1950 rval = 0; 1951 } else if (uqi->sm_protocol == ASYN_PROTOCOL) { 1952 /* 1953 * check for dialout request on a line that is already 1954 * open for dial in: 1955 * kick off any thread that is waiting to fully open 1956 */ 1957 if (uqi->sm_flags & FULLY_OPEN) 1958 rval = EBUSY; 1959 else { 1960 proto = OUTD_PROTOCOL; 1961 *abort_waiters = 1; 1962 } 1963 } else 1964 rval = EBUSY; 1965 break; 1966 default: 1967 rval = ENOTSUP; 1968 } 1969 1970 if (rval == 0 && 1971 (uqi->sm_ttycommon->t_flags & TS_XCLUDE) && 1972 secpolicy_excl_open(credp) != 0) { 1973 1974 if (uqi->sm_flags & FULLY_OPEN) { 1975 rval = EBUSY; /* exclusive device already open */ 1976 } else { 1977 /* NB TS_XCLUDE cant be set during open so NOTREACHED */ 1978 /* force any waiters to yield TS_XCLUDE */ 1979 *abort_waiters = 1; 1980 } 1981 } 1982 1983 if (rval == 0) 1984 uqi->sm_protocol = proto; 1985 1986 sm_dbg('A', ("ok_to_open (0x%p, %d) proto=%d rval %d (wabort=%d)", 1987 uqi, protocol, uqi->sm_protocol, rval, *abort_waiters)); 1988 1989 return (rval); 1990 } 1991 1992 /* wait for memory to become available whilst performing a qwait */ 1993 /*ARGSUSED*/ 1994 static void dummy_callback(void *arg) 1995 {} 1996 1997 /* ARGSUSED */ 1998 static int 1999 sm_dump_msg(queue_t *q, mblk_t *mp) 2000 { 2001 freemsg(mp); 2002 return (0); 2003 } 2004 2005 /* 2006 * Wait for a message to arrive - must be called with exclusive 2007 * access at the outer perimiter. 2008 */ 2009 static int 2010 sm_qwait_sig(sm_uqi_t *uqi, queue_t *q) 2011 { 2012 int err; 2013 2014 sm_dbg('C', ("sm_qwait_sig: waiting.\n")); 2015 2016 uqi->sm_waitq = q; 2017 uqi->sm_nwaiters++; /* required by the close routine */ 2018 err = qwait_sig(q); 2019 if (--uqi->sm_nwaiters == 0) 2020 uqi->sm_waitq = 0; 2021 2022 if (err == 0) 2023 err = EINTR; 2024 else if (q->q_ptr == 0) /* can happen if there are multiple waiters */ 2025 err = -1; 2026 else if (uqi->sm_flags & SM_CLOSE) { 2027 uqi->sm_flags &= ~SM_CLOSE; 2028 err = 1; /* a different protocol has closed its stream */ 2029 } 2030 else 2031 err = 0; /* was worth waiting for */ 2032 2033 sm_dbg('C', ("sm_qwait_sig: rval %d\n", err)); 2034 return (err); 2035 } 2036 2037 /* 2038 * Defer the opening of one the drivers devices until the state of each 2039 * associated lower stream is known. 
2040 */ 2041 static int 2042 sm_defer_open(sm_uqi_t *uqi, queue_t *q) 2043 { 2044 uint_t cmdflags = WANT_CDSTAT; 2045 int err, nqs; 2046 2047 while ((nqs = sm_good_qs(uqi)) == 0) { 2048 sm_dbg('C', ("sm_defer_open: no good qs\n")); 2049 if (err = sm_qwait_sig(uqi, q)) 2050 return (err); 2051 } 2052 2053 while ((uqi->sm_flags & SM_CARON) == 0) { 2054 int iocmd; 2055 mblk_t *pioc; 2056 2057 sm_dbg('C', ("sm_defer_open: flags 0x%x cmdflags 0x%x\n", 2058 uqi->sm_flags, cmdflags)); 2059 if (cmdflags == 0) { 2060 if (err = sm_qwait_sig(uqi, q)) 2061 return (err); 2062 continue; /* waiting for an M_UNHANGUP */ 2063 } else if (cmdflags & WANT_SC) { 2064 cmdflags &= ~WANT_SC; 2065 iocmd = TIOCGSOFTCAR; 2066 } else if (cmdflags & WANT_CD) { 2067 cmdflags &= ~WANT_CD; 2068 iocmd = TIOCMGET; 2069 } else if (cmdflags & WANT_CL) { 2070 cmdflags &= ~WANT_CL; 2071 iocmd = TCGETS; 2072 } 2073 2074 if (uqi->sm_piocdata.sm_iocid == 0) { 2075 while ((pioc = mkiocb(iocmd)) == 0) { 2076 bufcall_id_t id = 2077 qbufcall(q, sizeof (struct iocblk), 2078 BPRI_MED, dummy_callback, 0); 2079 if (err = sm_qwait_sig(uqi, q)) { 2080 /* wait for the bufcall */ 2081 qunbufcall(q, id); 2082 return (err); 2083 } 2084 qunbufcall(q, id); 2085 } 2086 2087 uqi->sm_flags |= SM_IOCPENDING; 2088 2089 uqi->sm_piocdata.sm_iocid = 2090 ((struct iocblk *)pioc->b_rptr)->ioc_id; 2091 uqi->sm_piocdata.sm_acked = 0; 2092 uqi->sm_piocdata.sm_nacks = nqs; 2093 uqi->sm_piocdata.sm_acnt = 0; 2094 uqi->sm_piocdata.sm_ackcnt = uqi-> 2095 sm_piocdata.sm_nakcnt = 0; 2096 uqi->sm_piocdata.sm_policy = uqi->sm_policy; 2097 uqi->sm_piocdata.sm_flags = SM_INTERNALIOC; 2098 if (sm_putqs(WR(q), pioc, sm_dump_msg) != 0) { 2099 uqi->sm_piocdata.sm_iocid = 0; 2100 sm_log("sm_defer_open: bad putqs\n"); 2101 return (-1); 2102 } 2103 } 2104 2105 sm_dbg('C', ("sm_defer_open: flags 0x%x\n", uqi->sm_flags)); 2106 while ((uqi->sm_flags & SM_CARON) == 0 && 2107 (uqi->sm_flags & SM_IOCPENDING) != 0) 2108 if (err = sm_qwait_sig(uqi, q)) 2109 return (err); 2110 2111 sm_dbg('C', ("defer_open: uq flags 0x%x.\n", uqi->sm_flags)); 2112 } 2113 sm_dbg('C', ("defer_open: return 0.\n")); 2114 return (0); 2115 } 2116 2117 static int 2118 sm_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp) 2119 { 2120 int ftstat; 2121 int unit; 2122 int protocol; 2123 sm_uqi_t *uqi; 2124 int abort_waiters; 2125 2126 if (sm_ssp == NULL) 2127 return (ENXIO); 2128 /* 2129 * sflag = 0 => streams device. 
2130 */ 2131 if (sflag != 0 || DEV_TO_UNIT(*devp) >= NLUNITS) { 2132 sm_dbg('C', ("open: sflag=%d or bad dev_t.\n", sflag)); 2133 return (ENXIO); 2134 } 2135 2136 unit = DEV_TO_UNIT(*devp); 2137 protocol = DEV_TO_PROTOBITS(*devp); 2138 2139 uqi = get_uqi(sm_ssp, unit); 2140 2141 sm_dbg('C', ("open(0x%p, %d, 0x%x) :- unit=%d, proto=%d, uqi=0x%p\n", 2142 rq, *devp, flag, unit, protocol, uqi)); 2143 2144 if (uqi == 0) 2145 return (ENXIO); 2146 2147 if (sm_refuse_opens && unit > smctlunit && uqi->sm_nlqs == 0) 2148 return (ENXIO); 2149 2150 if (uqi->sm_flags & EXCL_OPEN && (flag & FEXCL)) { 2151 return (EBUSY); /* device in use */ 2152 } 2153 2154 if ((flag & FEXCL)) { 2155 if (secpolicy_excl_open(credp) != 0) 2156 return (EPERM); 2157 2158 if ((uqi->sm_flags & FULLY_OPEN) || uqi->sm_nwaiters > 0) 2159 return (EBUSY); /* device in use */ 2160 2161 uqi->sm_flags |= EXCL_OPEN; 2162 } 2163 2164 if (uqi->sm_protocol == NULL_PROTOCOL) { 2165 struct termios *termiosp; 2166 int len; 2167 2168 if (ddi_getlongprop(DDI_DEV_T_ANY, ddi_root_node(), 2169 DDI_PROP_NOTPROM, "ttymodes", (caddr_t)&termiosp, &len) 2170 == DDI_PROP_SUCCESS && 2171 (len == sizeof (struct termios))) { 2172 2173 sm_dbg('C', ("open: c_cflag=0x%x\n", 2174 termiosp->c_cflag)); 2175 2176 uqi->sm_ttycommon->t_iflag = termiosp->c_iflag; 2177 uqi->sm_ttycommon->t_cflag = termiosp->c_cflag; 2178 uqi->sm_ttycommon->t_stopc = termiosp->c_cc[VSTOP]; 2179 uqi->sm_ttycommon->t_startc = termiosp->c_cc[VSTART]; 2180 2181 /* 2182 * IGNBRK,BRKINT,INPCK,IXON,IXANY,IXOFF - drivers 2183 * PARMRK,IGNPAR,ISTRIP - how to report parity 2184 * INLCR,IGNCR,ICRNL,IUCLC - ldterm (sophisticated I/O) 2185 * IXON, IXANY, IXOFF - flow control input 2186 * CBAUD,CSIZE,CS5-8,CSTOPB,PARENB,PARODD,HUPCL, 2187 * RCV1EN,XMT1EN,LOBLK,XCLUDE,CRTSXOFF,CRTSCTS, 2188 * CIBAUD,PAREXT,CBAUDEXT,CIBAUDEXT,CREAD,CLOCAL 2189 */ 2190 2191 kmem_free(termiosp, len); 2192 } 2193 else 2194 bzero((caddr_t)uqi->sm_ttycommon, 2195 sizeof (uqi->sm_ttycommon)); 2196 2197 if (*devp == rconsdev) { 2198 uqi->sm_cmask = sm_cmask; 2199 uqi->sm_ttycommon->t_flags |= TS_SOFTCAR; 2200 } else { 2201 uqi->sm_ttycommon->t_flags &= ~TS_SOFTCAR; 2202 } 2203 2204 /* 2205 * Clear the default CLOCAL and TS_SOFTCAR flags since 2206 * they must correspond to the settings on the real devices. 2207 */ 2208 2209 uqi->sm_ttycommon->t_cflag &= ~(uqi->sm_cmask|CLOCAL); 2210 uqi->sm_mbits = 0; 2211 uqi->sm_policy = FIRSTACK; 2212 if (unit == 0 && sm_ssp->sm_ms == 0) 2213 sm_ssp->sm_ms = (sm_mux_state_t *) 2214 space_fetch(TTYMUXPTR); 2215 if (sm_ssp->sm_ms) { 2216 if (sm_ssp->sm_ms->sm_cons_stdin.sm_dev == *devp || 2217 sm_ssp->sm_ms->sm_cons_stdout.sm_dev == *devp) 2218 sm_ssp->sm_lconsole = uqi; 2219 } 2220 } 2221 2222 /* 2223 * Does this thread need to wait? 
2224 */ 2225 2226 sm_dbg('C', ("sm_open: %d %d 0x%p 0x%x\n", 2227 !(flag & (FNDELAY|FNONBLOCK)), !(protocol == OUTLINE), uqi->sm_lqs, 2228 uqi->sm_flags)); 2229 2230 tryopen: 2231 2232 abort_waiters = 0; 2233 if (ftstat = sm_ok_to_open(uqi, protocol, credp, &abort_waiters)) { 2234 sm_dbg('C', ("open failed stat=%d.\n", ftstat)); 2235 2236 if ((uqi->sm_flags & FULLY_OPEN) == 0 && uqi->sm_nwaiters == 0) 2237 uqi->sm_protocol = NULL_PROTOCOL; 2238 if (flag & FEXCL) 2239 uqi->sm_flags &= ~EXCL_OPEN; 2240 return (ftstat); 2241 } 2242 2243 if (abort_waiters) { 2244 uqi->sm_dev = *devp; 2245 /* different device wants to use the unit */ 2246 SM_RQ(uqi) = rq; 2247 SM_WQ(uqi) = WR(rq); 2248 } 2249 if (rq->q_ptr == 0) { 2250 sm_lqi_t *lqi; 2251 2252 uqi->sm_dev = *devp; 2253 rq->q_ptr = WR(rq)->q_ptr = uqi; 2254 SM_RQ(uqi) = rq; 2255 SM_WQ(uqi) = WR(rq); 2256 qprocson(rq); 2257 for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) { 2258 LOCK_UNIT(lqi); 2259 lqi->sm_uqflags |= SM_UQVALID; 2260 UNLOCK_UNIT(lqi); 2261 } 2262 2263 sm_dbg('C', ("sm_open: SM_UQVALID set on lqs.\n")); 2264 } 2265 2266 if (*devp != rconsdev && BLOCKING(uqi, protocol, flag)) { 2267 2268 uqi->sm_flags |= WANT_CDSTAT; 2269 2270 do { 2271 /* 2272 * Wait for notifications of changes in the CLOCAL 2273 * and TS_SOFTCAR flags and a TIOCM_CD flag of a 2274 * TIOCMGET request (come in on the write side queue). 2275 */ 2276 2277 if ((ftstat = sm_defer_open(uqi, rq)) != EINTR) { 2278 if (ftstat) { 2279 goto tryopen; 2280 } else { 2281 continue; 2282 } 2283 } 2284 2285 if (uqi->sm_nwaiters == 0) { /* clean up */ 2286 /* 2287 * only opens on an asynchronous 2288 * protocols reach here so checking 2289 * nwaiters == 0 is sufficient to 2290 * ensure that no other thread 2291 * is waiting on this logical unit 2292 */ 2293 if ((uqi->sm_flags & FULLY_OPEN) == 0) { 2294 2295 sm_lqi_t *lqi; 2296 2297 uqi->sm_dev = NODEV; 2298 sm_dbg('C', ("sm_open FULLY_OPEN=0\n")); 2299 for (lqi = uqi->sm_lqs; lqi != 0; 2300 lqi = lqi->sm_nlqi) { 2301 LOCK_UNIT(lqi); 2302 lqi->sm_uqflags &= ~SM_UQVALID; 2303 UNLOCK_UNIT(lqi); 2304 } 2305 2306 qprocsoff(rq); 2307 rq->q_ptr = WR(rq)->q_ptr = 0; 2308 SM_RQ(uqi) = 0; 2309 SM_WQ(uqi) = 0; 2310 } 2311 } 2312 if ((uqi->sm_flags & FULLY_OPEN) == 0 && 2313 uqi->sm_nwaiters == 0) 2314 uqi->sm_protocol = NULL_PROTOCOL; 2315 if (flag & FEXCL) 2316 uqi->sm_flags &= ~EXCL_OPEN; 2317 sm_dbg('C', ("sm_open: done (ret %d).\n", ftstat)); 2318 return (ftstat); 2319 } while (BLOCKING(uqi, protocol, flag)); 2320 } 2321 2322 uqi->sm_flags |= FULLY_OPEN; 2323 2324 sm_dbg('C', ("sm_open done (ret %d).\n", ftstat)); 2325 return (ftstat); 2326 } 2327 2328 /* 2329 * Multiplexer device close routine. 2330 */ 2331 /*ARGSUSED*/ 2332 static int 2333 sm_close(queue_t *rq, int flag, cred_t *credp) 2334 { 2335 sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr; 2336 sm_lqi_t *lqi; 2337 2338 if (sm_ssp == NULL) 2339 return (ENXIO); 2340 2341 if (uqi == NULL) { 2342 sm_dbg('C', ("close: WARN:- q 0x%p already closed.\n", rq)); 2343 return (ENXIO); 2344 } 2345 2346 sm_dbg('C', ("close: uqi=0x%p unit=%d q=0x%p)\n", uqi, uqi->sm_lunit, 2347 rq)); 2348 2349 if (SM_RQ(uqi) != rq) 2350 sm_dbg('C', ("sm_close: rq != current uqi queue\n")); 2351 2352 if (uqi->sm_ttybid) { 2353 qunbufcall(SM_RQ(uqi), uqi->sm_ttybid); 2354 uqi->sm_ttybid = 0; 2355 } 2356 2357 /* 2358 * Tell all the linked queues that the upper queue has gone 2359 * Note close will never get called on a stream while there is a 2360 * thread blocked trying to open the same stream. 
/*
 * Multiplexer device close routine.
 */
/*ARGSUSED*/
static int
sm_close(queue_t *rq, int flag, cred_t *credp)
{
	sm_uqi_t *uqi = (sm_uqi_t *)rq->q_ptr;
	sm_lqi_t *lqi;

	if (sm_ssp == NULL)
		return (ENXIO);

	if (uqi == NULL) {
		sm_dbg('C', ("close: WARN:- q 0x%p already closed.\n", rq));
		return (ENXIO);
	}

	sm_dbg('C', ("close: uqi=0x%p unit=%d q=0x%p)\n", uqi, uqi->sm_lunit,
	    rq));

	if (SM_RQ(uqi) != rq)
		sm_dbg('C', ("sm_close: rq != current uqi queue\n"));

	if (uqi->sm_ttybid) {
		qunbufcall(SM_RQ(uqi), uqi->sm_ttybid);
		uqi->sm_ttybid = 0;
	}

	/*
	 * Tell all the linked queues that the upper queue has gone.
	 * Note that close will never get called on a stream while there is
	 * a thread blocked trying to open the same stream.
	 * If there is a blocked open on a different stream but on
	 * the same logical unit it will reset the lower queue flags.
	 */
	for (lqi = uqi->sm_lqs; lqi != 0; lqi = lqi->sm_nlqi) {
		LOCK_UNIT(lqi);
		lqi->sm_uqflags &= ~SM_UQVALID;
		UNLOCK_UNIT(lqi);
	}

	/*
	 * Turn off the STREAMS queue processing for this queue.
	 */
	qprocsoff(rq);

	/*
	 * Similarly, we will never get here if there is a thread trying to
	 * open this stream.
	 */
	LOCK_UNIT(uqi);
	if (uqi->sm_waitq == 0)
		uqi->sm_flags = (uqi->sm_flags & SM_OBPCNDEV) ? SM_OBPCNDEV :
		    0U;

	uqi->sm_dev = NODEV;
	uqi->sm_protocol = NULL_PROTOCOL;
	ttycommon_close(uqi->sm_ttycommon);
	/* it just frees any pending ioctl */

	uqi->sm_ttycommon->t_cflag = 0;
	uqi->sm_ttycommon->t_flags = 0;

	/*
	 * Reset the queue pointers to NULL.
	 * If a thread is qwaiting in the open routine it will recheck
	 * the q_ptr.
	 */
	rq->q_ptr = NULL;
	WR(rq)->q_ptr = NULL;
	UNLOCK_UNIT(uqi);

	if (sm_ssp->sm_lconsole == uqi) {
		/* this will never be the outdial device closing */
		sm_ssp->sm_lconsole = 0;
	}
	/*
	 * If there is another thread waiting for this close then unblock
	 * the thread by putting a message on its read queue.
	 */
	if (uqi->sm_waitq) {
		sm_dbg('C', ("close(0x%p): doing putctl on 0x%p\n",
		    rq, uqi->sm_waitq));
		if (rq == uqi->sm_waitq)
			sm_log("close: waitq and closeq are same q\n");
		(void) putctl(uqi->sm_waitq, M_CTL);
	}

	uqi->sm_flags &= ~(EXCL_OPEN | FULLY_OPEN);
	sm_dbg('C', ("close: returning ok.\n"));
	return (0);
}

/*
 * Initialise the software abort sequence for use when one of the
 * driver's nodes provides the system console.
 */
static void
sm_set_abort()
{
	char ds[4] = { '\r', '~', CNTRL('b'), '\0' };
	char as[SM_MAX_ABSLEN];
	int len = SM_MAX_ABSLEN;

	if (ddi_prop_op(DDI_DEV_T_ANY, sm_ssp->sm_dip, PROP_LEN_AND_VAL_BUF, 0,
	    "abort-str", as, &len) != DDI_PROP_SUCCESS ||
	    (len = strlen(as)) < SM_MIN_ABSLEN) {
		(void) strcpy(as, ds);
		len = strlen(as);
	} else {
		char *s;
		int i;

		for (s = as, i = 0; i < len-1; i++, s++) {
			if (as[i] == '^' && as[i+1] >= 'a' && as[i+1] <= 'z') {
				*s = as[i+1] - 'a' + 1;
				i++;
			} else {
				*s = as[i];
			}
		}
		*s++ = as[i];
		*s = '\0';
		len = strlen(as);
	}

	if (len < SM_MIN_ABSLEN)
		(void) strcpy(sm_ssp->sm_abs, ds);
	else
		(void) strcpy(sm_ssp->sm_abs, as);
}
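/*
 * Worked example (illustrative): with the built-in default, sm_abs holds the
 * three bytes CR, '~', CTRL-B.  If an "abort-str" property of "\r~^b" were
 * supplied instead, the translation loop above would collapse the two
 * characters '^' and 'b' into the single byte 'b' - 'a' + 1 == 0x02, i.e.
 * CTRL-B, yielding the same sequence.  The property value here is an
 * assumption chosen purely for the example.
 */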
/*
 *
 * sm_attach - initialisation routine per driver instance.
 */
static int
sm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int unit;
	char name[32];
	sm_uqi_t *uqi;
	sm_lqi_t *lqip;

	/*
	 * Is this an attach?
	 */
	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	/*
	 * Validate the instance number (sm is a single instance driver).
	 */
	if (sm_ssp) {	/* only one instance allowed */
		return (DDI_FAILURE);
	}

	sm_instance = ddi_get_instance(dip);

	/*
	 * Create the default minor node which will become the console
	 * (create it with three different names):
	 *	con which appears in the /dev filesystem;
	 *	input which matches the prom /multiplexer:input node;
	 *	output which matches the prom /multiplexer:output node.
	 * Also create a minor node for control operations.
	 */
	if (ddi_create_minor_node(dip, "con", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "input", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "output", S_IFCHR, 0,
	    DDI_PSEUDO, 0) != DDI_SUCCESS ||
	    ddi_create_minor_node(dip, "ctl", S_IFCHR, 1,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {

		cmn_err(CE_WARN, "sm_attach: create minors failed.\n");
		ddi_remove_minor_node(dip, NULL);
		return (DDI_FAILURE);
	}

	smctlunit = 1;

	/*
	 * Allocate private state for this instance.
	 */
	sm_ssp = (sm_ss_t *)kmem_zalloc(sizeof (sm_ss_t), KM_SLEEP);

	/*
	 * Initialise per instance data.
	 */
	sm_ssp->sm_dip = dip;

	/*
	 * Get the required debug level.
	 */
	sm_ssp->sm_trflag = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-trlv", sm_default_trflag);

	sm_max_units = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-max-units", sm_max_units);
	sm_minor_cnt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-minor-cnt", 0);

	sm_refuse_opens = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-refuse-opens", sm_refuse_opens);

	sm_ssp->sm_ctrla_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-ctrla-abort-on", 1);
	sm_ssp->sm_break_abort_on = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "sm-break-abort-on", 0);

	sm_set_abort();
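	/*
	 * Configuration sketch (hypothetical ttymux.conf fragment): the
	 * properties read above could be supplied from the driver's
	 * configuration file, for example:
	 *
	 *	sm-trlv=0;
	 *	sm-max-units=6;
	 *	sm-minor-cnt=2;
	 *	sm-refuse-opens=0;
	 *	sm-ctrla-abort-on=1;
	 *	sm-break-abort-on=0;
	 *	abort-str="\r~^b";
	 *
	 * The names match the ddi_prop_get_int() calls above; the values are
	 * illustrative only, not recommendations.
	 */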
	sm_ssp->sm_lqs = (sm_lqi_t *)kmem_zalloc(sizeof (sm_lqi_t) * MAX_LQS,
	    KM_SLEEP);
	sm_ssp->sm_uqs = (sm_uqi_t *)kmem_zalloc(sizeof (sm_uqi_t) * NLUNITS,
	    KM_SLEEP);

	for (unit = 2; unit < NLUNITS && unit < sm_minor_cnt + 2; unit++) {

		if (snprintf(name, sizeof (name), "sm%c", 'a' + unit-2) >
		    sizeof (name)) {
			cmn_err(CE_WARN,
			    "sm_attach: create device for unit %d failed.\n",
			    unit);
		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
		    unit, DDI_NT_SERIAL, NULL) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}

		if (snprintf(name, sizeof (name), "sm%c,cu", 'a' + unit-2) >
		    sizeof (name)) {
			cmn_err(CE_WARN,
			    "sm_attach: create cu device for unit %d failed.\n",
			    unit);
			continue;
		} else if (ddi_create_minor_node(dip, name, S_IFCHR,
		    unit|OUTLINE, DDI_NT_SERIAL_DO, NULL) != DDI_SUCCESS) {
			ddi_remove_minor_node(dip, NULL);
			return (DDI_FAILURE);
		}
	}

	for (unit = 0; unit < NLUNITS; unit++) {

		uqi = get_uqi(sm_ssp, unit);
		uqi->sm_lqs = 0;
		uqi->sm_dev = NODEV;
		uqi->sm_nlqs = 0;
		uqi->sm_lunit = unit;
		uqi->sm_protocol = NULL_PROTOCOL;
		mutex_init(uqi->sm_umutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(uqi->sm_ucv, NULL, CV_DRIVER, NULL);
		mutex_init(&uqi->sm_ttycommon->t_excl, NULL,
		    MUTEX_DRIVER, NULL);
	}

	for (unit = 0; unit < MAX_LQS; unit++) {
		lqip = get_lqi(sm_ssp, unit);
		lqip->sm_unit = unit;
		lqip->sm_hadkadbchar = 0;
		lqip->sm_nachar = sm_ssp->sm_abs;
		lqip->sm_ioflag = FORIO;
		lqip->sm_ctrla_abort_on = sm_ssp->sm_ctrla_abort_on;
		lqip->sm_break_abort_on = sm_ssp->sm_break_abort_on;
		mutex_init(lqip->sm_umutex, NULL, MUTEX_DRIVER, NULL);
		cv_init(lqip->sm_ucv, NULL, CV_DRIVER, NULL);
		mutex_init(&lqip->sm_ttycommon->t_excl, NULL,
		    MUTEX_DRIVER, NULL);
	}

	return (DDI_SUCCESS);
}

/*
 *
 * sm_detach - detach routine per driver instance.
 */
static int
sm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	sm_uqi_t *lu;
	sm_lqi_t *pu;
	int unit;

	/*
	 * Is this a detach request for instance 0 (single instance driver)?
	 */
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	if (sm_ssp == NULL)
		return (DDI_FAILURE);

	sm_dbg('V', ("detach ..."));

	/*
	 * Check that all the upper and lower queues are closed.
	 */

	for (unit = 0; unit < NLUNITS; unit++) {
		lu = &sm_ssp->sm_uqs[unit];
		if (lu && lu->sm_protocol != NULL_PROTOCOL) {
			sm_dbg('V', ("detach: upper unit still open.\n"));
			return (DDI_FAILURE);
		}
	}
	for (unit = 0; unit < MAX_LQS; unit++) {
		pu = &sm_ssp->sm_lqs[unit];
		if (pu && pu->sm_linkid != 0) {
			sm_dbg('V', ("detach: lower unit still linked (%d)\n",
			    pu->sm_linkid));
			return (DDI_FAILURE);
		}
	}

	for (unit = 0; unit < NLUNITS; unit++) {
		lu = &sm_ssp->sm_uqs[unit];
		mutex_destroy(lu->sm_umutex);
		cv_destroy(lu->sm_ucv);
		mutex_destroy(&lu->sm_ttycommon->t_excl);
	}
	for (unit = 0; unit < MAX_LQS; unit++) {
		pu = &sm_ssp->sm_lqs[unit];
		mutex_destroy(pu->sm_umutex);
		cv_destroy(pu->sm_ucv);
		mutex_destroy(&pu->sm_ttycommon->t_excl);
	}

	/*
	 * Tidy up per instance state.
	 */
	kmem_free(sm_ssp->sm_lqs, sizeof (sm_lqi_t) * MAX_LQS);
	kmem_free(sm_ssp->sm_uqs, sizeof (sm_uqi_t) * NLUNITS);
	kmem_free(sm_ssp, sizeof (sm_ss_t));

	sm_ssp = 0;

	/*
	 * Remove all of the devices created in attach.
	 */
	ddi_remove_minor_node(dip, NULL);

	return (DDI_SUCCESS);
}

/*
 * SECTION
 * Driver interface to the OS.
 */

/*
 * The driver is responsible for managing the mapping between the file system
 * device types (major/minor pairs) and the corresponding instance of the
 * driver or device information pointer (dip).
 * sm_info - return the instance or dip corresponding to the dev_t.
 */
/*ARGSUSED*/
static int
sm_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int res = DDI_SUCCESS;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if (sm_ssp == NULL)
			res = DDI_FAILURE;
		else
			*result = (void *)sm_ssp->sm_dip;
		break;

	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;	/* single instance driver */
		break;

	default:
		res = DDI_FAILURE;
		break;
	}

	return (res);
}

/*
 * End of driver implementation
 */

/*
 * Loadable module interface to the kernel
 */

/*
 * Firstly the Streams specific interface
 */

/*
 * Solaris driver/STREAM initialisation structures.
 */
static struct module_info uinfo =
{
	SM_MOD_ID,
	TTYMUX_DRVNAME,
	0,		/* min packet size */
	INFPSZ,		/* max packet size */
	2048,		/* high water mark */
	256		/* low water mark */
};
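/*
 * Note (illustrative): the 2048/256 byte water marks above apply to the
 * upper queues, where standard STREAMS flow control is in effect.  A minimal
 * sketch of that pattern (not a quote of this driver's service routines) is:
 *
 *	if (canputnext(q))
 *		putnext(q, mp);
 *	else
 *		(void) putq(q, mp);
 *
 * Writers are back-pressured once roughly the high water mark of data is
 * queued, and are re-enabled once the queue drains below the low water mark.
 */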
/*
 * Use zero water marks because the lower queues are used only for
 * flow control.
 */
static struct module_info linfo =
{
	SM_MOD_ID,
	TTYMUX_DRVNAME,
	0,		/* min packet size */
	INFPSZ,		/* max packet size */
	0,		/* high water mark */
	0		/* low water mark */
};


/*
 * Solaris upper read STREAM initialisation structure.
 */
static struct qinit urinit =
{
	sm_urput,	/* put */
	sm_ursrv,	/* service */
	sm_open,	/* open */
	sm_close,	/* close */
	NULL,		/* admin */
	&uinfo,		/* module info */
	NULL		/* stats */
};

/*
 * Solaris upper write STREAM initialisation structure.
 */
static struct qinit uwinit =
{
	sm_uwput,
	sm_uwsrv,
	NULL,
	NULL,
	NULL,
	&uinfo,
	NULL
};

/*
 * Solaris lower read STREAM initialisation structure.
 */
static struct qinit lrinit =
{
	sm_lrput,
	sm_lrsrv,
	NULL,
	NULL,
	NULL,
	&linfo,
	NULL
};

/*
 * Solaris lower write STREAM initialisation structure.
 */
static struct qinit lwinit =
{
	putq,
	sm_lwsrv,
	NULL,
	NULL,
	NULL,
	&linfo,
	NULL
};

/*
 * Multiplexing STREAM structure.
 */
struct streamtab sm_streamtab =
{
	&urinit,
	&uwinit,
	&lrinit,
	&lwinit
};

/*
 * Driver operations structure (struct cb_ops) and
 * driver dynamic loading functions (struct dev_ops).
 */

/*
 * Fold the STREAMS interface to the kernel into the driver interface
 * to the OS.
 */

DDI_DEFINE_STREAM_OPS(sm_ops,
	nulldev, nulldev,
	sm_attach, sm_detach, nodev,
	sm_info, (D_NEW | D_MTQPAIR | D_MTOUTPERIM | D_MTOCEXCL | D_MP),
	&sm_streamtab);

/*
 * Driver module information.
 */
extern struct mod_ops mod_driverops;
static struct modldrv modldrv =
{
	&mod_driverops,
	"serial mux driver %I%",
	&sm_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	&modldrv,
	NULL
};

/*
 * Define the body of our interface to the OS.
 */

/*
 * '_init' is called by Solaris to initialise any driver
 * specific state and to install the driver.
 */
int
_init(void)
{
	return (mod_install(&modlinkage));
}

/*
 * _info - return this driver's interface to the kernel.
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * _fini - the OS is finished with the services provided by the driver.
 * Remove ourself and then remove any footprint that remains.
 */
int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
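/*
 * Usage sketch (user level, illustrative only): _init(), _info() and _fini()
 * are invoked through the normal module machinery, so a loaded instance can
 * be inspected and unloaded with the standard tools, e.g.
 *
 *	# modinfo | grep ttymux
 *	# modunload -i <id>
 *
 * where <id> is whatever module id modinfo reports; the commands are the
 * stock Solaris utilities, not part of this driver.
 */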