/*
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 1983 Regents of the University of California.
 * All rights reserved. The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 */

/*
 * PTY - Stream "pseudo-tty" device.
 * This is the "subsidiary" side.
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/filio.h>
#include <sys/ioccom.h>
#include <sys/termios.h>
#include <sys/termio.h>
#include <sys/ttold.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/tty.h>
#include <sys/user.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/errno.h>
#include <sys/strsubr.h>
#include <sys/poll.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/procset.h>
#include <sys/cred.h>
#include <sys/ptyvar.h>
#include <sys/suntty.h>
#include <sys/stat.h>
#include <sys/policy.h>

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

extern void gsignal(int pid, int sig);

extern int npty;                /* number of pseudo-ttys configured in */
extern struct pty *pty_softc;

extern struct pollhead ptcph;   /* poll head for ptcpoll() use */

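/*
 * IFLAGS supplies the initial c_cflag bits for a newly opened subsidiary:
 * 7-bit characters (CS7), receiver enabled (CREAD), and parity generation
 * enabled (PARENB).  ptslopen() ORs these in together with 38400 baud in
 * both directions.
 */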
#define IFLAGS  (CS7|CREAD|PARENB)


/*
 * Most of these should be "void", but the people who defined the "streams"
 * data structure for S5 didn't understand data types.
 */

/*
 * Subsidiary side. This is a streams device.
 */
static int ptslopen(queue_t *, dev_t *, int flag, int, cred_t *);
static int ptslclose(queue_t *, int, cred_t *);
static int ptslrserv(queue_t *);

/*
 * To save instructions, since STREAMS ignores the return value
 * from this function, it is defined as void here. Kind of icky, but...
 */

static int ptslwput(queue_t *q, mblk_t *mp);

static struct module_info ptslm_info = {
        0,
        "ptys",
        0,
        INFPSZ,
        2048,
        200
};

static struct qinit ptslrinit = {
        putq,
        ptslrserv,
        ptslopen,
        ptslclose,
        NULL,
        &ptslm_info,
        NULL
};

static struct qinit ptslwinit = {
        ptslwput,
        NULL,
        NULL,
        NULL,
        NULL,
        &ptslm_info,
        NULL
};

struct streamtab ptysinfo = {
        &ptslrinit,
        &ptslwinit,
        NULL,
        NULL
};

static void ptslreioctl(void *);
static void ptslioctl(struct pty *, queue_t *, mblk_t *);
static void pt_sendstop(struct pty *);
static void ptcpollwakeup(struct pty *, int);


static int ptsl_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int ptsl_attach(dev_info_t *, ddi_attach_cmd_t);
static dev_info_t *ptsl_dip;    /* for dev-to-dip conversions */

DDI_DEFINE_STREAM_OPS(ptsl_ops, nulldev, nulldev,
    ptsl_attach, nodev, nodev, ptsl_info, D_MP, &ptysinfo,
    ddi_quiesce_not_supported);

#include <sys/types.h>
#include <sys/conf.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/modctl.h>

/*
 * Module linkage information for the kernel.
 */

static struct modldrv modldrv = {
        &mod_driverops, /* Type of module. This one is a pseudo driver */
        "tty pseudo driver subsidiary 'ptsl'",
        &ptsl_ops,      /* driver ops */
};

static struct modlinkage modlinkage = {
        MODREV_1,
        &modldrv,
        NULL
};

int
_init(void)
{
        return (mod_install(&modlinkage));
}

int
_fini(void)
{
        return (mod_remove(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
        return (mod_info(&modlinkage, modinfop));
}

static char *tty_banks = PTY_BANKS;
static char *tty_digits = PTY_DIGITS;

/* ARGSUSED */
static int
ptsl_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
        char name[8];
        int tty_num;
        char *tty_digit = tty_digits;
        char *tty_bank = tty_banks;

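        /*
         * Create one minor node per configured pty.  Names have the form
         * "tty<bank><digit>", with the bank and digit characters drawn from
         * the PTY_BANKS and PTY_DIGITS strings; the digit position cycles
         * fastest, and when a bank's digits are exhausted the next bank is
         * used.
         */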
        for (tty_num = 0; tty_num < npty; tty_num++) {
                (void) sprintf(name, "tty%c%c", *tty_bank, *tty_digit);
                if (ddi_create_minor_node(devi, name, S_IFCHR,
                    tty_num, DDI_PSEUDO, 0) == DDI_FAILURE) {
                        ddi_remove_minor_node(devi, NULL);
                        return (-1);
                }
                if (*(++tty_digit) == '\0') {
                        tty_digit = tty_digits;
                        if (*(++tty_bank) == '\0')
                                break;
                }
        }
        ptsl_dip = devi;
        return (DDI_SUCCESS);
}

/* ARGSUSED */
static int
ptsl_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
    void **result)
{
        int error;

        switch (infocmd) {
        case DDI_INFO_DEVT2DEVINFO:
                if (ptsl_dip == NULL) {
                        error = DDI_FAILURE;
                } else {
                        *result = (void *)ptsl_dip;
                        error = DDI_SUCCESS;
                }
                break;
        case DDI_INFO_DEVT2INSTANCE:
                *result = (void *)0;
                error = DDI_SUCCESS;
                break;
        default:
                error = DDI_FAILURE;
        }
        return (error);
}


/*
 * Open the subsidiary side of a pty.
 */
/*ARGSUSED*/
static int
ptslopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *cred)
{
        minor_t unit;
        dev_t dev = *devp;
        struct pty *pty;

        unit = getminor(dev);
        if (unit >= npty)
                return (ENXIO);

        pty = &pty_softc[unit];

        mutex_enter(&pty->ptc_lock);
        /*
         * Block waiting for controller to open, unless this is a no-delay
         * open.
         */
again:
        if (pty->pt_ttycommon.t_writeq == NULL) {
                pty->pt_ttycommon.t_iflag = 0;
                pty->pt_ttycommon.t_cflag = (B38400 << IBSHIFT)|B38400|IFLAGS;
                pty->pt_ttycommon.t_iocpending = NULL;
                pty->pt_wbufcid = 0;
                pty->pt_ttycommon.t_size.ws_row = 0;
                pty->pt_ttycommon.t_size.ws_col = 0;
                pty->pt_ttycommon.t_size.ws_xpixel = 0;
                pty->pt_ttycommon.t_size.ws_ypixel = 0;
        } else if ((pty->pt_ttycommon.t_flags & TS_XCLUDE) &&
            secpolicy_excl_open(cred) != 0) {
                mutex_exit(&pty->ptc_lock);
                return (EBUSY);
        }
        if (!(flag & (FNONBLOCK|FNDELAY)) &&
            !(pty->pt_ttycommon.t_cflag & CLOCAL)) {
                if (!(pty->pt_flags & PF_CARR_ON)) {
                        pty->pt_flags |= PF_WOPEN;
                        if (!cv_wait_sig(&pty->pt_cv_flags, &pty->ptc_lock)) {
                                pty->pt_flags &= ~PF_WOPEN;
                                mutex_exit(&pty->ptc_lock);
                                return (EINTR);
                        }
                        goto again;
                }
        }

        pty->pt_sdev = dev;
        q->q_ptr = WR(q)->q_ptr = pty;
        pty->pt_flags &= ~PF_SUBSIDGONE;
        pty->pt_ttycommon.t_readq = pty->pt_ttycommon.t_writeq = NULL;

        /*
         * Subsidiary is ready to accept messages, but the manager still can't
         * send messages to the subsidiary queue since it is not plumbed
         * yet.  So do qprocson() and finish subsidiary initialization.
         */

        mutex_exit(&pty->ptc_lock);

        qprocson(q);

        /*
         * Now it is safe to send messages to q, so wake up the manager
         * possibly waiting for the subsidiary queue to finish open.
         */
        mutex_enter(&pty->ptc_lock);
        /*
         * queue has already been setup with a pointer to
         * the stream head that is being referenced
         */
        pty->pt_vnode = strq2vp(q);
        VN_RELE(pty->pt_vnode);
        pty->pt_ttycommon.t_readq = q;
        pty->pt_ttycommon.t_writeq = WR(q);
        /* tell manager device that subsidiary is ready for writing */
        if (pty->pt_flags & PF_CARR_ON)
                cv_broadcast(&pty->pt_cv_readq);
        mutex_exit(&pty->ptc_lock);

        return (0);
}

static int
ptslclose(queue_t *q, int flag, cred_t *cred)
{
        struct pty *pty;
        bufcall_id_t pt_wbufcid = 0;

#ifdef lint
        flag = flag;
        cred = cred;
#endif

        if ((pty = (struct pty *)q->q_ptr) == NULL)
                return (ENODEV);        /* already been closed once */

        /*
         * Prevent the queues from being used by the manager device.  This
         * should be done before qprocsoff, or the writer may attempt to use
         * the subsidiary queue after qprocsoff has removed it from the
         * stream and before entering mutex_enter().
         */
        mutex_enter(&pty->ptc_lock);
        pty->pt_ttycommon.t_readq = NULL;
        pty->pt_ttycommon.t_writeq = NULL;
        while (pty->pt_flags & PF_IOCTL) {
                pty->pt_flags |= PF_WAIT;
                cv_wait(&pty->pt_cv_flags, &pty->ptc_lock);
        }
        pty->pt_vnode = NULL;
        mutex_exit(&pty->ptc_lock);

        qprocsoff(q);

        mutex_enter(&pty->ptc_lock);
        /*
         * ptc_lock mutex is not dropped across
         * the call to the routine ttycommon_close
         */
        ttycommon_close(&pty->pt_ttycommon);

        /*
         * Cancel outstanding "bufcall" request.
         */
        if (pty->pt_wbufcid) {
                pt_wbufcid = pty->pt_wbufcid;
                pty->pt_wbufcid = 0;
        }

        /*
         * Clear out all the subsidiary-side state.
         */
        pty->pt_flags &= ~(PF_WOPEN|PF_STOPPED|PF_NOSTOP);
        if (pty->pt_flags & PF_CARR_ON) {
                pty->pt_flags |= PF_SUBSIDGONE; /* let the controller know */
                ptcpollwakeup(pty, 0);          /* wake up readers/selectors */
                ptcpollwakeup(pty, FWRITE);     /* wake up writers/selectors */
                cv_broadcast(&pty->pt_cv_flags);
        }
        pty->pt_sdev = 0;
        q->q_ptr = WR(q)->q_ptr = NULL;
        mutex_exit(&pty->ptc_lock);

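        /*
         * The bufcall id was stashed above so that unbufcall() could be
         * called here, after ptc_lock has been dropped.
         */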
        if (pt_wbufcid)
                unbufcall(pt_wbufcid);

        return (0);
}

/*
 * Put procedure for write queue.
 * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
 * queue up M_DATA messages for processing by the controller "read"
 * routine; discard everything else.
 */
static int
ptslwput(queue_t *q, mblk_t *mp)
{
        struct pty *pty;
        mblk_t *bp;

        pty = (struct pty *)q->q_ptr;

        mutex_enter(&pty->ptc_lock);

        switch (mp->b_datap->db_type) {

        case M_STOP:
                if (!(pty->pt_flags & PF_STOPPED)) {
                        pty->pt_flags |= PF_STOPPED;
                        pty->pt_send |= TIOCPKT_STOP;
                        ptcpollwakeup(pty, 0);
                }
                freemsg(mp);
                break;

        case M_START:
                if (pty->pt_flags & PF_STOPPED) {
                        pty->pt_flags &= ~PF_STOPPED;
                        pty->pt_send = TIOCPKT_START;
                        ptcpollwakeup(pty, 0);
                }
                ptcpollwakeup(pty, FREAD);      /* permit controller to read */
                freemsg(mp);
                break;

        case M_IOCTL:
                ptslioctl(pty, q, mp);
                break;

        case M_FLUSH:
                if (*mp->b_rptr & FLUSHW) {
                        /*
                         * Set the "flush write" flag, so that we
                         * notify the controller if they're in packet
                         * or user control mode.
                         */
                        if (!(pty->pt_send & TIOCPKT_FLUSHWRITE)) {
                                pty->pt_send |= TIOCPKT_FLUSHWRITE;
                                ptcpollwakeup(pty, 0);
                        }
                        /*
                         * Flush our write queue.
                         */
                        flushq(q, FLUSHDATA);   /* XXX doesn't flush M_DELAY */
                        *mp->b_rptr &= ~FLUSHW; /* it has been flushed */
                }
                if (*mp->b_rptr & FLUSHR) {
                        /*
                         * Set the "flush read" flag, so that we
                         * notify the controller if they're in packet
                         * mode.
                         */
                        if (!(pty->pt_send & TIOCPKT_FLUSHREAD)) {
                                pty->pt_send |= TIOCPKT_FLUSHREAD;
                                ptcpollwakeup(pty, 0);
                        }
                        flushq(RD(q), FLUSHDATA);
                        mutex_exit(&pty->ptc_lock);
                        qreply(q, mp);  /* give the read queues a crack at it */
                        return (0);
                } else
                        freemsg(mp);
                break;

        case M_DATA:
                /*
                 * Throw away any leading zero-length blocks, and queue it up
                 * for the controller to read.
                 */
                if (pty->pt_flags & PF_CARR_ON) {
                        bp = mp;
                        while ((bp->b_wptr - bp->b_rptr) == 0) {
                                mp = bp->b_cont;
                                freeb(bp);
                                if (mp == NULL) {
                                        mutex_exit(&pty->ptc_lock);
                                        /* damp squib of a message */
                                        return (0);
                                }
                                bp = mp;
                        }
                        (void) putq(q, mp);
                        ptcpollwakeup(pty, FREAD);      /* soup's on! */
                } else
                        freemsg(mp);    /* nobody listening */
                break;

        case M_CTL:
                if ((*(int *)mp->b_rptr) == MC_CANONQUERY) {
                        /*
                         * We're being asked whether we do canonicalization
                         * or not.  Send a reply back up indicating whether
                         * we do or not.
                         */
                        (void) putctl1(RD(q), M_CTL,
                            (pty->pt_flags & PF_REMOTE) ?
                            MC_NOCANON : MC_DOCANON);
                }
                freemsg(mp);
                break;

        default:
                /*
                 * "No, I don't want a subscription to Chain Store Age,
                 * thank you anyway."
                 */
                freemsg(mp);
                break;
        }
        mutex_exit(&pty->ptc_lock);
        return (0);
}

/*
 * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
 * the buffer we need.
 */
static void
ptslreioctl(void *arg)
{
        struct pty *pty = arg;
        queue_t *q;
        mblk_t *mp;

        mutex_enter(&pty->ptc_lock);
        /*
         * The bufcall is no longer pending.
         */
        if (pty->pt_wbufcid == 0) {
                mutex_exit(&pty->ptc_lock);
                return;
        }

        pty->pt_wbufcid = 0;
        if ((q = pty->pt_ttycommon.t_writeq) == NULL) {
                mutex_exit(&pty->ptc_lock);
                return;
        }
        if ((mp = pty->pt_ttycommon.t_iocpending) != NULL) {
                /* It's not pending any more. */
                pty->pt_ttycommon.t_iocpending = NULL;
                ptslioctl(pty, q, mp);
        }
        mutex_exit(&pty->ptc_lock);
}

/*
 * Process an "ioctl" message sent down to us.
 * Drops the pty's ptc_lock mutex and then reacquires it.
 */
static void
ptslioctl(struct pty *pty, queue_t *q, mblk_t *mp)
{
        struct iocblk *iocp;
        int cmd;
        size_t datasize;
        int error = 0;

        ASSERT(MUTEX_HELD(&pty->ptc_lock));

        iocp = (struct iocblk *)mp->b_rptr;
        cmd = iocp->ioc_cmd;

        switch (cmd) {

        case TIOCSTI: {
                /*
                 * The permission checking has already been done at the stream
                 * head, since it has to be done in the context of the process
                 * doing the call.
                 */
                mblk_t *bp;

                error = miocpullup(mp, sizeof (char));
                if (error != 0)
                        goto out;

                /*
                 * Simulate typing of a character at the terminal.
                 */
                if ((bp = allocb(1, BPRI_MED)) != NULL) {
                        *bp->b_wptr++ = *mp->b_cont->b_rptr;
                        if (!(pty->pt_flags & PF_REMOTE)) {
                                if (!canput(pty->pt_ttycommon.t_readq)) {
                                        mutex_exit(&pty->ptc_lock);
                                        ttycommon_qfull(&pty->pt_ttycommon, q);
                                        mutex_enter(&pty->ptc_lock);
                                        freemsg(bp);
                                        error = EAGAIN;
                                        goto out;
                                } else
                                        (void) putq(
                                            pty->pt_ttycommon.t_readq, bp);
                        } else {
                                if (pty->pt_flags & PF_UCNTL) {
                                        /*
                                         * XXX - flow control; don't overflow
                                         * this "queue".
                                         */
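                                        /*
                                         * Append the single-character message
                                         * to the pty's "stuff queue", kept as
                                         * a doubly linked list through b_next
                                         * and b_prev, and wake the controller
                                         * so it can consume it.
                                         */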
                                        if (pty->pt_stuffqfirst != NULL) {
                                                pty->pt_stuffqlast->b_next = bp;
                                                bp->b_prev = pty->pt_stuffqlast;
                                        } else {
                                                pty->pt_stuffqfirst = bp;
                                                bp->b_prev = NULL;
                                        }
                                        bp->b_next = NULL;
                                        pty->pt_stuffqlast = bp;
                                        pty->pt_stuffqlen++;
                                        ptcpollwakeup(pty, 0);
                                }
                        }
                } else {
                        error = EAGAIN;
                        goto out;
                }

                /*
                 * Turn the ioctl message into an ioctl ACK message.
                 */
                iocp->ioc_count = 0;    /* no data returned */
                mp->b_datap->db_type = M_IOCACK;
                goto out;
        }

        case TIOCSSIZE: {
                tty_common_t *tc = &pty->pt_ttycommon;
                struct ttysize *tp;

                error = miocpullup(mp, sizeof (struct ttysize));
                if (error != 0)
                        goto out;

                /*
                 * Set the window size, but don't send a SIGWINCH.
                 */
                tp = (struct ttysize *)mp->b_cont->b_rptr;
                tc->t_size.ws_row = tp->ts_lines;
                tc->t_size.ws_col = tp->ts_cols;
                tc->t_size.ws_xpixel = 0;
                tc->t_size.ws_ypixel = 0;

                /*
                 * Send an ACK back.
                 */
                iocp->ioc_count = 0;    /* no data returned */
                mp->b_datap->db_type = M_IOCACK;
                goto out;
        }

        case TIOCGSIZE: {
                tty_common_t *tc = &pty->pt_ttycommon;
                mblk_t *datap;
                struct ttysize *tp;

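                /*
                 * If no message block is available, stash the ioctl in
                 * t_iocpending and schedule a bufcall so that ptslreioctl()
                 * can retry it when an allocation is more likely to succeed.
                 */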
                if ((datap = allocb(sizeof (struct ttysize),
                    BPRI_HI)) == NULL) {
                        if (pty->pt_wbufcid) {
                                if (pty->pt_ttycommon.t_iocpending)
                                        freemsg(pty->pt_ttycommon.t_iocpending);
                                pty->pt_ttycommon.t_iocpending = mp;
                                return;
                        }
                        pty->pt_wbufcid = bufcall(sizeof (struct ttysize),
                            BPRI_HI, ptslreioctl, pty);
                        if (pty->pt_wbufcid == 0) {
                                error = ENOMEM;
                                goto out;
                        }
                        pty->pt_ttycommon.t_iocpending = mp;
                        return;
                }
                /*
                 * Return the current size.
                 */
                tp = (struct ttysize *)datap->b_wptr;
                tp->ts_lines = tc->t_size.ws_row;
                tp->ts_cols = tc->t_size.ws_col;
                datap->b_wptr += sizeof (struct ttysize);
                iocp->ioc_count = sizeof (struct ttysize);

                if (mp->b_cont != NULL)
                        freemsg(mp->b_cont);
                mp->b_cont = datap;
                mp->b_datap->db_type = M_IOCACK;
                goto out;
        }

        /*
         * Imported from ttycommon_ioctl routine
         */

        case TCSETSF: {
                tty_common_t *tc = &pty->pt_ttycommon;
                struct termios *cb;

                error = miocpullup(mp, sizeof (struct termios));
                if (error != 0)
                        goto out;

                cb = (struct termios *)mp->b_cont->b_rptr;

                flushq(RD(q), FLUSHDATA);
                mutex_exit(&pty->ptc_lock);
                (void) putnextctl1(RD(q), M_FLUSH, FLUSHR);
                mutex_enter(&pty->ptc_lock);
                mutex_enter(&tc->t_excl);
                tc->t_iflag = cb->c_iflag;
                tc->t_cflag = cb->c_cflag;
                tc->t_stopc = cb->c_cc[VSTOP];
                tc->t_startc = cb->c_cc[VSTART];
                mutex_exit(&tc->t_excl);

                /*
                 * Turn the ioctl message into an ioctl ACK message.
                 */
                iocp->ioc_count = 0;    /* no data returned */
                mp->b_datap->db_type = M_IOCACK;
                goto ioctldone;
        }

        case TCSETAF: {
                tty_common_t *tc = &pty->pt_ttycommon;
                struct termios *cb;

                error = miocpullup(mp, sizeof (struct termios));
                if (error != 0)
                        goto out;

                cb = (struct termios *)mp->b_cont->b_rptr;

                flushq(RD(q), FLUSHDATA);
                mutex_exit(&pty->ptc_lock);
                (void) putnextctl1(RD(q), M_FLUSH, FLUSHR);
                mutex_enter(&pty->ptc_lock);
                mutex_enter(&tc->t_excl);
                tc->t_iflag = (tc->t_iflag & 0xffff0000 | cb->c_iflag);
                tc->t_cflag = (tc->t_cflag & 0xffff0000 | cb->c_cflag);
                mutex_exit(&tc->t_excl);

                /*
                 * Turn the ioctl message into an ioctl ACK message.
                 */
                iocp->ioc_count = 0;    /* no data returned */
                mp->b_datap->db_type = M_IOCACK;
                goto ioctldone;
        }

        case TIOCSWINSZ: {
                tty_common_t *tc = &pty->pt_ttycommon;
                struct winsize *ws;

                error = miocpullup(mp, sizeof (struct winsize));
                if (error != 0)
                        goto out;

                ws = (struct winsize *)mp->b_cont->b_rptr;
                /*
                 * If the window size changed, send a SIGWINCH.
                 */
                mutex_enter(&tc->t_excl);
                if (bcmp(&tc->t_size, ws, sizeof (struct winsize))) {
                        tc->t_size = *ws;
                        mutex_exit(&tc->t_excl);
                        mutex_exit(&pty->ptc_lock);
                        (void) putnextctl1(RD(q), M_PCSIG, SIGWINCH);
                        mutex_enter(&pty->ptc_lock);
                } else
                        mutex_exit(&tc->t_excl);

                /*
                 * Turn the ioctl message into an ioctl ACK message.
                 */
                iocp->ioc_count = 0;    /* no data returned */
                mp->b_datap->db_type = M_IOCACK;
                goto ioctldone;
        }

        /*
         * If they were just trying to drain output, that's OK.
         * If they are actually trying to send a break it's an error.
         */
        case TCSBRK:
                error = miocpullup(mp, sizeof (int));
                if (error != 0)
                        goto out;

                if (*(int *)mp->b_cont->b_rptr != 0) {
                        /*
                         * Turn the ioctl message into an ioctl ACK message.
                         */
                        iocp->ioc_count = 0;    /* no data returned */
                        mp->b_datap->db_type = M_IOCACK;
                } else {
                        error = ENOTTY;
                }
                goto out;
        }

        /*
         * The only way in which "ttycommon_ioctl" can fail is if the "ioctl"
         * requires a response containing data to be returned to the user,
         * and no mblk could be allocated for the data.
         * No such "ioctl" alters our state.  Thus, we always go ahead and
         * do any state-changes the "ioctl" calls for.  If we couldn't allocate
         * the data, "ttycommon_ioctl" has stashed the "ioctl" away safely, so
         * we just call "bufcall" to request that we be called back when we
         * stand a better chance of allocating the data.
         */
        if ((datasize =
            ttycommon_ioctl(&pty->pt_ttycommon, q, mp, &error)) != 0) {
                if (pty->pt_wbufcid) {
                        if (pty->pt_ttycommon.t_iocpending)
                                freemsg(pty->pt_ttycommon.t_iocpending);
                        pty->pt_ttycommon.t_iocpending = mp;
                        return;
                }
                pty->pt_wbufcid = bufcall(datasize, BPRI_HI, ptslreioctl, pty);
                if (pty->pt_wbufcid == 0) {
                        error = ENOMEM;
                        goto out;
                }
                pty->pt_ttycommon.t_iocpending = mp;
                return;
        }

ioctldone:
        if (error == 0) {
                /*
                 * "ttycommon_ioctl" did most of the work; we just use the
                 * data it set up.
                 */
                switch (cmd) {

                case TCSETSF:
                case TCSETAF:
                        /*
                         * Set the "flush read" flag, so that we
                         * notify the controller if they're in packet
                         * mode.
                         */
                        if (!(pty->pt_send & TIOCPKT_FLUSHREAD)) {
                                pty->pt_send |= TIOCPKT_FLUSHREAD;
                                ptcpollwakeup(pty, 0);
                        }
                        /*FALLTHROUGH*/

                case TCSETSW:
                case TCSETAW:
                        cmd = TIOCSETP;         /* map backwards to old codes */
                        pt_sendstop(pty);
                        break;

                case TCSETS:
                case TCSETA:
                        cmd = TIOCSETN;         /* map backwards to old codes */
                        pt_sendstop(pty);
                        break;
                }
        }

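        /*
         * "User control" mode: certain ioctls are not handled here but are
         * instead passed to the process on the controller side.  With
         * 4.3BSD-style user control (PF_43UCNTL), unrecognized commands in
         * the 'u' ioctl group qualify; with PF_UCNTL, any IOC_IN command in
         * the 't' group with a nonzero low byte does.  The low 8 bits of the
         * command are stashed in pt_ucntl and the controller is woken up to
         * read them.
         */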
        if (pty->pt_flags & PF_43UCNTL) {
                if (error < 0) {
                        if ((cmd & ~0xff) == _IO('u', 0)) {
                                if (cmd & 0xff) {
                                        pty->pt_ucntl = (uchar_t)cmd & 0xff;
                                        ptcpollwakeup(pty, FREAD);
                                }
                                error = 0;      /* XXX */
                                goto out;
                        }
                        error = ENOTTY;
                }
        } else {
                if ((pty->pt_flags & PF_UCNTL) &&
                    (cmd & (IOC_INOUT | 0xff00)) == (IOC_IN|('t'<<8)) &&
                    (cmd & 0xff)) {
                        pty->pt_ucntl = (uchar_t)cmd & 0xff;
                        ptcpollwakeup(pty, FREAD);
                        goto out;
                }
                if (error < 0)
                        error = ENOTTY;
        }

out:
        if (error != 0) {
                ((struct iocblk *)mp->b_rptr)->ioc_error = error;
                mp->b_datap->db_type = M_IOCNAK;
        }

        mutex_exit(&pty->ptc_lock);
        qreply(q, mp);
        mutex_enter(&pty->ptc_lock);
}

/*
 * Service routine for read queue.
 * Just wakes the controller side up so it can write some more data
 * to that queue.
 */
static int
ptslrserv(queue_t *q)
{
        struct pty *pty = (struct pty *)q->q_ptr;
        mblk_t *mp;
        mblk_t *head = NULL, *tail = NULL;
        /*
         * Build up the linked list of messages, then drop
         * the lock and do putnext().
         */
        mutex_enter(&pty->ptc_lock);

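        /*
         * High-priority messages (db_type >= QPCTL) are always passed along;
         * an ordinary message is passed only if the next queue can accept it,
         * otherwise it is put back on our queue and we stop until we are
         * back-enabled.
         */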
        while ((mp = getq(q)) != NULL) {
                if ((mp->b_datap->db_type < QPCTL) && !canputnext(q)) {
                        (void) putbq(q, mp);
                        break;
                }
                if (!head) {
                        head = mp;
                        tail = mp;
                } else {
                        tail->b_next = mp;
                        tail = mp;
                }
        }

        if (q->q_count <= q->q_lowat)
                ptcpollwakeup((struct pty *)q->q_ptr, FWRITE);

        mutex_exit(&pty->ptc_lock);

        while (head) {
                mp = head;
                head = mp->b_next;
                mp->b_next = NULL;
                putnext(q, mp);
        }

        return (0);
}

static void
pt_sendstop(struct pty *pty)
{
        int stop;

        ASSERT(MUTEX_HELD(&pty->ptc_lock));

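        /*
         * A baud rate of B0 is the conventional way of asking for a hangup,
         * so treat it as the subsidiary going away and notify the controller.
         */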
        if ((pty->pt_ttycommon.t_cflag & CBAUD) == 0) {
                if (pty->pt_flags & PF_CARR_ON) {
                        /*
                         * Let the manager know, then wake up
                         * readers/selectors and writers/selectors.
                         */
                        pty->pt_flags |= PF_SUBSIDGONE;
                        ptcpollwakeup(pty, 0);
                        ptcpollwakeup(pty, FWRITE);
                }
        }

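        /*
         * "stop" is nonzero when output flow control with the standard
         * ^S/^Q characters is in effect.  Report transitions to the
         * controller via TIOCPKT_DOSTOP/TIOCPKT_NOSTOP so that a packet-mode
         * reader can track the current flow-control settings.
         */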
        stop = (pty->pt_ttycommon.t_iflag & IXON) &&
            pty->pt_ttycommon.t_stopc == CTRL('s') &&
            pty->pt_ttycommon.t_startc == CTRL('q');

        if (pty->pt_flags & PF_NOSTOP) {
                if (stop) {
                        pty->pt_send &= ~TIOCPKT_NOSTOP;
                        pty->pt_send |= TIOCPKT_DOSTOP;
                        pty->pt_flags &= ~PF_NOSTOP;
                        ptcpollwakeup(pty, 0);
                }
        } else {
                if (!stop) {
                        pty->pt_send &= ~TIOCPKT_DOSTOP;
                        pty->pt_send |= TIOCPKT_NOSTOP;
                        pty->pt_flags |= PF_NOSTOP;
                        ptcpollwakeup(pty, 0);
                }
        }
}

/*
 * Wake up the controller side.  "flag" is 0 if a special packet or
 * user control mode message has been queued up (this data is readable,
 * so we also treat it as a regular data event; should we send SIGIO,
 * though?), FREAD if regular data has been queued up, or FWRITE if
 * the subsidiary's read queue has drained sufficiently to allow writing.
 */
static void
ptcpollwakeup(struct pty *pty, int flag)
{
        ASSERT(MUTEX_HELD(&pty->ptc_lock));

        if (flag == 0) {
                /*
                 * An "exceptional condition" occurred.  This means that
                 * a "read" is now possible, so do a "read" wakeup.
                 */
                flag = FREAD;
                pollwakeup(&ptcph, POLLIN | POLLRDBAND);
                if (pty->pt_flags & PF_ASYNC)
                        gsignal(pty->pt_pgrp, SIGURG);
        }
        if (flag & FREAD) {
                /*
                 * Wake up the parent process, as there is regular
                 * data to read from the subsidiary's write queue.
                 */
                pollwakeup(&ptcph, POLLIN | POLLRDNORM);
                cv_broadcast(&pty->pt_cv_writeq);
                if (pty->pt_flags & PF_ASYNC)
                        gsignal(pty->pt_pgrp, SIGIO);
        }
        if (flag & FWRITE) {
                /*
                 * Wake up the parent process to write
                 * data into the subsidiary's read queue, as the
                 * read queue has drained enough.
                 */
                pollwakeup(&ptcph, POLLOUT | POLLWRNORM);
                cv_broadcast(&pty->pt_cv_readq);
                if (pty->pt_flags & PF_ASYNC)
                        gsignal(pty->pt_pgrp, SIGIO);
        }
}