1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
23 /* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
24 /* All Rights Reserved */
25
26 /*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31
32 /*
33 *
34 * Copyright (c) 2004 Christian Limpach.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. This section intentionally left blank.
46 * 4. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60 /*
61 * Section 3 of the above license was updated in response to bug 6379571.
62 */
63
64 /*
65 * Hypervisor virtual console driver
66 */
67
68 #include <sys/param.h>
69 #include <sys/types.h>
70 #include <sys/signal.h>
71 #include <sys/stream.h>
72 #include <sys/termio.h>
73 #include <sys/errno.h>
74 #include <sys/file.h>
75 #include <sys/cmn_err.h>
76 #include <sys/stropts.h>
77 #include <sys/strsubr.h>
78 #include <sys/strtty.h>
79 #include <sys/debug.h>
80 #include <sys/kbio.h>
81 #include <sys/cred.h>
82 #include <sys/stat.h>
83 #include <sys/consdev.h>
84 #include <sys/mkdev.h>
85 #include <sys/kmem.h>
86 #include <sys/cred.h>
87 #include <sys/strsun.h>
88 #ifdef DEBUG
89 #include <sys/promif.h>
90 #endif
91 #include <sys/modctl.h>
92 #include <sys/ddi.h>
93 #include <sys/sunddi.h>
94 #include <sys/sunndi.h>
95 #include <sys/policy.h>
96 #include <sys/atomic.h>
97 #include <sys/psm.h>
98 #include <xen/public/io/console.h>
99
100 #include "xencons.h"
101
102 #include <sys/hypervisor.h>
103 #include <sys/evtchn_impl.h>
104 #include <xen/sys/xenbus_impl.h>
105 #include <xen/sys/xendev.h>
106
107 #ifdef DEBUG
108 #define XENCONS_DEBUG_INIT 0x0001 /* msgs during driver initialization. */
109 #define XENCONS_DEBUG_INPUT 0x0002 /* characters received during int. */
110 #define XENCONS_DEBUG_EOT 0x0004 /* msgs when wait for xmit to finish. */
111 #define XENCONS_DEBUG_CLOSE 0x0008 /* msgs when driver open/close called */
112 #define XENCONS_DEBUG_PROCS 0x0020 /* each proc name as it is entered. */
113 #define XENCONS_DEBUG_OUT 0x0100 /* msgs about output events. */
114 #define XENCONS_DEBUG_BUSY 0x0200 /* msgs when xmit is enabled/disabled */
115 #define XENCONS_DEBUG_MODEM 0x0400 /* msgs about modem status & control. */
116 #define XENCONS_DEBUG_MODM2 0x0800 /* msgs about modem status & control. */
117 #define XENCONS_DEBUG_IOCTL 0x1000 /* Output msgs about ioctl messages. */
118 #define XENCONS_DEBUG_CHIP 0x2000 /* msgs about chip identification. */
119 #define XENCONS_DEBUG_SFLOW 0x4000 /* msgs when S/W flowcontrol active */
120 #define XENCONS_DEBUG(x) (debug & (x))
121 static int debug = 0;
122 #else
123 #define XENCONS_DEBUG(x) B_FALSE
124 #endif
125
126 #define XENCONS_WBUFSIZE 4096
127
128 static boolean_t abort_charseq_recognize(uchar_t);
129
130 /* The async interrupt entry points */
131 static void xcasync_ioctl(struct asyncline *, queue_t *, mblk_t *);
132 static void xcasync_reioctl(void *);
133 static void xcasync_start(struct asyncline *);
134 static void xenconsputchar(cons_polledio_arg_t, uchar_t);
135 static int xenconsgetchar(cons_polledio_arg_t);
136 static boolean_t xenconsischar(cons_polledio_arg_t);
137
138 static uint_t xenconsintr(caddr_t);
139 static uint_t xenconsintr_priv(caddr_t);
140 /*PRINTFLIKE2*/
141 static void xenconserror(int, const char *, ...) __KPRINTFLIKE(2);
142 static void xencons_soft_state_free(struct xencons *);
143 static boolean_t
144 xcasync_flowcontrol_sw_input(struct xencons *, async_flowc_action, int);
145 static void
146 xcasync_flowcontrol_sw_output(struct xencons *, async_flowc_action);
147
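/*
 * Global driver state: the soft state anchor for all instances, the
 * staging buffer dom0 uses for HYPERVISOR_console_io() writes, and a
 * pointer to the console instance used by the polled I/O and
 * interrupt paths.
 */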
148 void *xencons_soft_state;
149 char *xencons_wbuf;
150 struct xencons *xencons_console;
151
152 static void
153 xenconssetup_avintr(struct xencons *xcp, int attach)
154 {
155 /*
156 * On xen, CPU 0 always exists and can't be taken offline,
157 * so binding this thread to it should always succeed.
158 */
159 mutex_enter(&cpu_lock);
160 thread_affinity_set(curthread, 0);
161 mutex_exit(&cpu_lock);
162
163 if (attach) {
164 /* Setup our interrupt binding. */
165 (void) add_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
166 "xencons", xcp->console_irq, (caddr_t)xcp, NULL, NULL,
167 xcp->dip);
168 } else {
169 /*
170 * Cleanup interrupt configuration. Note that the framework
171 * _should_ ensure that when rem_avintr() returns the interrupt
172 * service routine is not currently executing and that it won't
173 * be invoked again.
174 */
175 (void) rem_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
176 xcp->console_irq);
177 }
178
179 /* Notify our caller that we're done. */
180 mutex_enter(&xcp->excl);
181 cv_signal(&xcp->excl_cv);
182 mutex_exit(&xcp->excl);
183
184 /* Clear our binding to CPU 0 */
185 thread_affinity_clear(curthread);
186
187 }
188
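/*
 * Thin wrappers so xenconssetup_avintr() can be dispatched through
 * taskq_dispatch(), which passes only a single argument.
 */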
189 static void
190 xenconssetup_add_avintr(struct xencons *xcp)
191 {
192 xenconssetup_avintr(xcp, B_TRUE);
193 }
194
195 static void
196 xenconssetup_rem_avintr(struct xencons *xcp)
197 {
198 xenconssetup_avintr(xcp, B_FALSE);
199 }
200
201 static int
202 xenconsdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
203 {
204 int instance;
205 struct xencons *xcp;
206
207 if (cmd != DDI_DETACH && cmd != DDI_SUSPEND)
208 return (DDI_FAILURE);
209
210 if (cmd == DDI_SUSPEND) {
211 ddi_remove_intr(devi, 0, NULL);
212 return (DDI_SUCCESS);
213 }
214
215 /*
216 * We should never try to detach the console driver on a domU
217 * because it should always be held open
218 */
219 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
220 if (!DOMAIN_IS_INITDOMAIN(xen_info))
221 return (DDI_FAILURE);
222
223 instance = ddi_get_instance(devi); /* find out which unit */
224
225 xcp = ddi_get_soft_state(xencons_soft_state, instance);
226 if (xcp == NULL)
227 return (DDI_FAILURE);
228
229 /*
230 * Cleanup our interrupt bindings. For more info on why we
231 * do this in a separate thread, see the comments for when we
232 * setup the interrupt bindings.
233 */
234 xencons_console = NULL;
235 mutex_enter(&xcp->excl);
236 (void) taskq_dispatch(system_taskq,
237 (void (*)(void *))xenconssetup_rem_avintr, xcp, TQ_SLEEP);
238 cv_wait(&xcp->excl_cv, &xcp->excl);
239 mutex_exit(&xcp->excl);
240
241 /* remove all minor device node(s) for this device */
242 ddi_remove_minor_node(devi, NULL);
243
244 /* free up state */
245 xencons_soft_state_free(xcp);
246 kmem_free(xencons_wbuf, XENCONS_WBUFSIZE);
247
248 DEBUGNOTE1(XENCONS_DEBUG_INIT, "xencons%d: shutdown complete",
249 instance);
250 return (DDI_SUCCESS);
251 }
252
253 static void
254 xenconssetup(struct xencons *xcp)
255 {
256 xcp->ifp = (volatile struct xencons_interface *)HYPERVISOR_console_page;
257
258 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
259 xencons_wbuf = kmem_alloc(XENCONS_WBUFSIZE, KM_SLEEP);
260
261 /*
262 * Activate the xen console virq. Note that xen requires
263 * that VIRQs be bound to CPU 0 when first created.
264 */
265 xcp->console_irq = ec_bind_virq_to_irq(VIRQ_CONSOLE, 0);
266
267 /*
268 * Ok. This is kinda ugly. We want to register an
269 * interrupt handler for the xen console virq, but
270 * virqs are xen specific and currently the DDI doesn't
271 * support binding to them. So instead we need to use
272 * add_avintr(). So to make things more complicated,
273 * we already had to bind the xen console VIRQ to CPU 0,
274 * and add_avintr() needs to be invoked on the same CPU
275 * where the VIRQ is bound, in this case on CPU 0. We
276 * could just temporarily bind ourselves to CPU 0, but
277 * we don't want to do that since this attach thread
278 * could have been invoked in a user thread context,
279 * in which case this thread could already have some
280 * pre-existing cpu binding. So to avoid changing our
281 * cpu binding we're going to use a taskq thread that
282 * will bind to CPU 0 and register our interrupt
283 * handler for us.
284 */
285 mutex_enter(&xcp->excl);
286 (void) taskq_dispatch(system_taskq,
287 (void (*)(void *))xenconssetup_add_avintr, xcp, TQ_SLEEP);
288 cv_wait(&xcp->excl_cv, &xcp->excl);
289 mutex_exit(&xcp->excl);
290 } else {
291 (void) xvdi_alloc_evtchn(xcp->dip);
292 xcp->evtchn = xvdi_get_evtchn(xcp->dip);
293 (void) ddi_add_intr(xcp->dip, 0, NULL, NULL, xenconsintr,
294 (caddr_t)xcp);
295 }
296 }
297
298 static int
299 xenconsattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
300 {
301 int instance = ddi_get_instance(devi);
302 struct xencons *xcp;
303 int ret;
304
305 /* There can be only one. */
306 if (instance != 0)
307 return (DDI_FAILURE);
308
309 switch (cmd) {
310 case DDI_RESUME:
311 xcp = xencons_console;
312 xenconssetup(xcp);
313 return (DDI_SUCCESS);
314 case DDI_ATTACH:
315 break;
316 default:
317 return (DDI_FAILURE);
318 }
319
320 ret = ddi_soft_state_zalloc(xencons_soft_state, instance);
321 if (ret != DDI_SUCCESS)
322 return (DDI_FAILURE);
323 xcp = ddi_get_soft_state(xencons_soft_state, instance);
324 ASSERT(xcp != NULL); /* can't fail - we only just allocated it */
325
326 /*
327 * Set up the other components of the xencons structure for this port.
328 */
329 xcp->unit = instance;
330 xcp->dip = devi;
331
332 /* Fill in the polled I/O structure. */
333 xcp->polledio.cons_polledio_version = CONSPOLLEDIO_V0;
334 xcp->polledio.cons_polledio_argument = (cons_polledio_arg_t)xcp;
335 xcp->polledio.cons_polledio_putchar = xenconsputchar;
336 xcp->polledio.cons_polledio_getchar = xenconsgetchar;
337 xcp->polledio.cons_polledio_ischar = xenconsischar;
338 xcp->polledio.cons_polledio_enter = NULL;
339 xcp->polledio.cons_polledio_exit = NULL;
340
341 /*
342 * Initialize the asyncline structure, which holds the TTY
343 * protocol-private data, before enabling interrupts.
344 */
345 xcp->priv = kmem_zalloc(sizeof (struct asyncline), KM_SLEEP);
346 xcp->priv->async_common = xcp;
347 cv_init(&xcp->priv->async_flags_cv, NULL, CV_DRIVER, NULL);
348
349 /* Initialize mutexes before accessing the interface. */
350 mutex_init(&xcp->excl, NULL, MUTEX_DRIVER, NULL);
351 cv_init(&xcp->excl_cv, NULL, CV_DEFAULT, NULL);
352
353 /* create minor device node for this device */
354 ret = ddi_create_minor_node(devi, "xencons", S_IFCHR, instance,
355 DDI_NT_SERIAL, NULL);
356 if (ret != DDI_SUCCESS) {
357 ddi_remove_minor_node(devi, NULL);
358 xencons_soft_state_free(xcp);
359 return (DDI_FAILURE);
360 }
361
362 ddi_report_dev(devi);
363 xencons_console = xcp;
364 xenconssetup(xcp);
365 DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dattach: done\n", instance);
366 return (DDI_SUCCESS);
367 }
368
369 /*ARGSUSED*/
370 static int
371 xenconsinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
372 void **result)
373 {
374 dev_t dev = (dev_t)arg;
375 int instance, error;
376 struct xencons *xcp;
377
378 instance = getminor(dev);
379 xcp = ddi_get_soft_state(xencons_soft_state, instance);
380 if (xcp == NULL)
381 return (DDI_FAILURE);
382
383 switch (infocmd) {
384 case DDI_INFO_DEVT2DEVINFO:
385 if (xcp->dip == NULL)
386 error = DDI_FAILURE;
387 else {
388 *result = (void *) xcp->dip;
389 error = DDI_SUCCESS;
390 }
391 break;
392 case DDI_INFO_DEVT2INSTANCE:
393 *result = (void *)(intptr_t)instance;
394 error = DDI_SUCCESS;
395 break;
396 default:
397 error = DDI_FAILURE;
398 }
399 return (error);
400 }
401
402 /* xencons_soft_state_free - local wrapper for ddi_soft_state_free(9F) */
403
404 static void
405 xencons_soft_state_free(struct xencons *xcp)
406 {
407 mutex_destroy(&xcp->excl);
408 cv_destroy(&xcp->excl_cv);
409 kmem_free(xcp->priv, sizeof (struct asyncline));
410 ddi_soft_state_free(xencons_soft_state, xcp->unit);
411 }
412
413 /*ARGSUSED*/
414 static int
415 xenconsopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
416 {
417 struct xencons *xcp;
418 struct asyncline *async;
419 int unit;
420
421 unit = getminor(*dev);
422 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dopen\n", unit);
423 xcp = ddi_get_soft_state(xencons_soft_state, unit);
424 if (xcp == NULL)
425 return (ENXIO); /* unit not configured */
426 async = xcp->priv;
427 mutex_enter(&xcp->excl);
428
429 again:
430
431 if ((async->async_flags & ASYNC_ISOPEN) == 0) {
432 async->async_ttycommon.t_iflag = 0;
433 async->async_ttycommon.t_iocpending = NULL;
434 async->async_ttycommon.t_size.ws_row = 0;
435 async->async_ttycommon.t_size.ws_col = 0;
436 async->async_ttycommon.t_size.ws_xpixel = 0;
437 async->async_ttycommon.t_size.ws_ypixel = 0;
438 async->async_dev = *dev;
439 async->async_wbufcid = 0;
440
441 async->async_startc = CSTART;
442 async->async_stopc = CSTOP;
443 } else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
444 secpolicy_excl_open(cr) != 0) {
445 mutex_exit(&xcp->excl);
446 return (EBUSY);
447 }
448
449 async->async_ttycommon.t_flags |= TS_SOFTCAR;
450
451 async->async_ttycommon.t_readq = rq;
452 async->async_ttycommon.t_writeq = WR(rq);
453 rq->q_ptr = WR(rq)->q_ptr = (caddr_t)async;
454 mutex_exit(&xcp->excl);
455 /*
456 * Caution here -- qprocson sets the pointers that are used by canput
457 * called by xencons_rxint. ASYNC_ISOPEN must *not* be set until those
458 * pointers are valid.
459 */
460 qprocson(rq);
461 async->async_flags |= ASYNC_ISOPEN;
462 DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dopen: done\n", unit);
463 return (0);
464 }
465
466
467 /*
468 * Close routine.
469 */
470 /*ARGSUSED*/
471 static int
472 xenconsclose(queue_t *q, int flag, cred_t *credp)
473 {
474 struct asyncline *async;
475 struct xencons *xcp;
476 #ifdef DEBUG
477 int instance;
478 #endif
479
480 async = (struct asyncline *)q->q_ptr;
481 ASSERT(async != NULL);
482 xcp = async->async_common;
483 #ifdef DEBUG
484 instance = xcp->unit;
485 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose\n", instance);
486 #endif
487
488 mutex_enter(&xcp->excl);
489 async->async_flags |= ASYNC_CLOSING;
490
491 async->async_ocnt = 0;
492 if (async->async_xmitblk != NULL)
493 freeb(async->async_xmitblk);
494 async->async_xmitblk = NULL;
495
496 out:
497 ttycommon_close(&async->async_ttycommon);
498
499 /*
500 * Cancel outstanding "bufcall" request.
501 */
502 if (async->async_wbufcid != 0) {
503 unbufcall(async->async_wbufcid);
504 async->async_wbufcid = 0;
505 }
506
507 /* Note that qprocsoff can't be done until after interrupts are off */
508 qprocsoff(q);
509 q->q_ptr = WR(q)->q_ptr = NULL;
510 async->async_ttycommon.t_readq = NULL;
511 async->async_ttycommon.t_writeq = NULL;
512
513 /*
514 * Clear out device state, except persistent device property flags.
515 */
516 async->async_flags = 0;
517 cv_broadcast(&async->async_flags_cv);
518 mutex_exit(&xcp->excl);
519
520 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose: done\n", instance);
521 return (0);
522 }
523
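/*
 * Select an input byte: dom0 reads into a private buffer via
 * HYPERVISOR_console_io(), so the index is used as-is; a domU reads
 * from the shared ring page, so the index must be masked to the ring
 * size.
 */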
524 #define INBUF_IX(ix, ifp) (DOMAIN_IS_INITDOMAIN(xen_info) ? \
525 (ix) : MASK_XENCONS_IDX((ix), (ifp)->in))
526
527 /*
528 * Handle a xen console rx interrupt.
529 */
530 /*ARGSUSED*/
531 static void
532 xencons_rxint(struct xencons *xcp)
533 {
534 struct asyncline *async;
535 short cc;
536 mblk_t *bp;
537 queue_t *q;
538 uchar_t c, buf[16];
539 uchar_t *cp;
540 tty_common_t *tp;
541 int instance;
542 volatile struct xencons_interface *ifp;
543 XENCONS_RING_IDX cons, prod;
544
545 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_rxint\n");
546
547 loop:
548 mutex_enter(&xcp->excl);
549
550 /* sanity check if we should bail */
551 if (xencons_console == NULL) {
552 mutex_exit(&xcp->excl);
553 goto out;
554 }
555
556 async = xcp->priv;
557 instance = xcp->unit;
558 ifp = xcp->ifp;
559 tp = &async->async_ttycommon;
560 q = tp->t_readq;
561
562 if (async->async_flags & ASYNC_OUT_FLW_RESUME) {
563 xcasync_start(async);
564 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
565 }
566
567 /*
568 * If data is available, send it up the stream if there's
569 * somebody listening.
570 */
571 if (!(async->async_flags & ASYNC_ISOPEN)) {
572 mutex_exit(&xcp->excl);
573 goto out;
574 }
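/*
 * Fetch pending input: dom0 pulls up to 16 bytes directly from the
 * hypervisor; a domU consumes the bytes between in_cons and in_prod
 * in the shared input ring.
 */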
575 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
576 cc = HYPERVISOR_console_io(CONSOLEIO_read, 16, (char *)buf);
577 cp = buf;
578 cons = 0;
579 } else {
580 cons = ifp->in_cons;
581 prod = ifp->in_prod;
582
583 cc = prod - cons;
584 cp = (uchar_t *)ifp->in;
585 }
586 if (cc <= 0) {
587 mutex_exit(&xcp->excl);
588 goto out;
589 }
590
591 /*
592 * Check for character break sequence.
593 *
594 * Note that normally asy drivers only check for a character sequence
595 * if abort_enable == KIOCABORTALTERNATE and otherwise use a break
596 * sensed on the line to do an abort_sequence_enter. Since the
597 * hypervisor does not use a real chip for the console we default to
598 * using the alternate sequence.
599 */
600 if ((abort_enable == KIOCABORTENABLE) && (xcp->flags & ASY_CONSOLE)) {
601 XENCONS_RING_IDX i;
602
603 for (i = 0; i < cc; i++) {
604 c = cp[INBUF_IX(cons + i, ifp)];
605 if (abort_charseq_recognize(c)) {
606 /*
607 * Eat the abort sequence; it's not a valid
608 * debugger command.
609 */
610 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
611 membar_producer();
612 ifp->in_cons = cons + i;
613 } else {
614 cons += i;
615 }
616 abort_sequence_enter((char *)NULL);
617 /*
618 * Back from debugger, resume normal processing
619 */
620 mutex_exit(&xcp->excl);
621 goto loop;
622 }
623 }
624 }
625
626 if (!canput(q)) {
627 if (!(async->async_inflow_source & IN_FLOW_STREAMS)) {
628 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
629 IN_FLOW_STREAMS);
630 }
631 mutex_exit(&xcp->excl);
632 goto out;
633 }
634 if (async->async_inflow_source & IN_FLOW_STREAMS) {
635 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
636 IN_FLOW_STREAMS);
637 }
638 DEBUGCONT2(XENCONS_DEBUG_INPUT,
639 "xencons%d_rxint: %d char(s) in queue.\n", instance, cc);
640 if (!(bp = allocb(cc, BPRI_MED))) {
641 mutex_exit(&xcp->excl);
642 ttycommon_qfull(&async->async_ttycommon, q);
643 goto out;
644 }
645 do {
646 c = cp[INBUF_IX(cons++, ifp)];
647 /*
648 * We handle the XON/XOFF chars if IXON is set,
649 * but if the received char is _POSIX_VDISABLE,
650 * we leave it to the upper-level module.
651 */
652 if (tp->t_iflag & IXON) {
653 if ((c == async->async_stopc) &&
654 (c != _POSIX_VDISABLE)) {
655 xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
656 continue;
657 } else if ((c == async->async_startc) &&
658 (c != _POSIX_VDISABLE)) {
659 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
660 continue;
661 }
662 if ((tp->t_iflag & IXANY) &&
663 (async->async_flags & ASYNC_SW_OUT_FLW)) {
664 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
665 }
666 }
667 *bp->b_wptr++ = c;
668 } while (--cc);
669 membar_producer();
670 if (!DOMAIN_IS_INITDOMAIN(xen_info))
671 ifp->in_cons = cons;
672 mutex_exit(&xcp->excl);
673 if (bp->b_wptr > bp->b_rptr) {
674 if (!canput(q)) {
675 xenconserror(CE_NOTE, "xencons%d: local queue full",
676 instance);
677 freemsg(bp);
678 } else
679 (void) putq(q, bp);
680 } else
681 freemsg(bp);
682 if (DOMAIN_IS_INITDOMAIN(xen_info))
683 goto loop;
684 out:
685 DEBUGCONT1(XENCONS_DEBUG_PROCS, "xencons%d_rxint: done\n", instance);
686 if (!DOMAIN_IS_INITDOMAIN(xen_info))
687 ec_notify_via_evtchn(xcp->evtchn);
688 }
689
690
691 /*
692 * Handle a xen console tx interrupt.
693 */
694 /*ARGSUSED*/
695 static void
696 xencons_txint(struct xencons *xcp)
697 {
698 struct asyncline *async;
699
700 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint\n");
701
702 /*
703 * prevent recursive entry
704 */
705 if (mutex_owner(&xcp->excl) == curthread) {
706 goto out;
707 }
708
709 mutex_enter(&xcp->excl);
710 if (xencons_console == NULL) {
711 mutex_exit(&xcp->excl);
712 goto out;
713 }
714
715 /* make sure the device is open */
716 async = xcp->priv;
717 if ((async->async_flags & ASYNC_ISOPEN) != 0)
718 xcasync_start(async);
719
720 mutex_exit(&xcp->excl);
721 out:
722 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint: done\n");
723 }
724
725
726 /*
727 * Get an event when input ring becomes not empty or output ring becomes not
728 * full.
729 */
730 static uint_t
731 xenconsintr(caddr_t arg)
732 {
733 struct xencons *xcp = (struct xencons *)arg;
734 volatile struct xencons_interface *ifp = xcp->ifp;
735
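/* Input ring non-empty: receive. Output ring not full: room to transmit. */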
736 if (ifp->in_prod != ifp->in_cons)
737 xencons_rxint(xcp);
738 if (ifp->out_prod - ifp->out_cons < sizeof (ifp->out))
739 xencons_txint(xcp);
740 return (DDI_INTR_CLAIMED);
741 }
742
743 /*
744 * Console interrupt routine for privileged domains
745 */
746 static uint_t
747 xenconsintr_priv(caddr_t arg)
748 {
749 struct xencons *xcp = (struct xencons *)arg;
750
751 xencons_rxint(xcp);
752 xencons_txint(xcp);
753 return (DDI_INTR_CLAIMED);
754 }
755
756 /*
757 * Start output on a line, unless it's busy, frozen, or otherwise.
758 */
759 /*ARGSUSED*/
760 static void
761 xcasync_start(struct asyncline *async)
762 {
763 struct xencons *xcp = async->async_common;
764 int cc;
765 queue_t *q;
766 mblk_t *bp;
767 int len, space, blen;
768 mblk_t *nbp;
769
770 #ifdef DEBUG
771 int instance = xcp->unit;
772
773 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_nstart\n", instance);
774 #endif
775 ASSERT(mutex_owned(&xcp->excl));
776
777 /*
778 * Check only pended sw input flow control.
779 */
780 domore:
781 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_CHECK, IN_FLOW_NULL);
782
783 if ((q = async->async_ttycommon.t_writeq) == NULL) {
784 return; /* not attached to a stream */
785 }
786
787 for (;;) {
788 if ((bp = getq(q)) == NULL)
789 return; /* no data to transmit */
790
791 /*
792 * We have a message block to work on.
793 * Check whether it's a break, a delay, or an ioctl (the latter
794 * occurs if the ioctl in question was waiting for the output
795 * to drain). If it's one of those, process it immediately.
796 */
797 switch (bp->b_datap->db_type) {
798
799 case M_IOCTL:
800 /*
801 * This ioctl was waiting for the output ahead of
802 * it to drain; obviously, it has. Do it, and
803 * then grab the next message after it.
804 */
805 mutex_exit(&xcp->excl);
806 xcasync_ioctl(async, q, bp);
807 mutex_enter(&xcp->excl);
808 continue;
809 }
810
811 while (bp != NULL && (cc = bp->b_wptr - bp->b_rptr) == 0) {
812 nbp = bp->b_cont;
813 freeb(bp);
814 bp = nbp;
815 }
816 if (bp != NULL)
817 break;
818 }
819
820 /*
821 * We have data to transmit. If output is stopped, put
822 * it back and try again later.
823 */
824 if (async->async_flags & (ASYNC_SW_OUT_FLW | ASYNC_STOPPED)) {
825 (void) putbq(q, bp);
826 return;
827 }
828
829
830 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
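/*
 * dom0: gather up to XENCONS_WBUFSIZE bytes from the message chain
 * into xencons_wbuf and write them with a hypercall; the mutex is
 * dropped around the (potentially slow) HYPERVISOR_console_io call.
 */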
831 len = 0;
832 space = XENCONS_WBUFSIZE;
833 while (bp != NULL && space) {
834 blen = bp->b_wptr - bp->b_rptr;
835 cc = min(blen, space);
836 bcopy(bp->b_rptr, &xencons_wbuf[len], cc);
837 bp->b_rptr += cc;
838 if (cc == blen) {
839 nbp = bp->b_cont;
840 freeb(bp);
841 bp = nbp;
842 }
843 space -= cc;
844 len += cc;
845 }
846 mutex_exit(&xcp->excl);
847 (void) HYPERVISOR_console_io(CONSOLEIO_write, len,
848 xencons_wbuf);
849 mutex_enter(&xcp->excl);
850 if (bp != NULL)
851 (void) putbq(q, bp); /* not done with this msg yet */
852 /*
853 * There are no completion interrupts when using the
854 * HYPERVISOR_console_io call to write console data
855 * so we loop here till we have sent all the data to the
856 * hypervisor.
857 */
858 goto domore;
859 } else {
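/*
 * domU: copy as much as fits into the shared output ring, publish
 * the new producer index, then kick the backend via the event
 * channel.
 */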
860 volatile struct xencons_interface *ifp = xcp->ifp;
861 XENCONS_RING_IDX cons, prod;
862
863 cons = ifp->out_cons;
864 prod = ifp->out_prod;
865 membar_enter();
866 while (bp != NULL && ((prod - cons) < sizeof (ifp->out))) {
867 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] =
868 *bp->b_rptr++;
869 if (bp->b_rptr == bp->b_wptr) {
870 nbp = bp->b_cont;
871 freeb(bp);
872 bp = nbp;
873 }
874 }
875 membar_producer();
876 ifp->out_prod = prod;
877 ec_notify_via_evtchn(xcp->evtchn);
878 if (bp != NULL)
879 (void) putbq(q, bp); /* not done with this msg yet */
880 }
881 }
882
883
884 /*
885 * Process an "ioctl" message sent down to us.
886 * Note that we don't need to get any locks until we are ready to access
887 * the hardware. Nothing we access until then is going to be altered
888 * outside of the STREAMS framework, so we should be safe.
889 */
890 static void
891 xcasync_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
892 {
893 struct xencons *xcp = async->async_common;
894 tty_common_t *tp = &async->async_ttycommon;
895 struct iocblk *iocp;
896 unsigned datasize;
897 int error = 0;
898
899 #ifdef DEBUG
900 int instance = xcp->unit;
901
902 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl\n", instance);
903 #endif
904
905 if (tp->t_iocpending != NULL) {
906 /*
907 * We were holding an "ioctl" response pending the
908 * availability of an "mblk" to hold data to be passed up;
909 * another "ioctl" came through, which means that "ioctl"
910 * must have timed out or been aborted.
911 */
912 freemsg(async->async_ttycommon.t_iocpending);
913 async->async_ttycommon.t_iocpending = NULL;
914 }
915
916 iocp = (struct iocblk *)mp->b_rptr;
917
918 /*
919 * For TIOCMGET and the PPS ioctls, do NOT call ttycommon_ioctl()
920 * because this function frees up the message block (mp->b_cont) that
921 * contains the user location where we pass back the results.
922 *
923 * Similarly, CONSOPENPOLLEDIO needs ioc_count, which ttycommon_ioctl
924 * zaps. We know that ttycommon_ioctl doesn't know any CONS*
925 * ioctls, so keep the others safe too.
926 */
927 DEBUGCONT2(XENCONS_DEBUG_IOCTL, "async%d_ioctl: %s\n",
928 instance,
929 iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
930 iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
931 iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
932 iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" : "other");
933
934 switch (iocp->ioc_cmd) {
935 case TIOCMGET:
936 case TIOCGPPS:
937 case TIOCSPPS:
938 case TIOCGPPSEV:
939 case CONSOPENPOLLEDIO:
940 case CONSCLOSEPOLLEDIO:
941 case CONSSETABORTENABLE:
942 case CONSGETABORTENABLE:
943 error = -1; /* Do Nothing */
944 break;
945 default:
946
947 /*
948 * The only way in which "ttycommon_ioctl" can fail is if the
949 * "ioctl" requires a response containing data to be returned
950 * to the user, and no mblk could be allocated for the data.
951 * No such "ioctl" alters our state. Thus, we always go ahead
952 * and do any state-changes the "ioctl" calls for. If we
953 * couldn't allocate the data, "ttycommon_ioctl" has stashed
954 * the "ioctl" away safely, so we just call "bufcall" to
955 * request that we be called back when we stand a better
956 * chance of allocating the data.
957 */
958 if ((datasize = ttycommon_ioctl(tp, wq, mp, &error)) != 0) {
959 if (async->async_wbufcid)
960 unbufcall(async->async_wbufcid);
961 async->async_wbufcid = bufcall(datasize, BPRI_HI,
962 (void (*)(void *)) xcasync_reioctl,
963 (void *)(intptr_t)async->async_common->unit);
964 return;
965 }
966 }
967
968 mutex_enter(&xcp->excl);
969
970 if (error == 0) {
971 /*
972 * "ttycommon_ioctl" did most of the work; we just use the
973 * data it set up.
974 */
975 switch (iocp->ioc_cmd) {
976
977 case TCSETS:
978 case TCSETSF:
979 case TCSETSW:
980 case TCSETA:
981 case TCSETAW:
982 case TCSETAF:
983 break;
984 }
985 } else if (error < 0) {
986 /*
987 * "ttycommon_ioctl" didn't do anything; we process it here.
988 */
989 error = 0;
990 switch (iocp->ioc_cmd) {
991
992 case TCSBRK:
993 error = miocpullup(mp, sizeof (int));
994 break;
995
996 case TIOCSBRK:
997 mioc2ack(mp, NULL, 0, 0);
998 break;
999
1000 case TIOCCBRK:
1001 mioc2ack(mp, NULL, 0, 0);
1002 break;
1003
1004 case CONSOPENPOLLEDIO:
1005 error = miocpullup(mp, sizeof (cons_polledio_arg_t));
1006 if (error != 0)
1007 break;
1008
1009 *(cons_polledio_arg_t *)mp->b_cont->b_rptr =
1010 (cons_polledio_arg_t)&xcp->polledio;
1011
1012 mp->b_datap->db_type = M_IOCACK;
1013 break;
1014
1015 case CONSCLOSEPOLLEDIO:
1016 mp->b_datap->db_type = M_IOCACK;
1017 iocp->ioc_error = 0;
1018 iocp->ioc_rval = 0;
1019 break;
1020
1021 case CONSSETABORTENABLE:
1022 error = secpolicy_console(iocp->ioc_cr);
1023 if (error != 0)
1024 break;
1025
1026 if (iocp->ioc_count != TRANSPARENT) {
1027 error = EINVAL;
1028 break;
1029 }
1030
1031 if (*(intptr_t *)mp->b_cont->b_rptr)
1032 xcp->flags |= ASY_CONSOLE;
1033 else
1034 xcp->flags &= ~ASY_CONSOLE;
1035
1036 mp->b_datap->db_type = M_IOCACK;
1037 iocp->ioc_error = 0;
1038 iocp->ioc_rval = 0;
1039 break;
1040
1041 case CONSGETABORTENABLE:
1042 /*CONSTANTCONDITION*/
1043 ASSERT(sizeof (boolean_t) <= sizeof (boolean_t *));
1044 /*
1045 * Store the return value right in the payload
1046 * we were passed. Crude.
1047 */
1048 mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
1049 *(boolean_t *)mp->b_cont->b_rptr =
1050 (xcp->flags & ASY_CONSOLE) != 0;
1051 break;
1052
1053 default:
1054 /*
1055 * If we don't understand it, it's an error. NAK it.
1056 */
1057 error = EINVAL;
1058 break;
1059 }
1060 }
1061 if (error != 0) {
1062 iocp->ioc_error = error;
1063 mp->b_datap->db_type = M_IOCNAK;
1064 }
1065 mutex_exit(&xcp->excl);
1066 qreply(wq, mp);
1067 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl: done\n", instance);
1068 }
1069
1070 static int
1071 xenconsrsrv(queue_t *q)
1072 {
1073 mblk_t *bp;
1074
1075 while (canputnext(q) && (bp = getq(q)))
1076 putnext(q, bp);
1077 return (0);
1078 }
1079
1080 /*
1081 * Put procedure for write queue.
1082 * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
1083 * set the flow control character for M_STOPI and M_STARTI messages;
1084 * queue up M_BREAK, M_DELAY, and M_DATA messages for processing
1085 * by the start routine, and then call the start routine; discard
1086 * everything else. Note that this driver does not incorporate any
1087 * mechanism to negotiate to handle the canonicalization process.
1088 * It expects that these functions are handled in upper module(s),
1089 * as we do in ldterm.
1090 */
1091 static int
1092 xenconswput(queue_t *q, mblk_t *mp)
1093 {
1094 struct asyncline *async;
1095 struct xencons *xcp;
1096
1097 async = (struct asyncline *)q->q_ptr;
1098 xcp = async->async_common;
1099
1100 switch (mp->b_datap->db_type) {
1101
1102 case M_STOP:
1103 mutex_enter(&xcp->excl);
1104 async->async_flags |= ASYNC_STOPPED;
1105 mutex_exit(&xcp->excl);
1106 freemsg(mp);
1107 break;
1108
1109 case M_START:
1110 mutex_enter(&xcp->excl);
1111 if (async->async_flags & ASYNC_STOPPED) {
1112 async->async_flags &= ~ASYNC_STOPPED;
1113 xcasync_start(async);
1114 }
1115 mutex_exit(&xcp->excl);
1116 freemsg(mp);
1117 break;
1118
1119 case M_IOCTL:
1120 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1121
1122 case TCSETSW:
1123 case TCSETSF:
1124 case TCSETAW:
1125 case TCSETAF:
1126 /*
1127 * The changes do not take effect until all
1128 * output queued before them is drained.
1129 * Put this message on the queue, so that
1130 * "xcasync_start" will see it when it's done
1131 * with the output before it. Poke the
1132 * start routine, just in case.
1133 */
1134 (void) putq(q, mp);
1135 mutex_enter(&xcp->excl);
1136 xcasync_start(async);
1137 mutex_exit(&xcp->excl);
1138 break;
1139
1140 default:
1141 /*
1142 * Do it now.
1143 */
1144 xcasync_ioctl(async, q, mp);
1145 break;
1146 }
1147 break;
1148
1149 case M_FLUSH:
1150 if (*mp->b_rptr & FLUSHW) {
1151 mutex_enter(&xcp->excl);
1152 /*
1153 * Flush our write queue.
1154 */
1155 flushq(q, FLUSHDATA); /* XXX doesn't flush M_DELAY */
1156 if (async->async_xmitblk != NULL) {
1157 freeb(async->async_xmitblk);
1158 async->async_xmitblk = NULL;
1159 }
1160 mutex_exit(&xcp->excl);
1161 *mp->b_rptr &= ~FLUSHW; /* it has been flushed */
1162 }
1163 if (*mp->b_rptr & FLUSHR) {
1164 flushq(RD(q), FLUSHDATA);
1165 qreply(q, mp); /* give the read queues a crack at it */
1166 } else {
1167 freemsg(mp);
1168 }
1169
1170 /*
1171 * We must make sure we process messages that survive the
1172 * write-side flush.
1173 */
1174 mutex_enter(&xcp->excl);
1175 xcasync_start(async);
1176 mutex_exit(&xcp->excl);
1177 break;
1178
1179 case M_BREAK:
1180 case M_DELAY:
1181 case M_DATA:
1182 /*
1183 * Queue the message up to be transmitted,
1184 * and poke the start routine.
1185 */
1186 (void) putq(q, mp);
1187 mutex_enter(&xcp->excl);
1188 xcasync_start(async);
1189 mutex_exit(&xcp->excl);
1190 break;
1191
1192 case M_STOPI:
1193 mutex_enter(&xcp->excl);
1195 if (!(async->async_inflow_source & IN_FLOW_USER)) {
1196 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
1197 IN_FLOW_USER);
1198 }
1199 mutex_exit(&xcp->excl);
1201 freemsg(mp);
1202 break;
1203
1204 case M_STARTI:
1205 mutex_enter(&xcp->excl);
1207 if (async->async_inflow_source & IN_FLOW_USER) {
1208 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
1209 IN_FLOW_USER);
1210 }
1211 mutex_exit(&xcp->excl);
1213 freemsg(mp);
1214 break;
1215
1216 case M_CTL:
1217 if (MBLKL(mp) >= sizeof (struct iocblk) &&
1218 ((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
1219 ((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
1220 qreply(q, mp);
1221 } else {
1222 freemsg(mp);
1223 }
1224 break;
1225
1226 default:
1227 freemsg(mp);
1228 break;
1229 }
1230 return (0);
1231 }
1232
1233 /*
1234 * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
1235 * the buffer we need.
1236 */
1237 static void
1238 xcasync_reioctl(void *unit)
1239 {
1240 int instance = (uintptr_t)unit;
1241 struct asyncline *async;
1242 struct xencons *xcp;
1243 queue_t *q;
1244 mblk_t *mp;
1245
1246 xcp = ddi_get_soft_state(xencons_soft_state, instance);
1247 ASSERT(xcp != NULL);
1248 async = xcp->priv;
1249
1250 /*
1251 * The bufcall is no longer pending.
1252 */
1253 mutex_enter(&xcp->excl);
1254 async->async_wbufcid = 0;
1255 if ((q = async->async_ttycommon.t_writeq) == NULL) {
1256 mutex_exit(&xcp->excl);
1257 return;
1258 }
1259 if ((mp = async->async_ttycommon.t_iocpending) != NULL) {
1260 /* not pending any more */
1261 async->async_ttycommon.t_iocpending = NULL;
1262 mutex_exit(&xcp->excl);
1263 xcasync_ioctl(async, q, mp);
1264 } else
1265 mutex_exit(&xcp->excl);
1266 }
1267
1268
1269 /*
1270 * debugger/console support routines.
1271 */
1272
1273 /*
1274 * put a character out
1275 * Do not use interrupts. If char is LF, put out CR, LF.
1276 */
1277 /*ARGSUSED*/
1278 static void
1279 xenconsputchar(cons_polledio_arg_t arg, uchar_t c)
1280 {
1281 struct xencons *xcp = xencons_console;
1282 volatile struct xencons_interface *ifp = xcp->ifp;
1283 XENCONS_RING_IDX prod;
1284
1285 if (c == '\n')
1286 xenconsputchar(arg, '\r');
1287
1288 /*
1289 * domain 0 can use the console I/O...
1290 */
1291 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1292 char buffer[1];
1293
1294 buffer[0] = c;
1295 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, buffer);
1296 return;
1297 }
1298
1299 /*
1300 * domU has to go through dom0 virtual console.
1301 */
1302 while (ifp->out_prod - ifp->out_cons == sizeof (ifp->out))
1303 (void) HYPERVISOR_yield();
1304
1305 prod = ifp->out_prod;
1306 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] = c;
1307 membar_producer();
1308 ifp->out_prod = prod;
1309 ec_notify_via_evtchn(xcp->evtchn);
1310 }
1311
1312 /*
1313 * See if there's a character available. If no character is
1314 * available, return 0. Run in polled mode, no interrupts.
1315 */
1316 static boolean_t
1317 xenconsischar(cons_polledio_arg_t arg)
1318 {
1319 struct xencons *xcp = (struct xencons *)arg;
1320 volatile struct xencons_interface *ifp = xcp->ifp;
1321
1322 if (xcp->polldix < xcp->polllen)
1323 return (B_TRUE);
1324 /*
1325 * domain 0 can use the console I/O...
1326 */
1327 xcp->polldix = 0;
1328 xcp->polllen = 0;
1329 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1330 xcp->polllen = HYPERVISOR_console_io(CONSOLEIO_read, 1,
1331 (char *)xcp->pollbuf);
1332 return (xcp->polllen != 0);
1333 }
1334
1335 /*
1336 * domU has to go through virtual console device.
1337 */
1338 if (ifp->in_prod != ifp->in_cons) {
1339 XENCONS_RING_IDX cons;
1340
1341 cons = ifp->in_cons;
1342 membar_enter();
1343 xcp->pollbuf[0] = ifp->in[MASK_XENCONS_IDX(cons++, ifp->in)];
1344 membar_producer();
1345 ifp->in_cons = cons;
1346 xcp->polllen = 1;
1347 }
1348 return (xcp->polllen != 0);
1349 }
1350
1351 /*
1352 * Get a character. Run in polled mode, no interrupts.
1353 */
1354 static int
1355 xenconsgetchar(cons_polledio_arg_t arg)
1356 {
1357 struct xencons *xcp = (struct xencons *)arg;
1358
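/*
 * Block on the console event channel until xenconsischar() reports
 * buffered input, then return the next polled character.
 */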
1359 ec_wait_on_evtchn(xcp->evtchn, (int (*)(void *))xenconsischar, arg);
1360
1361 return (xcp->pollbuf[xcp->polldix++]);
1362 }
1363
1364 static void
1365 xenconserror(int level, const char *fmt, ...)
1366 {
1367 va_list adx;
1368 static time_t last;
1369 static const char *lastfmt;
1370 time_t now;
1371
1372 /*
1373 * Don't print the same error message too often.
1374 * Print the message only if we have not printed the
1375 * message within the last second.
1376 * Note that fmt cannot be a pointer to a string
1377 * stored on the stack; the fmt pointer must be in
1378 * the data segment, otherwise lastfmt would point
1379 * to nonsense.
1380 */
1381 now = gethrestime_sec();
1382 if (last == now && lastfmt == fmt)
1383 return;
1384
1385 last = now;
1386 lastfmt = fmt;
1387
1388 va_start(adx, fmt);
1389 vcmn_err(level, fmt, adx);
1390 va_end(adx);
1391 }
1392
1393
1394 /*
1395 * Check for abort character sequence
1396 */
1397 static boolean_t
1398 abort_charseq_recognize(uchar_t ch)
1399 {
1400 static int state = 0;
1401 #define CNTRL(c) ((c)&037)
1402 static char sequence[] = { '\r', '~', CNTRL('b') };
1403
1404 if (ch == sequence[state]) {
1405 if (++state >= sizeof (sequence)) {
1406 state = 0;
1407 return (B_TRUE);
1408 }
1409 } else {
1410 state = (ch == sequence[0]) ? 1 : 0;
1411 }
1412 return (B_FALSE);
1413 }
1414
1415 /*
1416 * Flow control functions
1417 */
1418
1419 /*
1420 * Software output flow control
1421 * This function can be executed successfully in any situation.
1422 * It does not touch the HW; it just changes the SW output flow control flag.
1423 * INPUT VALUE of onoff:
1424 * FLOW_START means to clear SW output flow control flag,
1425 * also set ASYNC_OUT_FLW_RESUME.
1426 * FLOW_STOP means to set SW output flow control flag,
1427 * also clear ASYNC_OUT_FLW_RESUME.
1428 */
1429 static void
1430 xcasync_flowcontrol_sw_output(struct xencons *xcp, async_flowc_action onoff)
1431 {
1432 struct asyncline *async = xcp->priv;
1433 int instance = xcp->unit;
1434
1435 ASSERT(mutex_owned(&xcp->excl));
1436
1437 if (!(async->async_ttycommon.t_iflag & IXON))
1438 return;
1439
1440 switch (onoff) {
1441 case FLOW_STOP:
1442 async->async_flags |= ASYNC_SW_OUT_FLW;
1443 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
1444 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1445 "xencons%d: output sflow stop\n", instance);
1446 break;
1447 case FLOW_START:
1448 async->async_flags &= ~ASYNC_SW_OUT_FLW;
1449 async->async_flags |= ASYNC_OUT_FLW_RESUME;
1450 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1451 "xencons%d: output sflow start\n", instance);
1452 break;
1453 default:
1454 break;
1455 }
1456 }
1457
1458 /*
1459 * Software input flow control
1460 * This function can execute software input flow control
1461 * INPUT VALUE of onoff:
1462 * FLOW_START means to send out a XON char
1463 * and clear SW input flow control flag.
1464 * FLOW_STOP means to send out a XOFF char
1465 * and set SW input flow control flag.
1466 * FLOW_CHECK means to check whether there is pending XON/XOFF
1467 * if it is true, send it out.
1468 * INPUT VALUE of type:
1469 * IN_FLOW_STREAMS means flow control is due to STREAMS
1470 * IN_FLOW_USER means flow control is due to user's commands
1471 * RETURN VALUE: B_FALSE means no flow control char is sent
1472 * B_TRUE means one flow control char is sent
1473 */
1474 static boolean_t
1475 xcasync_flowcontrol_sw_input(struct xencons *xcp, async_flowc_action onoff,
1476 int type)
1477 {
1478 struct asyncline *async = xcp->priv;
1479 int instance = xcp->unit;
1480 int rval = B_FALSE;
1481
1482 ASSERT(mutex_owned(&xcp->excl));
1483
1484 if (!(async->async_ttycommon.t_iflag & IXOFF))
1485 return (rval);
1486
1487 /*
1488 * If we get this far, then we know IXOFF is set.
1489 */
1490 switch (onoff) {
1491 case FLOW_STOP:
1492 async->async_inflow_source |= type;
1493
1494 /*
1495 * We'll send an XOFF character for each of up to
1496 * three different input flow control attempts to stop input.
1497 * If we have already sent out one XOFF but FLOW_STOP comes
1498 * again, input flow control has become more serious, so send
1499 * XOFF again.
1500 */
1501 if (async->async_inflow_source & (IN_FLOW_STREAMS |
1502 IN_FLOW_USER))
1503 async->async_flags |= ASYNC_SW_IN_FLOW |
1504 ASYNC_SW_IN_NEEDED;
1505 DEBUGCONT2(XENCONS_DEBUG_SFLOW, "xencons%d: input sflow stop, "
1506 "type = %x\n", instance, async->async_inflow_source);
1507 break;
1508 case FLOW_START:
1509 async->async_inflow_source &= ~type;
1510 if (async->async_inflow_source == 0) {
1511 async->async_flags = (async->async_flags &
1512 ~ASYNC_SW_IN_FLOW) | ASYNC_SW_IN_NEEDED;
1513 DEBUGCONT1(XENCONS_DEBUG_SFLOW, "xencons%d: "
1514 "input sflow start\n", instance);
1515 }
1516 break;
1517 default:
1518 break;
1519 }
1520
1521 if (async->async_flags & ASYNC_SW_IN_NEEDED) {
1522 /*
1523 * If we get this far, then we know we need to send out
1524 * XON or XOFF char.
1525 */
1526 char c;
1527
1528 rval = B_TRUE;
1529 c = (async->async_flags & ASYNC_SW_IN_FLOW) ?
1530 async->async_stopc : async->async_startc;
1531 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1532 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
1533 async->async_flags &= ~ASYNC_SW_IN_NEEDED;
1534 return (rval);
1535 } else {
1536 xenconsputchar(NULL, c);
1537 }
1538 }
1539 return (rval);
1540 }
1541
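/*
 * STREAMS plumbing: module_info describes the module and its default
 * water marks, the read-side qinit supplies the open/close/service
 * routines, the write-side qinit supplies the put routine, and the
 * streamtab ties both together for the cb_ops entry below.
 */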
1542 struct module_info xencons_info = {
1543 0, /* mi_idnum */
1544 "xencons", /* mi_idname */
1545 0, /* mi_minpsz */
1546 INFPSZ, /* mi_maxpsz */
1547 4096, /* mi_hiwat */
1548 128 /* mi_lowat */
1549 };
1550
1551 static struct qinit xencons_rint = {
1552 putq,
1553 xenconsrsrv,
1554 xenconsopen,
1555 xenconsclose,
1556 NULL,
1557 &xencons_info,
1558 NULL
1559 };
1560
1561 static struct qinit xencons_wint = {
1562 xenconswput,
1563 NULL,
1564 NULL,
1565 NULL,
1566 NULL,
1567 &xencons_info,
1568 NULL
1569 };
1570
1571 struct streamtab xencons_str_info = {
1572 &xencons_rint,
1573 &xencons_wint,
1574 NULL,
1575 NULL
1576 };
1577
1578 static struct cb_ops cb_xencons_ops = {
1579 nodev, /* cb_open */
1580 nodev, /* cb_close */
1581 nodev, /* cb_strategy */
1582 nodev, /* cb_print */
1583 nodev, /* cb_dump */
1584 nodev, /* cb_read */
1585 nodev, /* cb_write */
1586 nodev, /* cb_ioctl */
1587 nodev, /* cb_devmap */
1588 nodev, /* cb_mmap */
1589 nodev, /* cb_segmap */
1590 nochpoll, /* cb_chpoll */
1591 ddi_prop_op, /* cb_prop_op */
1592 &xencons_str_info, /* cb_stream */
1593 D_MP /* cb_flag */
1594 };
1595
1596 struct dev_ops xencons_ops = {
1597 DEVO_REV, /* devo_rev */
1598 0, /* devo_refcnt */
1599 xenconsinfo, /* devo_getinfo */
1600 nulldev, /* devo_identify */
1601 nulldev, /* devo_probe */
1602 xenconsattach, /* devo_attach */
1603 xenconsdetach, /* devo_detach */
1604 nodev, /* devo_reset */
1605 &cb_xencons_ops, /* devo_cb_ops */
1606 NULL, /* devo_bus_ops */
1607 NULL, /* devo_power */
1608 ddi_quiesce_not_needed, /* devo_quiesce */
1609 };
1610
1611 static struct modldrv modldrv = {
1612 &mod_driverops, /* Type of module. This one is a driver */
1613 "virtual console driver",
1614 &xencons_ops, /* driver ops */
1615 };
1616
1617 static struct modlinkage modlinkage = {
1618 MODREV_1,
1619 (void *)&modldrv,
1620 NULL
1621 };
1622
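/* Loadable module entry points. */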
1623 int
1624 _init(void)
1625 {
1626 int rv;
1627
1628 if ((rv = ddi_soft_state_init(&xencons_soft_state,
1629 sizeof (struct xencons), 1)) != 0)
1630 return (rv);
1631 if ((rv = mod_install(&modlinkage)) != 0) {
1632 ddi_soft_state_fini(&xencons_soft_state);
1633 return (rv);
1634 }
1635 DEBUGCONT2(XENCONS_DEBUG_INIT, "%s, debug = %x\n",
1636 modldrv.drv_linkinfo, debug);
1637 return (0);
1638 }
1639
1640 int
1641 _fini(void)
1642 {
1643 int rv;
1644
1645 if ((rv = mod_remove(&modlinkage)) != 0)
1646 return (rv);
1647
1648 ddi_soft_state_fini(&xencons_soft_state);
1649 return (0);
1650 }
1651
1652 int
1653 _info(struct modinfo *modinfop)
1654 {
1655 return (mod_info(&modlinkage, modinfop));
1656 }
1657