1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
23 /* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
24 /* All Rights Reserved */
25
26 /*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31
32 /*
33 *
34 * Copyright (c) 2004 Christian Limpach.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. This section intentionally left blank.
46 * 4. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60 /*
61 * Section 3 of the above license was updated in response to bug 6379571.
62 */
63
64 /*
65 * Hypervisor virtual console driver
66 */
67
68 #include <sys/param.h>
69 #include <sys/types.h>
70 #include <sys/signal.h>
71 #include <sys/stream.h>
72 #include <sys/termio.h>
73 #include <sys/errno.h>
74 #include <sys/file.h>
75 #include <sys/cmn_err.h>
76 #include <sys/stropts.h>
77 #include <sys/strsubr.h>
78 #include <sys/strtty.h>
79 #include <sys/debug.h>
80 #include <sys/kbio.h>
81 #include <sys/cred.h>
82 #include <sys/stat.h>
83 #include <sys/consdev.h>
84 #include <sys/mkdev.h>
85 #include <sys/kmem.h>
87 #include <sys/strsun.h>
88 #ifdef DEBUG
89 #include <sys/promif.h>
90 #endif
91 #include <sys/modctl.h>
92 #include <sys/ddi.h>
93 #include <sys/sunddi.h>
94 #include <sys/sunndi.h>
95 #include <sys/policy.h>
96 #include <sys/atomic.h>
97 #include <sys/psm.h>
98 #include <xen/public/io/console.h>
99
100 #include "xencons.h"
101
102 #include <sys/hypervisor.h>
103 #include <sys/evtchn_impl.h>
104 #include <xen/sys/xenbus_impl.h>
105 #include <xen/sys/xendev.h>
106
107 #ifdef DEBUG
108 #define XENCONS_DEBUG_INIT 0x0001 /* msgs during driver initialization. */
109 #define XENCONS_DEBUG_INPUT 0x0002 /* characters received during int. */
110 #define XENCONS_DEBUG_EOT 0x0004 /* msgs when wait for xmit to finish. */
111 #define XENCONS_DEBUG_CLOSE 0x0008 /* msgs when driver open/close called */
112 #define XENCONS_DEBUG_PROCS 0x0020 /* each proc name as it is entered. */
113 #define XENCONS_DEBUG_OUT 0x0100 /* msgs about output events. */
114 #define XENCONS_DEBUG_BUSY 0x0200 /* msgs when xmit is enabled/disabled */
115 #define XENCONS_DEBUG_MODEM 0x0400 /* msgs about modem status & control. */
116 #define XENCONS_DEBUG_MODM2 0x0800 /* msgs about modem status & control. */
117 #define XENCONS_DEBUG_IOCTL 0x1000 /* Output msgs about ioctl messages. */
118 #define XENCONS_DEBUG_CHIP 0x2000 /* msgs about chip identification. */
119 #define XENCONS_DEBUG_SFLOW 0x4000 /* msgs when S/W flowcontrol active */
120 #define XENCONS_DEBUG(x) (debug & (x))
121 static int debug = 0;
122 #else
123 #define XENCONS_DEBUG(x) B_FALSE
124 #endif
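/*
 * Note: on DEBUG kernels the DEBUGCONT*()/DEBUGNOTE*() macros used
 * throughout the driver (presumably provided by xencons.h) test these bits
 * via XENCONS_DEBUG(), so ORing the desired masks into the static 'debug'
 * variable above enables the corresponding trace output.
 */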
125
126 #define	XENCONS_WBUFSIZE	4096	/* dom0 write staging buffer */
127
128 static boolean_t abort_charseq_recognize(uchar_t);
129
130 /* The async interrupt entry points */
131 static void xcasync_ioctl(struct asyncline *, queue_t *, mblk_t *);
132 static void xcasync_reioctl(void *);
133 static void xcasync_start(struct asyncline *);
134 static void xenconsputchar(cons_polledio_arg_t, uchar_t);
135 static int xenconsgetchar(cons_polledio_arg_t);
136 static boolean_t xenconsischar(cons_polledio_arg_t);
137
138 static uint_t xenconsintr(caddr_t);
139 static uint_t xenconsintr_priv(caddr_t, caddr_t);
140 /*PRINTFLIKE2*/
141 static void xenconserror(int, const char *, ...) __KPRINTFLIKE(2);
142 static void xencons_soft_state_free(struct xencons *);
143 static boolean_t
144 xcasync_flowcontrol_sw_input(struct xencons *, async_flowc_action, int);
145 static void
146 xcasync_flowcontrol_sw_output(struct xencons *, async_flowc_action);
147
148 void *xencons_soft_state;
149 char *xencons_wbuf;
150 struct xencons *xencons_console;
151
152 static void
153 xenconssetup_avintr(struct xencons *xcp, int attach)
154 {
155 /*
156 * On xen, CPU 0 always exists and can't be taken offline,
157 * so binding this thread to it should always succeed.
158 */
159 mutex_enter(&cpu_lock);
160 thread_affinity_set(curthread, 0);
161 mutex_exit(&cpu_lock);
162
163 if (attach) {
164 /* Setup our interrupt binding. */
165 (void) add_avintr(NULL, IPL_CONS, xenconsintr_priv,
166 "xencons", xcp->console_irq, (caddr_t)xcp, NULL, NULL,
167 xcp->dip);
168 } else {
169 /*
170 * Cleanup interrupt configuration. Note that the framework
171 * _should_ ensure that when rem_avintr() returns the interrupt
172 * service routine is not currently executing and that it won't
173 * be invoked again.
174 */
175 (void) rem_avintr(NULL, IPL_CONS, xenconsintr_priv,
176 xcp->console_irq);
177 }
178
179 /* Notify our caller that we're done. */
180 mutex_enter(&xcp->excl);
181 cv_signal(&xcp->excl_cv);
182 mutex_exit(&xcp->excl);
183
184 /* Clear our binding to CPU 0 */
185 thread_affinity_clear(curthread);
186
187 }
188
189 static void
190 xenconssetup_add_avintr(struct xencons *xcp)
191 {
192 xenconssetup_avintr(xcp, B_TRUE);
193 }
194
195 static void
196 xenconssetup_rem_avintr(struct xencons *xcp)
197 {
198 xenconssetup_avintr(xcp, B_FALSE);
199 }
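/*
 * The two wrappers above exist because taskq_dispatch() hands the dispatched
 * function a single untyped argument, so the attach/detach flag is encoded in
 * the entry point rather than passed along with xcp.
 */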
200
201 static int
202 xenconsdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
203 {
204 int instance;
205 struct xencons *xcp;
206
207 if (cmd != DDI_DETACH && cmd != DDI_SUSPEND)
208 return (DDI_FAILURE);
209
210 if (cmd == DDI_SUSPEND) {
211 ddi_remove_intr(devi, 0, NULL);
212 return (DDI_SUCCESS);
213 }
214
215 /*
216 * We should never try to detach the console driver on a domU
217 * because it should always be held open
218 */
219 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
220 if (!DOMAIN_IS_INITDOMAIN(xen_info))
221 return (DDI_FAILURE);
222
223 instance = ddi_get_instance(devi); /* find out which unit */
224
225 xcp = ddi_get_soft_state(xencons_soft_state, instance);
226 if (xcp == NULL)
227 return (DDI_FAILURE);
228
229 /*
230 	 * Clean up our interrupt bindings.  For more info on why we
231 	 * do this in a separate thread, see the comments for where we
232 	 * set up the interrupt bindings.
233 */
234 xencons_console = NULL;
235 mutex_enter(&xcp->excl);
236 (void) taskq_dispatch(system_taskq,
237 (void (*)(void *))xenconssetup_rem_avintr, xcp, TQ_SLEEP);
238 cv_wait(&xcp->excl_cv, &xcp->excl);
239 mutex_exit(&xcp->excl);
240
241 /* remove all minor device node(s) for this device */
242 ddi_remove_minor_node(devi, NULL);
243
244 /* free up state */
245 xencons_soft_state_free(xcp);
246 kmem_free(xencons_wbuf, XENCONS_WBUFSIZE);
247
248 DEBUGNOTE1(XENCONS_DEBUG_INIT, "xencons%d: shutdown complete",
249 instance);
250 return (DDI_SUCCESS);
251 }
252
253 static void
254 xenconssetup(struct xencons *xcp)
255 {
256 xcp->ifp = (volatile struct xencons_interface *)HYPERVISOR_console_page;
257
258 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
259 xencons_wbuf = kmem_alloc(XENCONS_WBUFSIZE, KM_SLEEP);
260
261 /*
262 * Activate the xen console virq. Note that xen requires
263 * that VIRQs be bound to CPU 0 when first created.
264 */
265 xcp->console_irq = ec_bind_virq_to_irq(VIRQ_CONSOLE, 0);
266
267 /*
268 * Ok. This is kinda ugly. We want to register an
269 * interrupt handler for the xen console virq, but
270 		 * VIRQs are Xen-specific and currently the DDI doesn't
271 		 * support binding to them, so instead we need to use
272 		 * add_avintr().  To make things more complicated,
273 * we already had to bind the xen console VIRQ to CPU 0,
274 * and add_avintr() needs to be invoked on the same CPU
275 * where the VIRQ is bound, in this case on CPU 0. We
276 * could just temporarily bind ourselves to CPU 0, but
277 * we don't want to do that since this attach thread
278 * could have been invoked in a user thread context,
279 * in which case this thread could already have some
280 * pre-existing cpu binding. So to avoid changing our
281 * cpu binding we're going to use a taskq thread that
282 		 * will bind to CPU 0 and register our interrupt
283 		 * handler for us.
284 */
285 mutex_enter(&xcp->excl);
286 (void) taskq_dispatch(system_taskq,
287 (void (*)(void *))xenconssetup_add_avintr, xcp, TQ_SLEEP);
288 cv_wait(&xcp->excl_cv, &xcp->excl);
289 mutex_exit(&xcp->excl);
290 } else {
291 (void) xvdi_alloc_evtchn(xcp->dip);
292 xcp->evtchn = xvdi_get_evtchn(xcp->dip);
293 (void) ddi_add_intr(xcp->dip, 0, NULL, NULL, xenconsintr,
294 (caddr_t)xcp);
295 }
296 }
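/*
 * To summarize the two interrupt paths set up above:
 *
 *	dom0:	VIRQ_CONSOLE is bound to an irq on CPU 0 and hooked up via
 *		add_avintr() from a CPU 0-bound taskq thread (see
 *		xenconssetup_avintr()); console data moves through
 *		HYPERVISOR_console_io().
 *
 *	domU:	an event channel is allocated through xvdi and hooked up via
 *		ddi_add_intr(); console data moves through the shared ring at
 *		HYPERVISOR_console_page.
 */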
297
298 static int
299 xenconsattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
300 {
301 int instance = ddi_get_instance(devi);
302 struct xencons *xcp;
303 int ret;
304
305 /* There can be only one. */
306 if (instance != 0)
307 return (DDI_FAILURE);
308
309 switch (cmd) {
310 case DDI_RESUME:
311 xcp = xencons_console;
312 xenconssetup(xcp);
313 return (DDI_SUCCESS);
314 case DDI_ATTACH:
315 break;
316 default:
317 return (DDI_FAILURE);
318 }
319
320 ret = ddi_soft_state_zalloc(xencons_soft_state, instance);
321 if (ret != DDI_SUCCESS)
322 return (DDI_FAILURE);
323 xcp = ddi_get_soft_state(xencons_soft_state, instance);
324 ASSERT(xcp != NULL); /* can't fail - we only just allocated it */
325
326 /*
327 * Set up the other components of the xencons structure for this port.
328 */
329 xcp->unit = instance;
330 xcp->dip = devi;
331
332 /* Fill in the polled I/O structure. */
333 xcp->polledio.cons_polledio_version = CONSPOLLEDIO_V0;
334 xcp->polledio.cons_polledio_argument = (cons_polledio_arg_t)xcp;
335 xcp->polledio.cons_polledio_putchar = xenconsputchar;
336 xcp->polledio.cons_polledio_getchar = xenconsgetchar;
337 xcp->polledio.cons_polledio_ischar = xenconsischar;
338 xcp->polledio.cons_polledio_enter = NULL;
339 xcp->polledio.cons_polledio_exit = NULL;
340
341 /*
342 	 * Initialize the asyncline structure, which holds the TTY
343 	 * protocol-private data, before enabling interrupts.
344 */
345 xcp->priv = kmem_zalloc(sizeof (struct asyncline), KM_SLEEP);
346 xcp->priv->async_common = xcp;
347 cv_init(&xcp->priv->async_flags_cv, NULL, CV_DRIVER, NULL);
348
349 /* Initialize mutexes before accessing the interface. */
350 mutex_init(&xcp->excl, NULL, MUTEX_DRIVER, NULL);
351 cv_init(&xcp->excl_cv, NULL, CV_DEFAULT, NULL);
352
353 /* create minor device node for this device */
354 ret = ddi_create_minor_node(devi, "xencons", S_IFCHR, instance,
355 DDI_NT_SERIAL, 0);
356 if (ret != DDI_SUCCESS) {
357 ddi_remove_minor_node(devi, NULL);
358 xencons_soft_state_free(xcp);
359 return (DDI_FAILURE);
360 }
361
362 ddi_report_dev(devi);
363 xencons_console = xcp;
364 xenconssetup(xcp);
365 DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dattach: done\n", instance);
366 return (DDI_SUCCESS);
367 }
368
369 /*ARGSUSED*/
370 static int
371 xenconsinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
372 void **result)
373 {
374 dev_t dev = (dev_t)arg;
375 int instance, error;
376 struct xencons *xcp;
377
378 instance = getminor(dev);
379 xcp = ddi_get_soft_state(xencons_soft_state, instance);
380 if (xcp == NULL)
381 return (DDI_FAILURE);
382
383 switch (infocmd) {
384 case DDI_INFO_DEVT2DEVINFO:
385 if (xcp->dip == NULL)
386 error = DDI_FAILURE;
387 else {
388 *result = (void *) xcp->dip;
389 error = DDI_SUCCESS;
390 }
391 break;
392 case DDI_INFO_DEVT2INSTANCE:
393 *result = (void *)(intptr_t)instance;
394 error = DDI_SUCCESS;
395 break;
396 default:
397 error = DDI_FAILURE;
398 }
399 return (error);
400 }
401
402 /* xencons_soft_state_free - local wrapper for ddi_soft_state_free(9F) */
403
404 static void
405 xencons_soft_state_free(struct xencons *xcp)
406 {
407 mutex_destroy(&xcp->excl);
408 cv_destroy(&xcp->excl_cv);
409 kmem_free(xcp->priv, sizeof (struct asyncline));
410 ddi_soft_state_free(xencons_soft_state, xcp->unit);
411 }
412
413 /*ARGSUSED*/
414 static int
415 xenconsopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
416 {
417 struct xencons *xcp;
418 struct asyncline *async;
419 int unit;
420
421 unit = getminor(*dev);
422 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dopen\n", unit);
423 xcp = ddi_get_soft_state(xencons_soft_state, unit);
424 if (xcp == NULL)
425 return (ENXIO); /* unit not configured */
426 async = xcp->priv;
427 mutex_enter(&xcp->excl);
428
429 if ((async->async_flags & ASYNC_ISOPEN) == 0) {
430 async->async_ttycommon.t_iflag = 0;
431 async->async_ttycommon.t_iocpending = NULL;
432 async->async_ttycommon.t_size.ws_row = 0;
433 async->async_ttycommon.t_size.ws_col = 0;
434 async->async_ttycommon.t_size.ws_xpixel = 0;
435 async->async_ttycommon.t_size.ws_ypixel = 0;
436 async->async_dev = *dev;
437 async->async_wbufcid = 0;
438
439 async->async_startc = CSTART;
440 async->async_stopc = CSTOP;
441 } else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
442 secpolicy_excl_open(cr) != 0) {
443 mutex_exit(&xcp->excl);
444 return (EBUSY);
445 }
446
447 async->async_ttycommon.t_flags |= TS_SOFTCAR;
448
449 async->async_ttycommon.t_readq = rq;
450 async->async_ttycommon.t_writeq = WR(rq);
451 rq->q_ptr = WR(rq)->q_ptr = (caddr_t)async;
452 mutex_exit(&xcp->excl);
453 /*
454 * Caution here -- qprocson sets the pointers that are used by canput
455 * called by xencons_rxint. ASYNC_ISOPEN must *not* be set until those
456 * pointers are valid.
457 */
458 qprocson(rq);
459 async->async_flags |= ASYNC_ISOPEN;
460 	DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dopen: done\n", unit);
461 return (0);
462 }
463
464
465 /*
466 * Close routine.
467 */
468 /*ARGSUSED*/
469 static int
470 xenconsclose(queue_t *q, int flag, cred_t *credp)
471 {
472 struct asyncline *async;
473 struct xencons *xcp;
474 #ifdef DEBUG
475 int instance;
476 #endif
477
478 async = (struct asyncline *)q->q_ptr;
479 ASSERT(async != NULL);
480 xcp = async->async_common;
481 #ifdef DEBUG
482 instance = xcp->unit;
483 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose\n", instance);
484 #endif
485
486 mutex_enter(&xcp->excl);
487 async->async_flags |= ASYNC_CLOSING;
488
489 async->async_ocnt = 0;
490 if (async->async_xmitblk != NULL)
491 freeb(async->async_xmitblk);
492 async->async_xmitblk = NULL;
493
494 ttycommon_close(&async->async_ttycommon);
495
496 /*
497 * Cancel outstanding "bufcall" request.
498 */
499 if (async->async_wbufcid != 0) {
500 unbufcall(async->async_wbufcid);
501 async->async_wbufcid = 0;
502 }
503
504 /* Note that qprocsoff can't be done until after interrupts are off */
505 qprocsoff(q);
506 q->q_ptr = WR(q)->q_ptr = NULL;
507 async->async_ttycommon.t_readq = NULL;
508 async->async_ttycommon.t_writeq = NULL;
509
510 /*
511 	 * Clear out device state, except persistent device property flags.
512 */
513 async->async_flags = 0;
514 cv_broadcast(&async->async_flags_cv);
515 mutex_exit(&xcp->excl);
516
517 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose: done\n", instance);
518 return (0);
519 }
520
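/*
 * INBUF_IX() hides the difference between the two input paths: in dom0 the
 * characters are read into a flat local buffer by HYPERVISOR_console_io(),
 * so the index is used as-is; in a domU they sit in the shared input ring
 * and the index must be masked to the ring size.
 */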
521 #define INBUF_IX(ix, ifp) (DOMAIN_IS_INITDOMAIN(xen_info) ? \
522 (ix) : MASK_XENCONS_IDX((ix), (ifp)->in))
523
524 /*
525 * Handle a xen console rx interrupt.
526 */
527 /*ARGSUSED*/
528 static void
529 xencons_rxint(struct xencons *xcp)
530 {
531 struct asyncline *async;
532 short cc;
533 mblk_t *bp;
534 queue_t *q;
535 uchar_t c, buf[16];
536 uchar_t *cp;
537 tty_common_t *tp;
538 int instance;
539 volatile struct xencons_interface *ifp;
540 XENCONS_RING_IDX cons, prod;
541
542 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_rxint\n");
543
544 loop:
545 mutex_enter(&xcp->excl);
546
547 instance = xcp->unit;
548
549 /* sanity check if we should bail */
550 if (xencons_console == NULL) {
551 mutex_exit(&xcp->excl);
552 DEBUGCONT1(XENCONS_DEBUG_PROCS,
553 "xencons%d_rxint: xencons_console is NULL\n",
554 instance);
555 goto out;
556 }
557
558 async = xcp->priv;
559 ifp = xcp->ifp;
560 tp = &async->async_ttycommon;
561 q = tp->t_readq;
562
563 if (async->async_flags & ASYNC_OUT_FLW_RESUME) {
564 xcasync_start(async);
565 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
566 }
567
568 /*
569 * If data is available, send it up the stream if there's
570 * somebody listening.
571 */
572 if (!(async->async_flags & ASYNC_ISOPEN)) {
573 mutex_exit(&xcp->excl);
574 goto out;
575 }
576 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
577 cc = HYPERVISOR_console_io(CONSOLEIO_read, 16, (char *)buf);
578 cp = buf;
579 cons = 0;
580 } else {
581 cons = ifp->in_cons;
582 prod = ifp->in_prod;
583
584 cc = prod - cons;
585 cp = (uchar_t *)ifp->in;
586 }
587 if (cc <= 0) {
588 mutex_exit(&xcp->excl);
589 goto out;
590 }
591
592 /*
593 * Check for character break sequence.
594 *
595 * Note that normally asy drivers only check for a character sequence
596 * if abort_enable == KIOCABORTALTERNATE and otherwise use a break
597 * sensed on the line to do an abort_sequence_enter. Since the
598 * hypervisor does not use a real chip for the console we default to
599 * using the alternate sequence.
600 */
601 if ((abort_enable == KIOCABORTENABLE) && (xcp->flags & ASY_CONSOLE)) {
602 XENCONS_RING_IDX i;
603
604 for (i = 0; i < cc; i++) {
605 c = cp[INBUF_IX(cons + i, ifp)];
606 if (abort_charseq_recognize(c)) {
607 /*
608 				 * Eat the abort sequence; it's not a valid
609 				 * debugger command.
610 */
611 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
612 membar_producer();
613 ifp->in_cons = cons + i;
614 } else {
615 cons += i;
616 }
617 abort_sequence_enter((char *)NULL);
618 /*
619 * Back from debugger, resume normal processing
620 */
621 mutex_exit(&xcp->excl);
622 goto loop;
623 }
624 }
625 }
626
627 if (!canput(q)) {
628 if (!(async->async_inflow_source & IN_FLOW_STREAMS)) {
629 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
630 IN_FLOW_STREAMS);
631 }
632 mutex_exit(&xcp->excl);
633 goto out;
634 }
635 if (async->async_inflow_source & IN_FLOW_STREAMS) {
636 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
637 IN_FLOW_STREAMS);
638 }
639 DEBUGCONT2(XENCONS_DEBUG_INPUT,
640 "xencons%d_rxint: %d char(s) in queue.\n", instance, cc);
641 if (!(bp = allocb(cc, BPRI_MED))) {
642 mutex_exit(&xcp->excl);
643 ttycommon_qfull(&async->async_ttycommon, q);
644 goto out;
645 }
646 do {
647 c = cp[INBUF_IX(cons++, ifp)];
648 /*
649 		 * We handle the XON/XOFF characters if IXON is set,
650 		 * but if the received character is _POSIX_VDISABLE,
651 		 * we leave it to the upper-level module.
652 */
653 if (tp->t_iflag & IXON) {
654 if ((c == async->async_stopc) &&
655 (c != _POSIX_VDISABLE)) {
656 xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
657 continue;
658 } else if ((c == async->async_startc) &&
659 (c != _POSIX_VDISABLE)) {
660 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
661 continue;
662 }
663 if ((tp->t_iflag & IXANY) &&
664 (async->async_flags & ASYNC_SW_OUT_FLW)) {
665 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
666 }
667 }
668 *bp->b_wptr++ = c;
669 } while (--cc);
670 membar_producer();
671 if (!DOMAIN_IS_INITDOMAIN(xen_info))
672 ifp->in_cons = cons;
673 mutex_exit(&xcp->excl);
674 if (bp->b_wptr > bp->b_rptr) {
675 if (!canput(q)) {
676 xenconserror(CE_NOTE, "xencons%d: local queue full",
677 instance);
678 freemsg(bp);
679 } else
680 (void) putq(q, bp);
681 } else
682 freemsg(bp);
683 if (DOMAIN_IS_INITDOMAIN(xen_info))
684 goto loop;
685 out:
686 DEBUGCONT1(XENCONS_DEBUG_PROCS, "xencons%d_rxint: done\n", instance);
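	/*
	 * In a domU, kick the event channel so the backend sees that we
	 * have consumed input and that there is room in the ring again.
	 */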
687 if (!DOMAIN_IS_INITDOMAIN(xen_info))
688 ec_notify_via_evtchn(xcp->evtchn);
689 }
690
691
692 /*
693 * Handle a xen console tx interrupt.
694 */
695 /*ARGSUSED*/
696 static void
697 xencons_txint(struct xencons *xcp)
698 {
699 struct asyncline *async;
700
701 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint\n");
702
703 /*
704 * prevent recursive entry
705 */
706 if (mutex_owner(&xcp->excl) == curthread) {
707 goto out;
708 }
709
710 mutex_enter(&xcp->excl);
711 if (xencons_console == NULL) {
712 mutex_exit(&xcp->excl);
713 goto out;
714 }
715
716 /* make sure the device is open */
717 async = xcp->priv;
718 if ((async->async_flags & ASYNC_ISOPEN) != 0)
719 xcasync_start(async);
720
721 mutex_exit(&xcp->excl);
722 out:
723 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint: done\n");
724 }
725
726
727 /*
728 * Get an event when input ring becomes not empty or output ring becomes not
729 * full.
730 */
731 static uint_t
732 xenconsintr(caddr_t arg)
733 {
734 struct xencons *xcp = (struct xencons *)arg;
735 volatile struct xencons_interface *ifp = xcp->ifp;
736
737 if (ifp->in_prod != ifp->in_cons)
738 xencons_rxint(xcp);
739 if (ifp->out_prod - ifp->out_cons < sizeof (ifp->out))
740 xencons_txint(xcp);
741 return (DDI_INTR_CLAIMED);
742 }
743
744 /*
745  * Console interrupt routine for privileged domains
746 */
747 static uint_t
748 xenconsintr_priv(caddr_t arg, caddr_t arg1 __unused)
749 {
750 struct xencons *xcp = (struct xencons *)arg;
751
752 xencons_rxint(xcp);
753 xencons_txint(xcp);
754 return (DDI_INTR_CLAIMED);
755 }
756
757 /*
758 * Start output on a line, unless it's busy, frozen, or otherwise.
759 */
760 /*ARGSUSED*/
761 static void
762 xcasync_start(struct asyncline *async)
763 {
764 struct xencons *xcp = async->async_common;
765 int cc;
766 queue_t *q;
767 mblk_t *bp;
768 int len, space, blen;
769 mblk_t *nbp;
770
771 #ifdef DEBUG
772 int instance = xcp->unit;
773
774 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_nstart\n", instance);
775 #endif
776 ASSERT(mutex_owned(&xcp->excl));
777
778 /*
779 	 * Check only for pending software input flow control.
780 */
781 domore:
782 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_CHECK, IN_FLOW_NULL);
783
784 if ((q = async->async_ttycommon.t_writeq) == NULL) {
785 return; /* not attached to a stream */
786 }
787
788 for (;;) {
789 if ((bp = getq(q)) == NULL)
790 return; /* no data to transmit */
791
792 /*
793 * We have a message block to work on.
794 * Check whether it's a break, a delay, or an ioctl (the latter
795 * occurs if the ioctl in question was waiting for the output
796 * to drain). If it's one of those, process it immediately.
797 */
798 switch (bp->b_datap->db_type) {
799
800 case M_IOCTL:
801 /*
802 * This ioctl was waiting for the output ahead of
803 * it to drain; obviously, it has. Do it, and
804 * then grab the next message after it.
805 */
806 mutex_exit(&xcp->excl);
807 xcasync_ioctl(async, q, bp);
808 mutex_enter(&xcp->excl);
809 continue;
810 }
811
812 while (bp != NULL && (cc = bp->b_wptr - bp->b_rptr) == 0) {
813 nbp = bp->b_cont;
814 freeb(bp);
815 bp = nbp;
816 }
817 if (bp != NULL)
818 break;
819 }
820
821 /*
822 * We have data to transmit. If output is stopped, put
823 * it back and try again later.
824 */
825 if (async->async_flags & (ASYNC_SW_OUT_FLW | ASYNC_STOPPED)) {
826 (void) putbq(q, bp);
827 return;
828 }
829
830
831 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
832 len = 0;
833 space = XENCONS_WBUFSIZE;
834 while (bp != NULL && space) {
835 blen = bp->b_wptr - bp->b_rptr;
836 cc = min(blen, space);
837 bcopy(bp->b_rptr, &xencons_wbuf[len], cc);
838 bp->b_rptr += cc;
839 if (cc == blen) {
840 nbp = bp->b_cont;
841 freeb(bp);
842 bp = nbp;
843 }
844 space -= cc;
845 len += cc;
846 }
847 mutex_exit(&xcp->excl);
848 (void) HYPERVISOR_console_io(CONSOLEIO_write, len,
849 xencons_wbuf);
850 mutex_enter(&xcp->excl);
851 if (bp != NULL)
852 (void) putbq(q, bp); /* not done with this msg yet */
853 /*
854 * There are no completion interrupts when using the
855 		 * HYPERVISOR_console_io call to write console data,
856 		 * so we loop here until we have sent all the data to the
857 * hypervisor.
858 */
859 goto domore;
860 } else {
861 volatile struct xencons_interface *ifp = xcp->ifp;
862 XENCONS_RING_IDX cons, prod;
863
864 cons = ifp->out_cons;
865 prod = ifp->out_prod;
866 membar_enter();
867 while (bp != NULL && ((prod - cons) < sizeof (ifp->out))) {
868 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] =
869 *bp->b_rptr++;
870 if (bp->b_rptr == bp->b_wptr) {
871 nbp = bp->b_cont;
872 freeb(bp);
873 bp = nbp;
874 }
875 }
876 membar_producer();
877 ifp->out_prod = prod;
878 ec_notify_via_evtchn(xcp->evtchn);
879 if (bp != NULL)
880 (void) putbq(q, bp); /* not done with this msg yet */
881 }
882 }
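/*
 * Note on the two transmit paths in xcasync_start(): dom0 copies data into
 * xencons_wbuf and loops on HYPERVISOR_console_io(CONSOLEIO_write), since
 * that interface generates no completion interrupts; a domU copies into the
 * shared output ring and relies on the event channel interrupt (and hence
 * xencons_txint()) to restart output once the backend drains the ring.
 */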
883
884
885 /*
886 * Process an "ioctl" message sent down to us.
887 * Note that we don't need to get any locks until we are ready to access
888 * the hardware. Nothing we access until then is going to be altered
889 * outside of the STREAMS framework, so we should be safe.
890 */
891 static void
892 xcasync_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
893 {
894 struct xencons *xcp = async->async_common;
895 tty_common_t *tp = &async->async_ttycommon;
896 struct iocblk *iocp;
897 unsigned datasize;
898 int error = 0;
899
900 #ifdef DEBUG
901 int instance = xcp->unit;
902
903 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl\n", instance);
904 #endif
905
906 if (tp->t_iocpending != NULL) {
907 /*
908 * We were holding an "ioctl" response pending the
909 * availability of an "mblk" to hold data to be passed up;
910 * another "ioctl" came through, which means that "ioctl"
911 * must have timed out or been aborted.
912 */
913 freemsg(async->async_ttycommon.t_iocpending);
914 async->async_ttycommon.t_iocpending = NULL;
915 }
916
917 iocp = (struct iocblk *)mp->b_rptr;
918
919 /*
920 * For TIOCMGET and the PPS ioctls, do NOT call ttycommon_ioctl()
921 * because this function frees up the message block (mp->b_cont) that
922 * contains the user location where we pass back the results.
923 *
924 * Similarly, CONSOPENPOLLEDIO needs ioc_count, which ttycommon_ioctl
925 * zaps. We know that ttycommon_ioctl doesn't know any CONS*
926 * ioctls, so keep the others safe too.
927 */
928 DEBUGCONT2(XENCONS_DEBUG_IOCTL, "async%d_ioctl: %s\n",
929 instance,
930 iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
931 iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
932 iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
933 iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" : "other");
934
935 switch (iocp->ioc_cmd) {
936 case TIOCMGET:
937 case TIOCGPPS:
938 case TIOCSPPS:
939 case TIOCGPPSEV:
940 case CONSOPENPOLLEDIO:
941 case CONSCLOSEPOLLEDIO:
942 case CONSSETABORTENABLE:
943 case CONSGETABORTENABLE:
944 error = -1; /* Do Nothing */
945 break;
946 default:
947
948 /*
949 * The only way in which "ttycommon_ioctl" can fail is if the
950 * "ioctl" requires a response containing data to be returned
951 * to the user, and no mblk could be allocated for the data.
952 * No such "ioctl" alters our state. Thus, we always go ahead
953 * and do any state-changes the "ioctl" calls for. If we
954 * couldn't allocate the data, "ttycommon_ioctl" has stashed
955 * the "ioctl" away safely, so we just call "bufcall" to
956 * request that we be called back when we stand a better
957 * chance of allocating the data.
958 */
959 if ((datasize = ttycommon_ioctl(tp, wq, mp, &error)) != 0) {
960 if (async->async_wbufcid)
961 unbufcall(async->async_wbufcid);
962 async->async_wbufcid = bufcall(datasize, BPRI_HI,
963 (void (*)(void *)) xcasync_reioctl,
964 (void *)(intptr_t)async->async_common->unit);
965 return;
966 }
967 }
968
969 mutex_enter(&xcp->excl);
970
971 if (error == 0) {
972 /*
973 * "ttycommon_ioctl" did most of the work; we just use the
974 * data it set up.
975 */
976 switch (iocp->ioc_cmd) {
977
978 case TCSETS:
979 case TCSETSF:
980 case TCSETSW:
981 case TCSETA:
982 case TCSETAW:
983 case TCSETAF:
984 break;
985 }
986 } else if (error < 0) {
987 /*
988 * "ttycommon_ioctl" didn't do anything; we process it here.
989 */
990 error = 0;
991 switch (iocp->ioc_cmd) {
992
993 case TCSBRK:
994 error = miocpullup(mp, sizeof (int));
995 break;
996
997 case TIOCSBRK:
998 mioc2ack(mp, NULL, 0, 0);
999 break;
1000
1001 case TIOCCBRK:
1002 mioc2ack(mp, NULL, 0, 0);
1003 break;
1004
1005 case CONSOPENPOLLEDIO:
1006 error = miocpullup(mp, sizeof (cons_polledio_arg_t));
1007 if (error != 0)
1008 break;
1009
1010 *(cons_polledio_arg_t *)mp->b_cont->b_rptr =
1011 (cons_polledio_arg_t)&xcp->polledio;
1012
1013 mp->b_datap->db_type = M_IOCACK;
1014 break;
1015
1016 case CONSCLOSEPOLLEDIO:
1017 mp->b_datap->db_type = M_IOCACK;
1018 iocp->ioc_error = 0;
1019 iocp->ioc_rval = 0;
1020 break;
1021
1022 case CONSSETABORTENABLE:
1023 error = secpolicy_console(iocp->ioc_cr);
1024 if (error != 0)
1025 break;
1026
1027 if (iocp->ioc_count != TRANSPARENT) {
1028 error = EINVAL;
1029 break;
1030 }
1031
1032 if (*(intptr_t *)mp->b_cont->b_rptr)
1033 xcp->flags |= ASY_CONSOLE;
1034 else
1035 xcp->flags &= ~ASY_CONSOLE;
1036
1037 mp->b_datap->db_type = M_IOCACK;
1038 iocp->ioc_error = 0;
1039 iocp->ioc_rval = 0;
1040 break;
1041
1042 case CONSGETABORTENABLE:
1043 /*CONSTANTCONDITION*/
1044 ASSERT(sizeof (boolean_t) <= sizeof (boolean_t *));
1045 /*
1046 * Store the return value right in the payload
1047 * we were passed. Crude.
1048 */
1049 mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
1050 *(boolean_t *)mp->b_cont->b_rptr =
1051 (xcp->flags & ASY_CONSOLE) != 0;
1052 break;
1053
1054 default:
1055 /*
1056 * If we don't understand it, it's an error. NAK it.
1057 */
1058 error = EINVAL;
1059 break;
1060 }
1061 }
1062 if (error != 0) {
1063 iocp->ioc_error = error;
1064 mp->b_datap->db_type = M_IOCNAK;
1065 }
1066 mutex_exit(&xcp->excl);
1067 qreply(wq, mp);
1068 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl: done\n", instance);
1069 }
1070
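/*
 * Read-side service routine: drain messages queued by xencons_rxint() up to
 * the module above whenever it can accept them.
 */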
1071 static int
1072 xenconsrsrv(queue_t *q)
1073 {
1074 mblk_t *bp;
1075
1076 while (canputnext(q) && (bp = getq(q)))
1077 putnext(q, bp);
1078 return (0);
1079 }
1080
1081 /*
1082 * Put procedure for write queue.
1083 * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
1084 * set the flow control character for M_STOPI and M_STARTI messages;
1085 * queue up M_BREAK, M_DELAY, and M_DATA messages for processing
1086 * by the start routine, and then call the start routine; discard
1087 * everything else. Note that this driver does not incorporate any
1088  * mechanism to negotiate the handling of the canonicalization process.
1089  * It expects that these functions are handled by the upper module(s),
1090  * as is done in ldterm.
1091 */
1092 static int
1093 xenconswput(queue_t *q, mblk_t *mp)
1094 {
1095 struct asyncline *async;
1096 struct xencons *xcp;
1097
1098 async = (struct asyncline *)q->q_ptr;
1099 xcp = async->async_common;
1100
1101 switch (mp->b_datap->db_type) {
1102
1103 case M_STOP:
1104 mutex_enter(&xcp->excl);
1105 async->async_flags |= ASYNC_STOPPED;
1106 mutex_exit(&xcp->excl);
1107 freemsg(mp);
1108 break;
1109
1110 case M_START:
1111 mutex_enter(&xcp->excl);
1112 if (async->async_flags & ASYNC_STOPPED) {
1113 async->async_flags &= ~ASYNC_STOPPED;
1114 xcasync_start(async);
1115 }
1116 mutex_exit(&xcp->excl);
1117 freemsg(mp);
1118 break;
1119
1120 case M_IOCTL:
1121 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1122
1123 case TCSETSW:
1124 case TCSETSF:
1125 case TCSETAW:
1126 case TCSETAF:
1127 /*
1128 * The changes do not take effect until all
1129 * output queued before them is drained.
1130 * Put this message on the queue, so that
1131 * "xcasync_start" will see it when it's done
1132 * with the output before it. Poke the
1133 * start routine, just in case.
1134 */
1135 (void) putq(q, mp);
1136 mutex_enter(&xcp->excl);
1137 xcasync_start(async);
1138 mutex_exit(&xcp->excl);
1139 break;
1140
1141 default:
1142 /*
1143 * Do it now.
1144 */
1145 xcasync_ioctl(async, q, mp);
1146 break;
1147 }
1148 break;
1149
1150 case M_FLUSH:
1151 if (*mp->b_rptr & FLUSHW) {
1152 mutex_enter(&xcp->excl);
1153 /*
1154 * Flush our write queue.
1155 */
1156 flushq(q, FLUSHDATA); /* XXX doesn't flush M_DELAY */
1157 if (async->async_xmitblk != NULL) {
1158 freeb(async->async_xmitblk);
1159 async->async_xmitblk = NULL;
1160 }
1161 mutex_exit(&xcp->excl);
1162 *mp->b_rptr &= ~FLUSHW; /* it has been flushed */
1163 }
1164 if (*mp->b_rptr & FLUSHR) {
1165 flushq(RD(q), FLUSHDATA);
1166 qreply(q, mp); /* give the read queues a crack at it */
1167 } else {
1168 freemsg(mp);
1169 }
1170
1171 /*
1172 * We must make sure we process messages that survive the
1173 * write-side flush.
1174 */
1175 mutex_enter(&xcp->excl);
1176 xcasync_start(async);
1177 mutex_exit(&xcp->excl);
1178 break;
1179
1180 case M_BREAK:
1181 case M_DELAY:
1182 case M_DATA:
1183 /*
1184 * Queue the message up to be transmitted,
1185 * and poke the start routine.
1186 */
1187 (void) putq(q, mp);
1188 mutex_enter(&xcp->excl);
1189 xcasync_start(async);
1190 mutex_exit(&xcp->excl);
1191 break;
1192
1193 case M_STOPI:
1194 		mutex_enter(&xcp->excl);
1196 		if (!(async->async_inflow_source & IN_FLOW_USER)) {
1197 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
1198 			    IN_FLOW_USER);
1199 		}
1200 		mutex_exit(&xcp->excl);
1202 freemsg(mp);
1203 break;
1204
1205 case M_STARTI:
1206 		mutex_enter(&xcp->excl);
1208 		if (async->async_inflow_source & IN_FLOW_USER) {
1209 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
1210 			    IN_FLOW_USER);
1211 		}
1212 		mutex_exit(&xcp->excl);
1214 freemsg(mp);
1215 break;
1216
1217 case M_CTL:
1218 if (MBLKL(mp) >= sizeof (struct iocblk) &&
1219 ((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
1220 ((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
1221 qreply(q, mp);
1222 } else {
1223 freemsg(mp);
1224 }
1225 break;
1226
1227 default:
1228 freemsg(mp);
1229 break;
1230 }
1231 return (0);
1232 }
1233
1234 /*
1235 * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
1236 * the buffer we need.
1237 */
1238 static void
1239 xcasync_reioctl(void *unit)
1240 {
1241 int instance = (uintptr_t)unit;
1242 struct asyncline *async;
1243 struct xencons *xcp;
1244 queue_t *q;
1245 mblk_t *mp;
1246
1247 xcp = ddi_get_soft_state(xencons_soft_state, instance);
1248 ASSERT(xcp != NULL);
1249 async = xcp->priv;
1250
1251 /*
1252 * The bufcall is no longer pending.
1253 */
1254 mutex_enter(&xcp->excl);
1255 async->async_wbufcid = 0;
1256 if ((q = async->async_ttycommon.t_writeq) == NULL) {
1257 mutex_exit(&xcp->excl);
1258 return;
1259 }
1260 if ((mp = async->async_ttycommon.t_iocpending) != NULL) {
1261 /* not pending any more */
1262 async->async_ttycommon.t_iocpending = NULL;
1263 mutex_exit(&xcp->excl);
1264 xcasync_ioctl(async, q, mp);
1265 } else
1266 mutex_exit(&xcp->excl);
1267 }
1268
1269
1270 /*
1271 * debugger/console support routines.
1272 */
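/*
 * These routines back the cons_polledio vector filled in by xenconsattach()
 * and handed out via CONSOPENPOLLEDIO.  They may be called with interrupts
 * unavailable (e.g. from the debugger), so they poll the ring or the
 * hypervisor directly.
 */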
1273
1274 /*
1275 * put a character out
1276 * Do not use interrupts. If char is LF, put out CR, LF.
1277 */
1278 /*ARGSUSED*/
1279 static void
1280 xenconsputchar(cons_polledio_arg_t arg, uchar_t c)
1281 {
1282 struct xencons *xcp = xencons_console;
1283 volatile struct xencons_interface *ifp = xcp->ifp;
1284 XENCONS_RING_IDX prod;
1285
1286 if (c == '\n')
1287 xenconsputchar(arg, '\r');
1288
1289 /*
1290 * domain 0 can use the console I/O...
1291 */
1292 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1293 char buffer[1];
1294
1295 buffer[0] = c;
1296 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, buffer);
1297 return;
1298 }
1299
1300 /*
1301 * domU has to go through dom0 virtual console.
1302 */
1303 while (ifp->out_prod - ifp->out_cons == sizeof (ifp->out))
1304 (void) HYPERVISOR_yield();
1305
1306 prod = ifp->out_prod;
1307 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] = c;
1308 membar_producer();
1309 ifp->out_prod = prod;
1310 ec_notify_via_evtchn(xcp->evtchn);
1311 }
1312
1313 /*
1314 * See if there's a character available. If no character is
1315 * available, return 0. Run in polled mode, no interrupts.
1316 */
1317 static boolean_t
1318 xenconsischar(cons_polledio_arg_t arg)
1319 {
1320 struct xencons *xcp = (struct xencons *)arg;
1321 volatile struct xencons_interface *ifp = xcp->ifp;
1322
1323 if (xcp->polldix < xcp->polllen)
1324 return (B_TRUE);
1325 /*
1326 * domain 0 can use the console I/O...
1327 */
1328 xcp->polldix = 0;
1329 xcp->polllen = 0;
1330 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1331 xcp->polllen = HYPERVISOR_console_io(CONSOLEIO_read, 1,
1332 (char *)xcp->pollbuf);
1333 return (xcp->polllen != 0);
1334 }
1335
1336 /*
1337 * domU has to go through virtual console device.
1338 */
1339 if (ifp->in_prod != ifp->in_cons) {
1340 XENCONS_RING_IDX cons;
1341
1342 cons = ifp->in_cons;
1343 membar_enter();
1344 xcp->pollbuf[0] = ifp->in[MASK_XENCONS_IDX(cons++, ifp->in)];
1345 membar_producer();
1346 ifp->in_cons = cons;
1347 xcp->polllen = 1;
1348 }
1349 return (xcp->polllen != 0);
1350 }
1351
1352 /*
1353 * Get a character. Run in polled mode, no interrupts.
1354 */
1355 static int
1356 xenconsgetchar(cons_polledio_arg_t arg)
1357 {
1358 struct xencons *xcp = (struct xencons *)arg;
1359
1360 ec_wait_on_evtchn(xcp->evtchn, (int (*)(void *))xenconsischar, arg);
1361
1362 return (xcp->pollbuf[xcp->polldix++]);
1363 }
1364
1365 static void
1366 xenconserror(int level, const char *fmt, ...)
1367 {
1368 va_list adx;
1369 static time_t last;
1370 static const char *lastfmt;
1371 time_t now;
1372
1373 /*
1374 * Don't print the same error message too often.
1375 * Print the message only if we have not printed the
1376 * message within the last second.
1377 	 * Note that fmt cannot be a pointer to a string
1378 	 * stored on the stack.  The fmt pointer
1379 	 * must be in the data segment, otherwise lastfmt would point
1380 	 * to nonsense.
1381 */
1382 now = gethrestime_sec();
1383 if (last == now && lastfmt == fmt)
1384 return;
1385
1386 last = now;
1387 lastfmt = fmt;
1388
1389 va_start(adx, fmt);
1390 vcmn_err(level, fmt, adx);
1391 va_end(adx);
1392 }
1393
1394
1395 /*
1396  * Check for the abort character sequence (CR, '~', Ctrl-B)
1397 */
1398 static boolean_t
1399 abort_charseq_recognize(uchar_t ch)
1400 {
1401 static int state = 0;
1402 #define CNTRL(c) ((c)&037)
1403 static char sequence[] = { '\r', '~', CNTRL('b') };
1404
1405 if (ch == sequence[state]) {
1406 if (++state >= sizeof (sequence)) {
1407 state = 0;
1408 return (B_TRUE);
1409 }
1410 } else {
1411 state = (ch == sequence[0]) ? 1 : 0;
1412 }
1413 return (B_FALSE);
1414 }
1415
1416 /*
1417 * Flow control functions
1418 */
1419
1420 /*
1421 * Software output flow control
1422  * This function can be executed successfully in any situation.
1423  * It does not handle HW, and just changes the SW output flow control flag.
1424 * INPUT VALUE of onoff:
1425 * FLOW_START means to clear SW output flow control flag,
1426 * also set ASYNC_OUT_FLW_RESUME.
1427 * FLOW_STOP means to set SW output flow control flag,
1428 * also clear ASYNC_OUT_FLW_RESUME.
1429 */
1430 static void
1431 xcasync_flowcontrol_sw_output(struct xencons *xcp, async_flowc_action onoff)
1432 {
1433 struct asyncline *async = xcp->priv;
1434 int instance = xcp->unit;
1435
1436 ASSERT(mutex_owned(&xcp->excl));
1437
1438 if (!(async->async_ttycommon.t_iflag & IXON))
1439 return;
1440
1441 switch (onoff) {
1442 case FLOW_STOP:
1443 async->async_flags |= ASYNC_SW_OUT_FLW;
1444 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
1445 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1446 "xencons%d: output sflow stop\n", instance);
1447 break;
1448 case FLOW_START:
1449 async->async_flags &= ~ASYNC_SW_OUT_FLW;
1450 async->async_flags |= ASYNC_OUT_FLW_RESUME;
1451 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1452 "xencons%d: output sflow start\n", instance);
1453 break;
1454 default:
1455 break;
1456 }
1457 }
1458
1459 /*
1460 * Software input flow control
1461 * This function can execute software input flow control
1462 * INPUT VALUE of onoff:
1463 * FLOW_START means to send out a XON char
1464 * and clear SW input flow control flag.
1465 * FLOW_STOP means to send out a XOFF char
1466 * and set SW input flow control flag.
1467 * FLOW_CHECK means to check whether there is pending XON/XOFF
1468 * if it is true, send it out.
1469 * INPUT VALUE of type:
1470 * IN_FLOW_STREAMS means flow control is due to STREAMS
1471 * IN_FLOW_USER means flow control is due to user's commands
1472 * RETURN VALUE: B_FALSE means no flow control char is sent
1473 * B_TRUE means one flow control char is sent
1474 */
1475 static boolean_t
1476 xcasync_flowcontrol_sw_input(struct xencons *xcp, async_flowc_action onoff,
1477 int type)
1478 {
1479 struct asyncline *async = xcp->priv;
1480 int instance = xcp->unit;
1481 int rval = B_FALSE;
1482
1483 ASSERT(mutex_owned(&xcp->excl));
1484
1485 if (!(async->async_ttycommon.t_iflag & IXOFF))
1486 return (rval);
1487
1488 /*
1489 * If we get this far, then we know IXOFF is set.
1490 */
1491 switch (onoff) {
1492 case FLOW_STOP:
1493 async->async_inflow_source |= type;
1494
1495 /*
1496 * We'll send an XOFF character for each of up to
1497 		 * two different input flow control attempts to stop input.
1498 		 * If we have already sent out one XOFF but FLOW_STOP comes
1499 		 * again, input flow control has become more serious,
1500 		 * so send XOFF again.
1501 */
1502 if (async->async_inflow_source & (IN_FLOW_STREAMS |
1503 IN_FLOW_USER))
1504 async->async_flags |= ASYNC_SW_IN_FLOW |
1505 ASYNC_SW_IN_NEEDED;
1506 DEBUGCONT2(XENCONS_DEBUG_SFLOW, "xencons%d: input sflow stop, "
1507 "type = %x\n", instance, async->async_inflow_source);
1508 break;
1509 case FLOW_START:
1510 async->async_inflow_source &= ~type;
1511 if (async->async_inflow_source == 0) {
1512 async->async_flags = (async->async_flags &
1513 ~ASYNC_SW_IN_FLOW) | ASYNC_SW_IN_NEEDED;
1514 DEBUGCONT1(XENCONS_DEBUG_SFLOW, "xencons%d: "
1515 "input sflow start\n", instance);
1516 }
1517 break;
1518 default:
1519 break;
1520 }
1521
1522 if (async->async_flags & ASYNC_SW_IN_NEEDED) {
1523 /*
1524 * If we get this far, then we know we need to send out
1525 * XON or XOFF char.
1526 */
1527 char c;
1528
1529 rval = B_TRUE;
1530 c = (async->async_flags & ASYNC_SW_IN_FLOW) ?
1531 async->async_stopc : async->async_startc;
1532 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1533 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
1534 async->async_flags &= ~ASYNC_SW_IN_NEEDED;
1535 return (rval);
1536 } else {
1537 xenconsputchar(NULL, c);
1538 }
1539 }
1540 return (rval);
1541 }
1542
1543 struct module_info xencons_info = {
1544 	0,		/* mi_idnum */
1545 	"xencons",	/* mi_idname */
1546 	0,		/* mi_minpsz */
1547 	INFPSZ,		/* mi_maxpsz */
1548 	4096,		/* mi_hiwat */
1549 	128		/* mi_lowat */
1550 };
1551
1552 static struct qinit xencons_rint = {
1553 	putq,		/* qi_putp */
1554 	xenconsrsrv,	/* qi_srvp */
1555 	xenconsopen,	/* qi_qopen */
1556 	xenconsclose,	/* qi_qclose */
1557 	NULL,		/* qi_qadmin */
1558 	&xencons_info,	/* qi_minfo */
1559 	NULL		/* qi_mstat */
1560 };
1561
1562 static struct qinit xencons_wint = {
1563 	xenconswput,	/* qi_putp */
1564 	NULL,		/* qi_srvp */
1565 	NULL,		/* qi_qopen */
1566 	NULL,		/* qi_qclose */
1567 	NULL,		/* qi_qadmin */
1568 	&xencons_info,	/* qi_minfo */
1569 	NULL		/* qi_mstat */
1570 };
1571
1572 struct streamtab xencons_str_info = {
1573 	&xencons_rint,	/* st_rdinit */
1574 	&xencons_wint,	/* st_wrinit */
1575 	NULL,		/* st_muxrinit */
1576 	NULL		/* st_muxwinit */
1577 };
1578
1579 static struct cb_ops cb_xencons_ops = {
1580 nodev, /* cb_open */
1581 nodev, /* cb_close */
1582 nodev, /* cb_strategy */
1583 nodev, /* cb_print */
1584 nodev, /* cb_dump */
1585 nodev, /* cb_read */
1586 nodev, /* cb_write */
1587 nodev, /* cb_ioctl */
1588 nodev, /* cb_devmap */
1589 nodev, /* cb_mmap */
1590 nodev, /* cb_segmap */
1591 nochpoll, /* cb_chpoll */
1592 ddi_prop_op, /* cb_prop_op */
1593 &xencons_str_info, /* cb_stream */
1594 D_MP /* cb_flag */
1595 };
1596
1597 struct dev_ops xencons_ops = {
1598 DEVO_REV, /* devo_rev */
1599 0, /* devo_refcnt */
1600 xenconsinfo, /* devo_getinfo */
1601 nulldev, /* devo_identify */
1602 nulldev, /* devo_probe */
1603 xenconsattach, /* devo_attach */
1604 xenconsdetach, /* devo_detach */
1605 nodev, /* devo_reset */
1606 &cb_xencons_ops, /* devo_cb_ops */
1607 NULL, /* devo_bus_ops */
1608 NULL, /* devo_power */
1609 ddi_quiesce_not_needed, /* devo_quiesce */
1610 };
1611
1612 static struct modldrv modldrv = {
1613 &mod_driverops, /* Type of module. This one is a driver */
1614 "virtual console driver",
1615 &xencons_ops, /* driver ops */
1616 };
1617
1618 static struct modlinkage modlinkage = {
1619 MODREV_1,
1620 (void *)&modldrv,
1621 NULL
1622 };
1623
1624 int
1625 _init(void)
1626 {
1627 int rv;
1628
1629 if ((rv = ddi_soft_state_init(&xencons_soft_state,
1630 sizeof (struct xencons), 1)) != 0)
1631 return (rv);
1632 if ((rv = mod_install(&modlinkage)) != 0) {
1633 ddi_soft_state_fini(&xencons_soft_state);
1634 return (rv);
1635 }
1636 DEBUGCONT2(XENCONS_DEBUG_INIT, "%s, debug = %x\n",
1637 modldrv.drv_linkinfo, debug);
1638 return (0);
1639 }
1640
1641 int
1642 _fini(void)
1643 {
1644 int rv;
1645
1646 if ((rv = mod_remove(&modlinkage)) != 0)
1647 return (rv);
1648
1649 ddi_soft_state_fini(&xencons_soft_state);
1650 return (0);
1651 }
1652
1653 int
1654 _info(struct modinfo *modinfop)
1655 {
1656 return (mod_info(&modlinkage, modinfop));
1657 }
1658