xref: /titanic_50/usr/src/uts/common/xen/io/xencons.c (revision 7ec363dc481bba196d724969022171de4687989f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*	Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.	*/
23 /*	Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T	*/
24 /*	  All Rights Reserved					*/
25 
26 /*
27  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
28  * Use is subject to license terms.
29  */
30 
31 #pragma ident	"%Z%%M%	%I%	%E% SMI"
32 
33 /*
34  *
35  * Copyright (c) 2004 Christian Limpach.
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. This section intentionally left blank.
47  * 4. The name of the author may not be used to endorse or promote products
48  *    derived from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
51  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
54  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
55  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
59  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60  */
61 /*
62  * Section 3 of the above license was updated in response to bug 6379571.
63  */
64 
65 /*
66  * Hypervisor virtual console driver
67  */
68 
69 #include <sys/param.h>
70 #include <sys/types.h>
71 #include <sys/signal.h>
72 #include <sys/stream.h>
73 #include <sys/termio.h>
74 #include <sys/errno.h>
75 #include <sys/file.h>
76 #include <sys/cmn_err.h>
77 #include <sys/stropts.h>
78 #include <sys/strsubr.h>
79 #include <sys/strtty.h>
80 #include <sys/debug.h>
81 #include <sys/kbio.h>
82 #include <sys/cred.h>
83 #include <sys/stat.h>
84 #include <sys/consdev.h>
85 #include <sys/mkdev.h>
86 #include <sys/kmem.h>
87 #include <sys/cred.h>
88 #include <sys/strsun.h>
89 #ifdef DEBUG
90 #include <sys/promif.h>
91 #endif
92 #include <sys/modctl.h>
93 #include <sys/ddi.h>
94 #include <sys/sunddi.h>
95 #include <sys/sunndi.h>
96 #include <sys/policy.h>
97 #include <sys/atomic.h>
98 #include <sys/psm.h>
99 #include <xen/public/io/console.h>
100 
101 #include "xencons.h"
102 
103 #include <sys/hypervisor.h>
104 #include <sys/evtchn_impl.h>
105 #include <xen/sys/xenbus_impl.h>
106 #include <xen/sys/xendev.h>
107 
108 #ifdef DEBUG
109 #define	XENCONS_DEBUG_INIT	0x0001	/* msgs during driver initialization. */
110 #define	XENCONS_DEBUG_INPUT	0x0002	/* characters received during int. */
111 #define	XENCONS_DEBUG_EOT	0x0004	/* msgs when wait for xmit to finish. */
112 #define	XENCONS_DEBUG_CLOSE	0x0008	/* msgs when driver open/close called */
113 #define	XENCONS_DEBUG_PROCS	0x0020	/* each proc name as it is entered. */
114 #define	XENCONS_DEBUG_OUT	0x0100	/* msgs about output events. */
115 #define	XENCONS_DEBUG_BUSY	0x0200	/* msgs when xmit is enabled/disabled */
116 #define	XENCONS_DEBUG_MODEM	0x0400	/* msgs about modem status & control. */
117 #define	XENCONS_DEBUG_MODM2	0x0800	/* msgs about modem status & control. */
118 #define	XENCONS_DEBUG_IOCTL	0x1000	/* Output msgs about ioctl messages. */
119 #define	XENCONS_DEBUG_CHIP	0x2000	/* msgs about chip identification. */
120 #define	XENCONS_DEBUG_SFLOW	0x4000	/* msgs when S/W flowcontrol active */
121 #define	XENCONS_DEBUG(x) (debug & (x))
122 static int debug  = 0;
123 #else
124 #define	XENCONS_DEBUG(x) B_FALSE
125 #endif
126 
127 #define	XENCONS_WBUFSIZE	4096
128 
129 static boolean_t abort_charseq_recognize(uchar_t);
130 
131 /* The async interrupt entry points */
132 static void	xcasync_ioctl(struct asyncline *, queue_t *, mblk_t *);
133 static void	xcasync_reioctl(void *);
134 static void	xcasync_start(struct asyncline *);
135 static void	xenconsputchar(cons_polledio_arg_t, uchar_t);
136 static int	xenconsgetchar(cons_polledio_arg_t);
137 static boolean_t	xenconsischar(cons_polledio_arg_t);
138 
139 static uint_t	xenconsintr(caddr_t);
140 static uint_t	xenconsintr_priv(caddr_t);
141 /*PRINTFLIKE2*/
142 static void	xenconserror(int, const char *, ...) __KPRINTFLIKE(2);
143 static void	xencons_soft_state_free(struct xencons *);
144 static boolean_t
145 xcasync_flowcontrol_sw_input(struct xencons *, async_flowc_action, int);
146 static void
147 xcasync_flowcontrol_sw_output(struct xencons *, async_flowc_action);
148 
149 void		*xencons_soft_state;
150 char		*xencons_wbuf;
151 struct xencons	*xencons_console;
152 
153 static void
154 xenconssetup_avintr(struct xencons *xcp, int attach)
155 {
156 	/*
157 	 * On xen, CPU 0 always exists and can't be taken offline,
158 	 * so binding this thread to it should always succeed.
159 	 */
160 	mutex_enter(&cpu_lock);
161 	thread_affinity_set(curthread, 0);
162 	mutex_exit(&cpu_lock);
163 
164 	if (attach) {
165 		/* Setup our interrupt binding. */
166 		(void) add_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
167 		    "xencons", xcp->console_irq, (caddr_t)xcp, NULL, NULL,
168 		    xcp->dip);
169 	} else {
170 		/*
171 		 * Cleanup interrupt configuration.  Note that the framework
172 		 * _should_ ensure that when rem_avintr() returns the interrupt
173 		 * service routine is not currently executing and that it won't
174 		 * be invoked again.
175 		 */
176 		(void) rem_avintr(NULL, IPL_CONS, (avfunc)xenconsintr_priv,
177 		    xcp->console_irq);
178 	}
179 
180 	/* Notify our caller that we're done. */
181 	mutex_enter(&xcp->excl);
182 	cv_signal(&xcp->excl_cv);
183 	mutex_exit(&xcp->excl);
184 
185 	/* Clear our binding to CPU 0 */
186 	thread_affinity_clear(curthread);
187 
188 }

/* taskq wrapper: register the console VIRQ handler while bound to CPU 0 */
static void
xenconssetup_add_avintr(struct xencons *xcp)
{
	xenconssetup_avintr(xcp, B_TRUE);
}

/* taskq wrapper: remove the console VIRQ handler while bound to CPU 0 */
static void
xenconssetup_rem_avintr(struct xencons *xcp)
{
	xenconssetup_avintr(xcp, B_FALSE);
}
201 
/*
 * detach(9E) entry point.  DDI_SUSPEND just tears down the interrupt;
 * DDI_DETACH (dom0 only) undoes everything xenconsattach()/xenconssetup()
 * established.
 */
static int
xenconsdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	int instance;
	struct xencons *xcp;

	if (cmd != DDI_DETACH && cmd != DDI_SUSPEND)
		return (DDI_FAILURE);

	if (cmd == DDI_SUSPEND) {
		ddi_remove_intr(devi, 0, NULL);
		return (DDI_SUCCESS);
	}

	/*
	 * We should never try to detach the console driver on a domU
	 * because it should always be held open
	 */
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	if (!DOMAIN_IS_INITDOMAIN(xen_info))
		return (DDI_FAILURE);

	instance = ddi_get_instance(devi);	/* find out which unit */

	xcp = ddi_get_soft_state(xencons_soft_state, instance);
	if (xcp == NULL)
		return (DDI_FAILURE);

	/*
	 * Cleanup our interrupt bindings.  For more info on why we
	 * do this in a separate thread, see the comments for when we
	 * setup the interrupt bindings.
	 */
	xencons_console = NULL;
	mutex_enter(&xcp->excl);
	(void) taskq_dispatch(system_taskq,
	    (void (*)(void *))xenconssetup_rem_avintr, xcp, TQ_SLEEP);
	/* wait until the taskq thread signals that rem_avintr() is done */
	cv_wait(&xcp->excl_cv, &xcp->excl);
	mutex_exit(&xcp->excl);

	/* remove all minor device node(s) for this device */
	ddi_remove_minor_node(devi, NULL);

	/* free up state; xencons_wbuf was allocated in xenconssetup() (dom0) */
	xencons_soft_state_free(xcp);
	kmem_free(xencons_wbuf, XENCONS_WBUFSIZE);

	DEBUGNOTE1(XENCONS_DEBUG_INIT, "xencons%d: shutdown complete",
	    instance);
	return (DDI_SUCCESS);
}
253 
/*
 * Shared attach/resume setup: map the shared console page and wire up
 * the interrupt source appropriate for this domain type (VIRQ via
 * add_avintr() on dom0, event channel via the DDI on domU).
 */
static void
xenconssetup(struct xencons *xcp)
{
	xcp->ifp = (volatile struct xencons_interface *)HYPERVISOR_console_page;

	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		/* dom0 writes through HYPERVISOR_console_io; staging buffer */
		xencons_wbuf = kmem_alloc(XENCONS_WBUFSIZE, KM_SLEEP);

		/*
		 * Activate the xen console virq.  Note that xen requires
		 * that VIRQs be bound to CPU 0 when first created.
		 */
		xcp->console_irq = ec_bind_virq_to_irq(VIRQ_CONSOLE, 0);

		/*
		 * Ok.  This is kinda ugly.  We want to register an
		 * interrupt handler for the xen console virq, but
		 * virq's are xen specific and currently the DDI doesn't
		 * support binding to them.  So instead we need to use
		 * add_avintr().  So to make things more complicated,
		 * we already had to bind the xen console VIRQ to CPU 0,
		 * and add_avintr() needs to be invoked on the same CPU
		 * where the VIRQ is bound, in this case on CPU 0.  We
		 * could just temporarily bind ourselves to CPU 0, but
		 * we don't want to do that since this attach thread
		 * could have been invoked in a user thread context,
		 * in which case this thread could already have some
		 * pre-existing cpu binding.  So to avoid changing our
		 * cpu binding we're going to use a taskq thread that
		 * will bind to CPU 0 and register our interrupt
		 * handler for us.
		 */
		mutex_enter(&xcp->excl);
		(void) taskq_dispatch(system_taskq,
		    (void (*)(void *))xenconssetup_add_avintr, xcp, TQ_SLEEP);
		cv_wait(&xcp->excl_cv, &xcp->excl);
		mutex_exit(&xcp->excl);
	} else {
		/* domU: allocate an event channel and use the regular DDI */
		(void) xvdi_alloc_evtchn(xcp->dip);
		(void) ddi_add_intr(xcp->dip, 0, NULL, NULL, xenconsintr,
		    (caddr_t)xcp);
		xcp->evtchn = xvdi_get_evtchn(xcp->dip);
	}
}
298 
/*
 * attach(9E) entry point.  Allocates the per-instance soft state, fills
 * in the polled-I/O vector (used by the kernel debugger/console), creates
 * the minor node, and performs the shared interrupt/ring setup.
 */
static int
xenconsattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(devi);
	struct xencons *xcp;
	int ret;

	/* There can be only one. */
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	case DDI_RESUME:
		/* re-establish interrupt/ring state using the saved state */
		xcp = xencons_console;
		xenconssetup(xcp);
		return (DDI_SUCCESS);
	case DDI_ATTACH:
		break;
	default:
		return (DDI_FAILURE);
	}

	ret = ddi_soft_state_zalloc(xencons_soft_state, instance);
	if (ret != DDI_SUCCESS)
		return (DDI_FAILURE);
	xcp = ddi_get_soft_state(xencons_soft_state, instance);
	ASSERT(xcp != NULL);	/* can't fail - we only just allocated it */

	/*
	 * Set up the other components of the xencons structure for this port.
	 */
	xcp->unit = instance;
	xcp->dip = devi;

	/* Fill in the polled I/O structure. */
	xcp->polledio.cons_polledio_version = CONSPOLLEDIO_V0;
	xcp->polledio.cons_polledio_argument = (cons_polledio_arg_t)xcp;
	xcp->polledio.cons_polledio_putchar = xenconsputchar;
	xcp->polledio.cons_polledio_getchar = xenconsgetchar;
	xcp->polledio.cons_polledio_ischar = xenconsischar;
	xcp->polledio.cons_polledio_enter = NULL;
	xcp->polledio.cons_polledio_exit = NULL;

	/*
	 * Initializes the asyncline structure which has TTY protocol-private
	 * data before enabling interrupts.
	 */
	xcp->priv = kmem_zalloc(sizeof (struct asyncline), KM_SLEEP);
	xcp->priv->async_common = xcp;
	cv_init(&xcp->priv->async_flags_cv, NULL, CV_DRIVER, NULL);

	/* Initialize mutexes before accessing the interface. */
	mutex_init(&xcp->excl, NULL, MUTEX_DRIVER, NULL);
	cv_init(&xcp->excl_cv, NULL, CV_DEFAULT, NULL);

	/* create minor device node for this device */
	ret = ddi_create_minor_node(devi, "xencons", S_IFCHR, instance,
	    DDI_NT_SERIAL, NULL);
	if (ret != DDI_SUCCESS) {
		ddi_remove_minor_node(devi, NULL);
		xencons_soft_state_free(xcp);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	/* publish the console pointer before interrupts can fire */
	xencons_console = xcp;
	xenconssetup(xcp);
	DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dattach: done\n", instance);
	return (DDI_SUCCESS);
}
369 
370 /*ARGSUSED*/
371 static int
372 xenconsinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
373 	void **result)
374 {
375 	dev_t dev = (dev_t)arg;
376 	int instance, error;
377 	struct xencons *xcp;
378 
379 	instance = getminor(dev);
380 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
381 	if (xcp == NULL)
382 		return (DDI_FAILURE);
383 
384 	switch (infocmd) {
385 	case DDI_INFO_DEVT2DEVINFO:
386 		if (xcp->dip == NULL)
387 			error = DDI_FAILURE;
388 		else {
389 			*result = (void *) xcp->dip;
390 			error = DDI_SUCCESS;
391 		}
392 		break;
393 	case DDI_INFO_DEVT2INSTANCE:
394 		*result = (void *)(intptr_t)instance;
395 		error = DDI_SUCCESS;
396 		break;
397 	default:
398 		error = DDI_FAILURE;
399 	}
400 	return (error);
401 }
402 
/* xencons_soft_state_free - local wrapper for ddi_soft_state_free(9F) */

static void
xencons_soft_state_free(struct xencons *xcp)
{
	/* destroy synchronization primitives before releasing the state */
	mutex_destroy(&xcp->excl);
	cv_destroy(&xcp->excl_cv);
	kmem_free(xcp->priv, sizeof (struct asyncline));
	ddi_soft_state_free(xencons_soft_state, xcp->unit);
}
413 
/*
 * STREAMS open routine.  The first open initializes the shared tty
 * state; subsequent opens attach to the existing stream, subject to
 * the TS_XCLUDE exclusive-use check.
 */
/*ARGSUSED*/
static int
xenconsopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
{
	struct xencons	*xcp;
	struct asyncline *async;
	int		unit;

	unit = getminor(*dev);
	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dopen\n", unit);
	xcp = ddi_get_soft_state(xencons_soft_state, unit);
	if (xcp == NULL)
		return (ENXIO);		/* unit not configured */
	async = xcp->priv;
	mutex_enter(&xcp->excl);

again:	/* NOTE(review): no goto in this function targets this label */

	if ((async->async_flags & ASYNC_ISOPEN) == 0) {
		/* first open: reset the protocol-private tty state */
		async->async_ttycommon.t_iflag = 0;
		async->async_ttycommon.t_iocpending = NULL;
		async->async_ttycommon.t_size.ws_row = 0;
		async->async_ttycommon.t_size.ws_col = 0;
		async->async_ttycommon.t_size.ws_xpixel = 0;
		async->async_ttycommon.t_size.ws_ypixel = 0;
		async->async_dev = *dev;
		async->async_wbufcid = 0;

		/* default software flow-control characters */
		async->async_startc = CSTART;
		async->async_stopc = CSTOP;
	} else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
	    secpolicy_excl_open(cr) != 0) {
		/* device is marked exclusive and caller lacks privilege */
		mutex_exit(&xcp->excl);
		return (EBUSY);
	}

	/* virtual console has no carrier detect; treat as always present */
	async->async_ttycommon.t_flags |= TS_SOFTCAR;

	async->async_ttycommon.t_readq = rq;
	async->async_ttycommon.t_writeq = WR(rq);
	rq->q_ptr = WR(rq)->q_ptr = (caddr_t)async;
	mutex_exit(&xcp->excl);
	/*
	 * Caution here -- qprocson sets the pointers that are used by canput
	 * called by xencons_rxint.  ASYNC_ISOPEN must *not* be set until those
	 * pointers are valid.
	 */
	qprocson(rq);
	async->async_flags |= ASYNC_ISOPEN;
	DEBUGCONT1(XENCONS_DEBUG_INIT, "asy%dopen: done\n", unit);
	return (0);
}
466 
467 
/*
 * Close routine.  Discards pending transmit data, tears down the
 * tty-common state, and detaches the asyncline from the stream.
 */
/*ARGSUSED*/
static int
xenconsclose(queue_t *q, int flag, cred_t *credp)
{
	struct asyncline *async;
	struct xencons	 *xcp;
#ifdef DEBUG
	int instance;
#endif

	async = (struct asyncline *)q->q_ptr;
	ASSERT(async != NULL);
	xcp = async->async_common;
#ifdef DEBUG
	instance = xcp->unit;
	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose\n", instance);
#endif

	mutex_enter(&xcp->excl);
	async->async_flags |= ASYNC_CLOSING;

	/* drop any transmit data still queued on the line */
	async->async_ocnt = 0;
	if (async->async_xmitblk != NULL)
		freeb(async->async_xmitblk);
	async->async_xmitblk = NULL;

out:	/* NOTE(review): no goto targets this label — possibly vestigial */
	ttycommon_close(&async->async_ttycommon);

	/*
	 * Cancel outstanding "bufcall" request.
	 */
	if (async->async_wbufcid != 0) {
		unbufcall(async->async_wbufcid);
		async->async_wbufcid = 0;
	}

	/* Note that qprocsoff can't be done until after interrupts are off */
	qprocsoff(q);
	q->q_ptr = WR(q)->q_ptr = NULL;
	async->async_ttycommon.t_readq = NULL;
	async->async_ttycommon.t_writeq = NULL;

	/*
	 * Clear out device state, except persistent device property flags.
	 */
	async->async_flags = 0;
	cv_broadcast(&async->async_flags_cv);
	mutex_exit(&xcp->excl);

	DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose: done\n", instance);
	return (0);
}
524 
/*
 * Input-buffer index: dom0 reads into a flat local buffer and uses the
 * index as-is; domU consumes from the shared ring and must mask the
 * running index into the ring.
 */
#define	INBUF_IX(ix, ifp)	(DOMAIN_IS_INITDOMAIN(xen_info) ? \
	(ix) : MASK_XENCONS_IDX((ix), (ifp)->in))
527 
/*
 * Handle a xen console rx interrupt: drain available input (read from
 * the hypervisor on dom0, or from the shared input ring on domU), scan
 * for the debugger abort sequence, apply software flow control, and
 * pass the characters up the read stream.
 */
/*ARGSUSED*/
static void
xencons_rxint(struct xencons *xcp)
{
	struct asyncline *async;
	short	cc;		/* number of characters to consume */
	mblk_t	*bp;
	queue_t	*q;
	uchar_t	c, buf[16];	/* dom0 read staging buffer */
	uchar_t	*cp;
	tty_common_t	*tp;
	int instance;
	volatile struct xencons_interface *ifp;
	XENCONS_RING_IDX cons, prod;

	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_rxint\n");

loop:
	mutex_enter(&xcp->excl);

	/* sanity check if we should bail */
	if (xencons_console == NULL) {
		mutex_exit(&xcp->excl);
		goto out;
	}

	async = xcp->priv;
	instance = xcp->unit;
	ifp = xcp->ifp;
	tp = &async->async_ttycommon;
	q = tp->t_readq;

	/* restart output if a pending flow-control resume was requested */
	if (async->async_flags & ASYNC_OUT_FLW_RESUME) {
		xcasync_start(async);
		async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
	}

	/*
	 * If data is available, send it up the stream if there's
	 * somebody listening.
	 */
	if (!(async->async_flags & ASYNC_ISOPEN)) {
		mutex_exit(&xcp->excl);
		goto out;
	}
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		/* dom0: pull up to 16 bytes directly from the hypervisor */
		cc = HYPERVISOR_console_io(CONSOLEIO_read, 16, (char *)buf);
		cp = buf;
		cons = 0;
	} else {
		/* domU: consume the shared ring range [in_cons, in_prod) */
		cons = ifp->in_cons;
		prod = ifp->in_prod;

		cc = prod - cons;
		cp = (uchar_t *)ifp->in;
	}
	if (cc <= 0) {
		mutex_exit(&xcp->excl);
		goto out;
	}

	/*
	 * Check for character break sequence.
	 *
	 * Note that normally asy drivers only check for a character sequence
	 * if abort_enable == KIOCABORTALTERNATE and otherwise use a break
	 * sensed on the line to do an abort_sequence_enter.  Since the
	 * hypervisor does not use a real chip for the console we default to
	 * using the alternate sequence.
	 */
	if ((abort_enable == KIOCABORTENABLE) && (xcp->flags & ASY_CONSOLE)) {
		XENCONS_RING_IDX i;

		for (i = 0; i < cc; i++) {
			c = cp[INBUF_IX(cons + i, ifp)];
			if (abort_charseq_recognize(c)) {
				/*
				 * Eat abort seg, it's not a valid debugger
				 * command.
				 */
				if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
					membar_producer();
					ifp->in_cons = cons + i;
				} else {
					cons += i;
				}
				abort_sequence_enter((char *)NULL);
				/*
				 * Back from debugger, resume normal processing
				 */
				mutex_exit(&xcp->excl);
				goto loop;
			}
		}
	}

	if (!canput(q)) {
		/* upstream congested: assert input flow control once */
		if (!(async->async_inflow_source & IN_FLOW_STREAMS)) {
			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
			    IN_FLOW_STREAMS);
		}
		mutex_exit(&xcp->excl);
		goto out;
	}
	if (async->async_inflow_source & IN_FLOW_STREAMS) {
		/* congestion cleared: release input flow control */
		(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
		    IN_FLOW_STREAMS);
	}
	DEBUGCONT2(XENCONS_DEBUG_INPUT,
	    "xencons%d_rxint: %d char(s) in queue.\n", instance, cc);
	if (!(bp = allocb(cc, BPRI_MED))) {
		mutex_exit(&xcp->excl);
		ttycommon_qfull(&async->async_ttycommon, q);
		goto out;
	}
	do {
		c = cp[INBUF_IX(cons++, ifp)];
		/*
		 * We handle XON/XOFF char if IXON is set,
		 * but if received char is _POSIX_VDISABLE,
		 * we left it to the up level module.
		 */
		if (tp->t_iflag & IXON) {
			if ((c == async->async_stopc) &&
			    (c != _POSIX_VDISABLE)) {
				xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
				continue;
			} else if ((c == async->async_startc) &&
			    (c != _POSIX_VDISABLE)) {
				xcasync_flowcontrol_sw_output(xcp, FLOW_START);
				continue;
			}
			if ((tp->t_iflag & IXANY) &&
			    (async->async_flags & ASYNC_SW_OUT_FLW)) {
				xcasync_flowcontrol_sw_output(xcp, FLOW_START);
			}
		}
		*bp->b_wptr++ = c;
	} while (--cc);
	/* order our ring reads before publishing the new consumer index */
	membar_producer();
	if (!DOMAIN_IS_INITDOMAIN(xen_info))
		ifp->in_cons = cons;
	mutex_exit(&xcp->excl);
	if (bp->b_wptr > bp->b_rptr) {
		if (!canput(q)) {
			xenconserror(CE_NOTE, "xencons%d: local queue full",
			    instance);
			freemsg(bp);
		} else
			(void) putq(q, bp);
	} else
		freemsg(bp);
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		goto loop;	/* dom0: keep reading until hypervisor is dry */
out:
	DEBUGCONT1(XENCONS_DEBUG_PROCS, "xencons%d_rxint: done\n", instance);
	if (!DOMAIN_IS_INITDOMAIN(xen_info))
		ec_notify_via_evtchn(xcp->evtchn);
}
690 
691 
692 /*
693  * Handle a xen console tx interrupt.
694  */
695 /*ARGSUSED*/
696 static void
697 xencons_txint(struct xencons *xcp)
698 {
699 	struct asyncline *async;
700 
701 	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint\n");
702 
703 	/*
704 	 * prevent recursive entry
705 	 */
706 	if (mutex_owner(&xcp->excl) == curthread) {
707 		goto out;
708 	}
709 
710 	mutex_enter(&xcp->excl);
711 	if (xencons_console == NULL) {
712 		mutex_exit(&xcp->excl);
713 		goto out;
714 	}
715 
716 	/* make sure the device is open */
717 	async = xcp->priv;
718 	if ((async->async_flags & ASYNC_ISOPEN) != 0)
719 		xcasync_start(async);
720 
721 	mutex_exit(&xcp->excl);
722 out:
723 	DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint: done\n");
724 }
725 
726 
727 /*
728  * Get an event when input ring becomes not empty or output ring becomes not
729  * full.
730  */
731 static uint_t
732 xenconsintr(caddr_t arg)
733 {
734 	struct xencons *xcp = (struct xencons *)arg;
735 	volatile struct xencons_interface *ifp = xcp->ifp;
736 
737 	if (ifp->in_prod != ifp->in_cons)
738 		xencons_rxint(xcp);
739 	if (ifp->out_prod - ifp->out_cons < sizeof (ifp->out))
740 		xencons_txint(xcp);
741 	return (DDI_INTR_CLAIMED);
742 }
743 
/*
 * Console interrupt routine for privileged domains (dom0).  The VIRQ
 * carries no ring-state information, so poll both directions
 * unconditionally.
 */
static uint_t
xenconsintr_priv(caddr_t arg)
{
	struct xencons *xcp = (struct xencons *)arg;

	/* check for input, then restart any pending output */
	xencons_rxint(xcp);
	xencons_txint(xcp);
	return (DDI_INTR_CLAIMED);
}
756 
/*
 * Start output on a line, unless it's busy, frozen, or otherwise.
 * Drains the write queue, handling drained-ioctl messages inline, and
 * pushes data either to the hypervisor (dom0, via a staging buffer) or
 * into the shared output ring (domU).  Caller must hold xcp->excl.
 */
/*ARGSUSED*/
static void
xcasync_start(struct asyncline *async)
{
	struct xencons *xcp = async->async_common;
	int cc;
	queue_t *q;
	mblk_t *bp;
	int	len, space, blen;
	mblk_t *nbp;

#ifdef DEBUG
	int instance = xcp->unit;

	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_nstart\n", instance);
#endif
	ASSERT(mutex_owned(&xcp->excl));

	/*
	 * Check only pended sw input flow control.
	 */
domore:
	(void) xcasync_flowcontrol_sw_input(xcp, FLOW_CHECK, IN_FLOW_NULL);

	if ((q = async->async_ttycommon.t_writeq) == NULL) {
		return;	/* not attached to a stream */
	}

	for (;;) {
		if ((bp = getq(q)) == NULL)
			return;	/* no data to transmit */

		/*
		 * We have a message block to work on.
		 * Check whether it's a break, a delay, or an ioctl (the latter
		 * occurs if the ioctl in question was waiting for the output
		 * to drain).  If it's one of those, process it immediately.
		 */
		switch (bp->b_datap->db_type) {

		case M_IOCTL:
			/*
			 * This ioctl was waiting for the output ahead of
			 * it to drain; obviously, it has.  Do it, and
			 * then grab the next message after it.
			 */
			mutex_exit(&xcp->excl);
			xcasync_ioctl(async, q, bp);
			mutex_enter(&xcp->excl);
			continue;
		}

		/* skip (and free) leading zero-length blocks */
		while (bp != NULL && (cc = bp->b_wptr - bp->b_rptr) == 0) {
			nbp = bp->b_cont;
			freeb(bp);
			bp = nbp;
		}
		if (bp != NULL)
			break;
	}

	/*
	 * We have data to transmit.  If output is stopped, put
	 * it back and try again later.
	 */
	if (async->async_flags & (ASYNC_SW_OUT_FLW | ASYNC_STOPPED)) {
		(void) putbq(q, bp);
		return;
	}


	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		/*
		 * dom0: stage up to XENCONS_WBUFSIZE bytes into
		 * xencons_wbuf, then hand them to the hypervisor in a
		 * single console_io call (made with the lock dropped).
		 */
		len = 0;
		space = XENCONS_WBUFSIZE;
		while (bp != NULL && space) {
			blen = bp->b_wptr - bp->b_rptr;
			cc = min(blen, space);
			bcopy(bp->b_rptr, &xencons_wbuf[len], cc);
			bp->b_rptr += cc;
			if (cc == blen) {
				nbp = bp->b_cont;
				freeb(bp);
				bp = nbp;
			}
			space -= cc;
			len += cc;
		}
		mutex_exit(&xcp->excl);
		(void) HYPERVISOR_console_io(CONSOLEIO_write, len,
		    xencons_wbuf);
		mutex_enter(&xcp->excl);
		if (bp != NULL)
			(void) putbq(q, bp); /* not done with this msg yet */
		/*
		 * There are no completion interrupts when using the
		 * HYPERVISOR_console_io call to write console data
		 * so we loop here till we have sent all the data to the
		 * hypervisor.
		 */
		goto domore;
	} else {
		/* domU: copy bytes into the shared output ring */
		volatile struct xencons_interface *ifp = xcp->ifp;
		XENCONS_RING_IDX cons, prod;

		cons = ifp->out_cons;
		prod = ifp->out_prod;
		membar_enter();
		while (bp != NULL && ((prod - cons) < sizeof (ifp->out))) {
			ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] =
			    *bp->b_rptr++;
			if (bp->b_rptr == bp->b_wptr) {
				nbp = bp->b_cont;
				freeb(bp);
				bp = nbp;
			}
		}
		/* order ring writes before publishing the producer index */
		membar_producer();
		ifp->out_prod = prod;
		ec_notify_via_evtchn(xcp->evtchn);
		if (bp != NULL)
			(void) putbq(q, bp); /* not done with this msg yet */
	}
}
883 
884 
/*
 * Process an "ioctl" message sent down to us.
 * Note that we don't need to get any locks until we are ready to access
 * the hardware.  Nothing we access until then is going to be altered
 * outside of the STREAMS framework, so we should be safe.
 *
 * Replies to the ioctl (ACK or NAK) via qreply() on the write queue.
 */
static void
xcasync_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
{
	struct xencons *xcp = async->async_common;
	tty_common_t  *tp = &async->async_ttycommon;
	struct iocblk *iocp;
	unsigned datasize;
	int error = 0;		/* 0 = ok, <0 = not handled yet, >0 = errno */

#ifdef DEBUG
	int instance = xcp->unit;

	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl\n", instance);
#endif

	if (tp->t_iocpending != NULL) {
		/*
		 * We were holding an "ioctl" response pending the
		 * availability of an "mblk" to hold data to be passed up;
		 * another "ioctl" came through, which means that "ioctl"
		 * must have timed out or been aborted.
		 */
		freemsg(async->async_ttycommon.t_iocpending);
		async->async_ttycommon.t_iocpending = NULL;
	}

	iocp = (struct iocblk *)mp->b_rptr;

	/*
	 * For TIOCMGET and the PPS ioctls, do NOT call ttycommon_ioctl()
	 * because this function frees up the message block (mp->b_cont) that
	 * contains the user location where we pass back the results.
	 *
	 * Similarly, CONSOPENPOLLEDIO needs ioc_count, which ttycommon_ioctl
	 * zaps.  We know that ttycommon_ioctl doesn't know any CONS*
	 * ioctls, so keep the others safe too.
	 */
	DEBUGCONT2(XENCONS_DEBUG_IOCTL, "async%d_ioctl: %s\n",
	    instance,
	    iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
	    iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
	    iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
	    iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" : "other");

	switch (iocp->ioc_cmd) {
	case TIOCMGET:
	case TIOCGPPS:
	case TIOCSPPS:
	case TIOCGPPSEV:
	case CONSOPENPOLLEDIO:
	case CONSCLOSEPOLLEDIO:
	case CONSSETABORTENABLE:
	case CONSGETABORTENABLE:
		error = -1; /* Do Nothing */
		break;
	default:

		/*
		 * The only way in which "ttycommon_ioctl" can fail is if the
		 * "ioctl" requires a response containing data to be returned
		 * to the user, and no mblk could be allocated for the data.
		 * No such "ioctl" alters our state.  Thus, we always go ahead
		 * and do any state-changes the "ioctl" calls for.  If we
		 * couldn't allocate the data, "ttycommon_ioctl" has stashed
		 * the "ioctl" away safely, so we just call "bufcall" to
		 * request that we be called back when we stand a better
		 * chance of allocating the data.
		 */
		if ((datasize = ttycommon_ioctl(tp, wq, mp, &error)) != 0) {
			if (async->async_wbufcid)
				unbufcall(async->async_wbufcid);
			async->async_wbufcid = bufcall(datasize, BPRI_HI,
			    (void (*)(void *)) xcasync_reioctl,
			    (void *)(intptr_t)async->async_common->unit);
			return;
		}
	}

	mutex_enter(&xcp->excl);

	if (error == 0) {
		/*
		 * "ttycommon_ioctl" did most of the work; we just use the
		 * data it set up.
		 */
		switch (iocp->ioc_cmd) {

		/* no hardware to reprogram for termios changes */
		case TCSETS:
		case TCSETSF:
		case TCSETSW:
		case TCSETA:
		case TCSETAW:
		case TCSETAF:
			break;
		}
	} else if (error < 0) {
		/*
		 * "ttycommon_ioctl" didn't do anything; we process it here.
		 */
		error = 0;
		switch (iocp->ioc_cmd) {

		case TCSBRK:
			error = miocpullup(mp, sizeof (int));
			break;

		/* no real line to assert break on; just ACK */
		case TIOCSBRK:
			mioc2ack(mp, NULL, 0, 0);
			break;

		case TIOCCBRK:
			mioc2ack(mp, NULL, 0, 0);
			break;

		case CONSOPENPOLLEDIO:
			error = miocpullup(mp, sizeof (cons_polledio_arg_t));
			if (error != 0)
				break;

			/* hand back a pointer to our polled-I/O vector */
			*(cons_polledio_arg_t *)mp->b_cont->b_rptr =
			    (cons_polledio_arg_t)&xcp->polledio;

			mp->b_datap->db_type = M_IOCACK;
			break;

		case CONSCLOSEPOLLEDIO:
			mp->b_datap->db_type = M_IOCACK;
			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			break;

		case CONSSETABORTENABLE:
			error = secpolicy_console(iocp->ioc_cr);
			if (error != 0)
				break;

			if (iocp->ioc_count != TRANSPARENT) {
				error = EINVAL;
				break;
			}

			if (*(intptr_t *)mp->b_cont->b_rptr)
				xcp->flags |= ASY_CONSOLE;
			else
				xcp->flags &= ~ASY_CONSOLE;

			mp->b_datap->db_type = M_IOCACK;
			iocp->ioc_error = 0;
			iocp->ioc_rval = 0;
			break;

		case CONSGETABORTENABLE:
			/*CONSTANTCONDITION*/
			ASSERT(sizeof (boolean_t) <= sizeof (boolean_t *));
			/*
			 * Store the return value right in the payload
			 * we were passed.  Crude.
			 */
			mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
			*(boolean_t *)mp->b_cont->b_rptr =
			    (xcp->flags & ASY_CONSOLE) != 0;
			break;

		default:
			/*
			 * If we don't understand it, it's an error.  NAK it.
			 */
			error = EINVAL;
			break;
		}
	}
	if (error != 0) {
		iocp->ioc_error = error;
		mp->b_datap->db_type = M_IOCNAK;
	}
	mutex_exit(&xcp->excl);
	qreply(wq, mp);
	DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl: done\n", instance);
}
1070 
1071 static int
1072 xenconsrsrv(queue_t *q)
1073 {
1074 	mblk_t *bp;
1075 
1076 	while (canputnext(q) && (bp = getq(q)))
1077 		putnext(q, bp);
1078 	return (0);
1079 }
1080 
1081 /*
1082  * Put procedure for write queue.
1083  * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
1084  * set the flow control character for M_STOPI and M_STARTI messages;
1085  * queue up M_BREAK, M_DELAY, and M_DATA messages for processing
1086  * by the start routine, and then call the start routine; discard
1087  * everything else.  Note that this driver does not incorporate any
1088  * mechanism to negotiate to handle the canonicalization process.
1089  * It expects that these functions are handled in upper module(s),
1090  * as we do in ldterm.
1091  */
1092 static int
1093 xenconswput(queue_t *q, mblk_t *mp)
1094 {
1095 	struct asyncline *async;
1096 	struct xencons *xcp;
1097 
1098 	async = (struct asyncline *)q->q_ptr;
1099 	xcp = async->async_common;
1100 
1101 	switch (mp->b_datap->db_type) {
1102 
1103 	case M_STOP:
1104 		mutex_enter(&xcp->excl);
1105 		async->async_flags |= ASYNC_STOPPED;
1106 		mutex_exit(&xcp->excl);
1107 		freemsg(mp);
1108 		break;
1109 
1110 	case M_START:
1111 		mutex_enter(&xcp->excl);
1112 		if (async->async_flags & ASYNC_STOPPED) {
1113 			async->async_flags &= ~ASYNC_STOPPED;
1114 			xcasync_start(async);
1115 		}
1116 		mutex_exit(&xcp->excl);
1117 		freemsg(mp);
1118 		break;
1119 
1120 	case M_IOCTL:
1121 		switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1122 
1123 		case TCSETSW:
1124 		case TCSETSF:
1125 		case TCSETAW:
1126 		case TCSETAF:
1127 			/*
1128 			 * The changes do not take effect until all
1129 			 * output queued before them is drained.
1130 			 * Put this message on the queue, so that
1131 			 * "xcasync_start" will see it when it's done
1132 			 * with the output before it.  Poke the
1133 			 * start routine, just in case.
1134 			 */
1135 			(void) putq(q, mp);
1136 			mutex_enter(&xcp->excl);
1137 			xcasync_start(async);
1138 			mutex_exit(&xcp->excl);
1139 			break;
1140 
1141 		default:
1142 			/*
1143 			 * Do it now.
1144 			 */
1145 			xcasync_ioctl(async, q, mp);
1146 			break;
1147 		}
1148 		break;
1149 
1150 	case M_FLUSH:
1151 		if (*mp->b_rptr & FLUSHW) {
1152 			mutex_enter(&xcp->excl);
1153 			/*
1154 			 * Flush our write queue.
1155 			 */
1156 			flushq(q, FLUSHDATA);	/* XXX doesn't flush M_DELAY */
1157 			if (async->async_xmitblk != NULL) {
1158 				freeb(async->async_xmitblk);
1159 				async->async_xmitblk = NULL;
1160 			}
1161 			mutex_exit(&xcp->excl);
1162 			*mp->b_rptr &= ~FLUSHW;	/* it has been flushed */
1163 		}
1164 		if (*mp->b_rptr & FLUSHR) {
1165 			flushq(RD(q), FLUSHDATA);
1166 			qreply(q, mp);	/* give the read queues a crack at it */
1167 		} else {
1168 			freemsg(mp);
1169 		}
1170 
1171 		/*
1172 		 * We must make sure we process messages that survive the
1173 		 * write-side flush.
1174 		 */
1175 		mutex_enter(&xcp->excl);
1176 		xcasync_start(async);
1177 		mutex_exit(&xcp->excl);
1178 		break;
1179 
1180 	case M_BREAK:
1181 	case M_DELAY:
1182 	case M_DATA:
1183 		/*
1184 		 * Queue the message up to be transmitted,
1185 		 * and poke the start routine.
1186 		 */
1187 		(void) putq(q, mp);
1188 		mutex_enter(&xcp->excl);
1189 		xcasync_start(async);
1190 		mutex_exit(&xcp->excl);
1191 		break;
1192 
1193 	case M_STOPI:
1194 		mutex_enter(&xcp->excl);
1195 		mutex_enter(&xcp->excl);
1196 		if (!(async->async_inflow_source & IN_FLOW_USER)) {
1197 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
1198 			    IN_FLOW_USER);
1199 		}
1200 		mutex_exit(&xcp->excl);
1201 		mutex_exit(&xcp->excl);
1202 		freemsg(mp);
1203 		break;
1204 
1205 	case M_STARTI:
1206 		mutex_enter(&xcp->excl);
1207 		mutex_enter(&xcp->excl);
1208 		if (async->async_inflow_source & IN_FLOW_USER) {
1209 			(void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
1210 			    IN_FLOW_USER);
1211 		}
1212 		mutex_exit(&xcp->excl);
1213 		mutex_exit(&xcp->excl);
1214 		freemsg(mp);
1215 		break;
1216 
1217 	case M_CTL:
1218 		if (MBLKL(mp) >= sizeof (struct iocblk) &&
1219 		    ((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
1220 			((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
1221 			qreply(q, mp);
1222 		} else {
1223 			freemsg(mp);
1224 		}
1225 		break;
1226 
1227 	default:
1228 		freemsg(mp);
1229 		break;
1230 	}
1231 	return (0);
1232 }
1233 
1234 /*
1235  * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
1236  * the buffer we need.
1237  */
1238 static void
1239 xcasync_reioctl(void *unit)
1240 {
1241 	int instance = (uintptr_t)unit;
1242 	struct asyncline *async;
1243 	struct xencons *xcp;
1244 	queue_t	*q;
1245 	mblk_t	*mp;
1246 
1247 	xcp = ddi_get_soft_state(xencons_soft_state, instance);
1248 	ASSERT(xcp != NULL);
1249 	async = xcp->priv;
1250 
1251 	/*
1252 	 * The bufcall is no longer pending.
1253 	 */
1254 	mutex_enter(&xcp->excl);
1255 	async->async_wbufcid = 0;
1256 	if ((q = async->async_ttycommon.t_writeq) == NULL) {
1257 		mutex_exit(&xcp->excl);
1258 		return;
1259 	}
1260 	if ((mp = async->async_ttycommon.t_iocpending) != NULL) {
1261 		/* not pending any more */
1262 		async->async_ttycommon.t_iocpending = NULL;
1263 		mutex_exit(&xcp->excl);
1264 		xcasync_ioctl(async, q, mp);
1265 	} else
1266 		mutex_exit(&xcp->excl);
1267 }
1268 
1269 
1270 /*
1271  * debugger/console support routines.
1272  */
1273 
1274 /*
1275  * put a character out
1276  * Do not use interrupts.  If char is LF, put out CR, LF.
1277  */
1278 /*ARGSUSED*/
static void
xenconsputchar(cons_polledio_arg_t arg, uchar_t c)
{
	struct xencons *xcp = xencons_console;
	volatile struct xencons_interface *ifp = xcp->ifp;
	XENCONS_RING_IDX prod;

	/* Canonicalize LF to CR-LF by recursing to emit the CR first. */
	if (c == '\n')
		xenconsputchar(arg, '\r');

	/*
	 * domain 0 can use the console I/O...
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		char	buffer[1];

		buffer[0] = c;
		(void) HYPERVISOR_console_io(CONSOLEIO_write, 1, buffer);
		return;
	}

	/*
	 * domU has to go through dom0 virtual console.
	 * The shared output ring is full when prod - cons equals the ring
	 * size; spin (yielding to the hypervisor) until space frees up.
	 */
	while (ifp->out_prod - ifp->out_cons == sizeof (ifp->out))
		(void) HYPERVISOR_yield();

	prod = ifp->out_prod;
	ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] = c;
	/* Make the byte visible to the peer before publishing the index. */
	membar_producer();
	ifp->out_prod = prod;
	/* Kick the backend so it consumes the new output. */
	ec_notify_via_evtchn(xcp->evtchn);
}
1312 
1313 /*
1314  * See if there's a character available. If no character is
1315  * available, return 0. Run in polled mode, no interrupts.
1316  */
static boolean_t
xenconsischar(cons_polledio_arg_t arg)
{
	struct xencons *xcp = (struct xencons *)arg;
	volatile struct xencons_interface *ifp = xcp->ifp;

	/* A previously-read character is still buffered and unconsumed. */
	if (xcp->polldix < xcp->polllen)
		return (B_TRUE);
	/*
	 * domain 0 can use the console I/O...
	 */
	xcp->polldix = 0;
	xcp->polllen = 0;
	if (DOMAIN_IS_INITDOMAIN(xen_info)) {
		xcp->polllen = HYPERVISOR_console_io(CONSOLEIO_read, 1,
		    (char *)xcp->pollbuf);
		return (xcp->polllen != 0);
	}

	/*
	 * domU has to go through virtual console device.
	 * A non-empty input ring (prod != cons) means a byte is waiting.
	 */
	if (ifp->in_prod != ifp->in_cons) {
		XENCONS_RING_IDX cons;

		cons = ifp->in_cons;
		/* Order the index read before reading the ring contents. */
		membar_enter();
		xcp->pollbuf[0] = ifp->in[MASK_XENCONS_IDX(cons++, ifp->in)];
		/* Consume the byte before publishing the new consumer index. */
		membar_producer();
		ifp->in_cons = cons;
		xcp->polllen = 1;
	}
	return (xcp->polllen != 0);
}
1351 
1352 /*
1353  * Get a character. Run in polled mode, no interrupts.
1354  */
1355 static int
1356 xenconsgetchar(cons_polledio_arg_t arg)
1357 {
1358 	struct xencons *xcp = (struct xencons *)arg;
1359 
1360 	ec_wait_on_evtchn(xcp->evtchn, (int (*)(void *))xenconsischar, arg);
1361 
1362 	return (xcp->pollbuf[xcp->polldix++]);
1363 }
1364 
/*
 * Rate-limited error reporting: a message is suppressed when the very
 * same format string was already printed within the last second.
 */
static void
xenconserror(int level, const char *fmt, ...)
{
	static time_t	last;
	static const char *lastfmt;
	time_t now;
	va_list ap;

	/*
	 * We remember the fmt *pointer* itself, so fmt must not point at
	 * stack storage; it has to live in the data segment, otherwise
	 * lastfmt would point to non-sense.
	 */
	now = gethrestime_sec();
	if (now == last && fmt == lastfmt)
		return;

	last = now;
	lastfmt = fmt;

	va_start(ap, fmt);
	vcmn_err(level, fmt, ap);
	va_end(ap);
}
1393 
1394 
1395 /*
1396  * Check for abort character sequence
1397  */
1398 static boolean_t
1399 abort_charseq_recognize(uchar_t ch)
1400 {
1401 	static int state = 0;
1402 #define	CNTRL(c) ((c)&037)
1403 	static char sequence[] = { '\r', '~', CNTRL('b') };
1404 
1405 	if (ch == sequence[state]) {
1406 		if (++state >= sizeof (sequence)) {
1407 			state = 0;
1408 			return (B_TRUE);
1409 		}
1410 	} else {
1411 		state = (ch == sequence[0]) ? 1 : 0;
1412 	}
1413 	return (B_FALSE);
1414 }
1415 
1416 /*
1417  * Flow control functions
1418  */
1419 
1420 /*
1421  * Software output flow control
1422  * This function can be executed sucessfully at any situation.
1423  * It does not handle HW, and just change the SW output flow control flag.
1424  * INPUT VALUE of onoff:
1425  *                 FLOW_START means to clear SW output flow control flag,
1426  *			also set ASYNC_OUT_FLW_RESUME.
1427  *                 FLOW_STOP means to set SW output flow control flag,
1428  *			also clear ASYNC_OUT_FLW_RESUME.
1429  */
1430 static void
1431 xcasync_flowcontrol_sw_output(struct xencons *xcp, async_flowc_action onoff)
1432 {
1433 	struct asyncline *async = xcp->priv;
1434 	int instance = xcp->unit;
1435 
1436 	ASSERT(mutex_owned(&xcp->excl));
1437 
1438 	if (!(async->async_ttycommon.t_iflag & IXON))
1439 		return;
1440 
1441 	switch (onoff) {
1442 	case FLOW_STOP:
1443 		async->async_flags |= ASYNC_SW_OUT_FLW;
1444 		async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
1445 		DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1446 		    "xencons%d: output sflow stop\n", instance);
1447 		break;
1448 	case FLOW_START:
1449 		async->async_flags &= ~ASYNC_SW_OUT_FLW;
1450 		async->async_flags |= ASYNC_OUT_FLW_RESUME;
1451 		DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1452 		    "xencons%d: output sflow start\n", instance);
1453 		break;
1454 	default:
1455 		break;
1456 	}
1457 }
1458 
1459 /*
1460  * Software input flow control
1461  * This function can execute software input flow control
1462  * INPUT VALUE of onoff:
1463  *               FLOW_START means to send out a XON char
1464  *                          and clear SW input flow control flag.
1465  *               FLOW_STOP means to send out a XOFF char
1466  *                          and set SW input flow control flag.
1467  *               FLOW_CHECK means to check whether there is pending XON/XOFF
1468  *                          if it is true, send it out.
1469  * INPUT VALUE of type:
1470  *		 IN_FLOW_STREAMS means flow control is due to STREAMS
1471  *		 IN_FLOW_USER means flow control is due to user's commands
1472  * RETURN VALUE: B_FALSE means no flow control char is sent
1473  *               B_TRUE means one flow control char is sent
1474  */
static boolean_t
xcasync_flowcontrol_sw_input(struct xencons *xcp, async_flowc_action onoff,
    int type)
{
	struct asyncline *async = xcp->priv;
	int instance = xcp->unit;
	int rval = B_FALSE;

	ASSERT(mutex_owned(&xcp->excl));

	/* Software input flow control only applies when IXOFF is set. */
	if (!(async->async_ttycommon.t_iflag & IXOFF))
		return (rval);

	/*
	 * If we get this far, then we know IXOFF is set.
	 */
	switch (onoff) {
	case FLOW_STOP:
		async->async_inflow_source |= type;

		/*
		 * We'll send an XOFF character for each of up to
		 * three different input flow control attempts to stop input.
		 * If we already send out one XOFF, but FLOW_STOP comes again,
		 * it seems that input flow control becomes more serious,
		 * then send XOFF again.
		 */
		if (async->async_inflow_source & (IN_FLOW_STREAMS |
		    IN_FLOW_USER))
			async->async_flags |= ASYNC_SW_IN_FLOW |
			    ASYNC_SW_IN_NEEDED;
		DEBUGCONT2(XENCONS_DEBUG_SFLOW, "xencons%d: input sflow stop, "
		    "type = %x\n", instance, async->async_inflow_source);
		break;
	case FLOW_START:
		async->async_inflow_source &= ~type;
		/* Only restart input once every stop source has cleared. */
		if (async->async_inflow_source == 0) {
			async->async_flags = (async->async_flags &
			    ~ASYNC_SW_IN_FLOW) | ASYNC_SW_IN_NEEDED;
			DEBUGCONT1(XENCONS_DEBUG_SFLOW, "xencons%d: "
			    "input sflow start\n", instance);
		}
		break;
	default:
		/* FLOW_CHECK: fall through to flush any pending XON/XOFF. */
		break;
	}

	if (async->async_flags & ASYNC_SW_IN_NEEDED) {
		/*
		 * If we get this far, then we know we need to send out
		 * XON or XOFF char.
		 */
		char c;

		rval = B_TRUE;
		c = (async->async_flags & ASYNC_SW_IN_FLOW) ?
		    async->async_stopc : async->async_startc;
		if (DOMAIN_IS_INITDOMAIN(xen_info)) {
			(void) HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
			async->async_flags &= ~ASYNC_SW_IN_NEEDED;
			return (rval);
		} else {
			/*
			 * NOTE(review): unlike the dom0 path above, this
			 * branch does not clear ASYNC_SW_IN_NEEDED, so the
			 * flow-control character may be re-sent on a later
			 * call — confirm whether that is intentional.
			 */
			xenconsputchar(NULL, c);
		}
	}
	return (rval);
}
1542 
/* STREAMS module identification and default flow-control parameters. */
struct module_info xencons_info = {
	0,		/* mi_idnum: module ID number */
	"xencons",	/* mi_idname: module name */
	0,		/* mi_minpsz: minimum packet size */
	INFPSZ,		/* mi_maxpsz: no maximum packet size */
	4096,		/* mi_hiwat: queue high-water mark */
	128		/* mi_lowat: queue low-water mark */
};
1551 
/* Read-side queue operations. */
static struct qinit xencons_rint = {
	putq,		/* qi_putp: default put procedure */
	xenconsrsrv,	/* qi_srvp: service procedure */
	xenconsopen,	/* qi_qopen */
	xenconsclose,	/* qi_qclose */
	NULL,		/* qi_qadmin */
	&xencons_info,	/* qi_minfo */
	NULL		/* qi_mstat */
};
1561 
/* Write-side queue operations; no service procedure is needed. */
static struct qinit xencons_wint = {
	xenconswput,	/* qi_putp */
	NULL,		/* qi_srvp */
	NULL,		/* qi_qopen */
	NULL,		/* qi_qclose */
	NULL,		/* qi_qadmin */
	&xencons_info,	/* qi_minfo */
	NULL		/* qi_mstat */
};
1571 
/* STREAMS entity declaration; this driver has no multiplexor queues. */
struct streamtab xencons_str_info = {
	&xencons_rint,	/* st_rdinit */
	&xencons_wint,	/* st_wrinit */
	NULL,		/* st_muxrinit */
	NULL		/* st_muxwinit */
};
1578 
/*
 * Character/block driver entry points.  All direct entry points are
 * unsupported (nodev); the device is accessed through the STREAMS
 * table above via cb_stream.
 */
static struct cb_ops cb_xencons_ops = {
	nodev,			/* cb_open */
	nodev,			/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	&xencons_str_info,		/* cb_stream */
	D_MP			/* cb_flag */
};
1596 
/* Device operations vector wired into the module linkage below. */
struct dev_ops xencons_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	xenconsinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	xenconsattach,		/* devo_attach */
	xenconsdetach,		/* devo_detach */
	nodev,			/* devo_reset */
	&cb_xencons_ops,		/* devo_cb_ops */
};
1608 
/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops, /* Type of module.  This one is a driver */
	"virtual console driver %I%",	/* drv_linkinfo (SCCS keyword) */
	&xencons_ops,	/* driver ops */
};
1614 
/* Module linkage: a single driver linkage structure, NULL-terminated. */
static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};
1620 
1621 int
1622 _init(void)
1623 {
1624 	int rv;
1625 
1626 	if ((rv = ddi_soft_state_init(&xencons_soft_state,
1627 	    sizeof (struct xencons), 1)) != 0)
1628 		return (rv);
1629 	if ((rv = mod_install(&modlinkage)) != 0) {
1630 		ddi_soft_state_fini(&xencons_soft_state);
1631 		return (rv);
1632 	}
1633 	DEBUGCONT2(XENCONS_DEBUG_INIT, "%s, debug = %x\n",
1634 	    modldrv.drv_linkinfo, debug);
1635 	return (0);
1636 }
1637 
1638 int
1639 _fini(void)
1640 {
1641 	int rv;
1642 
1643 	if ((rv = mod_remove(&modlinkage)) != 0)
1644 		return (rv);
1645 
1646 	ddi_soft_state_fini(&xencons_soft_state);
1647 	return (0);
1648 }
1649 
/* Module information entry point: report via the module linkage. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
1655