1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
23 /* Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T */
24 /* All Rights Reserved */
25
26 /*
27 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
29 */
30
31
32 /*
33 *
34 * Copyright (c) 2004 Christian Limpach.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. This section intentionally left blank.
46 * 4. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 */
60 /*
61 * Section 3 of the above license was updated in response to bug 6379571.
62 */
63
64 /*
65 * Hypervisor virtual console driver
66 */
67
68 #include <sys/param.h>
69 #include <sys/types.h>
70 #include <sys/signal.h>
71 #include <sys/stream.h>
72 #include <sys/termio.h>
73 #include <sys/errno.h>
74 #include <sys/file.h>
75 #include <sys/cmn_err.h>
76 #include <sys/stropts.h>
77 #include <sys/strsubr.h>
78 #include <sys/strtty.h>
79 #include <sys/debug.h>
80 #include <sys/kbio.h>
81 #include <sys/cred.h>
82 #include <sys/stat.h>
83 #include <sys/consdev.h>
84 #include <sys/mkdev.h>
85 #include <sys/kmem.h>
86 #include <sys/cred.h>
87 #include <sys/strsun.h>
88 #ifdef DEBUG
89 #include <sys/promif.h>
90 #endif
91 #include <sys/modctl.h>
92 #include <sys/ddi.h>
93 #include <sys/sunddi.h>
94 #include <sys/sunndi.h>
95 #include <sys/policy.h>
96 #include <sys/atomic.h>
97 #include <sys/psm.h>
98 #include <xen/public/io/console.h>
99
100 #include "xencons.h"
101
102 #include <sys/hypervisor.h>
103 #include <sys/evtchn_impl.h>
104 #include <xen/sys/xenbus_impl.h>
105 #include <xen/sys/xendev.h>
106
107 #ifdef DEBUG
108 #define XENCONS_DEBUG_INIT 0x0001 /* msgs during driver initialization. */
109 #define XENCONS_DEBUG_INPUT 0x0002 /* characters received during int. */
110 #define XENCONS_DEBUG_EOT 0x0004 /* msgs when wait for xmit to finish. */
111 #define XENCONS_DEBUG_CLOSE 0x0008 /* msgs when driver open/close called */
112 #define XENCONS_DEBUG_PROCS 0x0020 /* each proc name as it is entered. */
113 #define XENCONS_DEBUG_OUT 0x0100 /* msgs about output events. */
114 #define XENCONS_DEBUG_BUSY 0x0200 /* msgs when xmit is enabled/disabled */
115 #define XENCONS_DEBUG_MODEM 0x0400 /* msgs about modem status & control. */
116 #define XENCONS_DEBUG_MODM2 0x0800 /* msgs about modem status & control. */
117 #define XENCONS_DEBUG_IOCTL 0x1000 /* Output msgs about ioctl messages. */
118 #define XENCONS_DEBUG_CHIP 0x2000 /* msgs about chip identification. */
119 #define XENCONS_DEBUG_SFLOW 0x4000 /* msgs when S/W flowcontrol active */
120 #define XENCONS_DEBUG(x) (debug & (x))
121 static int debug = 0;
122 #else
123 #define XENCONS_DEBUG(x) B_FALSE
124 #endif
125
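/*
 * Size of the staging buffer (xencons_wbuf) that dom0 uses to batch
 * outgoing console data for HYPERVISOR_console_io(CONSOLEIO_write, ...).
 */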
126 #define XENCONS_WBUFSIZE 4096
127
128 static boolean_t abort_charseq_recognize(uchar_t);
129
130 /* The async interrupt entry points */
131 static void xcasync_ioctl(struct asyncline *, queue_t *, mblk_t *);
132 static void xcasync_reioctl(void *);
133 static void xcasync_start(struct asyncline *);
134 static void xenconsputchar(cons_polledio_arg_t, uchar_t);
135 static int xenconsgetchar(cons_polledio_arg_t);
136 static boolean_t xenconsischar(cons_polledio_arg_t);
137
138 static uint_t xenconsintr(caddr_t);
139 static uint_t xenconsintr_priv(caddr_t, caddr_t);
140 /*PRINTFLIKE2*/
141 static void xenconserror(int, const char *, ...) __KPRINTFLIKE(2);
142 static void xencons_soft_state_free(struct xencons *);
143 static boolean_t
144 xcasync_flowcontrol_sw_input(struct xencons *, async_flowc_action, int);
145 static void
146 xcasync_flowcontrol_sw_output(struct xencons *, async_flowc_action);
147
148 void *xencons_soft_state;
149 char *xencons_wbuf;
150 struct xencons *xencons_console;
151
152 static void
153 xenconssetup_avintr(struct xencons *xcp, int attach)
154 {
155 /*
156 * On xen, CPU 0 always exists and can't be taken offline,
157 * so binding this thread to it should always succeed.
158 */
159 mutex_enter(&cpu_lock);
160 thread_affinity_set(curthread, 0);
161 mutex_exit(&cpu_lock);
162
163 if (attach) {
164 /* Setup our interrupt binding. */
165 (void) add_avintr(NULL, IPL_CONS, xenconsintr_priv,
166 "xencons", xcp->console_irq, (caddr_t)xcp, NULL, NULL,
167 xcp->dip);
168 } else {
169 /*
170 * Cleanup interrupt configuration. Note that the framework
171 * _should_ ensure that when rem_avintr() returns the interrupt
172 * service routine is not currently executing and that it won't
173 * be invoked again.
174 */
175 (void) rem_avintr(NULL, IPL_CONS, xenconsintr_priv,
176 xcp->console_irq);
177 }
178
179 /* Notify our caller that we're done. */
180 mutex_enter(&xcp->excl);
181 cv_signal(&xcp->excl_cv);
182 mutex_exit(&xcp->excl);
183
184 /* Clear our binding to CPU 0 */
185 thread_affinity_clear(curthread);
186
187 }
188
189 static void
190 xenconssetup_add_avintr(struct xencons *xcp)
191 {
192 xenconssetup_avintr(xcp, B_TRUE);
193 }
194
195 static void
196 xenconssetup_rem_avintr(struct xencons *xcp)
197 {
198 xenconssetup_avintr(xcp, B_FALSE);
199 }
200
201 static int
202 xenconsdetach(dev_info_t *devi, ddi_detach_cmd_t cmd)
203 {
204 int instance;
205 struct xencons *xcp;
206
207 if (cmd != DDI_DETACH && cmd != DDI_SUSPEND)
208 return (DDI_FAILURE);
209
210 if (cmd == DDI_SUSPEND) {
211 ddi_remove_intr(devi, 0, NULL);
212 return (DDI_SUCCESS);
213 }
214
215 /*
216 * We should never try to detach the console driver on a domU
217 * because it should always be held open
218 */
219 ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
220 if (!DOMAIN_IS_INITDOMAIN(xen_info))
221 return (DDI_FAILURE);
222
223 instance = ddi_get_instance(devi); /* find out which unit */
224
225 xcp = ddi_get_soft_state(xencons_soft_state, instance);
226 if (xcp == NULL)
227 return (DDI_FAILURE);
228
229 /*
230 * Cleanup our interrupt bindings. For more info on why we
231 * do this in a separate thread, see the comments for when we
232 * set up the interrupt bindings.
233 */
234 xencons_console = NULL;
235 mutex_enter(&xcp->excl);
236 (void) taskq_dispatch(system_taskq,
237 (void (*)(void *))xenconssetup_rem_avintr, xcp, TQ_SLEEP);
238 cv_wait(&xcp->excl_cv, &xcp->excl);
239 mutex_exit(&xcp->excl);
240
241 /* remove all minor device node(s) for this device */
242 ddi_remove_minor_node(devi, NULL);
243
244 /* free up state */
245 xencons_soft_state_free(xcp);
246 kmem_free(xencons_wbuf, XENCONS_WBUFSIZE);
247
248 DEBUGNOTE1(XENCONS_DEBUG_INIT, "xencons%d: shutdown complete",
249 instance);
250 return (DDI_SUCCESS);
251 }
252
253 static void
254 xenconssetup(struct xencons *xcp)
255 {
256 xcp->ifp = (volatile struct xencons_interface *)HYPERVISOR_console_page;
257
258 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
259 xencons_wbuf = kmem_alloc(XENCONS_WBUFSIZE, KM_SLEEP);
260
261 /*
262 * Activate the xen console virq. Note that xen requires
263 * that VIRQs be bound to CPU 0 when first created.
264 */
265 xcp->console_irq = ec_bind_virq_to_irq(VIRQ_CONSOLE, 0);
266
267 /*
268 * Ok. This is kinda ugly. We want to register an
269 * interrupt handler for the xen console virq, but
270 * VIRQs are xen specific and currently the DDI doesn't
271 * support binding to them.  So instead we need to use
272 * add_avintr().  To make things more complicated,
273 * we already had to bind the xen console VIRQ to CPU 0,
274 * and add_avintr() needs to be invoked on the same CPU
275 * where the VIRQ is bound, in this case on CPU 0. We
276 * could just temporarily bind ourselves to CPU 0, but
277 * we don't want to do that since this attach thread
278 * could have been invoked in a user thread context,
279 * in which case this thread could already have some
280 * pre-existing cpu binding. So to avoid changing our
281 * cpu binding we're going to use a taskq thread that
282 * will bind to CPU 0 and register our interrupt
283 * handler for us.
284 */
285 mutex_enter(&xcp->excl);
286 (void) taskq_dispatch(system_taskq,
287 (void (*)(void *))xenconssetup_add_avintr, xcp, TQ_SLEEP);
288 cv_wait(&xcp->excl_cv, &xcp->excl);
289 mutex_exit(&xcp->excl);
290 } else {
291 (void) xvdi_alloc_evtchn(xcp->dip);
292 xcp->evtchn = xvdi_get_evtchn(xcp->dip);
293 (void) ddi_add_intr(xcp->dip, 0, NULL, NULL, xenconsintr,
294 (caddr_t)xcp);
295 }
296 }
297
298 static int
299 xenconsattach(dev_info_t *devi, ddi_attach_cmd_t cmd)
300 {
301 int instance = ddi_get_instance(devi);
302 struct xencons *xcp;
303 int ret;
304
305 /* There can be only one. */
306 if (instance != 0)
307 return (DDI_FAILURE);
308
309 switch (cmd) {
310 case DDI_RESUME:
311 xcp = xencons_console;
312 xenconssetup(xcp);
313 return (DDI_SUCCESS);
314 case DDI_ATTACH:
315 break;
316 default:
317 return (DDI_FAILURE);
318 }
319
320 ret = ddi_soft_state_zalloc(xencons_soft_state, instance);
321 if (ret != DDI_SUCCESS)
322 return (DDI_FAILURE);
323 xcp = ddi_get_soft_state(xencons_soft_state, instance);
324 ASSERT(xcp != NULL); /* can't fail - we only just allocated it */
325
326 /*
327 * Set up the other components of the xencons structure for this port.
328 */
329 xcp->unit = instance;
330 xcp->dip = devi;
331
332 /* Fill in the polled I/O structure. */
333 xcp->polledio.cons_polledio_version = CONSPOLLEDIO_V0;
334 xcp->polledio.cons_polledio_argument = (cons_polledio_arg_t)xcp;
335 xcp->polledio.cons_polledio_putchar = xenconsputchar;
336 xcp->polledio.cons_polledio_getchar = xenconsgetchar;
337 xcp->polledio.cons_polledio_ischar = xenconsischar;
338 xcp->polledio.cons_polledio_enter = NULL;
339 xcp->polledio.cons_polledio_exit = NULL;
340
341 /*
342 * Initialize the asyncline structure, which holds the TTY protocol-private
343 * data, before enabling interrupts.
344 */
345 xcp->priv = kmem_zalloc(sizeof (struct asyncline), KM_SLEEP);
346 xcp->priv->async_common = xcp;
347 cv_init(&xcp->priv->async_flags_cv, NULL, CV_DRIVER, NULL);
348
349 /* Initialize mutexes before accessing the interface. */
350 mutex_init(&xcp->excl, NULL, MUTEX_DRIVER, NULL);
351 cv_init(&xcp->excl_cv, NULL, CV_DEFAULT, NULL);
352
353 /* create minor device node for this device */
354 ret = ddi_create_minor_node(devi, "xencons", S_IFCHR, instance,
355 DDI_NT_SERIAL, 0);
356 if (ret != DDI_SUCCESS) {
357 ddi_remove_minor_node(devi, NULL);
358 xencons_soft_state_free(xcp);
359 return (DDI_FAILURE);
360 }
361
362 ddi_report_dev(devi);
363 xencons_console = xcp;
364 xenconssetup(xcp);
365 DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dattach: done\n", instance);
366 return (DDI_SUCCESS);
367 }
368
369 /*ARGSUSED*/
370 static int
371 xenconsinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg,
372 void **result)
373 {
374 dev_t dev = (dev_t)arg;
375 int instance, error;
376 struct xencons *xcp;
377
378 instance = getminor(dev);
379 xcp = ddi_get_soft_state(xencons_soft_state, instance);
380 if (xcp == NULL)
381 return (DDI_FAILURE);
382
383 switch (infocmd) {
384 case DDI_INFO_DEVT2DEVINFO:
385 if (xcp->dip == NULL)
386 error = DDI_FAILURE;
387 else {
388 *result = (void *) xcp->dip;
389 error = DDI_SUCCESS;
390 }
391 break;
392 case DDI_INFO_DEVT2INSTANCE:
393 *result = (void *)(intptr_t)instance;
394 error = DDI_SUCCESS;
395 break;
396 default:
397 error = DDI_FAILURE;
398 }
399 return (error);
400 }
401
402 /* xencons_soft_state_free - local wrapper for ddi_soft_state_free(9F) */
403
404 static void
405 xencons_soft_state_free(struct xencons *xcp)
406 {
407 mutex_destroy(&xcp->excl);
408 cv_destroy(&xcp->excl_cv);
409 kmem_free(xcp->priv, sizeof (struct asyncline));
410 ddi_soft_state_free(xencons_soft_state, xcp->unit);
411 }
412
413 /*ARGSUSED*/
414 static int
415 xenconsopen(queue_t *rq, dev_t *dev, int flag, int sflag, cred_t *cr)
416 {
417 struct xencons *xcp;
418 struct asyncline *async;
419 int unit;
420
421 unit = getminor(*dev);
422 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dopen\n", unit);
423 xcp = ddi_get_soft_state(xencons_soft_state, unit);
424 if (xcp == NULL)
425 return (ENXIO); /* unit not configured */
426 async = xcp->priv;
427 mutex_enter(&xcp->excl);
428
429 again:
430
431 if ((async->async_flags & ASYNC_ISOPEN) == 0) {
432 async->async_ttycommon.t_iflag = 0;
433 async->async_ttycommon.t_iocpending = NULL;
434 async->async_ttycommon.t_size.ws_row = 0;
435 async->async_ttycommon.t_size.ws_col = 0;
436 async->async_ttycommon.t_size.ws_xpixel = 0;
437 async->async_ttycommon.t_size.ws_ypixel = 0;
438 async->async_dev = *dev;
439 async->async_wbufcid = 0;
440
441 async->async_startc = CSTART;
442 async->async_stopc = CSTOP;
443 } else if ((async->async_ttycommon.t_flags & TS_XCLUDE) &&
444 secpolicy_excl_open(cr) != 0) {
445 mutex_exit(&xcp->excl);
446 return (EBUSY);
447 }
448
449 async->async_ttycommon.t_flags |= TS_SOFTCAR;
450
451 async->async_ttycommon.t_readq = rq;
452 async->async_ttycommon.t_writeq = WR(rq);
453 rq->q_ptr = WR(rq)->q_ptr = (caddr_t)async;
454 mutex_exit(&xcp->excl);
455 /*
456 * Caution here -- qprocson sets the pointers that are used by canput
457 * called by xencons_rxint. ASYNC_ISOPEN must *not* be set until those
458 * pointers are valid.
459 */
460 qprocson(rq);
461 async->async_flags |= ASYNC_ISOPEN;
462 DEBUGCONT1(XENCONS_DEBUG_INIT, "xencons%dopen: done\n", unit);
463 return (0);
464 }
465
466
467 /*
468 * Close routine.
469 */
470 /*ARGSUSED*/
471 static int
472 xenconsclose(queue_t *q, int flag, cred_t *credp)
473 {
474 struct asyncline *async;
475 struct xencons *xcp;
476 #ifdef DEBUG
477 int instance;
478 #endif
479
480 async = (struct asyncline *)q->q_ptr;
481 ASSERT(async != NULL);
482 xcp = async->async_common;
483 #ifdef DEBUG
484 instance = xcp->unit;
485 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose\n", instance);
486 #endif
487
488 mutex_enter(&xcp->excl);
489 async->async_flags |= ASYNC_CLOSING;
490
491 async->async_ocnt = 0;
492 if (async->async_xmitblk != NULL)
493 freeb(async->async_xmitblk);
494 async->async_xmitblk = NULL;
495
496 out:
497 ttycommon_close(&async->async_ttycommon);
498
499 /*
500 * Cancel outstanding "bufcall" request.
501 */
502 if (async->async_wbufcid != 0) {
503 unbufcall(async->async_wbufcid);
504 async->async_wbufcid = 0;
505 }
506
507 /* Note that qprocsoff can't be done until after interrupts are off */
508 qprocsoff(q);
509 q->q_ptr = WR(q)->q_ptr = NULL;
510 async->async_ttycommon.t_readq = NULL;
511 async->async_ttycommon.t_writeq = NULL;
512
513 /*
514 * Clear out device state, except persistent device property flags.
515 */
516 async->async_flags = 0;
517 cv_broadcast(&async->async_flags_cv);
518 mutex_exit(&xcp->excl);
519
520 DEBUGCONT1(XENCONS_DEBUG_CLOSE, "xencons%dclose: done\n", instance);
521 return (0);
522 }
523
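/*
 * Index into the receive buffer.  In dom0 the characters were read into a
 * flat local buffer by HYPERVISOR_console_io(), so the index is used as-is;
 * in a domU the characters live in the shared ring's "in" buffer, so the
 * free-running index must be masked to the ring size.
 */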
524 #define INBUF_IX(ix, ifp) (DOMAIN_IS_INITDOMAIN(xen_info) ? \
525 (ix) : MASK_XENCONS_IDX((ix), (ifp)->in))
526
527 /*
528 * Handle a xen console rx interrupt.
529 */
530 /*ARGSUSED*/
531 static void
532 xencons_rxint(struct xencons *xcp)
533 {
534 struct asyncline *async;
535 short cc;
536 mblk_t *bp;
537 queue_t *q;
538 uchar_t c, buf[16];
539 uchar_t *cp;
540 tty_common_t *tp;
541 int instance;
542 volatile struct xencons_interface *ifp;
543 XENCONS_RING_IDX cons, prod;
544
545 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_rxint\n");
546
547 loop:
548 mutex_enter(&xcp->excl);
549
550 instance = xcp->unit;
551
552 /* sanity check if we should bail */
553 if (xencons_console == NULL) {
554 mutex_exit(&xcp->excl);
555 DEBUGCONT1(XENCONS_DEBUG_PROCS,
556 "xencons%d_rxint: xencons_console is NULL\n",
557 instance);
558 goto out;
559 }
560
561 async = xcp->priv;
562 ifp = xcp->ifp;
563 tp = &async->async_ttycommon;
564 q = tp->t_readq;
565
566 if (async->async_flags & ASYNC_OUT_FLW_RESUME) {
567 xcasync_start(async);
568 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
569 }
570
571 /*
572 * If data is available, send it up the stream if there's
573 * somebody listening.
574 */
575 if (!(async->async_flags & ASYNC_ISOPEN)) {
576 mutex_exit(&xcp->excl);
577 goto out;
578 }
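	/*
	 * Fetch pending input: dom0 reads up to 16 bytes directly from the
	 * hypervisor, while a domU consumes bytes from the shared input ring.
	 */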
579 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
580 cc = HYPERVISOR_console_io(CONSOLEIO_read, 16, (char *)buf);
581 cp = buf;
582 cons = 0;
583 } else {
584 cons = ifp->in_cons;
585 prod = ifp->in_prod;
586
587 cc = prod - cons;
588 cp = (uchar_t *)ifp->in;
589 }
590 if (cc <= 0) {
591 mutex_exit(&xcp->excl);
592 goto out;
593 }
594
595 /*
596 * Check for character break sequence.
597 *
598 * Note that normally asy drivers only check for a character sequence
599 * if abort_enable == KIOCABORTALTERNATE and otherwise use a break
600 * sensed on the line to do an abort_sequence_enter. Since the
601 * hypervisor does not use a real chip for the console we default to
602 * using the alternate sequence.
603 */
604 if ((abort_enable == KIOCABORTENABLE) && (xcp->flags & ASY_CONSOLE)) {
605 XENCONS_RING_IDX i;
606
607 for (i = 0; i < cc; i++) {
608 c = cp[INBUF_IX(cons + i, ifp)];
609 if (abort_charseq_recognize(c)) {
610 /*
611 * Eat abort seq, it's not a valid debugger
612 * command.
613 */
614 if (!DOMAIN_IS_INITDOMAIN(xen_info)) {
615 membar_producer();
616 ifp->in_cons = cons + i;
617 } else {
618 cons += i;
619 }
620 abort_sequence_enter((char *)NULL);
621 /*
622 * Back from debugger, resume normal processing
623 */
624 mutex_exit(&xcp->excl);
625 goto loop;
626 }
627 }
628 }
629
630 if (!canput(q)) {
631 if (!(async->async_inflow_source & IN_FLOW_STREAMS)) {
632 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
633 IN_FLOW_STREAMS);
634 }
635 mutex_exit(&xcp->excl);
636 goto out;
637 }
638 if (async->async_inflow_source & IN_FLOW_STREAMS) {
639 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
640 IN_FLOW_STREAMS);
641 }
642 DEBUGCONT2(XENCONS_DEBUG_INPUT,
643 "xencons%d_rxint: %d char(s) in queue.\n", instance, cc);
644 if (!(bp = allocb(cc, BPRI_MED))) {
645 mutex_exit(&xcp->excl);
646 ttycommon_qfull(&async->async_ttycommon, q);
647 goto out;
648 }
649 do {
650 c = cp[INBUF_IX(cons++, ifp)];
651 /*
652 * We handle the XON/XOFF characters if IXON is set,
653 * but if the received character is _POSIX_VDISABLE,
654 * we leave it to the upper-level module.
655 */
656 if (tp->t_iflag & IXON) {
657 if ((c == async->async_stopc) &&
658 (c != _POSIX_VDISABLE)) {
659 xcasync_flowcontrol_sw_output(xcp, FLOW_STOP);
660 continue;
661 } else if ((c == async->async_startc) &&
662 (c != _POSIX_VDISABLE)) {
663 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
664 continue;
665 }
666 if ((tp->t_iflag & IXANY) &&
667 (async->async_flags & ASYNC_SW_OUT_FLW)) {
668 xcasync_flowcontrol_sw_output(xcp, FLOW_START);
669 }
670 }
671 *bp->b_wptr++ = c;
672 } while (--cc);
673 membar_producer();
674 if (!DOMAIN_IS_INITDOMAIN(xen_info))
675 ifp->in_cons = cons;
676 mutex_exit(&xcp->excl);
677 if (bp->b_wptr > bp->b_rptr) {
678 if (!canput(q)) {
679 xenconserror(CE_NOTE, "xencons%d: local queue full",
680 instance);
681 freemsg(bp);
682 } else
683 (void) putq(q, bp);
684 } else
685 freemsg(bp);
686 if (DOMAIN_IS_INITDOMAIN(xen_info))
687 goto loop;
688 out:
689 DEBUGCONT1(XENCONS_DEBUG_PROCS, "xencons%d_rxint: done\n", instance);
690 if (!DOMAIN_IS_INITDOMAIN(xen_info))
691 ec_notify_via_evtchn(xcp->evtchn);
692 }
693
694
695 /*
696 * Handle a xen console tx interrupt.
697 */
698 /*ARGSUSED*/
699 static void
700 xencons_txint(struct xencons *xcp)
701 {
702 struct asyncline *async;
703
704 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint\n");
705
706 /*
707 * prevent recursive entry
708 */
709 if (mutex_owner(&xcp->excl) == curthread) {
710 goto out;
711 }
712
713 mutex_enter(&xcp->excl);
714 if (xencons_console == NULL) {
715 mutex_exit(&xcp->excl);
716 goto out;
717 }
718
719 /* make sure the device is open */
720 async = xcp->priv;
721 if ((async->async_flags & ASYNC_ISOPEN) != 0)
722 xcasync_start(async);
723
724 mutex_exit(&xcp->excl);
725 out:
726 DEBUGCONT0(XENCONS_DEBUG_PROCS, "xencons_txint: done\n");
727 }
728
729
730 /*
731 * Get an event when input ring becomes not empty or output ring becomes not
732 * full.
733 */
734 static uint_t
735 xenconsintr(caddr_t arg)
736 {
737 struct xencons *xcp = (struct xencons *)arg;
738 volatile struct xencons_interface *ifp = xcp->ifp;
739
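	/*
	 * A non-empty input ring (in_prod != in_cons) means there is data to
	 * receive; output space below the ring size means previously blocked
	 * transmit data may now be pushed out.
	 */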
740 if (ifp->in_prod != ifp->in_cons)
741 xencons_rxint(xcp);
742 if (ifp->out_prod - ifp->out_cons < sizeof (ifp->out))
743 xencons_txint(xcp);
744 return (DDI_INTR_CLAIMED);
745 }
746
747 /*
748 * Console interrupt routine for privileged domains
749 */
750 static uint_t
751 xenconsintr_priv(caddr_t arg, caddr_t arg1 __unused)
752 {
753 struct xencons *xcp = (struct xencons *)arg;
754
755 xencons_rxint(xcp);
756 xencons_txint(xcp);
757 return (DDI_INTR_CLAIMED);
758 }
759
760 /*
761 * Start output on a line, unless it's busy, frozen, or otherwise.
762 */
763 /*ARGSUSED*/
764 static void
765 xcasync_start(struct asyncline *async)
766 {
767 struct xencons *xcp = async->async_common;
768 int cc;
769 queue_t *q;
770 mblk_t *bp;
771 int len, space, blen;
772 mblk_t *nbp;
773
774 #ifdef DEBUG
775 int instance = xcp->unit;
776
777 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_nstart\n", instance);
778 #endif
779 ASSERT(mutex_owned(&xcp->excl));
780
781 /*
782 * Check only pended sw input flow control.
783 */
784 domore:
785 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_CHECK, IN_FLOW_NULL);
786
787 if ((q = async->async_ttycommon.t_writeq) == NULL) {
788 return; /* not attached to a stream */
789 }
790
791 for (;;) {
792 if ((bp = getq(q)) == NULL)
793 return; /* no data to transmit */
794
795 /*
796 * We have a message block to work on.
797 * Check whether it's a break, a delay, or an ioctl (the latter
798 * occurs if the ioctl in question was waiting for the output
799 * to drain). If it's one of those, process it immediately.
800 */
801 switch (bp->b_datap->db_type) {
802
803 case M_IOCTL:
804 /*
805 * This ioctl was waiting for the output ahead of
806 * it to drain; obviously, it has. Do it, and
807 * then grab the next message after it.
808 */
809 mutex_exit(&xcp->excl);
810 xcasync_ioctl(async, q, bp);
811 mutex_enter(&xcp->excl);
812 continue;
813 }
814
815 while (bp != NULL && (cc = bp->b_wptr - bp->b_rptr) == 0) {
816 nbp = bp->b_cont;
817 freeb(bp);
818 bp = nbp;
819 }
820 if (bp != NULL)
821 break;
822 }
823
824 /*
825 * We have data to transmit. If output is stopped, put
826 * it back and try again later.
827 */
828 if (async->async_flags & (ASYNC_SW_OUT_FLW | ASYNC_STOPPED)) {
829 (void) putbq(q, bp);
830 return;
831 }
832
833
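	/*
	 * Two transmit paths: dom0 copies up to XENCONS_WBUFSIZE bytes into
	 * xencons_wbuf and writes them synchronously with
	 * HYPERVISOR_console_io(), looping until the message is drained;
	 * a domU copies into the shared output ring and notifies the
	 * backend through its event channel.
	 */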
834 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
835 len = 0;
836 space = XENCONS_WBUFSIZE;
837 while (bp != NULL && space) {
838 blen = bp->b_wptr - bp->b_rptr;
839 cc = min(blen, space);
840 bcopy(bp->b_rptr, &xencons_wbuf[len], cc);
841 bp->b_rptr += cc;
842 if (cc == blen) {
843 nbp = bp->b_cont;
844 freeb(bp);
845 bp = nbp;
846 }
847 space -= cc;
848 len += cc;
849 }
850 mutex_exit(&xcp->excl);
851 (void) HYPERVISOR_console_io(CONSOLEIO_write, len,
852 xencons_wbuf);
853 mutex_enter(&xcp->excl);
854 if (bp != NULL)
855 (void) putbq(q, bp); /* not done with this msg yet */
856 /*
857 * There are no completion interrupts when using the
858 * HYPERVISOR_console_io call to write console data
859 * so we loop here till we have sent all the data to the
860 * hypervisor.
861 */
862 goto domore;
863 } else {
864 volatile struct xencons_interface *ifp = xcp->ifp;
865 XENCONS_RING_IDX cons, prod;
866
867 cons = ifp->out_cons;
868 prod = ifp->out_prod;
869 membar_enter();
870 while (bp != NULL && ((prod - cons) < sizeof (ifp->out))) {
871 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] =
872 *bp->b_rptr++;
873 if (bp->b_rptr == bp->b_wptr) {
874 nbp = bp->b_cont;
875 freeb(bp);
876 bp = nbp;
877 }
878 }
879 membar_producer();
880 ifp->out_prod = prod;
881 ec_notify_via_evtchn(xcp->evtchn);
882 if (bp != NULL)
883 (void) putbq(q, bp); /* not done with this msg yet */
884 }
885 }
886
887
888 /*
889 * Process an "ioctl" message sent down to us.
890 * Note that we don't need to get any locks until we are ready to access
891 * the hardware. Nothing we access until then is going to be altered
892 * outside of the STREAMS framework, so we should be safe.
893 */
894 static void
895 xcasync_ioctl(struct asyncline *async, queue_t *wq, mblk_t *mp)
896 {
897 struct xencons *xcp = async->async_common;
898 tty_common_t *tp = &async->async_ttycommon;
899 struct iocblk *iocp;
900 unsigned datasize;
901 int error = 0;
902
903 #ifdef DEBUG
904 int instance = xcp->unit;
905
906 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl\n", instance);
907 #endif
908
909 if (tp->t_iocpending != NULL) {
910 /*
911 * We were holding an "ioctl" response pending the
912 * availability of an "mblk" to hold data to be passed up;
913 * another "ioctl" came through, which means that "ioctl"
914 * must have timed out or been aborted.
915 */
916 freemsg(async->async_ttycommon.t_iocpending);
917 async->async_ttycommon.t_iocpending = NULL;
918 }
919
920 iocp = (struct iocblk *)mp->b_rptr;
921
922 /*
923 * For TIOCMGET and the PPS ioctls, do NOT call ttycommon_ioctl()
924 * because this function frees up the message block (mp->b_cont) that
925 * contains the user location where we pass back the results.
926 *
927 * Similarly, CONSOPENPOLLEDIO needs ioc_count, which ttycommon_ioctl
928 * zaps. We know that ttycommon_ioctl doesn't know any CONS*
929 * ioctls, so keep the others safe too.
930 */
931 DEBUGCONT2(XENCONS_DEBUG_IOCTL, "async%d_ioctl: %s\n",
932 instance,
933 iocp->ioc_cmd == TIOCMGET ? "TIOCMGET" :
934 iocp->ioc_cmd == TIOCMSET ? "TIOCMSET" :
935 iocp->ioc_cmd == TIOCMBIS ? "TIOCMBIS" :
936 iocp->ioc_cmd == TIOCMBIC ? "TIOCMBIC" : "other");
937
938 switch (iocp->ioc_cmd) {
939 case TIOCMGET:
940 case TIOCGPPS:
941 case TIOCSPPS:
942 case TIOCGPPSEV:
943 case CONSOPENPOLLEDIO:
944 case CONSCLOSEPOLLEDIO:
945 case CONSSETABORTENABLE:
946 case CONSGETABORTENABLE:
947 error = -1; /* Do Nothing */
948 break;
949 default:
950
951 /*
952 * The only way in which "ttycommon_ioctl" can fail is if the
953 * "ioctl" requires a response containing data to be returned
954 * to the user, and no mblk could be allocated for the data.
955 * No such "ioctl" alters our state. Thus, we always go ahead
956 * and do any state-changes the "ioctl" calls for. If we
957 * couldn't allocate the data, "ttycommon_ioctl" has stashed
958 * the "ioctl" away safely, so we just call "bufcall" to
959 * request that we be called back when we stand a better
960 * chance of allocating the data.
961 */
962 if ((datasize = ttycommon_ioctl(tp, wq, mp, &error)) != 0) {
963 if (async->async_wbufcid)
964 unbufcall(async->async_wbufcid);
965 async->async_wbufcid = bufcall(datasize, BPRI_HI,
966 (void (*)(void *)) xcasync_reioctl,
967 (void *)(intptr_t)async->async_common->unit);
968 return;
969 }
970 }
971
972 mutex_enter(&xcp->excl);
973
974 if (error == 0) {
975 /*
976 * "ttycommon_ioctl" did most of the work; we just use the
977 * data it set up.
978 */
979 switch (iocp->ioc_cmd) {
980
981 case TCSETS:
982 case TCSETSF:
983 case TCSETSW:
984 case TCSETA:
985 case TCSETAW:
986 case TCSETAF:
987 break;
988 }
989 } else if (error < 0) {
990 /*
991 * "ttycommon_ioctl" didn't do anything; we process it here.
992 */
993 error = 0;
994 switch (iocp->ioc_cmd) {
995
996 case TCSBRK:
997 error = miocpullup(mp, sizeof (int));
998 break;
999
1000 case TIOCSBRK:
1001 mioc2ack(mp, NULL, 0, 0);
1002 break;
1003
1004 case TIOCCBRK:
1005 mioc2ack(mp, NULL, 0, 0);
1006 break;
1007
1008 case CONSOPENPOLLEDIO:
1009 error = miocpullup(mp, sizeof (cons_polledio_arg_t));
1010 if (error != 0)
1011 break;
1012
1013 *(cons_polledio_arg_t *)mp->b_cont->b_rptr =
1014 (cons_polledio_arg_t)&xcp->polledio;
1015
1016 mp->b_datap->db_type = M_IOCACK;
1017 break;
1018
1019 case CONSCLOSEPOLLEDIO:
1020 mp->b_datap->db_type = M_IOCACK;
1021 iocp->ioc_error = 0;
1022 iocp->ioc_rval = 0;
1023 break;
1024
1025 case CONSSETABORTENABLE:
1026 error = secpolicy_console(iocp->ioc_cr);
1027 if (error != 0)
1028 break;
1029
1030 if (iocp->ioc_count != TRANSPARENT) {
1031 error = EINVAL;
1032 break;
1033 }
1034
1035 if (*(intptr_t *)mp->b_cont->b_rptr)
1036 xcp->flags |= ASY_CONSOLE;
1037 else
1038 xcp->flags &= ~ASY_CONSOLE;
1039
1040 mp->b_datap->db_type = M_IOCACK;
1041 iocp->ioc_error = 0;
1042 iocp->ioc_rval = 0;
1043 break;
1044
1045 case CONSGETABORTENABLE:
1046 /*CONSTANTCONDITION*/
1047 ASSERT(sizeof (boolean_t) <= sizeof (boolean_t *));
1048 /*
1049 * Store the return value right in the payload
1050 * we were passed. Crude.
1051 */
1052 mcopyout(mp, NULL, sizeof (boolean_t), NULL, NULL);
1053 *(boolean_t *)mp->b_cont->b_rptr =
1054 (xcp->flags & ASY_CONSOLE) != 0;
1055 break;
1056
1057 default:
1058 /*
1059 * If we don't understand it, it's an error. NAK it.
1060 */
1061 error = EINVAL;
1062 break;
1063 }
1064 }
1065 if (error != 0) {
1066 iocp->ioc_error = error;
1067 mp->b_datap->db_type = M_IOCNAK;
1068 }
1069 mutex_exit(&xcp->excl);
1070 qreply(wq, mp);
1071 DEBUGCONT1(XENCONS_DEBUG_PROCS, "async%d_ioctl: done\n", instance);
1072 }
1073
1074 static int
1075 xenconsrsrv(queue_t *q)
1076 {
1077 mblk_t *bp;
1078
1079 while (canputnext(q) && (bp = getq(q)))
1080 putnext(q, bp);
1081 return (0);
1082 }
1083
1084 /*
1085 * Put procedure for write queue.
1086 * Respond to M_STOP, M_START, M_IOCTL, and M_FLUSH messages here;
1087 * set the flow control character for M_STOPI and M_STARTI messages;
1088 * queue up M_BREAK, M_DELAY, and M_DATA messages for processing
1089 * by the start routine, and then call the start routine; discard
1090 * everything else. Note that this driver does not incorporate any
1091 * mechanism to handle the canonicalization process;
1092 * it expects those functions to be handled in upper module(s),
1093 * such as ldterm.
1094 */
1095 static int
1096 xenconswput(queue_t *q, mblk_t *mp)
1097 {
1098 struct asyncline *async;
1099 struct xencons *xcp;
1100
1101 async = (struct asyncline *)q->q_ptr;
1102 xcp = async->async_common;
1103
1104 switch (mp->b_datap->db_type) {
1105
1106 case M_STOP:
1107 mutex_enter(&xcp->excl);
1108 async->async_flags |= ASYNC_STOPPED;
1109 mutex_exit(&xcp->excl);
1110 freemsg(mp);
1111 break;
1112
1113 case M_START:
1114 mutex_enter(&xcp->excl);
1115 if (async->async_flags & ASYNC_STOPPED) {
1116 async->async_flags &= ~ASYNC_STOPPED;
1117 xcasync_start(async);
1118 }
1119 mutex_exit(&xcp->excl);
1120 freemsg(mp);
1121 break;
1122
1123 case M_IOCTL:
1124 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) {
1125
1126 case TCSETSW:
1127 case TCSETSF:
1128 case TCSETAW:
1129 case TCSETAF:
1130 /*
1131 * The changes do not take effect until all
1132 * output queued before them is drained.
1133 * Put this message on the queue, so that
1134 * "xcasync_start" will see it when it's done
1135 * with the output before it. Poke the
1136 * start routine, just in case.
1137 */
1138 (void) putq(q, mp);
1139 mutex_enter(&xcp->excl);
1140 xcasync_start(async);
1141 mutex_exit(&xcp->excl);
1142 break;
1143
1144 default:
1145 /*
1146 * Do it now.
1147 */
1148 xcasync_ioctl(async, q, mp);
1149 break;
1150 }
1151 break;
1152
1153 case M_FLUSH:
1154 if (*mp->b_rptr & FLUSHW) {
1155 mutex_enter(&xcp->excl);
1156 /*
1157 * Flush our write queue.
1158 */
1159 flushq(q, FLUSHDATA); /* XXX doesn't flush M_DELAY */
1160 if (async->async_xmitblk != NULL) {
1161 freeb(async->async_xmitblk);
1162 async->async_xmitblk = NULL;
1163 }
1164 mutex_exit(&xcp->excl);
1165 *mp->b_rptr &= ~FLUSHW; /* it has been flushed */
1166 }
1167 if (*mp->b_rptr & FLUSHR) {
1168 flushq(RD(q), FLUSHDATA);
1169 qreply(q, mp); /* give the read queues a crack at it */
1170 } else {
1171 freemsg(mp);
1172 }
1173
1174 /*
1175 * We must make sure we process messages that survive the
1176 * write-side flush.
1177 */
1178 mutex_enter(&xcp->excl);
1179 xcasync_start(async);
1180 mutex_exit(&xcp->excl);
1181 break;
1182
1183 case M_BREAK:
1184 case M_DELAY:
1185 case M_DATA:
1186 /*
1187 * Queue the message up to be transmitted,
1188 * and poke the start routine.
1189 */
1190 (void) putq(q, mp);
1191 mutex_enter(&xcp->excl);
1192 xcasync_start(async);
1193 mutex_exit(&xcp->excl);
1194 break;
1195
1196 case M_STOPI:
1197 mutex_enter(&xcp->excl);
1199 if (!(async->async_inflow_source & IN_FLOW_USER)) {
1200 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_STOP,
1201 IN_FLOW_USER);
1202 }
1203 mutex_exit(&xcp->excl);
1205 freemsg(mp);
1206 break;
1207
1208 case M_STARTI:
1209 mutex_enter(&xcp->excl);
1211 if (async->async_inflow_source & IN_FLOW_USER) {
1212 (void) xcasync_flowcontrol_sw_input(xcp, FLOW_START,
1213 IN_FLOW_USER);
1214 }
1215 mutex_exit(&xcp->excl);
1217 freemsg(mp);
1218 break;
1219
1220 case M_CTL:
1221 if (MBLKL(mp) >= sizeof (struct iocblk) &&
1222 ((struct iocblk *)mp->b_rptr)->ioc_cmd == MC_POSIXQUERY) {
1223 ((struct iocblk *)mp->b_rptr)->ioc_cmd = MC_HAS_POSIX;
1224 qreply(q, mp);
1225 } else {
1226 freemsg(mp);
1227 }
1228 break;
1229
1230 default:
1231 freemsg(mp);
1232 break;
1233 }
1234 return (0);
1235 }
1236
1237 /*
1238 * Retry an "ioctl", now that "bufcall" claims we may be able to allocate
1239 * the buffer we need.
1240 */
1241 static void
1242 xcasync_reioctl(void *unit)
1243 {
1244 int instance = (uintptr_t)unit;
1245 struct asyncline *async;
1246 struct xencons *xcp;
1247 queue_t *q;
1248 mblk_t *mp;
1249
1250 xcp = ddi_get_soft_state(xencons_soft_state, instance);
1251 ASSERT(xcp != NULL);
1252 async = xcp->priv;
1253
1254 /*
1255 * The bufcall is no longer pending.
1256 */
1257 mutex_enter(&xcp->excl);
1258 async->async_wbufcid = 0;
1259 if ((q = async->async_ttycommon.t_writeq) == NULL) {
1260 mutex_exit(&xcp->excl);
1261 return;
1262 }
1263 if ((mp = async->async_ttycommon.t_iocpending) != NULL) {
1264 /* not pending any more */
1265 async->async_ttycommon.t_iocpending = NULL;
1266 mutex_exit(&xcp->excl);
1267 xcasync_ioctl(async, q, mp);
1268 } else
1269 mutex_exit(&xcp->excl);
1270 }
1271
1272
1273 /*
1274 * debugger/console support routines.
1275 */
1276
1277 /*
1278 * put a character out
1279 * Do not use interrupts. If char is LF, put out CR, LF.
1280 */
1281 /*ARGSUSED*/
1282 static void
1283 xenconsputchar(cons_polledio_arg_t arg, uchar_t c)
1284 {
1285 struct xencons *xcp = xencons_console;
1286 volatile struct xencons_interface *ifp = xcp->ifp;
1287 XENCONS_RING_IDX prod;
1288
1289 if (c == '\n')
1290 xenconsputchar(arg, '\r');
1291
1292 /*
1293 * domain 0 can use the console I/O...
1294 */
1295 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1296 char buffer[1];
1297
1298 buffer[0] = c;
1299 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, buffer);
1300 return;
1301 }
1302
1303 /*
1304 * domU has to go through dom0 virtual console.
1305 */
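	/*
	 * The ring indices are free-running counters, so the output ring is
	 * full when out_prod - out_cons equals the ring size; yield to the
	 * hypervisor until the backend consumes a byte.
	 */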
1306 while (ifp->out_prod - ifp->out_cons == sizeof (ifp->out))
1307 (void) HYPERVISOR_yield();
1308
1309 prod = ifp->out_prod;
1310 ifp->out[MASK_XENCONS_IDX(prod++, ifp->out)] = c;
1311 membar_producer();
1312 ifp->out_prod = prod;
1313 ec_notify_via_evtchn(xcp->evtchn);
1314 }
1315
1316 /*
1317 * See if there's a character available. If no character is
1318 * available, return 0. Run in polled mode, no interrupts.
1319 */
1320 static boolean_t
1321 xenconsischar(cons_polledio_arg_t arg)
1322 {
1323 struct xencons *xcp = (struct xencons *)arg;
1324 volatile struct xencons_interface *ifp = xcp->ifp;
1325
1326 if (xcp->polldix < xcp->polllen)
1327 return (B_TRUE);
1328 /*
1329 * domain 0 can use the console I/O...
1330 */
1331 xcp->polldix = 0;
1332 xcp->polllen = 0;
1333 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1334 xcp->polllen = HYPERVISOR_console_io(CONSOLEIO_read, 1,
1335 (char *)xcp->pollbuf);
1336 return (xcp->polllen != 0);
1337 }
1338
1339 /*
1340 * domU has to go through virtual console device.
1341 */
1342 if (ifp->in_prod != ifp->in_cons) {
1343 XENCONS_RING_IDX cons;
1344
1345 cons = ifp->in_cons;
1346 membar_enter();
1347 xcp->pollbuf[0] = ifp->in[MASK_XENCONS_IDX(cons++, ifp->in)];
1348 membar_producer();
1349 ifp->in_cons = cons;
1350 xcp->polllen = 1;
1351 }
1352 return (xcp->polllen != 0);
1353 }
1354
1355 /*
1356 * Get a character. Run in polled mode, no interrupts.
1357 */
1358 static int
1359 xenconsgetchar(cons_polledio_arg_t arg)
1360 {
1361 struct xencons *xcp = (struct xencons *)arg;
1362
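	/*
	 * Block on the console event channel until xenconsischar() reports
	 * buffered input, then return the next byte from the poll buffer.
	 */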
1363 ec_wait_on_evtchn(xcp->evtchn, (int (*)(void *))xenconsischar, arg);
1364
1365 return (xcp->pollbuf[xcp->polldix++]);
1366 }
1367
1368 static void
1369 xenconserror(int level, const char *fmt, ...)
1370 {
1371 va_list adx;
1372 static time_t last;
1373 static const char *lastfmt;
1374 time_t now;
1375
1376 /*
1377 * Don't print the same error message too often.
1378 * Print the message only if we have not printed the
1379 * message within the last second.
1380 * Note that fmt cannot be a pointer to a string
1381 * stored on the stack.  The fmt pointer
1382 * must be in the data segment; otherwise lastfmt would point
1383 * to nonsense.
1384 */
1385 now = gethrestime_sec();
1386 if (last == now && lastfmt == fmt)
1387 return;
1388
1389 last = now;
1390 lastfmt = fmt;
1391
1392 va_start(adx, fmt);
1393 vcmn_err(level, fmt, adx);
1394 va_end(adx);
1395 }
1396
1397
1398 /*
1399 * Check for abort character sequence
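 * (carriage return, then '~', then Ctrl-B, per the sequence[] array below)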
1400 */
1401 static boolean_t
1402 abort_charseq_recognize(uchar_t ch)
1403 {
1404 static int state = 0;
1405 #define CNTRL(c) ((c)&037)
1406 static char sequence[] = { '\r', '~', CNTRL('b') };
1407
1408 if (ch == sequence[state]) {
1409 if (++state >= sizeof (sequence)) {
1410 state = 0;
1411 return (B_TRUE);
1412 }
1413 } else {
1414 state = (ch == sequence[0]) ? 1 : 0;
1415 }
1416 return (B_FALSE);
1417 }
1418
1419 /*
1420 * Flow control functions
1421 */
1422
1423 /*
1424 * Software output flow control
1425 * This function can be executed successfully in any situation.
1426 * It does not handle HW; it just changes the SW output flow control flag.
1427 * INPUT VALUE of onoff:
1428 * FLOW_START means to clear SW output flow control flag,
1429 * also set ASYNC_OUT_FLW_RESUME.
1430 * FLOW_STOP means to set SW output flow control flag,
1431 * also clear ASYNC_OUT_FLW_RESUME.
1432 */
1433 static void
1434 xcasync_flowcontrol_sw_output(struct xencons *xcp, async_flowc_action onoff)
1435 {
1436 struct asyncline *async = xcp->priv;
1437 int instance = xcp->unit;
1438
1439 ASSERT(mutex_owned(&xcp->excl));
1440
1441 if (!(async->async_ttycommon.t_iflag & IXON))
1442 return;
1443
1444 switch (onoff) {
1445 case FLOW_STOP:
1446 async->async_flags |= ASYNC_SW_OUT_FLW;
1447 async->async_flags &= ~ASYNC_OUT_FLW_RESUME;
1448 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1449 "xencons%d: output sflow stop\n", instance);
1450 break;
1451 case FLOW_START:
1452 async->async_flags &= ~ASYNC_SW_OUT_FLW;
1453 async->async_flags |= ASYNC_OUT_FLW_RESUME;
1454 DEBUGCONT1(XENCONS_DEBUG_SFLOW,
1455 "xencons%d: output sflow start\n", instance);
1456 break;
1457 default:
1458 break;
1459 }
1460 }
1461
1462 /*
1463 * Software input flow control
1464 * This function can execute software input flow control
1465 * INPUT VALUE of onoff:
1466 * FLOW_START means to send out a XON char
1467 * and clear SW input flow control flag.
1468 * FLOW_STOP means to send out a XOFF char
1469 * and set SW input flow control flag.
1470 * FLOW_CHECK means to check whether there is pending XON/XOFF
1471 * if it is true, send it out.
1472 * INPUT VALUE of type:
1473 * IN_FLOW_STREAMS means flow control is due to STREAMS
1474 * IN_FLOW_USER means flow control is due to user's commands
1475 * RETURN VALUE: B_FALSE means no flow control char is sent
1476 * B_TRUE means one flow control char is sent
1477 */
1478 static boolean_t
1479 xcasync_flowcontrol_sw_input(struct xencons *xcp, async_flowc_action onoff,
1480 int type)
1481 {
1482 struct asyncline *async = xcp->priv;
1483 int instance = xcp->unit;
1484 int rval = B_FALSE;
1485
1486 ASSERT(mutex_owned(&xcp->excl));
1487
1488 if (!(async->async_ttycommon.t_iflag & IXOFF))
1489 return (rval);
1490
1491 /*
1492 * If we get this far, then we know IXOFF is set.
1493 */
1494 switch (onoff) {
1495 case FLOW_STOP:
1496 async->async_inflow_source |= type;
1497
1498 /*
1499 * We'll send an XOFF character for each distinct input flow
1500 * control source that asks us to stop input.
1501 * If we have already sent one XOFF but FLOW_STOP comes in again,
1502 * the input flow control condition has become more serious,
1503 * so we send XOFF again.
1504 */
1505 if (async->async_inflow_source & (IN_FLOW_STREAMS |
1506 IN_FLOW_USER))
1507 async->async_flags |= ASYNC_SW_IN_FLOW |
1508 ASYNC_SW_IN_NEEDED;
1509 DEBUGCONT2(XENCONS_DEBUG_SFLOW, "xencons%d: input sflow stop, "
1510 "type = %x\n", instance, async->async_inflow_source);
1511 break;
1512 case FLOW_START:
1513 async->async_inflow_source &= ~type;
1514 if (async->async_inflow_source == 0) {
1515 async->async_flags = (async->async_flags &
1516 ~ASYNC_SW_IN_FLOW) | ASYNC_SW_IN_NEEDED;
1517 DEBUGCONT1(XENCONS_DEBUG_SFLOW, "xencons%d: "
1518 "input sflow start\n", instance);
1519 }
1520 break;
1521 default:
1522 break;
1523 }
1524
1525 if (async->async_flags & ASYNC_SW_IN_NEEDED) {
1526 /*
1527 * If we get this far, then we know we need to send out
1528 * XON or XOFF char.
1529 */
1530 char c;
1531
1532 rval = B_TRUE;
1533 c = (async->async_flags & ASYNC_SW_IN_FLOW) ?
1534 async->async_stopc : async->async_startc;
1535 if (DOMAIN_IS_INITDOMAIN(xen_info)) {
1536 (void) HYPERVISOR_console_io(CONSOLEIO_write, 1, &c);
1537 async->async_flags &= ~ASYNC_SW_IN_NEEDED;
1538 return (rval);
1539 } else {
1540 xenconsputchar(NULL, c);
1541 }
1542 }
1543 return (rval);
1544 }
1545
1546 struct module_info xencons_info = {
1547 0, /* mi_idnum */
1548 "xencons", /* mi_idname */
1549 0, /* mi_minpsz */
1550 INFPSZ, /* mi_maxpsz */
1551 4096, /* mi_hiwat */
1552 128 /* mi_lowat */
1553 };
1554
1555 static struct qinit xencons_rint = {
1556 putq, /* qi_putp */
1557 xenconsrsrv, /* qi_srvp */
1558 xenconsopen, /* qi_qopen */
1559 xenconsclose, /* qi_qclose */
1560 NULL, /* qi_qadmin */
1561 &xencons_info, /* qi_minfo */
1562 NULL /* qi_mstat */
1563 };
1564
1565 static struct qinit xencons_wint = {
1566 xenconswput, /* qi_putp */
1567 NULL, /* qi_srvp */
1568 NULL, /* qi_qopen */
1569 NULL, /* qi_qclose */
1570 NULL, /* qi_qadmin */
1571 &xencons_info, /* qi_minfo */
1572 NULL /* qi_mstat */
1573 };
1574
1575 struct streamtab xencons_str_info = {
1576 &xencons_rint, /* st_rdinit */
1577 &xencons_wint, /* st_wrinit */
1578 NULL, /* st_muxrinit */
1579 NULL /* st_muxwinit */
1580 };
1581
1582 static struct cb_ops cb_xencons_ops = {
1583 nodev, /* cb_open */
1584 nodev, /* cb_close */
1585 nodev, /* cb_strategy */
1586 nodev, /* cb_print */
1587 nodev, /* cb_dump */
1588 nodev, /* cb_read */
1589 nodev, /* cb_write */
1590 nodev, /* cb_ioctl */
1591 nodev, /* cb_devmap */
1592 nodev, /* cb_mmap */
1593 nodev, /* cb_segmap */
1594 nochpoll, /* cb_chpoll */
1595 ddi_prop_op, /* cb_prop_op */
1596 &xencons_str_info, /* cb_stream */
1597 D_MP /* cb_flag */
1598 };
1599
1600 struct dev_ops xencons_ops = {
1601 DEVO_REV, /* devo_rev */
1602 0, /* devo_refcnt */
1603 xenconsinfo, /* devo_getinfo */
1604 nulldev, /* devo_identify */
1605 nulldev, /* devo_probe */
1606 xenconsattach, /* devo_attach */
1607 xenconsdetach, /* devo_detach */
1608 nodev, /* devo_reset */
1609 &cb_xencons_ops, /* devo_cb_ops */
1610 NULL, /* devo_bus_ops */
1611 NULL, /* devo_power */
1612 ddi_quiesce_not_needed, /* devo_quiesce */
1613 };
1614
1615 static struct modldrv modldrv = {
1616 &mod_driverops, /* Type of module. This one is a driver */
1617 "virtual console driver",
1618 &xencons_ops, /* driver ops */
1619 };
1620
1621 static struct modlinkage modlinkage = {
1622 MODREV_1,
1623 (void *)&modldrv,
1624 NULL
1625 };
1626
1627 int
1628 _init(void)
1629 {
1630 int rv;
1631
1632 if ((rv = ddi_soft_state_init(&xencons_soft_state,
1633 sizeof (struct xencons), 1)) != 0)
1634 return (rv);
1635 if ((rv = mod_install(&modlinkage)) != 0) {
1636 ddi_soft_state_fini(&xencons_soft_state);
1637 return (rv);
1638 }
1639 DEBUGCONT2(XENCONS_DEBUG_INIT, "%s, debug = %x\n",
1640 modldrv.drv_linkinfo, debug);
1641 return (0);
1642 }
1643
1644 int
1645 _fini(void)
1646 {
1647 int rv;
1648
1649 if ((rv = mod_remove(&modlinkage)) != 0)
1650 return (rv);
1651
1652 ddi_soft_state_fini(&xencons_soft_state);
1653 return (0);
1654 }
1655
1656 int
1657 _info(struct modinfo *modinfop)
1658 {
1659 return (mod_info(&modlinkage, modinfop));
1660 }
1661