xref: /titanic_41/usr/src/uts/sun4u/io/rmc_comm.c (revision 74e20cfe817b82802b16fac8690dadcda76f54f5)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  *
26  * The "rmc_comm" driver provides access to the RMC so that its clients need
27  * not be concerned with the details of the access mechanism, which in this
28  * case is implemented via a packet-based protocol over a serial link via a
29  * 16550 compatible serial port.
30  */
31 
32 #pragma ident	"%Z%%M%	%I%	%E% SMI"
33 
34 /*
35  *  Header files
36  */
37 #include <sys/conf.h>
38 #include <sys/cyclic.h>
39 #include <sys/membar.h>
40 #include <sys/modctl.h>
41 #include <sys/strlog.h>
42 #include <sys/types.h>
43 #include <sys/sunddi.h>
44 #include <sys/ddi.h>
45 #include <sys/rmc_comm_dp_boot.h>
46 #include <sys/rmc_comm_dp.h>
47 #include <sys/rmc_comm_drvintf.h>
48 #include <sys/rmc_comm.h>
49 #include <sys/cpu_sgnblk_defs.h>
50 
51 /*
52  * Local definitions
53  */
54 
55 #define	ddi_driver_major(dip)	ddi_name_to_major(ddi_binding_name(dip))
56 
57 #define	MYNAME			"rmc_comm"
58 #define	NOMAJOR			(~(major_t)0)
59 #define	DUMMY_VALUE		(~(int8_t)0)
60 
61 /*
62  * Local data
63  */
64 static void *rmc_comm_statep;
65 static major_t rmc_comm_major = NOMAJOR;
66 static kmutex_t rmc_comm_attach_lock;
67 static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] =
68 {
69 	DDI_DEVICE_ATTR_V0,
70 	DDI_STRUCTURE_LE_ACC,
71 	DDI_STRICTORDER_ACC
72 };
73 static int watchdog_was_active;
74 extern int watchdog_activated;
75 extern int watchdog_enable;
76 
77 /*
78  * prototypes
79  */
80 
81 extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t);
82 static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t);
83 static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t);
84 static void sio_check_fault_status(struct rmc_comm_state *);
85 static boolean_t sio_data_ready(struct rmc_comm_state *);
86 static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t);
87 static uint_t rmc_comm_hi_intr(caddr_t);
88 static uint_t rmc_comm_softint(caddr_t);
89 static void rmc_comm_cyclic(void *);
90 static void rmc_comm_hw_reset(struct rmc_comm_state *);
91 static void rmc_comm_offline(struct rmc_comm_state *);
92 static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *);
93 static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int,
94     boolean_t, boolean_t, boolean_t);
95 static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t);
96 static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t);
97 
98 /*
99  * for client leaf drivers to register their desire for rmc_comm
100  * to stay attached
101  */
102 int
103 rmc_comm_register()
104 {
105 	struct rmc_comm_state *rcs;
106 
107 	mutex_enter(&rmc_comm_attach_lock);
108 	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
109 	if ((rcs == NULL) || (!rcs->is_attached)) {
110 		mutex_exit(&rmc_comm_attach_lock);
111 		return (DDI_FAILURE);
112 	}
113 	rcs->n_registrations++;
114 	mutex_exit(&rmc_comm_attach_lock);
115 	return (DDI_SUCCESS);
116 }
117 
118 void
119 rmc_comm_unregister()
120 {
121 	struct rmc_comm_state *rcs;
122 
123 	mutex_enter(&rmc_comm_attach_lock);
124 	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
125 	ASSERT(rcs != NULL);
126 	ASSERT(rcs->n_registrations != 0);
127 	rcs->n_registrations--;
128 	mutex_exit(&rmc_comm_attach_lock);
129 }
130 
131 /*
132  * to get the soft state structure of a specific instance
133  */
134 struct rmc_comm_state *
135 rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
136 {
137 	struct rmc_comm_state *rcs = NULL;
138 	dev_info_t *sdip = NULL;
139 	major_t dmaj = NOMAJOR;
140 
141 	if (dip != NULL) {
142 		/*
143 		 * Use the instance number from the <dip>; also,
144 		 * check that it really corresponds to this driver
145 		 */
146 		instance = ddi_get_instance(dip);
147 		dmaj = ddi_driver_major(dip);
148 		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
149 			rmc_comm_major = dmaj;
150 		else if (dmaj != rmc_comm_major) {
151 			cmn_err(CE_WARN,
152 			    "%s: major number mismatch (%d vs. %d) in %s(),"
153 			    "probably due to child misconfiguration",
154 			    MYNAME, rmc_comm_major, dmaj, caller);
155 			instance = -1;
156 		}
157 	}
158 	if (instance >= 0)
159 		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
160 	if (rcs != NULL) {
161 		sdip = rcs->dip;
162 		if (dip == NULL && sdip == NULL)
163 			rcs = NULL;
164 		else if (dip != NULL && sdip != NULL && sdip != dip) {
165 			cmn_err(CE_WARN,
166 			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
167 			    "probably due to child misconfiguration", MYNAME,
168 			    (void *)dip, (void *)sdip, caller);
169 			rcs = NULL;
170 		}
171 	}
172 
173 	return (rcs);
174 }
175 
176 
177 /*
178  * Lowest-level serial I/O chip register read/write
179  */
/*
 * Write <val> to SIO register <reg>.  The write is a no-op if the
 * registers aren't mapped (sio_handle == NULL) or an access fault has
 * been latched, so callers never touch dead hardware.
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value cacheing.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		/* order the two writes before the flushing read */
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}
221 
222 static uint8_t
223 sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
224 {
225 	uint8_t val;
226 
227 	if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
228 		val = ddi_get8(rcs->sd_state.sio_handle,
229 		    rcs->sd_state.sio_regs + reg);
230 	else
231 		val = DUMMY_VALUE;
232 	DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
233 	return (val);
234 }
235 
236 static void
237 sio_check_fault_status(struct rmc_comm_state *rcs)
238 {
239 	rcs->sd_state.sio_fault =
240 		ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
241 }
242 
243 boolean_t
244 rmc_comm_faulty(struct rmc_comm_state *rcs)
245 {
246 	if (!rcs->sd_state.sio_fault)
247 		sio_check_fault_status(rcs);
248 	return (rcs->sd_state.sio_fault);
249 }
250 
251 /*
252  * Check for data ready.
253  */
254 static boolean_t
255 sio_data_ready(struct rmc_comm_state *rcs)
256 {
257 	uint8_t status;
258 
259 	/*
260 	 * Data is available if the RXDA bit in the LSR is nonzero
261 	 * (if reading it didn't incur a fault).
262 	 */
263 	status = sio_get_reg(rcs, SIO_LSR);
264 	return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
265 }
266 
267 /*
268  * Enable/disable interrupts
269  */
270 static void
271 rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
272 {
273 	uint8_t val;
274 
275 	val = newstate ? SIO_IER_RXHDL_IE : 0;
276 	sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
277 	rcs->sd_state.hw_int_enabled = newstate;
278 }
279 
280 /*
281  * High-level interrupt handler:
282  *	Checks whether initialisation is complete (to avoid a race
283  *	with mutex_init()), and whether chip interrupts are enabled.
284  *	If not, the interrupt's not for us, so just return UNCLAIMED.
285  *	Otherwise, disable the interrupt, trigger a softint, and return
286  *	CLAIMED.  The softint handler will then do all the real work.
287  *
288  *	NOTE: the chip interrupt capability is only re-enabled once the
289  *	receive code has run, but that can be called from a poll loop
290  *	or cyclic callback as well as from the softint.  So it's *not*
291  *	guaranteed that there really is a chip interrupt pending here,
292  *	'cos the work may already have been done and the reason for the
293  *	interrupt gone away before we get here.
294  *
295  *	OTOH, if we come through here twice without the receive code
296  *	having run in between, that's definitely wrong.  In such an
297  *	event, we would notice that chip interrupts haven't yet been
298  *	re-enabled and return UNCLAIMED, allowing the system's jabber
299  *	protect code (if any) to do its job.
300  */
static uint_t
rmc_comm_hi_intr(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;
	uint_t claim;

	claim = DDI_INTR_UNCLAIMED;
	/*
	 * cycid is the last thing set up in rmc_comm_serdev_init(), so
	 * it also serves as an "initialisation complete" flag here.
	 */
	if (rcs->sd_state.cycid != CYCLIC_NONE) {
		/*
		 * Handle the case where this interrupt fires during
		 * panic processing.  If that occurs, then a thread
		 * in rmc_comm might have been idled while holding
		 * hw_mutex.  If so, that thread will never make
		 * progress, and so we do not want to unconditionally
		 * grab hw_mutex.
		 */
		if (ddi_in_panic() != 0) {
			if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
				return (claim);
			}
		} else {
			mutex_enter(rcs->sd_state.hw_mutex);
		}
		if (rcs->sd_state.hw_int_enabled) {
			/*
			 * Mask chip interrupts and defer the real work
			 * to the softint; the receive path re-enables
			 * them once the FIFO has been drained.
			 */
			rmc_comm_set_irq(rcs, B_FALSE);
			ddi_trigger_softintr(rcs->sd_state.softid);
			claim = DDI_INTR_CLAIMED;
		}
		mutex_exit(rcs->sd_state.hw_mutex);
	}
	return (claim);
}
333 
334 /*
335  * Packet receive handler
336  *
337  * This routine should be called from the low-level softint, or the
338  * cyclic callback, or rmc_comm_cmd() (for polled operation), with the
339  * low-level mutex already held.
340  */
/*
 * Drain the receive FIFO into the per-instance receive buffer, then
 * hand the data to the protocol layer.  Caller must hold dp_mutex;
 * hw_mutex is taken here (dp_mutex -> hw_mutex is the lock order).
 */
void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
	uint8_t data;

	DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

	/*
	 * Check for access faults before starting the receive
	 * loop (we don't want to cause bus errors or suchlike
	 * unpleasantness in the event that the SIO has died).
	 */
	if (!rmc_comm_faulty(rcs)) {

		char *rx_buf = rcs->sd_state.serdev_rx_buf;
		uint16_t rx_buflen = 0;

		/*
		 * Read bytes from the FIFO until they're all gone
		 * or our buffer overflows (which must be an error)
		 */

		/*
		 * At the moment, the receive buffer is overwritten any
		 * time data is received from the serial device.
		 * This should not pose problems (probably!) as the data
		 * protocol is half-duplex
		 * Otherwise, a circular buffer must be implemented!
		 */
		mutex_enter(rcs->sd_state.hw_mutex);
		while (sio_data_ready(rcs)) {
			data = sio_get_reg(rcs, SIO_RXD);
			/*
			 * post-increment then bound-check: the last
			 * byte written goes to index
			 * SIO_MAX_RXBUF_SIZE-1, so no overrun occurs
			 */
			rx_buf[rx_buflen++] = data;
			if (rx_buflen >= SIO_MAX_RXBUF_SIZE)
				break;
		}
		rcs->sd_state.serdev_rx_count = rx_buflen;

		DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

		/* FIFO drained: let the chip interrupt us again */
		rmc_comm_set_irq(rcs, B_TRUE);
		mutex_exit(rcs->sd_state.hw_mutex);

		/*
		 * call up the data protocol receive handler
		 */
		rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
	}
}
390 
391 /*
392  * Low-level softint handler
393  *
394  * This routine should be triggered whenever there's a byte to be read
395  */
396 static uint_t
397 rmc_comm_softint(caddr_t arg)
398 {
399 	struct rmc_comm_state *rcs = (void *)arg;
400 
401 	mutex_enter(rcs->dp_state.dp_mutex);
402 	rmc_comm_serdev_receive(rcs);
403 	mutex_exit(rcs->dp_state.dp_mutex);
404 	return (DDI_INTR_CLAIMED);
405 }
406 
407 /*
408  * Cyclic handler: just calls the receive routine, in case interrupts
409  * are not being delivered and in order to handle command timeout
410  */
411 static void
412 rmc_comm_cyclic(void *arg)
413 {
414 	struct rmc_comm_state *rcs = (void *)arg;
415 
416 	mutex_enter(rcs->dp_state.dp_mutex);
417 	rmc_comm_serdev_receive(rcs);
418 	mutex_exit(rcs->dp_state.dp_mutex);
419 }
420 
421 /*
422  * Serial protocol
423  *
424  * This routine builds a command and sets it in progress.
425  */
/*
 * Transmit <buflen> bytes from <buf> to the RMC by stuffing them into
 * the SIO Tx FIFO one at a time, polling for Tx-holding-register-empty
 * before each byte.  If the SIO is faulted, sio_get_reg() returns
 * DUMMY_VALUE (all bits set), so the XHRE test passes and this routine
 * cannot spin forever on dead hardware.
 */
void
rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
{
	uint8_t *p;
	uint8_t status;

	/*
	 * Check and update the SIO h/w fault status before accessing
	 * the chip registers.  If there's a (new or previous) fault,
	 * we'll run through the protocol but won't really touch the
	 * hardware and all commands will timeout.  If a previously
	 * discovered fault has now gone away (!), then we can (try to)
	 * proceed with the new command (probably a probe).
	 */
	sio_check_fault_status(rcs);

	/*
	 * Send the command now by stuffing the packet into the Tx FIFO.
	 */
	DATASCOPE(rcs, 'S', buf, buflen)

	mutex_enter(rcs->sd_state.hw_mutex);
	p = (uint8_t *)buf;
	while (p < (uint8_t *)&buf[buflen]) {

		/*
		 * before writing to the TX holding register, we make sure that
		 * it is empty. In this case, there will be no chance to
		 * overflow the serial device FIFO (but, on the other hand,
		 * it may introduce some latency)
		 */
		status = sio_get_reg(rcs, SIO_LSR);
		while ((status & SIO_LSR_XHRE) == 0) {
			drv_usecwait(100);
			status = sio_get_reg(rcs, SIO_LSR);
		}
		sio_put_reg(rcs, SIO_TXD, *p++);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}
466 
467 /*
468  * wait for the tx fifo to drain - used for urgent nowait requests
469  */
470 void
471 rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
472 {
473 	uint8_t status;
474 
475 	mutex_enter(rcs->sd_state.hw_mutex);
476 	status = sio_get_reg(rcs, SIO_LSR);
477 	while ((status & SIO_LSR_XHRE) == 0) {
478 		drv_usecwait(100);
479 		status = sio_get_reg(rcs, SIO_LSR);
480 	}
481 	mutex_exit(rcs->sd_state.hw_mutex);
482 }
483 
484 /*
485  * Hardware setup - put the SIO chip in the required operational
486  * state,  with all our favourite parameters programmed correctly.
487  * This routine leaves all SIO interrupts disabled.
488  */
489 
static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
	uint16_t divisor;

	/*
	 * Disable interrupts, soft reset Tx and Rx circuitry,
	 * reselect standard modes (bits/char, parity, etc).
	 */
	rmc_comm_set_irq(rcs, B_FALSE);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
	sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

	/*
	 * Select the proper baud rate; if the value is invalid
	 * (presumably 0, i.e. not specified, but also if the
	 * "baud" property is set to some silly value), we assume
	 * the default.  The divisor is scaled by the platform's
	 * baud-divisor-factor (for non-24MHz uart clocks).
	 */
	if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
		divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
		    rcs->baud_divisor_factor;
	} else {
		divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
		    rcs->baud_divisor_factor;
	}

	/*
	 * According to the datasheet, it is forbidden for the divisor
	 * register to be zero.  So when loading the register in two
	 * steps, we have to make sure that the temporary value formed
	 * between loads is nonzero.  However, we can't rely on either
	 * half already having a nonzero value, as the datasheet also
	 * says that these registers are indeterminate after a reset!
	 * So, we explicitly set the low byte to a non-zero value first;
	 * then we can safely load the high byte, and then the correct
	 * value for the low byte, without the result ever being zero.
	 */
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
	sio_put_reg(rcs, SIO_LBGDL, 0xff);
	sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
	sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

	/*
	 * Program the remaining device registers as required
	 */
	sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}
540 
541 /*
542  * Higher-level setup & teardown
543  */
544 static void
545 rmc_comm_offline(struct rmc_comm_state *rcs)
546 {
547 	if (rcs->sd_state.sio_handle != NULL)
548 		ddi_regs_map_free(&rcs->sd_state.sio_handle);
549 	rcs->sd_state.sio_handle = NULL;
550 	rcs->sd_state.sio_regs = NULL;
551 }
552 
553 static int
554 rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
555 {
556 	ddi_acc_handle_t h;
557 	caddr_t p;
558 	int nregs;
559 	int err;
560 
561 	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
562 		nregs = 0;
563 	switch (nregs) {
564 	default:
565 	case 1:
566 		/*
567 		 *  regset 0 represents the SIO operating registers
568 		 */
569 		err = ddi_regs_map_setup(dip, 0, &p, 0, 0,
570 		    rmc_comm_dev_acc_attr, &h);
571 		if (err != DDI_SUCCESS)
572 			return (EIO);
573 		rcs->sd_state.sio_handle = h;
574 		rcs->sd_state.sio_regs = (void *)p;
575 		break;
576 	case 0:
577 		/*
578 		 *  If no registers are defined, succeed vacuously;
579 		 *  commands will be accepted, but we fake the accesses.
580 		 */
581 		break;
582 	}
583 
584 	/*
585 	 * Now that the registers are mapped, we can initialise the SIO h/w
586 	 */
587 	rmc_comm_hw_reset(rcs);
588 	return (0);
589 }
590 
591 
592 /*
593  * Initialization of the serial device (data structure, mutex, cv, hardware
594  * and so on). It is called from the attach routine.
595  */
596 
int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	cyc_handler_t cychand;
	cyc_time_t cyctime;
	int err = DDI_SUCCESS;

	/*
	 * CYCLIC_NONE here marks initialisation as incomplete; the
	 * high-level interrupt handler checks it before touching any
	 * mutex (it is only set once everything below has succeeded).
	 */
	rcs->sd_state.cycid = CYCLIC_NONE;

	/*
	 *  Online the hardware ...
	 */
	err = rmc_comm_online(rcs, dip);
	if (err != 0)
		return (-1);

	/*
	 * call ddi_get_soft_iblock_cookie() to retrieve the
	 * the interrupt block cookie so that the mutexes are initialized
	 * before adding the interrupt (to avoid a potential race condition).
	 */

	err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &rcs->dp_state.dp_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	/*
	 * initialize mutex here before adding hw/sw interrupt handlers
	 */
	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
	    rcs->dp_state.dp_iblk);

	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
	    rcs->sd_state.hw_iblk);

	/*
	 * Install soft and hard interrupt handler(s)
	 *
	 * the soft intr. handler will need the data protocol lock (dp_mutex)
	 * So, data protocol mutex and iblock cookie are created/initialized
	 * here
	 */

	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
	if (err != DDI_SUCCESS) {
		/* unwind the mutexes created above */
		mutex_destroy(rcs->dp_state.dp_mutex);
		mutex_destroy(rcs->sd_state.hw_mutex);
		return (-1);
	}

	/*
	 * hardware interrupt - only installed when the SIO registers
	 * were actually mapped (see rmc_comm_online())
	 */

	if (rcs->sd_state.sio_handle != NULL) {
		err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
			rmc_comm_hi_intr, (caddr_t)rcs);

		/*
		 * did we successfully install the h/w interrupt handler?
		 */
		if (err != DDI_SUCCESS) {
			/* unwind softint and mutexes */
			ddi_remove_softintr(rcs->sd_state.softid);
			mutex_destroy(rcs->dp_state.dp_mutex);
			mutex_destroy(rcs->sd_state.hw_mutex);
			return (-1);
		}
	}


	/*
	 * Start cyclic callbacks; this is the final step, so a valid
	 * cycid also signals "fully initialised" to the interrupt path.
	 */

	cychand.cyh_func = rmc_comm_cyclic;
	cychand.cyh_arg = rcs;
	cychand.cyh_level = CY_LOW_LEVEL;
	cyctime.cyt_when = 0;			/* from the next second	*/
	cyctime.cyt_interval = 5*RMC_COMM_ONE_SEC; /* call at 5s intervals */
	mutex_enter(&cpu_lock);
	rcs->sd_state.cycid = cyclic_add(&cychand, &cyctime);
	mutex_exit(&cpu_lock);

	return (0);
}
688 
689 /*
690  * Termination of the serial device (data structure, mutex, cv, hardware
691  * and so on). It is called from the detach routine.
692  */
693 
void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	/* quiesce the chip (also disables its interrupts) */
	rmc_comm_hw_reset(rcs);

	/*
	 * cycid != CYCLIC_NONE means rmc_comm_serdev_init() ran to
	 * completion, so the cyclic, interrupt handlers and mutexes
	 * all exist and are torn down in reverse order of creation.
	 */
	if (rcs->sd_state.cycid != CYCLIC_NONE) {
		mutex_enter(&cpu_lock);
		cyclic_remove(rcs->sd_state.cycid);
		mutex_exit(&cpu_lock);

		/* h/w interrupt was only added if registers were mapped */
		if (rcs->sd_state.sio_handle != NULL)
			ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

		ddi_remove_softintr(rcs->sd_state.softid);

		mutex_destroy(rcs->sd_state.hw_mutex);

		mutex_destroy(rcs->dp_state.dp_mutex);
	}
	/* finally unmap the SIO registers */
	rmc_comm_offline(rcs);
}
715 
716 /*
717  * device driver entry routines (init/fini, attach/detach, ...)
718  */
719 
720 /*
721  *  Clean up on detach or failure of attach
722  */
/*
 * Clean up on detach or failure of attach.  The three boolean flags
 * say which layers (driver interface, data protocol, serial device)
 * were successfully initialised and therefore need tearing down;
 * this lets attach-failure paths pass exactly what was completed.
 */
static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init)
{
	if (rcs != NULL) {
		/*
		 * disable interrupts now
		 */
		rmc_comm_set_irq(rcs, B_FALSE);

		/*
		 * driver interface termination (if it has been initialized)
		 */
		if (drvi_init)
			rmc_comm_drvintf_fini(rcs);

		/*
		 * data protocol termination (if it has been initialized)
		 */
		if (dp_init)
			rmc_comm_dp_fini(rcs);

		/*
		 * serial device termination (if it has been initialized)
		 */
		if (sd_init)
			rmc_comm_serdev_fini(rcs, dip);

		ddi_set_driver_private(dip, NULL);
	}
	/* freeing a never-allocated instance is harmless */
	ddi_soft_state_free(rmc_comm_statep, instance);
}
755 
756 /*
757  *  Autoconfiguration routines
758  */
759 
/*
 * attach(9E) entry point.  Handles DDI_ATTACH (first-time setup of
 * the single supported instance) and DDI_RESUME (reprogram the SIO,
 * restart the RMC watchdog if it was active at suspend time, and
 * reset the data protocol).
 */
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		/*
		 * re-arm the hardware watchdog if it was active when
		 * we suspended (see DDI_SUSPEND in rmc_comm_detach())
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		/* restart the data protocol from a known state */
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		/*
		 * re-send the CPU signature to the RMC, if one has
		 * been recorded (symbol looked up dynamically as it
		 * may not be present on all platforms)
		 */
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 *  Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	/* dip is only recorded once attach is certain to succeed */
	rcs->dip = NULL;

	/*
	 *  Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
	    rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		/* dp and serdev layers were initialised; unwind them */
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 *  Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 *  All done, report success; is_attached is set under the
	 *  attach lock so rmc_comm_register() sees a consistent view
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
891 
/*
 * detach(9E) entry point.  DDI_SUSPEND remembers whether the hardware
 * watchdog was active (so DDI_RESUME can re-arm it) and quiesces the
 * SIO; DDI_DETACH refuses while clients are registered, otherwise
 * tears everything down.
 */
static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct rmc_comm_state *rcs;
	int instance;

	instance = ddi_get_instance(dip);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
		return (DDI_FAILURE);	/* this "can't happen" */

	switch (cmd) {
	case DDI_SUSPEND:
		/*
		 * stop the hardware watchdog across the suspend,
		 * noting its state for DDI_RESUME
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && watchdog_activated &&
		    tod_ops.tod_clear_watchdog_timer != NULL) {
			watchdog_was_active = 1;
			(void) tod_ops.tod_clear_watchdog_timer();
		} else {
			watchdog_was_active = 0;
		}
		mutex_exit(&tod_lock);

		rcs->dip = NULL;
		/* quiesce the SIO (also disables its interrupts) */
		rmc_comm_hw_reset(rcs);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * reject detach if any client(s) still registered
		 */
		mutex_enter(&rmc_comm_attach_lock);
		if (rcs->n_registrations != 0) {
			mutex_exit(&rmc_comm_attach_lock);
			return (DDI_FAILURE);
		}
		/*
		 * Committed to complete the detach;
		 * mark as no longer attached, to prevent new clients
		 * registering (as part of a coincident attach)
		 */
		rcs->is_attached = B_FALSE;
		mutex_exit(&rmc_comm_attach_lock);
		/* all three layers were initialised at attach time */
		rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
942 
943 /*ARGSUSED*/
944 static int
945 rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
946 {
947 	struct rmc_comm_state *rcs;
948 
949 	if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
950 		return (DDI_FAILURE);
951 	rmc_comm_hw_reset(rcs);
952 	return (DDI_SUCCESS);
953 }
954 
955 /*
956  * System interface structures
957  */
/* dev_ops: no cb_ops/bus_ops — this driver serves in-kernel clients only */
static struct dev_ops rmc_comm_dev_ops =
{
	DEVO_REV,
	0,				/* refcount		*/
	nodev,				/* getinfo		*/
	nulldev,			/* identify		*/
	nulldev,			/* probe		*/
	rmc_comm_attach,		/* attach		*/
	rmc_comm_detach,		/* detach		*/
	rmc_comm_reset,			/* reset		*/
	(struct cb_ops *)NULL,		/* driver operations	*/
	(struct bus_ops *)NULL,		/* bus operations	*/
	nulldev 			/* power()		*/
};

/* linkage description; "%I%" is expanded by SCCS at delivery time */
static struct modldrv modldrv =
{
	&mod_driverops,
	"rmc_comm driver, v%I%",
	&rmc_comm_dev_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	{
		&modldrv,
		NULL
	}
};
988 
989 /*
990  *  Dynamic loader interface code
991  */
992 int
993 _init(void)
994 {
995 	int err;
996 
997 	mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
998 	err = ddi_soft_state_init(&rmc_comm_statep,
999 		sizeof (struct rmc_comm_state), 0);
1000 	if (err == DDI_SUCCESS)
1001 		if ((err = mod_install(&modlinkage)) != 0) {
1002 			ddi_soft_state_fini(&rmc_comm_statep);
1003 		}
1004 	if (err != DDI_SUCCESS)
1005 		mutex_destroy(&rmc_comm_attach_lock);
1006 	return (err);
1007 }
1008 
/*
 * Module information entry point: report this module's details
 * via the modloader.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&modlinkage, mip));
}
1014 
1015 int
1016 _fini(void)
1017 {
1018 	int err;
1019 
1020 	if ((err = mod_remove(&modlinkage)) == 0) {
1021 		ddi_soft_state_fini(&rmc_comm_statep);
1022 		rmc_comm_major = NOMAJOR;
1023 		mutex_destroy(&rmc_comm_attach_lock);
1024 	}
1025 	return (err);
1026 }
1027