xref: /titanic_52/usr/src/uts/sun4u/io/rmc_comm.c (revision 1ce1951135b81c803c8dcf2f3c756009b1b0170a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  *
26  * The "rmc_comm" driver provides access to the RMC so that its clients need
27  * not be concerned with the details of the access mechanism, which in this
28  * case is implemented via a packet-based protocol over a serial link via a
29  * 16550 compatible serial port.
30  */
31 
32 #pragma ident	"%Z%%M%	%I%	%E% SMI"
33 
34 /*
35  *  Header files
36  */
37 #include <sys/conf.h>
38 #include <sys/membar.h>
39 #include <sys/modctl.h>
40 #include <sys/strlog.h>
41 #include <sys/types.h>
42 #include <sys/sunddi.h>
43 #include <sys/ddi.h>
44 #include <sys/rmc_comm_dp_boot.h>
45 #include <sys/rmc_comm_dp.h>
46 #include <sys/rmc_comm_drvintf.h>
47 #include <sys/rmc_comm.h>
48 #include <sys/cpu_sgnblk_defs.h>
49 
50 /*
51  * Local definitions
52  */
53 
54 #define	ddi_driver_major(dip)	ddi_name_to_major(ddi_binding_name(dip))
55 
56 #define	MYNAME			"rmc_comm"
57 #define	NOMAJOR			(~(major_t)0)
58 #define	DUMMY_VALUE		(~(int8_t)0)
59 
60 /*
61  * Local data
62  */
63 static void *rmc_comm_statep;
64 static major_t rmc_comm_major = NOMAJOR;
65 static kmutex_t rmc_comm_attach_lock;
66 static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] =
67 {
68 	DDI_DEVICE_ATTR_V0,
69 	DDI_STRUCTURE_LE_ACC,
70 	DDI_STRICTORDER_ACC
71 };
72 static int watchdog_was_active;
73 extern int watchdog_activated;
74 extern int watchdog_enable;
75 
76 /*
77  * prototypes
78  */
79 
80 extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t);
81 static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t);
82 static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t);
83 static void sio_check_fault_status(struct rmc_comm_state *);
84 static boolean_t sio_data_ready(struct rmc_comm_state *);
85 static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t);
86 static uint_t rmc_comm_hi_intr(caddr_t);
87 static uint_t rmc_comm_softint(caddr_t);
88 static void rmc_comm_cyclic(void *);
89 static void rmc_comm_hw_reset(struct rmc_comm_state *);
90 static void rmc_comm_offline(struct rmc_comm_state *);
91 static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *);
92 static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int,
93     boolean_t, boolean_t, boolean_t);
94 static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t);
95 static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t);
96 
97 /*
98  * for client leaf drivers to register their desire for rmc_comm
99  * to stay attached
100  */
101 int
102 rmc_comm_register()
103 {
104 	struct rmc_comm_state *rcs;
105 
106 	mutex_enter(&rmc_comm_attach_lock);
107 	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
108 	if ((rcs == NULL) || (!rcs->is_attached)) {
109 		mutex_exit(&rmc_comm_attach_lock);
110 		return (DDI_FAILURE);
111 	}
112 	rcs->n_registrations++;
113 	mutex_exit(&rmc_comm_attach_lock);
114 	return (DDI_SUCCESS);
115 }
116 
void
rmc_comm_unregister()
{
	struct rmc_comm_state *rcs;

	/*
	 * Drop one client registration, under the same lock used by
	 * rmc_comm_register() and the detach path so the count stays
	 * consistent.  Calling this without a matching register is a
	 * caller bug, hence ASSERTs rather than run-time handling.
	 */
	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	ASSERT(rcs != NULL);
	ASSERT(rcs->n_registrations != 0);
	rcs->n_registrations--;
	mutex_exit(&rmc_comm_attach_lock);
}
129 
130 /*
131  * to get the soft state structure of a specific instance
132  */
133 struct rmc_comm_state *
134 rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
135 {
136 	struct rmc_comm_state *rcs = NULL;
137 	dev_info_t *sdip = NULL;
138 	major_t dmaj = NOMAJOR;
139 
140 	if (dip != NULL) {
141 		/*
142 		 * Use the instance number from the <dip>; also,
143 		 * check that it really corresponds to this driver
144 		 */
145 		instance = ddi_get_instance(dip);
146 		dmaj = ddi_driver_major(dip);
147 		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
148 			rmc_comm_major = dmaj;
149 		else if (dmaj != rmc_comm_major) {
150 			cmn_err(CE_WARN,
151 			    "%s: major number mismatch (%d vs. %d) in %s(),"
152 			    "probably due to child misconfiguration",
153 			    MYNAME, rmc_comm_major, dmaj, caller);
154 			instance = -1;
155 		}
156 	}
157 	if (instance >= 0)
158 		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
159 	if (rcs != NULL) {
160 		sdip = rcs->dip;
161 		if (dip == NULL && sdip == NULL)
162 			rcs = NULL;
163 		else if (dip != NULL && sdip != NULL && sdip != dip) {
164 			cmn_err(CE_WARN,
165 			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
166 			    "probably due to child misconfiguration", MYNAME,
167 			    (void *)dip, (void *)sdip, caller);
168 			rcs = NULL;
169 		}
170 	}
171 
172 	return (rcs);
173 }
174 
175 
176 /*
177  * Lowest-level serial I/O chip register read/write
178  */
/*
 * Write <val> to SIO register <reg>, then force the write through to
 * the chip (see the block comment below).  Does nothing if the chip is
 * unmapped or has been marked faulty.
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value cacheing.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}
220 
221 static uint8_t
222 sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
223 {
224 	uint8_t val;
225 
226 	if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
227 		val = ddi_get8(rcs->sd_state.sio_handle,
228 		    rcs->sd_state.sio_regs + reg);
229 	else
230 		val = DUMMY_VALUE;
231 	DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
232 	return (val);
233 }
234 
/*
 * Latch the current health of the SIO access handle into
 * sd_state.sio_fault (nonzero when the DDI reports an access fault).
 */
static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
	rcs->sd_state.sio_fault =
	    ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}
241 
242 boolean_t
243 rmc_comm_faulty(struct rmc_comm_state *rcs)
244 {
245 	if (!rcs->sd_state.sio_fault)
246 		sio_check_fault_status(rcs);
247 	return (rcs->sd_state.sio_fault);
248 }
249 
250 /*
251  * Check for data ready.
252  */
253 static boolean_t
254 sio_data_ready(struct rmc_comm_state *rcs)
255 {
256 	uint8_t status;
257 
258 	/*
259 	 * Data is available if the RXDA bit in the LSR is nonzero
260 	 * (if reading it didn't incur a fault).
261 	 */
262 	status = sio_get_reg(rcs, SIO_LSR);
263 	return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
264 }
265 
266 /*
267  * Enable/disable interrupts
268  */
269 static void
270 rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
271 {
272 	uint8_t val;
273 
274 	val = newstate ? SIO_IER_RXHDL_IE : 0;
275 	sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
276 	rcs->sd_state.hw_int_enabled = newstate;
277 }
278 
279 /*
280  * High-level interrupt handler:
281  *	Checks whether initialisation is complete (to avoid a race
282  *	with mutex_init()), and whether chip interrupts are enabled.
283  *	If not, the interrupt's not for us, so just return UNCLAIMED.
284  *	Otherwise, disable the interrupt, trigger a softint, and return
285  *	CLAIMED.  The softint handler will then do all the real work.
286  *
287  *	NOTE: the chip interrupt capability is only re-enabled once the
288  *	receive code has run, but that can be called from a poll loop
289  *	or cyclic callback as well as from the softint.  So it's *not*
290  *	guaranteed that there really is a chip interrupt pending here,
291  *	'cos the work may already have been done and the reason for the
292  *	interrupt gone away before we get here.
293  *
294  *	OTOH, if we come through here twice without the receive code
295  *	having run in between, that's definitely wrong.  In such an
296  *	event, we would notice that chip interrupts haven't yet been
297  *	re-enabled and return UNCLAIMED, allowing the system's jabber
298  *	protect code (if any) to do its job.
299  */
static uint_t
rmc_comm_hi_intr(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;
	uint_t claim;

	claim = DDI_INTR_UNCLAIMED;
	/*
	 * cycid is set only at the very end of rmc_comm_serdev_init(),
	 * so a non-NULL value means the mutexes and softint below are
	 * fully initialised and safe to use.
	 */
	if (rcs->sd_state.cycid != NULL) {
		/*
		 * Handle the case where this interrupt fires during
		 * panic processing.  If that occurs, then a thread
		 * in rmc_comm might have been idled while holding
		 * hw_mutex.  If so, that thread will never make
		 * progress, and so we do not want to unconditionally
		 * grab hw_mutex.
		 */
		if (ddi_in_panic() != 0) {
			if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
				return (claim);
			}
		} else {
			mutex_enter(rcs->sd_state.hw_mutex);
		}
		/*
		 * Claim only if chip interrupts are still enabled (see
		 * the function header comment re: jabber protection);
		 * disable them and defer the work to the softint.
		 */
		if (rcs->sd_state.hw_int_enabled) {
			rmc_comm_set_irq(rcs, B_FALSE);
			ddi_trigger_softintr(rcs->sd_state.softid);
			claim = DDI_INTR_CLAIMED;
		}
		mutex_exit(rcs->sd_state.hw_mutex);
	}
	return (claim);
}
332 
333 /*
334  * Packet receive handler
335  *
336  * This routine should be called from the low-level softint, or the
337  * cyclic callback, or rmc_comm_cmd() (for polled operation), with the
338  * low-level mutex already held.
339  */
void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
	uint8_t data;

	DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

	/*
	 * Check for access faults before starting the receive
	 * loop (we don't want to cause bus errors or suchlike
	 * unpleasantness in the event that the SIO has died).
	 */
	if (!rmc_comm_faulty(rcs)) {

		char *rx_buf = rcs->sd_state.serdev_rx_buf;
		uint16_t rx_buflen = 0;

		/*
		 * Read bytes from the FIFO until they're all gone
		 * or our buffer overflows (which must be an error)
		 */

		/*
		 * At the moment, the receive buffer is overwritten any
		 * time data is received from the serial device.
		 * This should not pose problems (probably!) as the data
		 * protocol is half-duplex
		 * Otherwise, a circular buffer must be implemented!
		 */
		mutex_enter(rcs->sd_state.hw_mutex);
		while (sio_data_ready(rcs)) {
			data = sio_get_reg(rcs, SIO_RXD);
			rx_buf[rx_buflen++] = data;
			/*
			 * Post-increment then check: at most
			 * SIO_MAX_RXBUF_SIZE bytes are stored, with the
			 * last write landing at index SIO_MAX_RXBUF_SIZE-1.
			 */
			if (rx_buflen >= SIO_MAX_RXBUF_SIZE)
				break;
		}
		rcs->sd_state.serdev_rx_count = rx_buflen;

		DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

		/*
		 * Re-enable chip interrupts while still under hw_mutex
		 * (the hard handler disabled them before triggering us).
		 */
		rmc_comm_set_irq(rcs, B_TRUE);
		mutex_exit(rcs->sd_state.hw_mutex);

		/*
		 * call up the data protocol receive handler
		 */
		rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
	}
}
389 
390 /*
391  * Low-level softint handler
392  *
393  * This routine should be triggered whenever there's a byte to be read
394  */
395 static uint_t
396 rmc_comm_softint(caddr_t arg)
397 {
398 	struct rmc_comm_state *rcs = (void *)arg;
399 
400 	mutex_enter(rcs->dp_state.dp_mutex);
401 	rmc_comm_serdev_receive(rcs);
402 	mutex_exit(rcs->dp_state.dp_mutex);
403 	return (DDI_INTR_CLAIMED);
404 }
405 
406 /*
407  * Cyclic handler: just calls the receive routine, in case interrupts
408  * are not being delivered and in order to handle command timeout
409  */
410 static void
411 rmc_comm_cyclic(void *arg)
412 {
413 	struct rmc_comm_state *rcs = (void *)arg;
414 
415 	mutex_enter(rcs->dp_state.dp_mutex);
416 	rmc_comm_serdev_receive(rcs);
417 	mutex_exit(rcs->dp_state.dp_mutex);
418 }
419 
420 /*
421  * Serial protocol
422  *
423  * This routine builds a command and sets it in progress.
424  */
425 void
426 rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
427 {
428 	uint8_t *p;
429 	uint8_t status;
430 
431 	/*
432 	 * Check and update the SIO h/w fault status before accessing
433 	 * the chip registers.  If there's a (new or previous) fault,
434 	 * we'll run through the protocol but won't really touch the
435 	 * hardware and all commands will timeout.  If a previously
436 	 * discovered fault has now gone away (!), then we can (try to)
437 	 * proceed with the new command (probably a probe).
438 	 */
439 	sio_check_fault_status(rcs);
440 
441 	/*
442 	 * Send the command now by stuffing the packet into the Tx FIFO.
443 	 */
444 	DATASCOPE(rcs, 'S', buf, buflen)
445 
446 	mutex_enter(rcs->sd_state.hw_mutex);
447 	p = (uint8_t *)buf;
448 	while (p < (uint8_t *)&buf[buflen]) {
449 
450 		/*
451 		 * before writing to the TX holding register, we make sure that
452 		 * it is empty. In this case, there will be no chance to
453 		 * overflow the serial device FIFO (but, on the other hand,
454 		 * it may introduce some latency)
455 		 */
456 		status = sio_get_reg(rcs, SIO_LSR);
457 		while ((status & SIO_LSR_XHRE) == 0) {
458 			drv_usecwait(100);
459 			status = sio_get_reg(rcs, SIO_LSR);
460 		}
461 		sio_put_reg(rcs, SIO_TXD, *p++);
462 	}
463 	mutex_exit(rcs->sd_state.hw_mutex);
464 }
465 
466 /*
467  * wait for the tx fifo to drain - used for urgent nowait requests
468  */
469 void
470 rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
471 {
472 	uint8_t status;
473 
474 	mutex_enter(rcs->sd_state.hw_mutex);
475 	status = sio_get_reg(rcs, SIO_LSR);
476 	while ((status & SIO_LSR_XHRE) == 0) {
477 		drv_usecwait(100);
478 		status = sio_get_reg(rcs, SIO_LSR);
479 	}
480 	mutex_exit(rcs->sd_state.hw_mutex);
481 }
482 
483 /*
484  * Hardware setup - put the SIO chip in the required operational
485  * state,  with all our favourite parameters programmed correctly.
486  * This routine leaves all SIO interrupts disabled.
487  */
488 
static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
	uint16_t divisor;

	/*
	 * Disable interrupts, soft reset Tx and Rx circuitry,
	 * reselect standard modes (bits/char, parity, etc).
	 */
	rmc_comm_set_irq(rcs, B_FALSE);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
	sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

	/*
	 * Select the proper baud rate; if the value is invalid
	 * (presumably 0, i.e. not specified, but also if the
	 * "baud" property is set to some silly value), we assume
	 * the default.
	 *
	 * baud_divisor_factor is range-checked at attach time, so the
	 * multiplication here works on a validated scale factor.
	 */
	if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
		divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
		    rcs->baud_divisor_factor;
	} else {
		divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
		    rcs->baud_divisor_factor;
	}

	/*
	 * According to the datasheet, it is forbidden for the divisor
	 * register to be zero.  So when loading the register in two
	 * steps, we have to make sure that the temporary value formed
	 * between loads is nonzero.  However, we can't rely on either
	 * half already having a nonzero value, as the datasheet also
	 * says that these registers are indeterminate after a reset!
	 * So, we explicitly set the low byte to a non-zero value first;
	 * then we can safely load the high byte, and then the correct
	 * value for the low byte, without the result ever being zero.
	 *
	 * NOTE: the register writes below are order-critical for the
	 * reason above; do not reorder them.
	 */
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
	sio_put_reg(rcs, SIO_LBGDL, 0xff);
	sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
	sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

	/*
	 * Program the remaining device registers as required
	 */
	sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}
539 
540 /*
541  * Higher-level setup & teardown
542  */
/*
 * Release the SIO register mapping (if any) and clear the cached
 * handle/pointer, so subsequent accesses are faked rather than made.
 */
static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
	if (rcs->sd_state.sio_handle != NULL)
		ddi_regs_map_free(&rcs->sd_state.sio_handle);
	rcs->sd_state.sio_handle = NULL;
	rcs->sd_state.sio_regs = NULL;
}
551 
552 static int
553 rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
554 {
555 	ddi_acc_handle_t h;
556 	caddr_t p;
557 	int nregs;
558 	int err;
559 
560 	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
561 		nregs = 0;
562 	switch (nregs) {
563 	default:
564 	case 1:
565 		/*
566 		 *  regset 0 represents the SIO operating registers
567 		 */
568 		err = ddi_regs_map_setup(dip, 0, &p, 0, 0,
569 		    rmc_comm_dev_acc_attr, &h);
570 		if (err != DDI_SUCCESS)
571 			return (EIO);
572 		rcs->sd_state.sio_handle = h;
573 		rcs->sd_state.sio_regs = (void *)p;
574 		break;
575 	case 0:
576 		/*
577 		 *  If no registers are defined, succeed vacuously;
578 		 *  commands will be accepted, but we fake the accesses.
579 		 */
580 		break;
581 	}
582 
583 	/*
584 	 * Now that the registers are mapped, we can initialise the SIO h/w
585 	 */
586 	rmc_comm_hw_reset(rcs);
587 	return (0);
588 }
589 
590 
591 /*
592  * Initialization of the serial device (data structure, mutex, cv, hardware
593  * and so on). It is called from the attach routine.
594  */
595 
596 int
597 rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
598 {
599 	int err = DDI_SUCCESS;
600 
601 	rcs->sd_state.cycid = NULL;
602 
603 	/*
604 	 *  Online the hardware ...
605 	 */
606 	err = rmc_comm_online(rcs, dip);
607 	if (err != 0)
608 		return (-1);
609 
610 	/*
611 	 * call ddi_get_soft_iblock_cookie() to retrieve the
612 	 * the interrupt block cookie so that the mutexes are initialized
613 	 * before adding the interrupt (to avoid a potential race condition).
614 	 */
615 
616 	err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
617 	    &rcs->dp_state.dp_iblk);
618 	if (err != DDI_SUCCESS)
619 		return (-1);
620 
621 	err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
622 	if (err != DDI_SUCCESS)
623 		return (-1);
624 
625 	/*
626 	 * initialize mutex here before adding hw/sw interrupt handlers
627 	 */
628 	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
629 	    rcs->dp_state.dp_iblk);
630 
631 	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
632 	    rcs->sd_state.hw_iblk);
633 
634 	/*
635 	 * Install soft and hard interrupt handler(s)
636 	 *
637 	 * the soft intr. handler will need the data protocol lock (dp_mutex)
638 	 * So, data protocol mutex and iblock cookie are created/initialized
639 	 * here
640 	 */
641 
642 	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
643 	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
644 	if (err != DDI_SUCCESS) {
645 		mutex_destroy(rcs->dp_state.dp_mutex);
646 		mutex_destroy(rcs->sd_state.hw_mutex);
647 		return (-1);
648 	}
649 
650 	/*
651 	 * hardware interrupt
652 	 */
653 
654 	if (rcs->sd_state.sio_handle != NULL) {
655 		err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
656 		    rmc_comm_hi_intr, (caddr_t)rcs);
657 
658 		/*
659 		 * did we successfully install the h/w interrupt handler?
660 		 */
661 		if (err != DDI_SUCCESS) {
662 			ddi_remove_softintr(rcs->sd_state.softid);
663 			mutex_destroy(rcs->dp_state.dp_mutex);
664 			mutex_destroy(rcs->sd_state.hw_mutex);
665 			return (-1);
666 		}
667 	}
668 
669 	/*
670 	 * Start periodical callbacks
671 	 */
672 	rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
673 	    5 * RMC_COMM_ONE_SEC, DDI_IPL_1);
674 	return (0);
675 }
676 
677 /*
678  * Termination of the serial device (data structure, mutex, cv, hardware
679  * and so on). It is called from the detach routine.
680  */
681 
void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	/* Quiesce the chip first (also disables its interrupts) */
	rmc_comm_hw_reset(rcs);

	/*
	 * cycid is non-NULL only if rmc_comm_serdev_init() ran to
	 * completion, so it guards teardown of everything created
	 * there.  Interrupt handlers are removed before the mutexes
	 * they use are destroyed.
	 */
	if (rcs->sd_state.cycid != NULL) {
		ddi_periodic_delete(rcs->sd_state.cycid);
		rcs->sd_state.cycid = NULL;

		if (rcs->sd_state.sio_handle != NULL)
			ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

		ddi_remove_softintr(rcs->sd_state.softid);

		mutex_destroy(rcs->sd_state.hw_mutex);

		mutex_destroy(rcs->dp_state.dp_mutex);
	}
	rmc_comm_offline(rcs);
}
702 
703 /*
704  * device driver entry routines (init/fini, attach/detach, ...)
705  */
706 
707 /*
708  *  Clean up on detach or failure of attach
709  */
/*
 * Tear down an instance part-way or fully through attach.  The three
 * boolean flags indicate which layers (driver interface, data
 * protocol, serial device) were successfully initialized and hence
 * need termination; they are dismantled in reverse order of setup.
 * <rcs> may be NULL when only the soft state needs freeing.
 */
static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init)
{
	if (rcs != NULL) {
		/*
		 * disable interrupts now
		 */
		rmc_comm_set_irq(rcs, B_FALSE);

		/*
		 * driver interface termination (if it has been initialized)
		 */
		if (drvi_init)
			rmc_comm_drvintf_fini(rcs);

		/*
		 * data protocol termination (if it has been initialized)
		 */
		if (dp_init)
			rmc_comm_dp_fini(rcs);

		/*
		 * serial device termination (if it has been initialized)
		 */
		if (sd_init)
			rmc_comm_serdev_fini(rcs, dip);

		ddi_set_driver_private(dip, NULL);
	}
	ddi_soft_state_free(rmc_comm_statep, instance);
}
742 
743 /*
744  *  Autoconfiguration routines
745  */
746 
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		/*
		 * Re-initialise the chip and re-enable its interrupts
		 * before restoring the software state.
		 */
		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		/*
		 * Re-arm the hardware watchdog if it was active when
		 * we suspended (see the DDI_SUSPEND path in detach).
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		/*
		 * Restart the data protocol from a known state.
		 */
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		/*
		 * Re-post the CPU signature to the RMC, if one has
		 * been recorded (symbol looked up dynamically since it
		 * may not exist on all platforms).
		 */
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 *  Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	/* dip is published last, once attach can no longer fail */
	rcs->dip = NULL;

	/*
	 *  Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 *  Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 *  All done, report success; is_attached is set under the
	 *  attach lock so rmc_comm_register() sees a consistent state.
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
878 
static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct rmc_comm_state *rcs;
	int instance;

	instance = ddi_get_instance(dip);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
		return (DDI_FAILURE);	/* this "can't happen" */

	switch (cmd) {
	case DDI_SUSPEND:
		/*
		 * Remember whether the hardware watchdog was running so
		 * the DDI_RESUME path in attach can re-arm it, and stop
		 * it for the duration of the suspend.
		 */
		mutex_enter(&tod_lock);
		if (watchdog_enable && watchdog_activated &&
		    tod_ops.tod_clear_watchdog_timer != NULL) {
			watchdog_was_active = 1;
			(void) tod_ops.tod_clear_watchdog_timer();
		} else {
			watchdog_was_active = 0;
		}
		mutex_exit(&tod_lock);

		/* Quiesce the chip; dip NULL marks us suspended */
		rcs->dip = NULL;
		rmc_comm_hw_reset(rcs);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * reject detach if any client(s) still registered
		 */
		mutex_enter(&rmc_comm_attach_lock);
		if (rcs->n_registrations != 0) {
			mutex_exit(&rmc_comm_attach_lock);
			return (DDI_FAILURE);
		}
		/*
		 * Committed to complete the detach;
		 * mark as no longer attached, to prevent new clients
		 * registering (as part of a coincident attach)
		 */
		rcs->is_attached = B_FALSE;
		mutex_exit(&rmc_comm_attach_lock);
		rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
929 
930 /*ARGSUSED*/
931 static int
932 rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
933 {
934 	struct rmc_comm_state *rcs;
935 
936 	if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
937 		return (DDI_FAILURE);
938 	rmc_comm_hw_reset(rcs);
939 	return (DDI_SUCCESS);
940 }
941 
942 /*
943  * System interface structures
944  */
static struct dev_ops rmc_comm_dev_ops =
{
	DEVO_REV,
	0,				/* refcount		*/
	nodev,				/* getinfo		*/
	nulldev,			/* identify		*/
	nulldev,			/* probe		*/
	rmc_comm_attach,		/* attach		*/
	rmc_comm_detach,		/* detach		*/
	rmc_comm_reset,			/* reset		*/
	(struct cb_ops *)NULL,		/* driver operations	*/
	(struct bus_ops *)NULL,		/* bus operations	*/
	nulldev 			/* power()		*/
};

static struct modldrv modldrv =
{
	&mod_driverops,			/* type: device driver	*/
	"rmc_comm driver, v%I%",	/* name/version (SCCS keyword)	*/
	&rmc_comm_dev_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	{
		&modldrv,
		NULL			/* list terminator	*/
	}
};
975 
976 /*
977  *  Dynamic loader interface code
978  */
979 int
980 _init(void)
981 {
982 	int err;
983 
984 	mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
985 	err = ddi_soft_state_init(&rmc_comm_statep,
986 	    sizeof (struct rmc_comm_state), 0);
987 	if (err == DDI_SUCCESS)
988 		if ((err = mod_install(&modlinkage)) != 0) {
989 			ddi_soft_state_fini(&rmc_comm_statep);
990 		}
991 	if (err != DDI_SUCCESS)
992 		mutex_destroy(&rmc_comm_attach_lock);
993 	return (err);
994 }
995 
/*
 * Standard module-information entry point.
 */
int
_info(struct modinfo *mip)
{
	return (mod_info(&modlinkage, mip));
}
1001 
1002 int
1003 _fini(void)
1004 {
1005 	int err;
1006 
1007 	if ((err = mod_remove(&modlinkage)) == 0) {
1008 		ddi_soft_state_fini(&rmc_comm_statep);
1009 		rmc_comm_major = NOMAJOR;
1010 		mutex_destroy(&rmc_comm_attach_lock);
1011 	}
1012 	return (err);
1013 }
1014