/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * The "rmc_comm" driver provides access to the RMC so that its clients need
 * not be concerned with the details of the access mechanism, which in this
 * case is implemented via a packet-based protocol over a serial link via a
 * 16550 compatible serial port.
 */


/*
 * Header files
 */
#include <sys/conf.h>
#include <sys/membar.h>
#include <sys/modctl.h>
#include <sys/strlog.h>
#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/ddi.h>
#include <sys/rmc_comm_dp_boot.h>
#include <sys/rmc_comm_dp.h>
#include <sys/rmc_comm_drvintf.h>
#include <sys/rmc_comm.h>
#include <sys/cpu_sgnblk_defs.h>

/*
 * Local definitions
 */
#define	MYNAME			"rmc_comm"
#define	NOMAJOR			(~(major_t)0)
#define	DUMMY_VALUE		(~(int8_t)0)

/*
 * Local data
 */
static void *rmc_comm_statep;
static major_t rmc_comm_major = NOMAJOR;
static kmutex_t rmc_comm_attach_lock;
static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] =
{
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
static int watchdog_was_active;
extern int watchdog_activated;
extern int watchdog_enable;

/*
 * prototypes
 */

extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t);
static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t);
static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t);
static void sio_check_fault_status(struct rmc_comm_state *);
static boolean_t sio_data_ready(struct rmc_comm_state *);
static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t);
static uint_t rmc_comm_hi_intr(caddr_t);
static uint_t rmc_comm_softint(caddr_t);
static void rmc_comm_cyclic(void *);
static void rmc_comm_hw_reset(struct rmc_comm_state *);
static void rmc_comm_offline(struct rmc_comm_state *);
static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *);
static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int,
    boolean_t, boolean_t, boolean_t);
static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t);
static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t);

/*
 * for client leaf drivers to register their desire for rmc_comm
 * to stay attached
 */
int
rmc_comm_register()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	if ((rcs == NULL) || (!rcs->is_attached)) {
		mutex_exit(&rmc_comm_attach_lock);
		return (DDI_FAILURE);
	}
	rcs->n_registrations++;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

void
rmc_comm_unregister()
{
	struct rmc_comm_state *rcs;

	mutex_enter(&rmc_comm_attach_lock);
	rcs = ddi_get_soft_state(rmc_comm_statep, 0);
	ASSERT(rcs != NULL);
	ASSERT(rcs->n_registrations != 0);
	rcs->n_registrations--;
	mutex_exit(&rmc_comm_attach_lock);
}

/*
 * to get the soft state structure of a specific instance
 */
struct rmc_comm_state *
rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
{
	struct rmc_comm_state *rcs = NULL;
	dev_info_t *sdip = NULL;
	major_t dmaj = NOMAJOR;

	if (dip != NULL) {
		/*
		 * Use the instance number from the <dip>; also,
		 * check that it really corresponds to this driver
		 */
		instance = ddi_get_instance(dip);
		dmaj = ddi_driver_major(dip);
		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
			rmc_comm_major = dmaj;
		else if (dmaj != rmc_comm_major) {
			cmn_err(CE_WARN,
			    "%s: major number mismatch (%d vs. %d) in %s(), "
			    "probably due to child misconfiguration",
			    MYNAME, rmc_comm_major, dmaj, caller);
			instance = -1;
		}
	}
	if (instance >= 0)
		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
	if (rcs != NULL) {
		sdip = rcs->dip;
		if (dip == NULL && sdip == NULL)
			rcs = NULL;
		else if (dip != NULL && sdip != NULL && sdip != dip) {
			cmn_err(CE_WARN,
			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
			    "probably due to child misconfiguration", MYNAME,
			    (void *)dip, (void *)sdip, caller);
			rcs = NULL;
		}
	}

	return (rcs);
}


/*
 * Lowest-level serial I/O chip register read/write
 */
static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
	DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

	if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
		/*
		 * The chip is mapped as "I/O" (e.g. with the side-effect
		 * bit on SPARC), therefore accesses are required to be
		 * in-order, with no value caching.  However, there can
		 * still be write-behind buffering, so it is not guaranteed
		 * that a write actually reaches the chip in a given time.
		 *
		 * To force the access right through to the chip, we follow
		 * the write with another write (to the SCRATCH register)
		 * and a read (of the value just written to the SCRATCH
		 * register).  The SCRATCH register is specifically provided
		 * for temporary data and has no effect on the SIO's own
		 * operation, making it ideal as a synchronising mechanism.
		 *
		 * If we didn't do this, it would be possible that the new
		 * value wouldn't reach the chip (and have the *intended*
		 * side-effects, such as disabling interrupts), for such a
		 * long time that the processor could execute a *lot* of
		 * instructions - including exiting the interrupt service
		 * routine and re-enabling interrupts.  This effect was
		 * observed to lead to spurious (unclaimed) interrupts in
		 * some circumstances.
		 *
		 * This will no longer be needed once "synchronous" access
		 * handles are available (see PSARC/2000/269 and 2000/531).
		 */
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg, val);
		ddi_put8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR, val);
		membar_sync();
		(void) ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + SIO_SCR);
	}
}

static uint8_t
sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
{
	uint8_t val;

	if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
		val = ddi_get8(rcs->sd_state.sio_handle,
		    rcs->sd_state.sio_regs + reg);
	else
		val = DUMMY_VALUE;
	DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
	return (val);
}

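/*
 * Check whether the SIO register-access handle has faulted, and record
 * the result in the per-instance fault flag.
 */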
static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
	rcs->sd_state.sio_fault =
	    ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}

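/*
 * Report whether the SIO is considered faulty, rechecking the access
 * handle first if no fault has been recorded yet.
 */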
boolean_t
rmc_comm_faulty(struct rmc_comm_state *rcs)
{
	if (!rcs->sd_state.sio_fault)
		sio_check_fault_status(rcs);
	return (rcs->sd_state.sio_fault);
}

/*
 * Check for data ready.
 */
static boolean_t
sio_data_ready(struct rmc_comm_state *rcs)
{
	uint8_t status;

	/*
	 * Data is available if the RXDA bit in the LSR is nonzero
	 * (if reading it didn't incur a fault).
	 */
	status = sio_get_reg(rcs, SIO_LSR);
	return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
}

/*
 * Enable/disable interrupts
 */
static void
rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
{
	uint8_t val;

	val = newstate ? SIO_IER_RXHDL_IE : 0;
	sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
	rcs->sd_state.hw_int_enabled = newstate;
}

/*
 * High-level interrupt handler:
 * Checks whether initialisation is complete (to avoid a race
 * with mutex_init()), and whether chip interrupts are enabled.
 * If not, the interrupt's not for us, so just return UNCLAIMED.
 * Otherwise, disable the interrupt, trigger a softint, and return
 * CLAIMED.  The softint handler will then do all the real work.
 *
 * NOTE: the chip interrupt capability is only re-enabled once the
 * receive code has run, but that can be called from a poll loop
 * or cyclic callback as well as from the softint.  So it's *not*
 * guaranteed that there really is a chip interrupt pending here,
 * 'cos the work may already have been done and the reason for the
 * interrupt gone away before we get here.
 *
 * OTOH, if we come through here twice without the receive code
 * having run in between, that's definitely wrong.  In such an
 * event, we would notice that chip interrupts haven't yet been
 * re-enabled and return UNCLAIMED, allowing the system's jabber
 * protect code (if any) to do its job.
 */
static uint_t
rmc_comm_hi_intr(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;
	uint_t claim;

	claim = DDI_INTR_UNCLAIMED;
	if (rcs->sd_state.cycid != NULL) {
		/*
		 * Handle the case where this interrupt fires during
		 * panic processing.  If that occurs, then a thread
		 * in rmc_comm might have been idled while holding
		 * hw_mutex.  If so, that thread will never make
		 * progress, and so we do not want to unconditionally
		 * grab hw_mutex.
		 */
		if (ddi_in_panic() != 0) {
			if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
				return (claim);
			}
		} else {
			mutex_enter(rcs->sd_state.hw_mutex);
		}
		if (rcs->sd_state.hw_int_enabled) {
			rmc_comm_set_irq(rcs, B_FALSE);
			ddi_trigger_softintr(rcs->sd_state.softid);
			claim = DDI_INTR_CLAIMED;
		}
		mutex_exit(rcs->sd_state.hw_mutex);
	}
	return (claim);
}

/*
 * Packet receive handler
 *
 * This routine should be called from the low-level softint, or the
 * cyclic callback, or rmc_comm_cmd() (for polled operation), with the
 * low-level mutex already held.
 */
void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
	uint8_t data;

	DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

	/*
	 * Check for access faults before starting the receive
	 * loop (we don't want to cause bus errors or suchlike
	 * unpleasantness in the event that the SIO has died).
	 */
	if (!rmc_comm_faulty(rcs)) {

		char *rx_buf = rcs->sd_state.serdev_rx_buf;
		uint16_t rx_buflen = 0;

		/*
		 * Read bytes from the FIFO until they're all gone
		 * or our buffer overflows (which must be an error)
		 */

		/*
		 * At the moment, the receive buffer is overwritten any
		 * time data is received from the serial device.
		 * This should not pose problems (probably!) as the data
		 * protocol is half-duplex.
		 * Otherwise, a circular buffer must be implemented!
		 */
		mutex_enter(rcs->sd_state.hw_mutex);
		while (sio_data_ready(rcs)) {
			data = sio_get_reg(rcs, SIO_RXD);
			rx_buf[rx_buflen++] = data;
			if (rx_buflen >= SIO_MAX_RXBUF_SIZE)
				break;
		}
		rcs->sd_state.serdev_rx_count = rx_buflen;

		DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

		rmc_comm_set_irq(rcs, B_TRUE);
		mutex_exit(rcs->sd_state.hw_mutex);

		/*
		 * call up the data protocol receive handler
		 */
		rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
	}
}

/*
 * Low-level softint handler
 *
 * This routine should be triggered whenever there's a byte to be read
 */
static uint_t
rmc_comm_softint(caddr_t arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
	return (DDI_INTR_CLAIMED);
}

/*
 * Cyclic handler: just calls the receive routine, in case interrupts
 * are not being delivered and in order to handle command timeouts
 */
static void
rmc_comm_cyclic(void *arg)
{
	struct rmc_comm_state *rcs = (void *)arg;

	mutex_enter(rcs->dp_state.dp_mutex);
	rmc_comm_serdev_receive(rcs);
	mutex_exit(rcs->dp_state.dp_mutex);
}

/*
 * Serial protocol
 *
 * This routine builds a command and sets it in progress.
 */
void
rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
{
	uint8_t *p;
	uint8_t status;

	/*
	 * Check and update the SIO h/w fault status before accessing
	 * the chip registers.  If there's a (new or previous) fault,
	 * we'll run through the protocol but won't really touch the
	 * hardware and all commands will timeout.  If a previously
	 * discovered fault has now gone away (!), then we can (try to)
	 * proceed with the new command (probably a probe).
	 */
	sio_check_fault_status(rcs);

	/*
	 * Send the command now by stuffing the packet into the Tx FIFO.
	 */
	DATASCOPE(rcs, 'S', buf, buflen)

	mutex_enter(rcs->sd_state.hw_mutex);
	p = (uint8_t *)buf;
	while (p < (uint8_t *)&buf[buflen]) {

		/*
		 * before writing to the TX holding register, we make sure that
		 * it is empty.  In this case, there will be no chance to
		 * overflow the serial device FIFO (but, on the other hand,
		 * it may introduce some latency)
		 */
		status = sio_get_reg(rcs, SIO_LSR);
		while ((status & SIO_LSR_XHRE) == 0) {
			drv_usecwait(100);
			status = sio_get_reg(rcs, SIO_LSR);
		}
		sio_put_reg(rcs, SIO_TXD, *p++);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * wait for the tx fifo to drain - used for urgent nowait requests
 */
void
rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
{
	uint8_t status;

	mutex_enter(rcs->sd_state.hw_mutex);
	status = sio_get_reg(rcs, SIO_LSR);
	while ((status & SIO_LSR_XHRE) == 0) {
		drv_usecwait(100);
		status = sio_get_reg(rcs, SIO_LSR);
	}
	mutex_exit(rcs->sd_state.hw_mutex);
}

/*
 * Hardware setup - put the SIO chip in the required operational
 * state, with all our favourite parameters programmed correctly.
 * This routine leaves all SIO interrupts disabled.
 */

static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
	uint16_t divisor;

	/*
	 * Disable interrupts, soft reset Tx and Rx circuitry,
	 * reselect standard modes (bits/char, parity, etc).
	 */
	rmc_comm_set_irq(rcs, B_FALSE);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
	sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

	/*
	 * Select the proper baud rate; if the value is invalid
	 * (presumably 0, i.e. not specified, but also if the
	 * "baud-rate" property is set to some silly value), we assume
	 * the default.
	 */
	if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
		divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
		    rcs->baud_divisor_factor;
	} else {
		divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
		    rcs->baud_divisor_factor;
	}

	/*
	 * According to the datasheet, it is forbidden for the divisor
	 * register to be zero.  So when loading the register in two
	 * steps, we have to make sure that the temporary value formed
	 * between loads is nonzero.  However, we can't rely on either
	 * half already having a nonzero value, as the datasheet also
	 * says that these registers are indeterminate after a reset!
	 * So, we explicitly set the low byte to a non-zero value first;
	 * then we can safely load the high byte, and then the correct
	 * value for the low byte, without the result ever being zero.
	 */
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
	sio_put_reg(rcs, SIO_LBGDL, 0xff);
	sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
	sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
	sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

	/*
	 * Program the remaining device registers as required
	 */
	sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
	sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}

/*
 * Higher-level setup & teardown
 */
static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
	if (rcs->sd_state.sio_handle != NULL)
		ddi_regs_map_free(&rcs->sd_state.sio_handle);
	rcs->sd_state.sio_handle = NULL;
	rcs->sd_state.sio_regs = NULL;
}

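/*
 * Map the SIO operating registers (regset 0, if present) and then
 * reset the chip to its standard operational state.
 */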
static int
rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	ddi_acc_handle_t h;
	caddr_t p;
	int nregs;
	int err;

	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS)
		nregs = 0;
	switch (nregs) {
	default:
	case 1:
		/*
		 * regset 0 represents the SIO operating registers
		 */
		err = ddi_regs_map_setup(dip, 0, &p, 0, 0,
		    rmc_comm_dev_acc_attr, &h);
		if (err != DDI_SUCCESS)
			return (EIO);
		rcs->sd_state.sio_handle = h;
		rcs->sd_state.sio_regs = (void *)p;
		break;
	case 0:
		/*
		 * If no registers are defined, succeed vacuously;
		 * commands will be accepted, but we fake the accesses.
		 */
		break;
	}

	/*
	 * Now that the registers are mapped, we can initialise the SIO h/w
	 */
	rmc_comm_hw_reset(rcs);
	return (0);
}


/*
 * Initialization of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the attach routine.
 */

int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	int err = DDI_SUCCESS;

	rcs->sd_state.cycid = NULL;

	/*
	 * Online the hardware ...
	 */
	err = rmc_comm_online(rcs, dip);
	if (err != 0)
		return (-1);

	/*
	 * call ddi_get_soft_iblock_cookie() to retrieve the
	 * interrupt block cookie so that the mutexes are initialized
	 * before adding the interrupt (to avoid a potential race condition).
	 */

	err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
	    &rcs->dp_state.dp_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
	if (err != DDI_SUCCESS)
		return (-1);

	/*
	 * initialize mutex here before adding hw/sw interrupt handlers
	 */
	mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
	    rcs->dp_state.dp_iblk);

	mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
	    rcs->sd_state.hw_iblk);

	/*
	 * Install soft and hard interrupt handler(s)
	 *
	 * the soft intr. handler will need the data protocol lock (dp_mutex)
	 * So, data protocol mutex and iblock cookie are created/initialized
	 * here
	 */

	err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
	    &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
	if (err != DDI_SUCCESS) {
		mutex_destroy(rcs->dp_state.dp_mutex);
		mutex_destroy(rcs->sd_state.hw_mutex);
		return (-1);
	}

	/*
	 * hardware interrupt
	 */

	if (rcs->sd_state.sio_handle != NULL) {
		err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
		    rmc_comm_hi_intr, (caddr_t)rcs);

		/*
		 * did we successfully install the h/w interrupt handler?
		 */
		if (err != DDI_SUCCESS) {
			ddi_remove_softintr(rcs->sd_state.softid);
			mutex_destroy(rcs->dp_state.dp_mutex);
			mutex_destroy(rcs->sd_state.hw_mutex);
			return (-1);
		}
	}

	/*
	 * Start the periodic callback
	 */
	rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
	    5 * RMC_COMM_ONE_SEC, DDI_IPL_1);
	return (0);
}

/*
 * Termination of the serial device (data structure, mutex, cv, hardware
 * and so on). It is called from the detach routine.
 */

void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
	rmc_comm_hw_reset(rcs);

	if (rcs->sd_state.cycid != NULL) {
		ddi_periodic_delete(rcs->sd_state.cycid);
		rcs->sd_state.cycid = NULL;

		if (rcs->sd_state.sio_handle != NULL)
			ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

		ddi_remove_softintr(rcs->sd_state.softid);

		mutex_destroy(rcs->sd_state.hw_mutex);

		mutex_destroy(rcs->dp_state.dp_mutex);
	}
	rmc_comm_offline(rcs);
}

/*
 * device driver entry routines (init/fini, attach/detach, ...)
 */

/*
 * Clean up on detach or failure of attach
 */
static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init)
{
	if (rcs != NULL) {
		/*
		 * disable interrupts now
		 */
		rmc_comm_set_irq(rcs, B_FALSE);

		/*
		 * driver interface termination (if it has been initialized)
		 */
		if (drvi_init)
			rmc_comm_drvintf_fini(rcs);

		/*
		 * data protocol termination (if it has been initialized)
		 */
		if (dp_init)
			rmc_comm_dp_fini(rcs);

		/*
		 * serial device termination (if it has been initialized)
		 */
		if (sd_init)
			rmc_comm_serdev_fini(rcs, dip);

		ddi_set_driver_private(dip, NULL);
	}
	ddi_soft_state_free(rmc_comm_statep, instance);
}

/*
 * Autoconfiguration routines
 */

static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

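		/*
		 * Re-arm the hardware watchdog if it was active at the
		 * time the system was suspended.
		 */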
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

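		/*
		 * If a CPU signature has been set, re-post it now that
		 * the resume is complete.
		 */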
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
	    NULL) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

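	/*
	 * rcs->dip is left NULL here; it is only set once the rest of
	 * the attach has succeeded (see below).
	 */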
	rcs->dip = NULL;

	/*
	 * Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 * Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 * All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}

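/*
 * Detach entry point: handles DDI_SUSPEND (stop the hardware watchdog
 * if it is running and quiesce the SIO) and DDI_DETACH (refuse if any
 * clients are still registered, otherwise tear everything down).
 */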
static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct rmc_comm_state *rcs;
	int instance;

	instance = ddi_get_instance(dip);
	if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
		return (DDI_FAILURE);	/* this "can't happen" */

	switch (cmd) {
	case DDI_SUSPEND:
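		/*
		 * Remember whether the hardware watchdog was running,
		 * and stop it before suspending.
		 */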
		mutex_enter(&tod_lock);
		if (watchdog_enable && watchdog_activated &&
		    tod_ops.tod_clear_watchdog_timer != NULL) {
			watchdog_was_active = 1;
			(void) tod_ops.tod_clear_watchdog_timer();
		} else {
			watchdog_was_active = 0;
		}
		mutex_exit(&tod_lock);

		rcs->dip = NULL;
		rmc_comm_hw_reset(rcs);

		return (DDI_SUCCESS);

	case DDI_DETACH:
		/*
		 * reject detach if any client(s) still registered
		 */
		mutex_enter(&rmc_comm_attach_lock);
		if (rcs->n_registrations != 0) {
			mutex_exit(&rmc_comm_attach_lock);
			return (DDI_FAILURE);
		}
		/*
		 * Committed to complete the detach;
		 * mark as no longer attached, to prevent new clients
		 * registering (as part of a coincident attach)
		 */
		rcs->is_attached = B_FALSE;
		mutex_exit(&rmc_comm_attach_lock);
		rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}

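/*
 * Reset entry point: put the SIO back into its quiescent state, with
 * all SIO interrupts disabled.
 */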
/*ARGSUSED*/
static int
rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
{
	struct rmc_comm_state *rcs;

	if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
		return (DDI_FAILURE);
	rmc_comm_hw_reset(rcs);
	return (DDI_SUCCESS);
}

/*
 * System interface structures
 */
static struct dev_ops rmc_comm_dev_ops =
{
	DEVO_REV,
	0,				/* refcount */
	nodev,				/* getinfo */
	nulldev,			/* identify */
	nulldev,			/* probe */
	rmc_comm_attach,		/* attach */
	rmc_comm_detach,		/* detach */
	rmc_comm_reset,			/* reset */
	(struct cb_ops *)NULL,		/* driver operations */
	(struct bus_ops *)NULL,		/* bus operations */
	nulldev,			/* power() */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

static struct modldrv modldrv =
{
	&mod_driverops,
	"rmc_comm driver",
	&rmc_comm_dev_ops
};

static struct modlinkage modlinkage =
{
	MODREV_1,
	{
		&modldrv,
		NULL
	}
};

/*
 * Dynamic loader interface code
 */
int
_init(void)
{
	int err;

	mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL);
	err = ddi_soft_state_init(&rmc_comm_statep,
	    sizeof (struct rmc_comm_state), 0);
	if (err == DDI_SUCCESS)
		if ((err = mod_install(&modlinkage)) != 0) {
			ddi_soft_state_fini(&rmc_comm_statep);
		}
	if (err != DDI_SUCCESS)
		mutex_destroy(&rmc_comm_attach_lock);
	return (err);
}

int
_info(struct modinfo *mip)
{
	return (mod_info(&modlinkage, mip));
}

int
_fini(void)
{
	int err;

	if ((err = mod_remove(&modlinkage)) == 0) {
		ddi_soft_state_fini(&rmc_comm_statep);
		rmc_comm_major = NOMAJOR;
		mutex_destroy(&rmc_comm_attach_lock);
	}
	return (err);
}