1 03831d35Sstevel /* 2 03831d35Sstevel * CDDL HEADER START 3 03831d35Sstevel * 4 03831d35Sstevel * The contents of this file are subject to the terms of the 5 03831d35Sstevel * Common Development and Distribution License (the "License"). 6 03831d35Sstevel * You may not use this file except in compliance with the License. 7 03831d35Sstevel * 8 03831d35Sstevel * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 03831d35Sstevel * or http://www.opensolaris.org/os/licensing. 10 03831d35Sstevel * See the License for the specific language governing permissions 11 03831d35Sstevel * and limitations under the License. 12 03831d35Sstevel * 13 03831d35Sstevel * When distributing Covered Code, include this CDDL HEADER in each 14 03831d35Sstevel * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 03831d35Sstevel * If applicable, add the following below this CDDL HEADER, with the 16 03831d35Sstevel * fields enclosed by brackets "[]" replaced with your own identifying 17 03831d35Sstevel * information: Portions Copyright [yyyy] [name of copyright owner] 18 03831d35Sstevel * 19 03831d35Sstevel * CDDL HEADER END 20 03831d35Sstevel */ 21 03831d35Sstevel 22 03831d35Sstevel /* 23 *19397407SSherry Moore * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 24 03831d35Sstevel * Use is subject to license terms. 25 03831d35Sstevel * 26 03831d35Sstevel * The "rmc_comm" driver provides access to the RMC so that its clients need 27 03831d35Sstevel * not be concerned with the details of the access mechanism, which in this 28 03831d35Sstevel * case is implemented via a packet-based protocol over a serial link via a 29 03831d35Sstevel * 16550 compatible serial port. 30 03831d35Sstevel */ 31 03831d35Sstevel 32 03831d35Sstevel 33 03831d35Sstevel /* 34 03831d35Sstevel * Header files 35 03831d35Sstevel */ 36 03831d35Sstevel #include <sys/conf.h> 37 03831d35Sstevel #include <sys/membar.h> 38 03831d35Sstevel #include <sys/modctl.h> 39 03831d35Sstevel #include <sys/strlog.h> 40 03831d35Sstevel #include <sys/types.h> 41 03831d35Sstevel #include <sys/sunddi.h> 42 03831d35Sstevel #include <sys/ddi.h> 43 03831d35Sstevel #include <sys/rmc_comm_dp_boot.h> 44 03831d35Sstevel #include <sys/rmc_comm_dp.h> 45 03831d35Sstevel #include <sys/rmc_comm_drvintf.h> 46 03831d35Sstevel #include <sys/rmc_comm.h> 47 03831d35Sstevel #include <sys/cpu_sgnblk_defs.h> 48 03831d35Sstevel 49 03831d35Sstevel /* 50 03831d35Sstevel * Local definitions 51 03831d35Sstevel */ 52 03831d35Sstevel #define MYNAME "rmc_comm" 53 03831d35Sstevel #define NOMAJOR (~(major_t)0) 54 03831d35Sstevel #define DUMMY_VALUE (~(int8_t)0) 55 03831d35Sstevel 56 03831d35Sstevel /* 57 03831d35Sstevel * Local data 58 03831d35Sstevel */ 59 03831d35Sstevel static void *rmc_comm_statep; 60 03831d35Sstevel static major_t rmc_comm_major = NOMAJOR; 61 03831d35Sstevel static kmutex_t rmc_comm_attach_lock; 62 03831d35Sstevel static ddi_device_acc_attr_t rmc_comm_dev_acc_attr[1] = 63 03831d35Sstevel { 64 03831d35Sstevel DDI_DEVICE_ATTR_V0, 65 03831d35Sstevel DDI_STRUCTURE_LE_ACC, 66 03831d35Sstevel DDI_STRICTORDER_ACC 67 03831d35Sstevel }; 68 03831d35Sstevel static int watchdog_was_active; 69 03831d35Sstevel extern int watchdog_activated; 70 03831d35Sstevel extern int watchdog_enable; 71 03831d35Sstevel 72 03831d35Sstevel /* 73 03831d35Sstevel * prototypes 74 03831d35Sstevel */ 75 03831d35Sstevel 76 03831d35Sstevel extern void dp_reset(struct rmc_comm_state *, uint8_t, boolean_t, boolean_t); 77 03831d35Sstevel static void sio_put_reg(struct rmc_comm_state *, uint_t, uint8_t); 78 03831d35Sstevel static uint8_t sio_get_reg(struct rmc_comm_state *, uint_t); 79 03831d35Sstevel static void sio_check_fault_status(struct rmc_comm_state *); 80 03831d35Sstevel static boolean_t sio_data_ready(struct rmc_comm_state *); 81 03831d35Sstevel static void rmc_comm_set_irq(struct rmc_comm_state *, boolean_t); 82 03831d35Sstevel static uint_t rmc_comm_hi_intr(caddr_t); 83 03831d35Sstevel static uint_t rmc_comm_softint(caddr_t); 84 03831d35Sstevel static void rmc_comm_cyclic(void *); 85 03831d35Sstevel static void rmc_comm_hw_reset(struct rmc_comm_state *); 86 03831d35Sstevel static void rmc_comm_offline(struct rmc_comm_state *); 87 03831d35Sstevel static int rmc_comm_online(struct rmc_comm_state *, dev_info_t *); 88 03831d35Sstevel static void rmc_comm_unattach(struct rmc_comm_state *, dev_info_t *, int, 89 03831d35Sstevel boolean_t, boolean_t, boolean_t); 90 03831d35Sstevel static int rmc_comm_attach(dev_info_t *, ddi_attach_cmd_t); 91 03831d35Sstevel static int rmc_comm_detach(dev_info_t *, ddi_detach_cmd_t); 92 03831d35Sstevel 93 03831d35Sstevel /* 94 03831d35Sstevel * for client leaf drivers to register their desire for rmc_comm 95 03831d35Sstevel * to stay attached 96 03831d35Sstevel */ 97 03831d35Sstevel int 98 03831d35Sstevel rmc_comm_register() 99 03831d35Sstevel { 100 03831d35Sstevel struct rmc_comm_state *rcs; 101 03831d35Sstevel 102 03831d35Sstevel mutex_enter(&rmc_comm_attach_lock); 103 03831d35Sstevel rcs = ddi_get_soft_state(rmc_comm_statep, 0); 104 03831d35Sstevel if ((rcs == NULL) || (!rcs->is_attached)) { 105 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 106 03831d35Sstevel return (DDI_FAILURE); 107 03831d35Sstevel } 108 03831d35Sstevel rcs->n_registrations++; 109 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 110 03831d35Sstevel return (DDI_SUCCESS); 111 03831d35Sstevel } 112 03831d35Sstevel 113 03831d35Sstevel void 114 03831d35Sstevel rmc_comm_unregister() 115 03831d35Sstevel { 116 03831d35Sstevel struct rmc_comm_state *rcs; 117 03831d35Sstevel 118 03831d35Sstevel mutex_enter(&rmc_comm_attach_lock); 119 03831d35Sstevel rcs = ddi_get_soft_state(rmc_comm_statep, 0); 120 03831d35Sstevel ASSERT(rcs != NULL); 121 03831d35Sstevel ASSERT(rcs->n_registrations != 0); 122 03831d35Sstevel rcs->n_registrations--; 123 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 124 03831d35Sstevel } 125 03831d35Sstevel 126 03831d35Sstevel /* 127 03831d35Sstevel * to get the soft state structure of a specific instance 128 03831d35Sstevel */ 129 03831d35Sstevel struct rmc_comm_state * 130 03831d35Sstevel rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller) 131 03831d35Sstevel { 132 03831d35Sstevel struct rmc_comm_state *rcs = NULL; 133 03831d35Sstevel dev_info_t *sdip = NULL; 134 03831d35Sstevel major_t dmaj = NOMAJOR; 135 03831d35Sstevel 136 03831d35Sstevel if (dip != NULL) { 137 03831d35Sstevel /* 138 03831d35Sstevel * Use the instance number from the <dip>; also, 139 03831d35Sstevel * check that it really corresponds to this driver 140 03831d35Sstevel */ 141 03831d35Sstevel instance = ddi_get_instance(dip); 142 03831d35Sstevel dmaj = ddi_driver_major(dip); 143 03831d35Sstevel if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR) 144 03831d35Sstevel rmc_comm_major = dmaj; 145 03831d35Sstevel else if (dmaj != rmc_comm_major) { 146 03831d35Sstevel cmn_err(CE_WARN, 147 03831d35Sstevel "%s: major number mismatch (%d vs. %d) in %s()," 148 03831d35Sstevel "probably due to child misconfiguration", 149 03831d35Sstevel MYNAME, rmc_comm_major, dmaj, caller); 150 03831d35Sstevel instance = -1; 151 03831d35Sstevel } 152 03831d35Sstevel } 153 03831d35Sstevel if (instance >= 0) 154 03831d35Sstevel rcs = ddi_get_soft_state(rmc_comm_statep, instance); 155 03831d35Sstevel if (rcs != NULL) { 156 03831d35Sstevel sdip = rcs->dip; 157 03831d35Sstevel if (dip == NULL && sdip == NULL) 158 03831d35Sstevel rcs = NULL; 159 03831d35Sstevel else if (dip != NULL && sdip != NULL && sdip != dip) { 160 03831d35Sstevel cmn_err(CE_WARN, 161 03831d35Sstevel "%s: devinfo mismatch (%p vs. %p) in %s(), " 162 03831d35Sstevel "probably due to child misconfiguration", MYNAME, 163 03831d35Sstevel (void *)dip, (void *)sdip, caller); 164 03831d35Sstevel rcs = NULL; 165 03831d35Sstevel } 166 03831d35Sstevel } 167 03831d35Sstevel 168 03831d35Sstevel return (rcs); 169 03831d35Sstevel } 170 03831d35Sstevel 171 03831d35Sstevel 172 03831d35Sstevel /* 173 03831d35Sstevel * Lowest-level serial I/O chip register read/write 174 03831d35Sstevel */ 175 03831d35Sstevel static void 176 03831d35Sstevel sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val) 177 03831d35Sstevel { 178 03831d35Sstevel DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val)); 179 03831d35Sstevel 180 03831d35Sstevel if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) { 181 03831d35Sstevel /* 182 03831d35Sstevel * The chip is mapped as "I/O" (e.g. with the side-effect 183 03831d35Sstevel * bit on SPARC), therefore accesses are required to be 184 03831d35Sstevel * in-order, with no value cacheing. However, there can 185 03831d35Sstevel * still be write-behind buffering, so it is not guaranteed 186 03831d35Sstevel * that a write actually reaches the chip in a given time. 187 03831d35Sstevel * 188 03831d35Sstevel * To force the access right through to the chip, we follow 189 03831d35Sstevel * the write with another write (to the SCRATCH register) 190 03831d35Sstevel * and a read (of the value just written to the SCRATCH 191 03831d35Sstevel * register). The SCRATCH register is specifically provided 192 03831d35Sstevel * for temporary data and has no effect on the SIO's own 193 03831d35Sstevel * operation, making it ideal as a synchronising mechanism. 194 03831d35Sstevel * 195 03831d35Sstevel * If we didn't do this, it would be possible that the new 196 03831d35Sstevel * value wouldn't reach the chip (and have the *intended* 197 03831d35Sstevel * side-effects, such as disabling interrupts), for such a 198 03831d35Sstevel * long time that the processor could execute a *lot* of 199 03831d35Sstevel * instructions - including exiting the interrupt service 200 03831d35Sstevel * routine and re-enabling interrupts. This effect was 201 03831d35Sstevel * observed to lead to spurious (unclaimed) interrupts in 202 03831d35Sstevel * some circumstances. 203 03831d35Sstevel * 204 03831d35Sstevel * This will no longer be needed once "synchronous" access 205 03831d35Sstevel * handles are available (see PSARC/2000/269 and 2000/531). 206 03831d35Sstevel */ 207 03831d35Sstevel ddi_put8(rcs->sd_state.sio_handle, 208 03831d35Sstevel rcs->sd_state.sio_regs + reg, val); 209 03831d35Sstevel ddi_put8(rcs->sd_state.sio_handle, 210 03831d35Sstevel rcs->sd_state.sio_regs + SIO_SCR, val); 211 03831d35Sstevel membar_sync(); 212 03831d35Sstevel (void) ddi_get8(rcs->sd_state.sio_handle, 213 03831d35Sstevel rcs->sd_state.sio_regs + SIO_SCR); 214 03831d35Sstevel } 215 03831d35Sstevel } 216 03831d35Sstevel 217 03831d35Sstevel static uint8_t 218 03831d35Sstevel sio_get_reg(struct rmc_comm_state *rcs, uint_t reg) 219 03831d35Sstevel { 220 03831d35Sstevel uint8_t val; 221 03831d35Sstevel 222 03831d35Sstevel if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault) 223 03831d35Sstevel val = ddi_get8(rcs->sd_state.sio_handle, 224 03831d35Sstevel rcs->sd_state.sio_regs + reg); 225 03831d35Sstevel else 226 03831d35Sstevel val = DUMMY_VALUE; 227 03831d35Sstevel DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg)); 228 03831d35Sstevel return (val); 229 03831d35Sstevel } 230 03831d35Sstevel 231 03831d35Sstevel static void 232 03831d35Sstevel sio_check_fault_status(struct rmc_comm_state *rcs) 233 03831d35Sstevel { 234 03831d35Sstevel rcs->sd_state.sio_fault = 235 03831d35Sstevel ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS; 236 03831d35Sstevel } 237 03831d35Sstevel 238 03831d35Sstevel boolean_t 239 03831d35Sstevel rmc_comm_faulty(struct rmc_comm_state *rcs) 240 03831d35Sstevel { 241 03831d35Sstevel if (!rcs->sd_state.sio_fault) 242 03831d35Sstevel sio_check_fault_status(rcs); 243 03831d35Sstevel return (rcs->sd_state.sio_fault); 244 03831d35Sstevel } 245 03831d35Sstevel 246 03831d35Sstevel /* 247 03831d35Sstevel * Check for data ready. 248 03831d35Sstevel */ 249 03831d35Sstevel static boolean_t 250 03831d35Sstevel sio_data_ready(struct rmc_comm_state *rcs) 251 03831d35Sstevel { 252 03831d35Sstevel uint8_t status; 253 03831d35Sstevel 254 03831d35Sstevel /* 255 03831d35Sstevel * Data is available if the RXDA bit in the LSR is nonzero 256 03831d35Sstevel * (if reading it didn't incur a fault). 257 03831d35Sstevel */ 258 03831d35Sstevel status = sio_get_reg(rcs, SIO_LSR); 259 03831d35Sstevel return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs)); 260 03831d35Sstevel } 261 03831d35Sstevel 262 03831d35Sstevel /* 263 03831d35Sstevel * Enable/disable interrupts 264 03831d35Sstevel */ 265 03831d35Sstevel static void 266 03831d35Sstevel rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate) 267 03831d35Sstevel { 268 03831d35Sstevel uint8_t val; 269 03831d35Sstevel 270 03831d35Sstevel val = newstate ? SIO_IER_RXHDL_IE : 0; 271 03831d35Sstevel sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val); 272 03831d35Sstevel rcs->sd_state.hw_int_enabled = newstate; 273 03831d35Sstevel } 274 03831d35Sstevel 275 03831d35Sstevel /* 276 03831d35Sstevel * High-level interrupt handler: 277 03831d35Sstevel * Checks whether initialisation is complete (to avoid a race 278 03831d35Sstevel * with mutex_init()), and whether chip interrupts are enabled. 279 03831d35Sstevel * If not, the interrupt's not for us, so just return UNCLAIMED. 280 03831d35Sstevel * Otherwise, disable the interrupt, trigger a softint, and return 281 03831d35Sstevel * CLAIMED. The softint handler will then do all the real work. 282 03831d35Sstevel * 283 03831d35Sstevel * NOTE: the chip interrupt capability is only re-enabled once the 284 03831d35Sstevel * receive code has run, but that can be called from a poll loop 285 03831d35Sstevel * or cyclic callback as well as from the softint. So it's *not* 286 03831d35Sstevel * guaranteed that there really is a chip interrupt pending here, 287 03831d35Sstevel * 'cos the work may already have been done and the reason for the 288 03831d35Sstevel * interrupt gone away before we get here. 289 03831d35Sstevel * 290 03831d35Sstevel * OTOH, if we come through here twice without the receive code 291 03831d35Sstevel * having run in between, that's definitely wrong. In such an 292 03831d35Sstevel * event, we would notice that chip interrupts haven't yet been 293 03831d35Sstevel * re-enabled and return UNCLAIMED, allowing the system's jabber 294 03831d35Sstevel * protect code (if any) to do its job. 295 03831d35Sstevel */ 296 03831d35Sstevel static uint_t 297 03831d35Sstevel rmc_comm_hi_intr(caddr_t arg) 298 03831d35Sstevel { 299 03831d35Sstevel struct rmc_comm_state *rcs = (void *)arg; 300 03831d35Sstevel uint_t claim; 301 03831d35Sstevel 302 03831d35Sstevel claim = DDI_INTR_UNCLAIMED; 303 dd4eeefdSeota if (rcs->sd_state.cycid != NULL) { 304 6fa6856eSarutz /* 305 6fa6856eSarutz * Handle the case where this interrupt fires during 306 6fa6856eSarutz * panic processing. If that occurs, then a thread 307 6fa6856eSarutz * in rmc_comm might have been idled while holding 308 6fa6856eSarutz * hw_mutex. If so, that thread will never make 309 6fa6856eSarutz * progress, and so we do not want to unconditionally 310 6fa6856eSarutz * grab hw_mutex. 311 6fa6856eSarutz */ 312 6fa6856eSarutz if (ddi_in_panic() != 0) { 313 6fa6856eSarutz if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) { 314 6fa6856eSarutz return (claim); 315 6fa6856eSarutz } 316 6fa6856eSarutz } else { 317 03831d35Sstevel mutex_enter(rcs->sd_state.hw_mutex); 318 6fa6856eSarutz } 319 03831d35Sstevel if (rcs->sd_state.hw_int_enabled) { 320 03831d35Sstevel rmc_comm_set_irq(rcs, B_FALSE); 321 03831d35Sstevel ddi_trigger_softintr(rcs->sd_state.softid); 322 03831d35Sstevel claim = DDI_INTR_CLAIMED; 323 03831d35Sstevel } 324 03831d35Sstevel mutex_exit(rcs->sd_state.hw_mutex); 325 03831d35Sstevel } 326 03831d35Sstevel return (claim); 327 03831d35Sstevel } 328 03831d35Sstevel 329 03831d35Sstevel /* 330 03831d35Sstevel * Packet receive handler 331 03831d35Sstevel * 332 03831d35Sstevel * This routine should be called from the low-level softint, or the 333 03831d35Sstevel * cyclic callback, or rmc_comm_cmd() (for polled operation), with the 334 03831d35Sstevel * low-level mutex already held. 335 03831d35Sstevel */ 336 03831d35Sstevel void 337 03831d35Sstevel rmc_comm_serdev_receive(struct rmc_comm_state *rcs) 338 03831d35Sstevel { 339 03831d35Sstevel uint8_t data; 340 03831d35Sstevel 341 03831d35Sstevel DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n")); 342 03831d35Sstevel 343 03831d35Sstevel /* 344 03831d35Sstevel * Check for access faults before starting the receive 345 03831d35Sstevel * loop (we don't want to cause bus errors or suchlike 346 03831d35Sstevel * unpleasantness in the event that the SIO has died). 347 03831d35Sstevel */ 348 03831d35Sstevel if (!rmc_comm_faulty(rcs)) { 349 03831d35Sstevel 350 03831d35Sstevel char *rx_buf = rcs->sd_state.serdev_rx_buf; 351 03831d35Sstevel uint16_t rx_buflen = 0; 352 03831d35Sstevel 353 03831d35Sstevel /* 354 03831d35Sstevel * Read bytes from the FIFO until they're all gone 355 03831d35Sstevel * or our buffer overflows (which must be an error) 356 03831d35Sstevel */ 357 03831d35Sstevel 358 03831d35Sstevel /* 359 03831d35Sstevel * At the moment, the receive buffer is overwritten any 360 03831d35Sstevel * time data is received from the serial device. 361 03831d35Sstevel * This should not pose problems (probably!) as the data 362 03831d35Sstevel * protocol is half-duplex 363 03831d35Sstevel * Otherwise, a circular buffer must be implemented! 364 03831d35Sstevel */ 365 03831d35Sstevel mutex_enter(rcs->sd_state.hw_mutex); 366 03831d35Sstevel while (sio_data_ready(rcs)) { 367 03831d35Sstevel data = sio_get_reg(rcs, SIO_RXD); 368 03831d35Sstevel rx_buf[rx_buflen++] = data; 369 03831d35Sstevel if (rx_buflen >= SIO_MAX_RXBUF_SIZE) 370 03831d35Sstevel break; 371 03831d35Sstevel } 372 03831d35Sstevel rcs->sd_state.serdev_rx_count = rx_buflen; 373 03831d35Sstevel 374 03831d35Sstevel DATASCOPE(rcs, 'R', rx_buf, rx_buflen) 375 03831d35Sstevel 376 03831d35Sstevel rmc_comm_set_irq(rcs, B_TRUE); 377 03831d35Sstevel mutex_exit(rcs->sd_state.hw_mutex); 378 03831d35Sstevel 379 03831d35Sstevel /* 380 03831d35Sstevel * call up the data protocol receive handler 381 03831d35Sstevel */ 382 03831d35Sstevel rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen); 383 03831d35Sstevel } 384 03831d35Sstevel } 385 03831d35Sstevel 386 03831d35Sstevel /* 387 03831d35Sstevel * Low-level softint handler 388 03831d35Sstevel * 389 03831d35Sstevel * This routine should be triggered whenever there's a byte to be read 390 03831d35Sstevel */ 391 03831d35Sstevel static uint_t 392 03831d35Sstevel rmc_comm_softint(caddr_t arg) 393 03831d35Sstevel { 394 03831d35Sstevel struct rmc_comm_state *rcs = (void *)arg; 395 03831d35Sstevel 396 03831d35Sstevel mutex_enter(rcs->dp_state.dp_mutex); 397 03831d35Sstevel rmc_comm_serdev_receive(rcs); 398 03831d35Sstevel mutex_exit(rcs->dp_state.dp_mutex); 399 03831d35Sstevel return (DDI_INTR_CLAIMED); 400 03831d35Sstevel } 401 03831d35Sstevel 402 03831d35Sstevel /* 403 03831d35Sstevel * Cyclic handler: just calls the receive routine, in case interrupts 404 03831d35Sstevel * are not being delivered and in order to handle command timeout 405 03831d35Sstevel */ 406 03831d35Sstevel static void 407 03831d35Sstevel rmc_comm_cyclic(void *arg) 408 03831d35Sstevel { 409 03831d35Sstevel struct rmc_comm_state *rcs = (void *)arg; 410 03831d35Sstevel 411 03831d35Sstevel mutex_enter(rcs->dp_state.dp_mutex); 412 03831d35Sstevel rmc_comm_serdev_receive(rcs); 413 03831d35Sstevel mutex_exit(rcs->dp_state.dp_mutex); 414 03831d35Sstevel } 415 03831d35Sstevel 416 03831d35Sstevel /* 417 03831d35Sstevel * Serial protocol 418 03831d35Sstevel * 419 03831d35Sstevel * This routine builds a command and sets it in progress. 420 03831d35Sstevel */ 421 03831d35Sstevel void 422 03831d35Sstevel rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen) 423 03831d35Sstevel { 424 03831d35Sstevel uint8_t *p; 425 03831d35Sstevel uint8_t status; 426 03831d35Sstevel 427 03831d35Sstevel /* 428 03831d35Sstevel * Check and update the SIO h/w fault status before accessing 429 03831d35Sstevel * the chip registers. If there's a (new or previous) fault, 430 03831d35Sstevel * we'll run through the protocol but won't really touch the 431 03831d35Sstevel * hardware and all commands will timeout. If a previously 432 03831d35Sstevel * discovered fault has now gone away (!), then we can (try to) 433 03831d35Sstevel * proceed with the new command (probably a probe). 434 03831d35Sstevel */ 435 03831d35Sstevel sio_check_fault_status(rcs); 436 03831d35Sstevel 437 03831d35Sstevel /* 438 03831d35Sstevel * Send the command now by stuffing the packet into the Tx FIFO. 439 03831d35Sstevel */ 440 03831d35Sstevel DATASCOPE(rcs, 'S', buf, buflen) 441 03831d35Sstevel 442 03831d35Sstevel mutex_enter(rcs->sd_state.hw_mutex); 443 03831d35Sstevel p = (uint8_t *)buf; 444 03831d35Sstevel while (p < (uint8_t *)&buf[buflen]) { 445 03831d35Sstevel 446 03831d35Sstevel /* 447 03831d35Sstevel * before writing to the TX holding register, we make sure that 448 03831d35Sstevel * it is empty. In this case, there will be no chance to 449 03831d35Sstevel * overflow the serial device FIFO (but, on the other hand, 450 03831d35Sstevel * it may introduce some latency) 451 03831d35Sstevel */ 452 03831d35Sstevel status = sio_get_reg(rcs, SIO_LSR); 453 03831d35Sstevel while ((status & SIO_LSR_XHRE) == 0) { 454 03831d35Sstevel drv_usecwait(100); 455 03831d35Sstevel status = sio_get_reg(rcs, SIO_LSR); 456 03831d35Sstevel } 457 03831d35Sstevel sio_put_reg(rcs, SIO_TXD, *p++); 458 03831d35Sstevel } 459 03831d35Sstevel mutex_exit(rcs->sd_state.hw_mutex); 460 03831d35Sstevel } 461 03831d35Sstevel 462 03831d35Sstevel /* 463 03831d35Sstevel * wait for the tx fifo to drain - used for urgent nowait requests 464 03831d35Sstevel */ 465 03831d35Sstevel void 466 03831d35Sstevel rmc_comm_serdev_drain(struct rmc_comm_state *rcs) 467 03831d35Sstevel { 468 03831d35Sstevel uint8_t status; 469 03831d35Sstevel 470 03831d35Sstevel mutex_enter(rcs->sd_state.hw_mutex); 471 03831d35Sstevel status = sio_get_reg(rcs, SIO_LSR); 472 03831d35Sstevel while ((status & SIO_LSR_XHRE) == 0) { 473 03831d35Sstevel drv_usecwait(100); 474 03831d35Sstevel status = sio_get_reg(rcs, SIO_LSR); 475 03831d35Sstevel } 476 03831d35Sstevel mutex_exit(rcs->sd_state.hw_mutex); 477 03831d35Sstevel } 478 03831d35Sstevel 479 03831d35Sstevel /* 480 03831d35Sstevel * Hardware setup - put the SIO chip in the required operational 481 03831d35Sstevel * state, with all our favourite parameters programmed correctly. 482 03831d35Sstevel * This routine leaves all SIO interrupts disabled. 483 03831d35Sstevel */ 484 03831d35Sstevel 485 03831d35Sstevel static void 486 03831d35Sstevel rmc_comm_hw_reset(struct rmc_comm_state *rcs) 487 03831d35Sstevel { 488 03831d35Sstevel uint16_t divisor; 489 03831d35Sstevel 490 03831d35Sstevel /* 491 03831d35Sstevel * Disable interrupts, soft reset Tx and Rx circuitry, 492 03831d35Sstevel * reselect standard modes (bits/char, parity, etc). 493 03831d35Sstevel */ 494 03831d35Sstevel rmc_comm_set_irq(rcs, B_FALSE); 495 03831d35Sstevel sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR); 496 03831d35Sstevel sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD); 497 03831d35Sstevel 498 03831d35Sstevel /* 499 03831d35Sstevel * Select the proper baud rate; if the value is invalid 500 03831d35Sstevel * (presumably 0, i.e. not specified, but also if the 501 03831d35Sstevel * "baud" property is set to some silly value), we assume 502 03831d35Sstevel * the default. 503 03831d35Sstevel */ 504 03831d35Sstevel if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) { 505 03831d35Sstevel divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) * 506 03831d35Sstevel rcs->baud_divisor_factor; 507 03831d35Sstevel } else { 508 03831d35Sstevel divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) * 509 03831d35Sstevel rcs->baud_divisor_factor; 510 03831d35Sstevel } 511 03831d35Sstevel 512 03831d35Sstevel /* 513 03831d35Sstevel * According to the datasheet, it is forbidden for the divisor 514 03831d35Sstevel * register to be zero. So when loading the register in two 515 03831d35Sstevel * steps, we have to make sure that the temporary value formed 516 03831d35Sstevel * between loads is nonzero. However, we can't rely on either 517 03831d35Sstevel * half already having a nonzero value, as the datasheet also 518 03831d35Sstevel * says that these registers are indeterminate after a reset! 519 03831d35Sstevel * So, we explicitly set the low byte to a non-zero value first; 520 03831d35Sstevel * then we can safely load the high byte, and then the correct 521 03831d35Sstevel * value for the low byte, without the result ever being zero. 522 03831d35Sstevel */ 523 03831d35Sstevel sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1); 524 03831d35Sstevel sio_put_reg(rcs, SIO_LBGDL, 0xff); 525 03831d35Sstevel sio_put_reg(rcs, SIO_LBGDH, divisor >> 8); 526 03831d35Sstevel sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff); 527 03831d35Sstevel sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0); 528 03831d35Sstevel 529 03831d35Sstevel /* 530 03831d35Sstevel * Program the remaining device registers as required 531 03831d35Sstevel */ 532 03831d35Sstevel sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD); 533 03831d35Sstevel sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD); 534 03831d35Sstevel } 535 03831d35Sstevel 536 03831d35Sstevel /* 537 03831d35Sstevel * Higher-level setup & teardown 538 03831d35Sstevel */ 539 03831d35Sstevel static void 540 03831d35Sstevel rmc_comm_offline(struct rmc_comm_state *rcs) 541 03831d35Sstevel { 542 03831d35Sstevel if (rcs->sd_state.sio_handle != NULL) 543 03831d35Sstevel ddi_regs_map_free(&rcs->sd_state.sio_handle); 544 03831d35Sstevel rcs->sd_state.sio_handle = NULL; 545 03831d35Sstevel rcs->sd_state.sio_regs = NULL; 546 03831d35Sstevel } 547 03831d35Sstevel 548 03831d35Sstevel static int 549 03831d35Sstevel rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip) 550 03831d35Sstevel { 551 03831d35Sstevel ddi_acc_handle_t h; 552 03831d35Sstevel caddr_t p; 553 03831d35Sstevel int nregs; 554 03831d35Sstevel int err; 555 03831d35Sstevel 556 03831d35Sstevel if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) 557 03831d35Sstevel nregs = 0; 558 03831d35Sstevel switch (nregs) { 559 03831d35Sstevel default: 560 03831d35Sstevel case 1: 561 03831d35Sstevel /* 562 03831d35Sstevel * regset 0 represents the SIO operating registers 563 03831d35Sstevel */ 564 03831d35Sstevel err = ddi_regs_map_setup(dip, 0, &p, 0, 0, 565 03831d35Sstevel rmc_comm_dev_acc_attr, &h); 566 03831d35Sstevel if (err != DDI_SUCCESS) 567 03831d35Sstevel return (EIO); 568 03831d35Sstevel rcs->sd_state.sio_handle = h; 569 03831d35Sstevel rcs->sd_state.sio_regs = (void *)p; 570 03831d35Sstevel break; 571 03831d35Sstevel case 0: 572 03831d35Sstevel /* 573 03831d35Sstevel * If no registers are defined, succeed vacuously; 574 03831d35Sstevel * commands will be accepted, but we fake the accesses. 575 03831d35Sstevel */ 576 03831d35Sstevel break; 577 03831d35Sstevel } 578 03831d35Sstevel 579 03831d35Sstevel /* 580 03831d35Sstevel * Now that the registers are mapped, we can initialise the SIO h/w 581 03831d35Sstevel */ 582 03831d35Sstevel rmc_comm_hw_reset(rcs); 583 03831d35Sstevel return (0); 584 03831d35Sstevel } 585 03831d35Sstevel 586 03831d35Sstevel 587 03831d35Sstevel /* 588 03831d35Sstevel * Initialization of the serial device (data structure, mutex, cv, hardware 589 03831d35Sstevel * and so on). It is called from the attach routine. 590 03831d35Sstevel */ 591 03831d35Sstevel 592 03831d35Sstevel int 593 03831d35Sstevel rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip) 594 03831d35Sstevel { 595 03831d35Sstevel int err = DDI_SUCCESS; 596 03831d35Sstevel 597 dd4eeefdSeota rcs->sd_state.cycid = NULL; 598 03831d35Sstevel 599 03831d35Sstevel /* 600 03831d35Sstevel * Online the hardware ... 601 03831d35Sstevel */ 602 03831d35Sstevel err = rmc_comm_online(rcs, dip); 603 03831d35Sstevel if (err != 0) 604 03831d35Sstevel return (-1); 605 03831d35Sstevel 606 03831d35Sstevel /* 607 03831d35Sstevel * call ddi_get_soft_iblock_cookie() to retrieve the 608 03831d35Sstevel * the interrupt block cookie so that the mutexes are initialized 609 03831d35Sstevel * before adding the interrupt (to avoid a potential race condition). 610 03831d35Sstevel */ 611 03831d35Sstevel 612 03831d35Sstevel err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW, 613 03831d35Sstevel &rcs->dp_state.dp_iblk); 614 03831d35Sstevel if (err != DDI_SUCCESS) 615 03831d35Sstevel return (-1); 616 03831d35Sstevel 617 03831d35Sstevel err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk); 618 03831d35Sstevel if (err != DDI_SUCCESS) 619 03831d35Sstevel return (-1); 620 03831d35Sstevel 621 03831d35Sstevel /* 622 03831d35Sstevel * initialize mutex here before adding hw/sw interrupt handlers 623 03831d35Sstevel */ 624 03831d35Sstevel mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER, 625 03831d35Sstevel rcs->dp_state.dp_iblk); 626 03831d35Sstevel 627 03831d35Sstevel mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER, 628 03831d35Sstevel rcs->sd_state.hw_iblk); 629 03831d35Sstevel 630 03831d35Sstevel /* 631 03831d35Sstevel * Install soft and hard interrupt handler(s) 632 03831d35Sstevel * 633 03831d35Sstevel * the soft intr. handler will need the data protocol lock (dp_mutex) 634 03831d35Sstevel * So, data protocol mutex and iblock cookie are created/initialized 635 03831d35Sstevel * here 636 03831d35Sstevel */ 637 03831d35Sstevel 638 03831d35Sstevel err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid, 639 03831d35Sstevel &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs); 640 03831d35Sstevel if (err != DDI_SUCCESS) { 641 03831d35Sstevel mutex_destroy(rcs->dp_state.dp_mutex); 642 03831d35Sstevel mutex_destroy(rcs->sd_state.hw_mutex); 643 03831d35Sstevel return (-1); 644 03831d35Sstevel } 645 03831d35Sstevel 646 03831d35Sstevel /* 647 03831d35Sstevel * hardware interrupt 648 03831d35Sstevel */ 649 03831d35Sstevel 650 03831d35Sstevel if (rcs->sd_state.sio_handle != NULL) { 651 03831d35Sstevel err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL, 652 03831d35Sstevel rmc_comm_hi_intr, (caddr_t)rcs); 653 03831d35Sstevel 654 03831d35Sstevel /* 655 03831d35Sstevel * did we successfully install the h/w interrupt handler? 656 03831d35Sstevel */ 657 03831d35Sstevel if (err != DDI_SUCCESS) { 658 03831d35Sstevel ddi_remove_softintr(rcs->sd_state.softid); 659 03831d35Sstevel mutex_destroy(rcs->dp_state.dp_mutex); 660 03831d35Sstevel mutex_destroy(rcs->sd_state.hw_mutex); 661 03831d35Sstevel return (-1); 662 03831d35Sstevel } 663 03831d35Sstevel } 664 03831d35Sstevel 665 03831d35Sstevel /* 666 dd4eeefdSeota * Start periodical callbacks 667 03831d35Sstevel */ 668 dd4eeefdSeota rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs, 669 dd4eeefdSeota 5 * RMC_COMM_ONE_SEC, DDI_IPL_1); 670 03831d35Sstevel return (0); 671 03831d35Sstevel } 672 03831d35Sstevel 673 03831d35Sstevel /* 674 03831d35Sstevel * Termination of the serial device (data structure, mutex, cv, hardware 675 03831d35Sstevel * and so on). It is called from the detach routine. 676 03831d35Sstevel */ 677 03831d35Sstevel 678 03831d35Sstevel void 679 03831d35Sstevel rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip) 680 03831d35Sstevel { 681 03831d35Sstevel rmc_comm_hw_reset(rcs); 682 03831d35Sstevel 683 dd4eeefdSeota if (rcs->sd_state.cycid != NULL) { 684 dd4eeefdSeota ddi_periodic_delete(rcs->sd_state.cycid); 685 dd4eeefdSeota rcs->sd_state.cycid = NULL; 686 03831d35Sstevel 687 03831d35Sstevel if (rcs->sd_state.sio_handle != NULL) 688 03831d35Sstevel ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk); 689 03831d35Sstevel 690 03831d35Sstevel ddi_remove_softintr(rcs->sd_state.softid); 691 03831d35Sstevel 692 03831d35Sstevel mutex_destroy(rcs->sd_state.hw_mutex); 693 03831d35Sstevel 694 03831d35Sstevel mutex_destroy(rcs->dp_state.dp_mutex); 695 03831d35Sstevel } 696 03831d35Sstevel rmc_comm_offline(rcs); 697 03831d35Sstevel } 698 03831d35Sstevel 699 03831d35Sstevel /* 700 03831d35Sstevel * device driver entry routines (init/fini, attach/detach, ...) 701 03831d35Sstevel */ 702 03831d35Sstevel 703 03831d35Sstevel /* 704 03831d35Sstevel * Clean up on detach or failure of attach 705 03831d35Sstevel */ 706 03831d35Sstevel static void 707 03831d35Sstevel rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance, 708 03831d35Sstevel boolean_t drvi_init, boolean_t dp_init, boolean_t sd_init) 709 03831d35Sstevel { 710 03831d35Sstevel if (rcs != NULL) { 711 03831d35Sstevel /* 712 03831d35Sstevel * disable interrupts now 713 03831d35Sstevel */ 714 03831d35Sstevel rmc_comm_set_irq(rcs, B_FALSE); 715 03831d35Sstevel 716 03831d35Sstevel /* 717 03831d35Sstevel * driver interface termination (if it has been initialized) 718 03831d35Sstevel */ 719 03831d35Sstevel if (drvi_init) 720 03831d35Sstevel rmc_comm_drvintf_fini(rcs); 721 03831d35Sstevel 722 03831d35Sstevel /* 723 03831d35Sstevel * data protocol termination (if it has been initialized) 724 03831d35Sstevel */ 725 03831d35Sstevel if (dp_init) 726 03831d35Sstevel rmc_comm_dp_fini(rcs); 727 03831d35Sstevel 728 03831d35Sstevel /* 729 03831d35Sstevel * serial device termination (if it has been initialized) 730 03831d35Sstevel */ 731 03831d35Sstevel if (sd_init) 732 03831d35Sstevel rmc_comm_serdev_fini(rcs, dip); 733 03831d35Sstevel 734 03831d35Sstevel ddi_set_driver_private(dip, NULL); 735 03831d35Sstevel } 736 03831d35Sstevel ddi_soft_state_free(rmc_comm_statep, instance); 737 03831d35Sstevel } 738 03831d35Sstevel 739 03831d35Sstevel /* 740 03831d35Sstevel * Autoconfiguration routines 741 03831d35Sstevel */ 742 03831d35Sstevel 743 03831d35Sstevel static int 744 03831d35Sstevel rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 745 03831d35Sstevel { 746 03831d35Sstevel struct rmc_comm_state *rcs = NULL; 747 03831d35Sstevel sig_state_t *current_sgn_p; 748 03831d35Sstevel int instance; 749 03831d35Sstevel 750 03831d35Sstevel /* 751 03831d35Sstevel * only allow one instance 752 03831d35Sstevel */ 753 03831d35Sstevel instance = ddi_get_instance(dip); 754 03831d35Sstevel if (instance != 0) 755 03831d35Sstevel return (DDI_FAILURE); 756 03831d35Sstevel 757 03831d35Sstevel switch (cmd) { 758 03831d35Sstevel default: 759 03831d35Sstevel return (DDI_FAILURE); 760 03831d35Sstevel 761 03831d35Sstevel case DDI_RESUME: 762 03831d35Sstevel if ((rcs = rmc_comm_getstate(dip, instance, 763 03831d35Sstevel "rmc_comm_attach")) == NULL) 764 03831d35Sstevel return (DDI_FAILURE); /* this "can't happen" */ 765 03831d35Sstevel 766 03831d35Sstevel rmc_comm_hw_reset(rcs); 767 03831d35Sstevel rmc_comm_set_irq(rcs, B_TRUE); 768 03831d35Sstevel rcs->dip = dip; 769 03831d35Sstevel 770 03831d35Sstevel mutex_enter(&tod_lock); 771 03831d35Sstevel if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL && 772 03831d35Sstevel watchdog_was_active) { 773 03831d35Sstevel (void) tod_ops.tod_set_watchdog_timer(0); 774 03831d35Sstevel } 775 03831d35Sstevel mutex_exit(&tod_lock); 776 03831d35Sstevel 777 03831d35Sstevel mutex_enter(rcs->dp_state.dp_mutex); 778 03831d35Sstevel dp_reset(rcs, INITIAL_SEQID, 1, 1); 779 03831d35Sstevel mutex_exit(rcs->dp_state.dp_mutex); 780 03831d35Sstevel 781 03831d35Sstevel current_sgn_p = (sig_state_t *)modgetsymvalue( 782 03831d35Sstevel "current_sgn", 0); 783 03831d35Sstevel if ((current_sgn_p != NULL) && 784 03831d35Sstevel (current_sgn_p->state_t.sig != 0)) { 785 03831d35Sstevel CPU_SIGNATURE(current_sgn_p->state_t.sig, 786 03831d35Sstevel current_sgn_p->state_t.state, 787 03831d35Sstevel current_sgn_p->state_t.sub_state, -1); 788 03831d35Sstevel } 789 03831d35Sstevel return (DDI_SUCCESS); 790 03831d35Sstevel 791 03831d35Sstevel case DDI_ATTACH: 792 03831d35Sstevel break; 793 03831d35Sstevel } 794 03831d35Sstevel 795 03831d35Sstevel /* 796 03831d35Sstevel * Allocate the soft-state structure 797 03831d35Sstevel */ 798 03831d35Sstevel if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS) 799 03831d35Sstevel return (DDI_FAILURE); 800 03831d35Sstevel if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) == 801 03831d35Sstevel NULL) { 802 03831d35Sstevel rmc_comm_unattach(rcs, dip, instance, 0, 0, 0); 803 03831d35Sstevel return (DDI_FAILURE); 804 03831d35Sstevel } 805 03831d35Sstevel ddi_set_driver_private(dip, rcs); 806 03831d35Sstevel 807 03831d35Sstevel rcs->dip = NULL; 808 03831d35Sstevel 809 03831d35Sstevel /* 810 03831d35Sstevel * Set various options from .conf properties 811 03831d35Sstevel */ 812 03831d35Sstevel rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 813 03831d35Sstevel "baud-rate", 0); 814 03831d35Sstevel rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 815 03831d35Sstevel "debug", 0); 816 03831d35Sstevel 817 03831d35Sstevel /* 818 03831d35Sstevel * the baud divisor factor tells us how to scale the result of 819 03831d35Sstevel * the SIO_BAUD_TO_DIVISOR macro for platforms which do not 820 03831d35Sstevel * use the standard 24MHz uart clock 821 03831d35Sstevel */ 822 03831d35Sstevel rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 823 03831d35Sstevel DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN); 824 03831d35Sstevel 825 03831d35Sstevel /* 826 03831d35Sstevel * try to be reasonable if the scale factor contains a silly value 827 03831d35Sstevel */ 828 03831d35Sstevel if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) || 829 03831d35Sstevel (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX)) 830 03831d35Sstevel rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN; 831 03831d35Sstevel 832 03831d35Sstevel /* 833 03831d35Sstevel * initialize serial device 834 03831d35Sstevel */ 835 03831d35Sstevel if (rmc_comm_serdev_init(rcs, dip) != 0) { 836 03831d35Sstevel rmc_comm_unattach(rcs, dip, instance, 0, 0, 0); 837 03831d35Sstevel return (DDI_FAILURE); 838 03831d35Sstevel } 839 03831d35Sstevel 840 03831d35Sstevel /* 841 03831d35Sstevel * initialize data protocol 842 03831d35Sstevel */ 843 03831d35Sstevel rmc_comm_dp_init(rcs); 844 03831d35Sstevel 845 03831d35Sstevel /* 846 03831d35Sstevel * initialize driver interface 847 03831d35Sstevel */ 848 03831d35Sstevel if (rmc_comm_drvintf_init(rcs) != 0) { 849 03831d35Sstevel rmc_comm_unattach(rcs, dip, instance, 0, 1, 1); 850 03831d35Sstevel return (DDI_FAILURE); 851 03831d35Sstevel } 852 03831d35Sstevel 853 03831d35Sstevel /* 854 03831d35Sstevel * Initialise devinfo-related fields 855 03831d35Sstevel */ 856 03831d35Sstevel rcs->majornum = ddi_driver_major(dip); 857 03831d35Sstevel rcs->instance = instance; 858 03831d35Sstevel rcs->dip = dip; 859 03831d35Sstevel 860 03831d35Sstevel /* 861 03831d35Sstevel * enable interrupts now 862 03831d35Sstevel */ 863 03831d35Sstevel rmc_comm_set_irq(rcs, B_TRUE); 864 03831d35Sstevel 865 03831d35Sstevel /* 866 03831d35Sstevel * All done, report success 867 03831d35Sstevel */ 868 03831d35Sstevel ddi_report_dev(dip); 869 03831d35Sstevel mutex_enter(&rmc_comm_attach_lock); 870 03831d35Sstevel rcs->is_attached = B_TRUE; 871 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 872 03831d35Sstevel return (DDI_SUCCESS); 873 03831d35Sstevel } 874 03831d35Sstevel 875 03831d35Sstevel static int 876 03831d35Sstevel rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 877 03831d35Sstevel { 878 03831d35Sstevel struct rmc_comm_state *rcs; 879 03831d35Sstevel int instance; 880 03831d35Sstevel 881 03831d35Sstevel instance = ddi_get_instance(dip); 882 03831d35Sstevel if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL) 883 03831d35Sstevel return (DDI_FAILURE); /* this "can't happen" */ 884 03831d35Sstevel 885 03831d35Sstevel switch (cmd) { 886 03831d35Sstevel case DDI_SUSPEND: 887 03831d35Sstevel mutex_enter(&tod_lock); 888 03831d35Sstevel if (watchdog_enable && watchdog_activated && 889 03831d35Sstevel tod_ops.tod_clear_watchdog_timer != NULL) { 890 03831d35Sstevel watchdog_was_active = 1; 891 03831d35Sstevel (void) tod_ops.tod_clear_watchdog_timer(); 892 03831d35Sstevel } else { 893 03831d35Sstevel watchdog_was_active = 0; 894 03831d35Sstevel } 895 03831d35Sstevel mutex_exit(&tod_lock); 896 03831d35Sstevel 897 03831d35Sstevel rcs->dip = NULL; 898 03831d35Sstevel rmc_comm_hw_reset(rcs); 899 03831d35Sstevel 900 03831d35Sstevel return (DDI_SUCCESS); 901 03831d35Sstevel 902 03831d35Sstevel case DDI_DETACH: 903 03831d35Sstevel /* 904 03831d35Sstevel * reject detach if any client(s) still registered 905 03831d35Sstevel */ 906 03831d35Sstevel mutex_enter(&rmc_comm_attach_lock); 907 03831d35Sstevel if (rcs->n_registrations != 0) { 908 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 909 03831d35Sstevel return (DDI_FAILURE); 910 03831d35Sstevel } 911 03831d35Sstevel /* 912 03831d35Sstevel * Committed to complete the detach; 913 03831d35Sstevel * mark as no longer attached, to prevent new clients 914 03831d35Sstevel * registering (as part of a coincident attach) 915 03831d35Sstevel */ 916 03831d35Sstevel rcs->is_attached = B_FALSE; 917 03831d35Sstevel mutex_exit(&rmc_comm_attach_lock); 918 03831d35Sstevel rmc_comm_unattach(rcs, dip, instance, 1, 1, 1); 919 03831d35Sstevel return (DDI_SUCCESS); 920 03831d35Sstevel 921 03831d35Sstevel default: 922 03831d35Sstevel return (DDI_FAILURE); 923 03831d35Sstevel } 924 03831d35Sstevel } 925 03831d35Sstevel 926 03831d35Sstevel /*ARGSUSED*/ 927 03831d35Sstevel static int 928 03831d35Sstevel rmc_comm_reset(dev_info_t *dip, ddi_reset_cmd_t cmd) 929 03831d35Sstevel { 930 03831d35Sstevel struct rmc_comm_state *rcs; 931 03831d35Sstevel 932 03831d35Sstevel if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL) 933 03831d35Sstevel return (DDI_FAILURE); 934 03831d35Sstevel rmc_comm_hw_reset(rcs); 935 03831d35Sstevel return (DDI_SUCCESS); 936 03831d35Sstevel } 937 03831d35Sstevel 938 03831d35Sstevel /* 939 03831d35Sstevel * System interface structures 940 03831d35Sstevel */ 941 03831d35Sstevel static struct dev_ops rmc_comm_dev_ops = 942 03831d35Sstevel { 943 03831d35Sstevel DEVO_REV, 944 03831d35Sstevel 0, /* refcount */ 945 03831d35Sstevel nodev, /* getinfo */ 946 03831d35Sstevel nulldev, /* identify */ 947 03831d35Sstevel nulldev, /* probe */ 948 03831d35Sstevel rmc_comm_attach, /* attach */ 949 03831d35Sstevel rmc_comm_detach, /* detach */ 950 03831d35Sstevel rmc_comm_reset, /* reset */ 951 03831d35Sstevel (struct cb_ops *)NULL, /* driver operations */ 952 03831d35Sstevel (struct bus_ops *)NULL, /* bus operations */ 953 *19397407SSherry Moore nulldev, /* power() */ 954 *19397407SSherry Moore ddi_quiesce_not_supported, /* devo_quiesce */ 955 03831d35Sstevel }; 956 03831d35Sstevel 957 03831d35Sstevel static struct modldrv modldrv = 958 03831d35Sstevel { 959 03831d35Sstevel &mod_driverops, 960 *19397407SSherry Moore "rmc_comm driver", 961 03831d35Sstevel &rmc_comm_dev_ops 962 03831d35Sstevel }; 963 03831d35Sstevel 964 03831d35Sstevel static struct modlinkage modlinkage = 965 03831d35Sstevel { 966 03831d35Sstevel MODREV_1, 967 03831d35Sstevel { 968 03831d35Sstevel &modldrv, 969 03831d35Sstevel NULL 970 03831d35Sstevel } 971 03831d35Sstevel }; 972 03831d35Sstevel 973 03831d35Sstevel /* 974 03831d35Sstevel * Dynamic loader interface code 975 03831d35Sstevel */ 976 03831d35Sstevel int 977 03831d35Sstevel _init(void) 978 03831d35Sstevel { 979 03831d35Sstevel int err; 980 03831d35Sstevel 981 03831d35Sstevel mutex_init(&rmc_comm_attach_lock, NULL, MUTEX_DRIVER, NULL); 982 03831d35Sstevel err = ddi_soft_state_init(&rmc_comm_statep, 983 03831d35Sstevel sizeof (struct rmc_comm_state), 0); 984 03831d35Sstevel if (err == DDI_SUCCESS) 985 03831d35Sstevel if ((err = mod_install(&modlinkage)) != 0) { 986 03831d35Sstevel ddi_soft_state_fini(&rmc_comm_statep); 987 03831d35Sstevel } 988 03831d35Sstevel if (err != DDI_SUCCESS) 989 03831d35Sstevel mutex_destroy(&rmc_comm_attach_lock); 990 03831d35Sstevel return (err); 991 03831d35Sstevel } 992 03831d35Sstevel 993 03831d35Sstevel int 994 03831d35Sstevel _info(struct modinfo *mip) 995 03831d35Sstevel { 996 03831d35Sstevel return (mod_info(&modlinkage, mip)); 997 03831d35Sstevel } 998 03831d35Sstevel 999 03831d35Sstevel int 1000 03831d35Sstevel _fini(void) 1001 03831d35Sstevel { 1002 03831d35Sstevel int err; 1003 03831d35Sstevel 1004 03831d35Sstevel if ((err = mod_remove(&modlinkage)) == 0) { 1005 03831d35Sstevel ddi_soft_state_fini(&rmc_comm_statep); 1006 03831d35Sstevel rmc_comm_major = NOMAJOR; 1007 03831d35Sstevel mutex_destroy(&rmc_comm_attach_lock); 1008 03831d35Sstevel } 1009 03831d35Sstevel return (err); 1010 03831d35Sstevel } 1011