Lines Matching refs:rcs
100 struct rmc_comm_state *rcs;
103 rcs = ddi_get_soft_state(rmc_comm_statep, 0);
104 if ((rcs == NULL) || (!rcs->is_attached)) {
108 rcs->n_registrations++;
116 struct rmc_comm_state *rcs;
119 rcs = ddi_get_soft_state(rmc_comm_statep, 0);
120 ASSERT(rcs != NULL);
121 ASSERT(rcs->n_registrations != 0);
122 rcs->n_registrations--;
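
The lines above (source 100-122) are the client registration pair that other modules call before using the RMC interface. A minimal sketch of how they likely fit together; the entry-point names and the attach lock are inferred rather than shown in the listing, and the declarations are assumed to come from <sys/ddi.h>/<sys/sunddi.h> plus the driver's own rmc_comm.h:

/*
 * Sketch only, not verbatim: the client registration pair.
 */
int
rmc_comm_register(void)
{
    struct rmc_comm_state *rcs;

    mutex_enter(&rmc_comm_attach_lock);            /* lock name assumed */
    rcs = ddi_get_soft_state(rmc_comm_statep, 0);
    if ((rcs == NULL) || (!rcs->is_attached)) {
        mutex_exit(&rmc_comm_attach_lock);
        return (DDI_FAILURE);
    }
    rcs->n_registrations++;        /* counted so detach can refuse later */
    mutex_exit(&rmc_comm_attach_lock);
    return (DDI_SUCCESS);
}

void
rmc_comm_unregister(void)
{
    struct rmc_comm_state *rcs;

    mutex_enter(&rmc_comm_attach_lock);
    rcs = ddi_get_soft_state(rmc_comm_statep, 0);
    ASSERT(rcs != NULL);
    ASSERT(rcs->n_registrations != 0);
    rcs->n_registrations--;
    mutex_exit(&rmc_comm_attach_lock);
}
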
132 struct rmc_comm_state *rcs = NULL;
154 rcs = ddi_get_soft_state(rmc_comm_statep, instance);
155 if (rcs != NULL) {
156 sdip = rcs->dip;
158 rcs = NULL;
164 rcs = NULL;
168 return (rcs);
176 sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
178 DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));
180 if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
207 ddi_put8(rcs->sd_state.sio_handle,
208 rcs->sd_state.sio_regs + reg, val);
209 ddi_put8(rcs->sd_state.sio_handle,
210 rcs->sd_state.sio_regs + SIO_SCR, val);
212 (void) ddi_get8(rcs->sd_state.sio_handle,
213 rcs->sd_state.sio_regs + SIO_SCR);
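
The ddi_put8/ddi_put8/ddi_get8 sequence above is a write-posting flush: after the target register is written, the same value is written to the scratch register (SIO_SCR) and immediately read back, so the access handle pushes the posted writes through to the SuperIO before the caller proceeds (important when the write masks interrupts). A sketch of the whole routine under that reading; the listing only shows lines containing rcs, so any intermediate statements are not reconstructed here:

static void
sio_put_reg(struct rmc_comm_state *rcs, uint_t reg, uint8_t val)
{
    DPRINTF(rcs, DSER, (CE_CONT, "REG[%d]<-$%02x", reg, val));

    if (rcs->sd_state.sio_handle != NULL && !rcs->sd_state.sio_fault) {
        /*
         * Write the target register, then write and read back the
         * scratch register so any posted writes reach the chip
         * before we return (e.g. before an interrupt-mask write
         * is relied upon).
         */
        ddi_put8(rcs->sd_state.sio_handle,
            rcs->sd_state.sio_regs + reg, val);
        ddi_put8(rcs->sd_state.sio_handle,
            rcs->sd_state.sio_regs + SIO_SCR, val);
        (void) ddi_get8(rcs->sd_state.sio_handle,
            rcs->sd_state.sio_regs + SIO_SCR);
    }
}
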
218 sio_get_reg(struct rmc_comm_state *rcs, uint_t reg)
222 if (rcs->sd_state.sio_handle && !rcs->sd_state.sio_fault)
223 val = ddi_get8(rcs->sd_state.sio_handle,
224 rcs->sd_state.sio_regs + reg);
227 DPRINTF(rcs, DSER, (CE_CONT, "$%02x<-REG[%d]", val, reg));
232 sio_check_fault_status(struct rmc_comm_state *rcs)
234 rcs->sd_state.sio_fault =
235 ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
239 rmc_comm_faulty(struct rmc_comm_state *rcs)
241 if (!rcs->sd_state.sio_fault)
242 sio_check_fault_status(rcs);
243 return (rcs->sd_state.sio_fault);
250 sio_data_ready(struct rmc_comm_state *rcs)
258 status = sio_get_reg(rcs, SIO_LSR);
259 return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
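
The three small routines above implement fault latching: once ddi_check_acc_handle() reports a problem, the sio_fault flag stays set, further register traffic is short-circuited, and sio_data_ready() refuses to report data from a faulted handle. A hedged reconstruction (return types assumed to be boolean_t):

static void
sio_check_fault_status(struct rmc_comm_state *rcs)
{
    rcs->sd_state.sio_fault =
        ddi_check_acc_handle(rcs->sd_state.sio_handle) != DDI_SUCCESS;
}

static boolean_t
rmc_comm_faulty(struct rmc_comm_state *rcs)
{
    /* Once a fault is latched, skip further handle checks */
    if (!rcs->sd_state.sio_fault)
        sio_check_fault_status(rcs);
    return (rcs->sd_state.sio_fault);
}

static boolean_t
sio_data_ready(struct rmc_comm_state *rcs)
{
    uint8_t status;

    /* RX data available, and the access handle still looks healthy */
    status = sio_get_reg(rcs, SIO_LSR);
    return ((status & SIO_LSR_RXDA) != 0 && !rmc_comm_faulty(rcs));
}
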
266 rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
271 sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
272 rcs->sd_state.hw_int_enabled = newstate;
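
rmc_comm_set_irq() appears to be the single place the interrupt-enable register is programmed; a sketch, with the exact RX interrupt-enable bit name assumed:

static void
rmc_comm_set_irq(struct rmc_comm_state *rcs, boolean_t newstate)
{
    uint8_t val;

    val = newstate ? SIO_IER_RXHDL_IE : 0;    /* RX-interrupt bit; name assumed */
    sio_put_reg(rcs, SIO_IER, SIO_IER_STD | val);
    rcs->sd_state.hw_int_enabled = newstate;  /* remembered for the cyclic */
}
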
299 struct rmc_comm_state *rcs = (void *)arg;
303 if (rcs->sd_state.cycid != NULL) {
313 if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0) {
317 mutex_enter(rcs->sd_state.hw_mutex);
319 if (rcs->sd_state.hw_int_enabled) {
320 rmc_comm_set_irq(rcs, B_FALSE);
321 ddi_trigger_softintr(rcs->sd_state.softid);
324 mutex_exit(rcs->sd_state.hw_mutex);
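
The cyclic fragment above is the periodic poll that backstops masked or lost interrupts. The listing shows both a mutex_tryenter() and a mutex_enter() path onto hw_mutex plus a cycid check; the sketch below keeps only the non-blocking path, which is the essential pattern: never sleep in the periodic callback, mask the chip interrupt, and hand the real work to the soft interrupt:

static void
rmc_comm_cyclic(void *arg)
{
    struct rmc_comm_state *rcs = (void *)arg;

    /* Don't block here; just try again on the next tick if contended */
    if (mutex_tryenter(rcs->sd_state.hw_mutex) == 0)
        return;

    if (rcs->sd_state.hw_int_enabled) {
        /* Mask the chip and let the soft interrupt drain the FIFO */
        rmc_comm_set_irq(rcs, B_FALSE);
        ddi_trigger_softintr(rcs->sd_state.softid);
    }

    mutex_exit(rcs->sd_state.hw_mutex);
}
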
337 rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
341 DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));
348 if (!rmc_comm_faulty(rcs)) {
350 char *rx_buf = rcs->sd_state.serdev_rx_buf;
365 mutex_enter(rcs->sd_state.hw_mutex);
366 while (sio_data_ready(rcs)) {
367 data = sio_get_reg(rcs, SIO_RXD);
372 rcs->sd_state.serdev_rx_count = rx_buflen;
374 DATASCOPE(rcs, 'R', rx_buf, rx_buflen)
376 rmc_comm_set_irq(rcs, B_TRUE);
377 mutex_exit(rcs->sd_state.hw_mutex);
382 rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
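
rmc_comm_serdev_receive() drains the RX FIFO into a per-instance buffer with the hardware mutex held, re-arms the chip interrupt, and only then hands the bytes to the protocol layer via rmc_comm_dp_drecv(). A sketch; SERDEV_RX_BUFLEN is a hypothetical name for whatever buffer bound the driver actually defines:

static void
rmc_comm_serdev_receive(struct rmc_comm_state *rcs)
{
    uint8_t data;
    int rx_buflen = 0;

    DPRINTF(rcs, DSER, (CE_CONT, "serdev_receive: soft int handler\n"));

    if (!rmc_comm_faulty(rcs)) {
        char *rx_buf = rcs->sd_state.serdev_rx_buf;

        /* Pull everything currently in the RX FIFO, interrupt masked */
        mutex_enter(rcs->sd_state.hw_mutex);
        while (sio_data_ready(rcs)) {
            data = sio_get_reg(rcs, SIO_RXD);
            if (rx_buflen < SERDEV_RX_BUFLEN)    /* bound name hypothetical */
                rx_buf[rx_buflen++] = (char)data;
        }
        rcs->sd_state.serdev_rx_count = rx_buflen;

        DATASCOPE(rcs, 'R', rx_buf, rx_buflen)

        rmc_comm_set_irq(rcs, B_TRUE);
        mutex_exit(rcs->sd_state.hw_mutex);

        /* Protocol layer sees the data only after the chip is re-armed */
        rmc_comm_dp_drecv(rcs, (uint8_t *)rx_buf, rx_buflen);
    }
}
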
394 struct rmc_comm_state *rcs = (void *)arg;
396 mutex_enter(rcs->dp_state.dp_mutex);
397 rmc_comm_serdev_receive(rcs);
398 mutex_exit(rcs->dp_state.dp_mutex);
409 struct rmc_comm_state *rcs = (void *)arg;
411 mutex_enter(rcs->dp_state.dp_mutex);
412 rmc_comm_serdev_receive(rcs);
413 mutex_exit(rcs->dp_state.dp_mutex);
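
Both handler-shaped fragments above (source 394-398 and 409-413) have the same body: serialize on the protocol-layer mutex and run the common receive routine. A sketch of that shape for the soft interrupt handler registered via ddi_add_softintr() further down; the return value is assumed:

static uint_t
rmc_comm_softint(caddr_t arg)
{
    struct rmc_comm_state *rcs = (void *)arg;

    mutex_enter(rcs->dp_state.dp_mutex);
    rmc_comm_serdev_receive(rcs);
    mutex_exit(rcs->dp_state.dp_mutex);

    return (DDI_INTR_CLAIMED);    /* assumed: always claimed */
}
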
422 rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
435 sio_check_fault_status(rcs);
440 DATASCOPE(rcs, 'S', buf, buflen)
442 mutex_enter(rcs->sd_state.hw_mutex);
452 status = sio_get_reg(rcs, SIO_LSR);
455 status = sio_get_reg(rcs, SIO_LSR);
457 sio_put_reg(rcs, SIO_TXD, *p++);
459 mutex_exit(rcs->sd_state.hw_mutex);
466 rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
470 mutex_enter(rcs->sd_state.hw_mutex);
471 status = sio_get_reg(rcs, SIO_LSR);
474 status = sio_get_reg(rcs, SIO_LSR);
476 mutex_exit(rcs->sd_state.hw_mutex);
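
The send and drain fragments both spin on the line-status register: send waits for the transmit holding register to empty before stuffing each byte, and drain just waits for the transmitter with nothing to send. A sketch of both, with the transmitter-empty bit name (SIO_LSR_XHRE) assumed:

static void
rmc_comm_serdev_send(struct rmc_comm_state *rcs, char *buf, int buflen)
{
    uint8_t *p = (uint8_t *)buf;
    uint8_t status;

    /* Refresh the fault state before a burst of register traffic */
    sio_check_fault_status(rcs);

    DATASCOPE(rcs, 'S', buf, buflen)

    mutex_enter(rcs->sd_state.hw_mutex);
    while (buflen-- > 0 && !rmc_comm_faulty(rcs)) {
        /* Spin until the TX holding register is empty (bit name assumed) */
        status = sio_get_reg(rcs, SIO_LSR);
        while ((status & SIO_LSR_XHRE) == 0 && !rmc_comm_faulty(rcs))
            status = sio_get_reg(rcs, SIO_LSR);
        sio_put_reg(rcs, SIO_TXD, *p++);
    }
    mutex_exit(rcs->sd_state.hw_mutex);
}

static void
rmc_comm_serdev_drain(struct rmc_comm_state *rcs)
{
    uint8_t status;

    /* Wait for the transmitter to go idle, e.g. before reset or suspend */
    mutex_enter(rcs->sd_state.hw_mutex);
    status = sio_get_reg(rcs, SIO_LSR);
    while ((status & SIO_LSR_XHRE) == 0 && !rmc_comm_faulty(rcs))
        status = sio_get_reg(rcs, SIO_LSR);
    mutex_exit(rcs->sd_state.hw_mutex);
}
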
486 rmc_comm_hw_reset(struct rmc_comm_state *rcs)
494 rmc_comm_set_irq(rcs, B_FALSE);
495 sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
496 sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);
504 if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX) {
506 rcs->baud_divisor_factor;
508 divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
509 rcs->baud_divisor_factor;
523 sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
524 sio_put_reg(rcs, SIO_LBGDL, 0xff);
525 sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
526 sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
527 sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);
532 sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
533 sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
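
rmc_comm_hw_reset() quiesces the chip and reprograms the baud-rate generator through the bank-select register. Source line 506 is the second half of a multiplication whose first half does not mention rcs and so is not listed; the sketch below assumes it applies a default rate (constant name SIO_BAUD_DEFAULT assumed) when the configured rate is out of range:

static void
rmc_comm_hw_reset(struct rmc_comm_state *rcs)
{
    uint32_t divisor;

    /* Quiesce: mask interrupts, reset both FIFOs, set the line format */
    rmc_comm_set_irq(rcs, B_FALSE);
    sio_put_reg(rcs, SIO_FCR, SIO_FCR_RXSR | SIO_FCR_TXSR);
    sio_put_reg(rcs, SIO_LCR, SIO_LCR_STD);

    /* Fall back to a default rate (constant name assumed) if out of range */
    if (rcs->baud < SIO_BAUD_MIN || rcs->baud > SIO_BAUD_MAX)
        divisor = SIO_BAUD_TO_DIVISOR(SIO_BAUD_DEFAULT) *
            rcs->baud_divisor_factor;
    else
        divisor = SIO_BAUD_TO_DIVISOR(rcs->baud) *
            rcs->baud_divisor_factor;

    /*
     * Program the baud-rate generator in register bank 1.  Writing 0xff
     * to the low byte first presumably avoids a transient zero divisor
     * while the two halves are updated.
     */
    sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK1);
    sio_put_reg(rcs, SIO_LBGDL, 0xff);
    sio_put_reg(rcs, SIO_LBGDH, divisor >> 8);
    sio_put_reg(rcs, SIO_LBGDL, divisor & 0xff);
    sio_put_reg(rcs, SIO_BSR, SIO_BSR_BANK0);

    /* Restore standard modem-control and FIFO settings */
    sio_put_reg(rcs, SIO_MCR, SIO_MCR_STD);
    sio_put_reg(rcs, SIO_FCR, SIO_FCR_STD);
}
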
540 rmc_comm_offline(struct rmc_comm_state *rcs)
542 if (rcs->sd_state.sio_handle != NULL)
543 ddi_regs_map_free(&rcs->sd_state.sio_handle);
544 rcs->sd_state.sio_handle = NULL;
545 rcs->sd_state.sio_regs = NULL;
549 rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
568 rcs->sd_state.sio_handle = h;
569 rcs->sd_state.sio_regs = (void *)p;
582 rmc_comm_hw_reset(rcs);
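
rmc_comm_offline()/rmc_comm_online() bracket the register mapping. The sketch below fills in a plausible ddi_regs_map_setup() call; the register-set number, offsets and access attributes are assumptions, and the real routine very likely validates the mapping further before use:

static void
rmc_comm_offline(struct rmc_comm_state *rcs)
{
    if (rcs->sd_state.sio_handle != NULL)
        ddi_regs_map_free(&rcs->sd_state.sio_handle);
    rcs->sd_state.sio_handle = NULL;
    rcs->sd_state.sio_regs = NULL;
}

static int
rmc_comm_online(struct rmc_comm_state *rcs, dev_info_t *dip)
{
    ddi_acc_handle_t h;
    ddi_device_acc_attr_t attr;
    caddr_t p;

    /* Access attributes and register-set number are assumptions */
    attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
    attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
    attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
    if (ddi_regs_map_setup(dip, 0, &p, 0, 0, &attr, &h) != DDI_SUCCESS)
        return (EIO);        /* error value assumed */

    rcs->sd_state.sio_handle = h;
    rcs->sd_state.sio_regs = (void *)p;

    /* Put the chip into a known state before anyone else touches it */
    rmc_comm_hw_reset(rcs);
    return (0);
}
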
593 rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
597 rcs->sd_state.cycid = NULL;
602 err = rmc_comm_online(rcs, dip);
613 &rcs->dp_state.dp_iblk);
617 err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
624 mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
625 rcs->dp_state.dp_iblk);
627 mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
628 rcs->sd_state.hw_iblk);
638 err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
639 &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
641 mutex_destroy(rcs->dp_state.dp_mutex);
642 mutex_destroy(rcs->sd_state.hw_mutex);
650 if (rcs->sd_state.sio_handle != NULL) {
651 err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
652 rmc_comm_hi_intr, (caddr_t)rcs);
658 ddi_remove_softintr(rcs->sd_state.softid);
659 mutex_destroy(rcs->dp_state.dp_mutex);
660 mutex_destroy(rcs->sd_state.hw_mutex);
668 rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
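
rmc_comm_serdev_init() is the bottom-layer bring-up: map the chip, fetch the interrupt block cookies, create the two mutexes against those cookies, register the soft and hard interrupt handlers, and finally start the periodic poll. A sketch with the error unwinding reduced to what the listed lines show; the periodic interval and level are assumed for illustration:

static int
rmc_comm_serdev_init(struct rmc_comm_state *rcs, dev_info_t *dip)
{
    int err;

    rcs->sd_state.cycid = NULL;

    /* Map the SIO registers and put the chip into a known state */
    err = rmc_comm_online(rcs, dip);
    if (err != 0)
        return (-1);

    /* Interrupt block cookies for the soft and hard interrupt levels */
    err = ddi_get_soft_iblock_cookie(dip, DDI_SOFTINT_LOW,
        &rcs->dp_state.dp_iblk);
    if (err != DDI_SUCCESS)
        return (-1);
    err = ddi_get_iblock_cookie(dip, 0, &rcs->sd_state.hw_iblk);
    if (err != DDI_SUCCESS)
        return (-1);

    /* Protocol-layer and hardware-layer mutexes, tied to those cookies */
    mutex_init(rcs->dp_state.dp_mutex, NULL, MUTEX_DRIVER,
        rcs->dp_state.dp_iblk);
    mutex_init(rcs->sd_state.hw_mutex, NULL, MUTEX_DRIVER,
        rcs->sd_state.hw_iblk);

    /* Soft interrupt: does the actual receive work */
    err = ddi_add_softintr(dip, DDI_SOFTINT_LOW, &rcs->sd_state.softid,
        &rcs->dp_state.dp_iblk, NULL, rmc_comm_softint, (caddr_t)rcs);
    if (err != DDI_SUCCESS) {
        mutex_destroy(rcs->dp_state.dp_mutex);
        mutex_destroy(rcs->sd_state.hw_mutex);
        return (-1);
    }

    /* Hard interrupt, only if the register mapping succeeded */
    if (rcs->sd_state.sio_handle != NULL) {
        err = ddi_add_intr(dip, 0, &rcs->sd_state.hw_iblk, NULL,
            rmc_comm_hi_intr, (caddr_t)rcs);
        if (err != DDI_SUCCESS) {
            ddi_remove_softintr(rcs->sd_state.softid);
            mutex_destroy(rcs->dp_state.dp_mutex);
            mutex_destroy(rcs->sd_state.hw_mutex);
            return (-1);
        }
    }

    /* Periodic poll as a backstop for masked or lost interrupts */
    rcs->sd_state.cycid = ddi_periodic_add(rmc_comm_cyclic, rcs,
        (hrtime_t)5 * NANOSEC, DDI_IPL_1);    /* interval and level assumed */

    return (0);
}
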
679 rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
681 rmc_comm_hw_reset(rcs);
683 if (rcs->sd_state.cycid != NULL) {
684 ddi_periodic_delete(rcs->sd_state.cycid);
685 rcs->sd_state.cycid = NULL;
687 if (rcs->sd_state.sio_handle != NULL)
688 ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);
690 ddi_remove_softintr(rcs->sd_state.softid);
692 mutex_destroy(rcs->sd_state.hw_mutex);
694 mutex_destroy(rcs->dp_state.dp_mutex);
696 rmc_comm_offline(rcs);
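
rmc_comm_serdev_fini() is the mirror image, torn down in reverse order of init; the nesting of the teardown steps inside the cycid check is inferred from the listing:

static void
rmc_comm_serdev_fini(struct rmc_comm_state *rcs, dev_info_t *dip)
{
    /* Quiesce the chip first, then tear down in reverse order of init */
    rmc_comm_hw_reset(rcs);

    if (rcs->sd_state.cycid != NULL) {
        ddi_periodic_delete(rcs->sd_state.cycid);
        rcs->sd_state.cycid = NULL;

        if (rcs->sd_state.sio_handle != NULL)
            ddi_remove_intr(dip, 0, rcs->sd_state.hw_iblk);

        ddi_remove_softintr(rcs->sd_state.softid);

        mutex_destroy(rcs->sd_state.hw_mutex);
        mutex_destroy(rcs->dp_state.dp_mutex);
    }
    rmc_comm_offline(rcs);
}
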
707 rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
710 if (rcs != NULL) {
714 rmc_comm_set_irq(rcs, B_FALSE);
720 rmc_comm_drvintf_fini(rcs);
726 rmc_comm_dp_fini(rcs);
732 rmc_comm_serdev_fini(rcs, dip);
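
rmc_comm_unattach() looks like a staged teardown driven by flags saying which layers were successfully initialized, which would match the (0, 0, 0), (0, 1, 1) and (1, 1, 1) call sites in attach and detach below. A sketch; the flag parameter names and the final soft-state free are inferred:

static void
rmc_comm_unattach(struct rmc_comm_state *rcs, dev_info_t *dip, int instance,
    boolean_t drvi, boolean_t dp, boolean_t sd)    /* parameter names inferred */
{
    if (rcs != NULL) {
        /* sio_put_reg() is a no-op if the registers never got mapped */
        rmc_comm_set_irq(rcs, B_FALSE);

        if (drvi)
            rmc_comm_drvintf_fini(rcs);
        if (dp)
            rmc_comm_dp_fini(rcs);
        if (sd)
            rmc_comm_serdev_fini(rcs, dip);
    }
    ddi_soft_state_free(rmc_comm_statep, instance);    /* inferred */
}
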
746 struct rmc_comm_state *rcs = NULL;
762 if ((rcs = rmc_comm_getstate(dip, instance,
766 rmc_comm_hw_reset(rcs);
767 rmc_comm_set_irq(rcs, B_TRUE);
768 rcs->dip = dip;
777 mutex_enter(rcs->dp_state.dp_mutex);
778 dp_reset(rcs, INITIAL_SEQID, 1, 1);
779 mutex_exit(rcs->dp_state.dp_mutex);
800 if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
802 rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
805 ddi_set_driver_private(dip, rcs);
807 rcs->dip = NULL;
812 rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
814 rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
822 rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
828 if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
829 (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
830 rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;
835 if (rmc_comm_serdev_init(rcs, dip) != 0) {
836 rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
843 rmc_comm_dp_init(rcs);
848 if (rmc_comm_drvintf_init(rcs) != 0) {
849 rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
856 rcs->majornum = ddi_driver_major(dip);
857 rcs->instance = instance;
858 rcs->dip = dip;
863 rmc_comm_set_irq(rcs, B_TRUE);
870 rcs->is_attached = B_TRUE;
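
Pulling the attach fragments together: the DDI_RESUME arm reprograms the chip and resynchronizes the protocol, while DDI_ATTACH reads its tunables from driver properties, clamps the divisor factor, and brings the layers up bottom-to-top, unwinding through rmc_comm_unattach() on failure. A sketch; the soft-state allocation step, the property names, and any statements that never mention rcs are not visible in the listing and are assumptions here:

static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
    struct rmc_comm_state *rcs = NULL;
    int instance = ddi_get_instance(dip);

    switch (cmd) {
    default:
        return (DDI_FAILURE);

    case DDI_RESUME:
        if ((rcs = rmc_comm_getstate(dip, instance,
            "rmc_comm_attach")) == NULL)
            return (DDI_FAILURE);

        /* Reprogram the chip and resynchronize the protocol layer */
        rmc_comm_hw_reset(rcs);
        rmc_comm_set_irq(rcs, B_TRUE);
        rcs->dip = dip;

        mutex_enter(rcs->dp_state.dp_mutex);
        dp_reset(rcs, INITIAL_SEQID, 1, 1);
        mutex_exit(rcs->dp_state.dp_mutex);
        return (DDI_SUCCESS);

    case DDI_ATTACH:
        break;
    }

    /* The real code allocates the soft state here (not in the listing) */
    if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_attach")) ==
        NULL) {
        rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
        return (DDI_FAILURE);
    }
    ddi_set_driver_private(dip, rcs);
    rcs->dip = NULL;

    /* Tunables; the property names here are assumptions for illustration */
    rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "baud-rate", 0);
    rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
        "debug", 0);
    rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
        DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

    /* Silently clamp a bogus divisor factor back to the minimum */
    if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
        (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
        rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

    /* Bring the layers up bottom-to-top, unwinding on failure */
    if (rmc_comm_serdev_init(rcs, dip) != 0) {
        rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
        return (DDI_FAILURE);
    }
    rmc_comm_dp_init(rcs);
    if (rmc_comm_drvintf_init(rcs) != 0) {
        rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
        return (DDI_FAILURE);
    }

    rcs->majornum = ddi_driver_major(dip);
    rcs->instance = instance;
    rcs->dip = dip;

    rmc_comm_set_irq(rcs, B_TRUE);
    rcs->is_attached = B_TRUE;
    return (DDI_SUCCESS);
}
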
878 struct rmc_comm_state *rcs;
882 if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
897 rcs->dip = NULL;
898 rmc_comm_hw_reset(rcs);
907 if (rcs->n_registrations != 0) {
916 rcs->is_attached = B_FALSE;
918 rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
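
The detach fragments split across DDI_SUSPEND (quiesce only; the DDI_RESUME arm above undoes it) and DDI_DETACH, which refuses to proceed while rmc_comm_register() clients are still outstanding. Which listed line belongs to which arm is inferred, and the recovery path when detach is refused is omitted from this sketch:

static int
rmc_comm_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
    struct rmc_comm_state *rcs;
    int instance = ddi_get_instance(dip);

    if ((rcs = rmc_comm_getstate(dip, instance, "rmc_comm_detach")) == NULL)
        return (DDI_FAILURE);

    switch (cmd) {
    case DDI_SUSPEND:
        /* Quiesce only; the DDI_RESUME arm of attach undoes this */
        rcs->dip = NULL;
        rmc_comm_hw_reset(rcs);
        return (DDI_SUCCESS);

    case DDI_DETACH:
        /* Refuse to go away while any client is still registered */
        if (rcs->n_registrations != 0) {
            /* the real driver presumably restores state here; omitted */
            return (DDI_FAILURE);
        }
        rcs->is_attached = B_FALSE;
        rmc_comm_unattach(rcs, dip, instance, 1, 1, 1);
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }
}
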
930 struct rmc_comm_state *rcs;
932 if ((rcs = rmc_comm_getstate(dip, -1, "rmc_comm_reset")) == NULL)
934 rmc_comm_hw_reset(rcs);