Lines matching refs:iod — references to the iod symbol in smb_iod.c, grouped below by containing function. The leading numbers are source line numbers.
Macro definitions:

   48  #define SMB_IOD_EVLOCKPTR(iod)   (&((iod)->iod_evlock))
   49  #define SMB_IOD_EVLOCK(iod)      smb_sl_lock(&((iod)->iod_evlock))
   50  #define SMB_IOD_EVUNLOCK(iod)    smb_sl_unlock(&((iod)->iod_evlock))

   52  #define SMB_IOD_RQLOCKPTR(iod)   (&((iod)->iod_rqlock))
   53  #define SMB_IOD_RQLOCK(iod)      smb_sl_lock(&((iod)->iod_rqlock))
   54  #define SMB_IOD_RQUNLOCK(iod)    smb_sl_unlock(&((iod)->iod_rqlock))

   56  #define smb_iod_wakeup(iod)      wakeup(&(iod)->iod_flags)
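These helpers wrap the two smb_sl locks that guard the iod's event and request queues, and smb_iod_wakeup() pokes the iod thread sleeping on iod_flags. A minimal sketch of the pattern they support, modeled on the smb_iod_request() lines further down (illustrative only, not a verbatim excerpt):

	SMB_IOD_EVLOCK(iod);                                  /* take iod_evlock        */
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);   /* queue the new event    */
	SMB_IOD_EVUNLOCK(iod);                                /* drop the lock, then... */
	smb_iod_wakeup(iod);                                  /* ...wake the iod thread */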
Forward declarations:

   62  static int  smb_iod_sendall(struct smbiod *iod);
   63  static int  smb_iod_disconnect(struct smbiod *iod);
In smb_iod_invrq():

   78  smb_iod_invrq(struct smbiod *iod)
   85          SMB_IOD_RQLOCK(iod);
   86          TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
   90          SMB_IOD_RQUNLOCK(iod);

In smb_iod_closetran():

   94  smb_iod_closetran(struct smbiod *iod)
   96          struct smb_vc *vcp = iod->iod_vc;
   97          struct thread *td = iod->iod_td;

In smb_iod_dead():

  107  smb_iod_dead(struct smbiod *iod)
  109          iod->iod_state = SMBIOD_ST_DEAD;
  110          smb_iod_closetran(iod);
  111          smb_iod_invrq(iod);
In smb_iod_connect():

  115  smb_iod_connect(struct smbiod *iod)
  117          struct smb_vc *vcp = iod->iod_vc;
  118          struct thread *td = iod->iod_td;
  121          SMBIODEBUG("%d\n", iod->iod_state);
  122          switch(iod->iod_state) {
  147          SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
  148          iod->iod_state = SMBIOD_ST_TRANACTIVE;
  151          error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
  155          error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
  158          iod->iod_state = SMBIOD_ST_VCACTIVE;
  160          smb_iod_invrq(iod);
  164          smb_iod_dead(iod);
In smb_iod_disconnect():

  169  smb_iod_disconnect(struct smbiod *iod)
  171          struct smb_vc *vcp = iod->iod_vc;
  174          if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
  175                  smb_smb_ssnclose(vcp, &iod->iod_scred);
  176                  iod->iod_state = SMBIOD_ST_TRANACTIVE;
  179          smb_iod_closetran(iod);
  180          iod->iod_state = SMBIOD_ST_NOTCONN;

In smb_iod_treeconnect():

  185  smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
  189          if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
  190                  if (iod->iod_state != SMBIOD_ST_DEAD)
  192                  iod->iod_state = SMBIOD_ST_RECONNECT;
  193                  error = smb_iod_connect(iod);
  201          error = smb_smb_treeconnect(ssp, &iod->iod_scred);
In smb_iod_sendrq():

  210  smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
  212          struct thread *td = iod->iod_td;
  213          struct smb_vc *vcp = iod->iod_vc;
  218          SMBIODEBUG("iod_state = %d\n", iod->iod_state);
  219          switch (iod->iod_state) {
  224          iod->iod_state = SMBIOD_ST_RECONNECT;
  233          if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
  257          iod->iod_lastrqsent = rqp->sr_timesent;
In smb_iod_recvall():

  280  smb_iod_recvall(struct smbiod *iod)
  282          struct smb_vc *vcp = iod->iod_vc;
  283          struct thread *td = iod->iod_td;
  290          switch (iod->iod_state) {
  304          smb_iod_dead(iod);
  329          SMB_IOD_RQLOCK(iod);
  330          TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
  349          SMB_IOD_RQUNLOCK(iod);
  359          SMB_IOD_RQLOCK(iod);
  360          TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
  365          SMB_IOD_RQUNLOCK(iod);
In smb_iod_request():

  370  smb_iod_request(struct smbiod *iod, int event, void *ident)
  379          SMB_IOD_EVLOCK(iod);
  380          STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
  382          SMB_IOD_EVUNLOCK(iod);
  383          smb_iod_wakeup(iod);
  386          smb_iod_wakeup(iod);
  387          msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
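The two smb_iod_wakeup() calls reflect the two delivery modes: an asynchronous event is queued and the caller returns right after waking the iod thread, while a caller that sets SMBIOD_EV_SYNC sleeps on the event (PDROP releases iod_evlock) until smb_iod_main() has processed it. A hedged caller-side sketch; the only flag combination visible in this listing is the shutdown request issued by smb_iod_destroy() at the end of the file:

	/* Synchronous request: does not return until the iod thread
	 * has handled the event. */
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);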
In smb_iod_addrq():

  401          struct smbiod *iod = vcp->vc_iod;
  406              rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
  408                  SMB_IOD_RQLOCK(iod);
  409                  TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
  410                  SMB_IOD_RQUNLOCK(iod);
  412                  if (smb_iod_sendrq(iod, rqp) != 0) {
  413                          smb_iod_dead(iod);
  421                  tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
  428          switch (iod->iod_state) {
  440          SMB_IOD_RQLOCK(iod);
  446          if (iod->iod_muxcnt < vcp->vc_maxmux)
  448          iod->iod_muxwant++;
  449          msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
  452          iod->iod_muxcnt++;
  453          TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
  454          SMB_IOD_RQUNLOCK(iod);
  455          smb_iod_wakeup(iod);
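Lines 446-455 are the multiplex throttle: a request is only queued while iod_muxcnt is below the server's vc_maxmux limit; otherwise the caller bumps iod_muxwant and msleep()s on it with the request-list lock as the interlock, retrying when woken. A condensed sketch of that loop (the surrounding loop construct and the wait-message string are not visible in this listing and are assumed):

	SMB_IOD_RQLOCK(iod);
	while (iod->iod_muxcnt >= vcp->vc_maxmux) {            /* VC saturated       */
		iod->iod_muxwant++;                            /* register as waiter */
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "muxwait", 0);                      /* wmesg assumed      */
	}
	iod->iod_muxcnt++;                                     /* claim a mux slot   */
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);     /* queue the request  */
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);                                   /* let the iod send it */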
In smb_iod_removerq():

  463          struct smbiod *iod = vcp->vc_iod;
  467          SMB_IOD_RQLOCK(iod);
  468          TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
  469          SMB_IOD_RQUNLOCK(iod);
  472          SMB_IOD_RQLOCK(iod);
  475          msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
  477          TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
  478          iod->iod_muxcnt--;
  479          if (iod->iod_muxwant) {
  480                  iod->iod_muxwant--;
  481                  wakeup(&iod->iod_muxwant);
  483          SMB_IOD_RQUNLOCK(iod);
In smb_iod_waitrq():

  490          struct smbiod *iod = rqp->sr_vc->vc_iod;
  496          smb_iod_sendall(iod);
  497          smb_iod_recvall(iod);
  500          tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
  516          SMB_IOD_RQLOCK(iod);
  517          TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
  518          TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
  519          SMB_IOD_RQUNLOCK(iod);
In smb_iod_sendall():

  526  smb_iod_sendall(struct smbiod *iod)
  528          struct smb_vc *vcp = iod->iod_vc;
  537          SMB_IOD_RQLOCK(iod);
  538          TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
  542                  SMB_IOD_RQUNLOCK(iod);
  543                  herror = smb_iod_sendrq(iod, rqp);
  544                  SMB_IOD_RQLOCK(iod);
  566          SMB_IOD_RQUNLOCK(iod);
  568          smb_iod_dead(iod);
In smb_iod_main():

  576  smb_iod_main(struct smbiod *iod)
  588          SMB_IOD_EVLOCK(iod);
  589          evp = STAILQ_FIRST(&iod->iod_evlist);
  591                  SMB_IOD_EVUNLOCK(iod);
  594          STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
  596          SMB_IOD_EVUNLOCK(iod);
  599                  iod->iod_state = SMBIOD_ST_RECONNECT;
  600                  evp->ev_error = smb_iod_connect(iod);
  603                  evp->ev_error = smb_iod_disconnect(iod);
  606                  evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
  609                  iod->iod_flags |= SMBIOD_SHUTDOWN;
  615                  SMB_IOD_EVLOCK(iod);
  617                  SMB_IOD_EVUNLOCK(iod);
  622          if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
  624                  timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow);
  625                  if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
  626                          smb_smb_echo(vcp, &iod->iod_scred);
  630          smb_iod_sendall(iod);
  631          smb_iod_recvall(iod);
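The timespec arithmetic near line 624 is the keep-alive check: once per pass, if the virtual circuit is active, smb_iod_main() subtracts iod_pingtimo from the current time and, when the result is later than iod_lastrqsent (nothing has been sent for at least one ping interval), it sends an SMB echo. Roughly (a sketch; the getnanotime() call and the tsnow declaration are assumed from context):

	struct timespec tsnow;

	getnanotime(&tsnow);                                  /* now                    */
	timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow);      /* now - ping timeout     */
	if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >))     /* idle for a full period */
		smb_smb_echo(vcp, &iod->iod_scred);           /* keep the session alive */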
In smb_iod_thread():

  638          struct smbiod *iod = arg;
  646          iod->iod_td = curthread;
  647          smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
  648          while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
  649                  smb_iod_main(iod);
  650                  SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
  651                  if (iod->iod_flags & SMBIOD_SHUTDOWN)
  653                  tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
  657          smb_sl_destroy(&iod->iod_rqlock);
  658          smb_sl_destroy(&iod->iod_evlock);
  659          free(iod, M_SMBIOD);
In smb_iod_create():

  667          struct smbiod *iod;
  670          iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
  671          iod->iod_id = smb_iod_next++;
  672          iod->iod_state = SMBIOD_ST_NOTCONN;
  673          iod->iod_vc = vcp;
  674          iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
  675          iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
  676          getnanotime(&iod->iod_lastrqsent);
  677          vcp->vc_iod = iod;
  678          smb_sl_init(&iod->iod_rqlock, "90rql");
  679          TAILQ_INIT(&iod->iod_rqlist);
  680          smb_sl_init(&iod->iod_evlock, "90evl");
  681          STAILQ_INIT(&iod->iod_evlist);
  682          error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
  683              RFNOWAIT, 0, "smbiod%d", iod->iod_id);
  687          smb_sl_destroy(&iod->iod_rqlock);
  688          smb_sl_destroy(&iod->iod_evlock);
  689          free(iod, M_SMBIOD);
In smb_iod_destroy():

  696  smb_iod_destroy(struct smbiod *iod)
  698          smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
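Taken together, the last three functions define the iod's lifecycle: smb_iod_create() allocates one smbiod per virtual circuit, links it to the VC and starts the "smbiod%d" kernel process, and teardown goes through the synchronous shutdown event, after which smb_iod_thread() destroys the locks and frees the structure itself. A caller-side sketch (the calling code is not part of this listing and is assumed):

	/* VC setup: allocate and start the per-connection iod thread. */
	error = smb_iod_create(vcp);
	if (error)
		return (error);
	/* ... use the connection ... */

	/* VC teardown: posts SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC and waits
	 * until the iod thread has acted on it. */
	smb_iod_destroy(vcp->vc_iod);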