xref: /freebsd/sys/netsmb/smb_iod.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>


#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

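/*
 * Mark a request as processed: record the error code, bump the response
 * generation and wake any thread sleeping on the request state in
 * smb_iod_waitrq().
 */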
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct proc *p = iod->iod_p;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, p);
	SMB_TRAN_DONE(vcp, p);
	vcp->vc_tdata = NULL;
}

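/*
 * Declare the connection dead: tear down the transport and fail all
 * outstanding requests with ENOTCONN.
 */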
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

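/*
 * (Re)establish the connection.  The itry/ithrow/icatch blocks below are
 * netsmb's exception-style macros: ithrow() transfers control to the
 * icatch block with the error code when its argument is non-zero, so each
 * transport and session-setup step short-circuits on failure.
 */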
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct proc *p = iod->iod_p;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;
	error = 0;
	itry {
		ithrow(SMB_TRAN_CREATE(vcp, p));
		SMBIODEBUG("tcreate\n");
		if (vcp->vc_laddr) {
			ithrow(SMB_TRAN_BIND(vcp, vcp->vc_laddr, p));
		}
		SMBIODEBUG("tbind\n");
		ithrow(SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, p));
		SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
		SMBIODEBUG("tconnect\n");
/*		vcp->vc_mid = 0;*/
		ithrow(smb_smb_negotiate(vcp, &iod->iod_scred));
		SMBIODEBUG("snegotiate\n");
		ithrow(smb_smb_ssnsetup(vcp, &iod->iod_scred));
		iod->iod_state = SMBIOD_ST_VCACTIVE;
		SMBIODEBUG("completed\n");
		smb_iod_invrq(iod);
	} icatch(error) {
		smb_iod_dead(iod);
	} ifinally {
	} iendtry;
	return error;
}

static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

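/*
 * Reconnect a share: bring the VC back up first if needed, then redo the
 * tree connect.  SMBS_RECONNECTING keeps other threads off the share
 * meanwhile; waiters on ss_vcgenid are woken when done.
 */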
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

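/*
 * Try to transmit a single request.  The TID/UID words are patched into
 * the header on the first attempt; after several failed attempts the
 * request is marked for restart and ENOTCONN is returned so the caller
 * can declare the connection dead.
 */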
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct proc *p = iod->iod_p;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		*rqp->sr_rqtid = htoles(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		*rqp->sr_rquid = htoles(vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAIT);
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, p) : ENOBUFS;
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct proc *p = iod->iod_p;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, p);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("transport returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, though possibly invalid, SMB
		 * packet.  Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * Check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_proc_intr(rqp->sr_cred->scr_p)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

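/*
 * Post an event to the iod thread.  Async events just queue and wake the
 * thread; SMBIOD_EV_SYNC events sleep on the event until the iod has
 * processed it and report its ev_error back to the caller.
 */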
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place the request in the queue.  Requests from the smbiod thread
 * itself have a higher priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_p == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * We don't need to lock the state field here.
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

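/*
 * Take a request off the queue.  Internal requests are not counted
 * against the mux limit; for ordinary ones the mux count is dropped and
 * one waiter in smb_iod_addrq() is woken, if any.
 */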
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

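/*
 * Wait for a request to complete.  A minimal sketch of the normal caller
 * sequence, assuming the usual smb_rq helpers (error handling elided):
 *
 *	error = smb_iod_addrq(rqp);
 *	if (error == 0)
 *		error = smb_iod_waitrq(rqp);
 *
 * Multipacket requests stay on the queue between replies; all others are
 * removed here once the reply (or error) is in.
 */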
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it at
		 * the end of the queue so other waiters get a chance to
		 * be serviced.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}


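/*
 * Push out any unsent requests and time out those already on the wire.
 * A request that has been outstanding for more than twice the transport
 * timeout is failed with ETIMEDOUT.
 */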
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

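/*
 * kthread_create() changed its signature while this code was in flight;
 * FB_CURRENT presumably selects the -CURRENT spelling, with
 * kthread_create2() as the older compatibility name.
 */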
#ifndef FB_CURRENT
#define	kthread_create_compat	kthread_create2
#else
#define	kthread_create_compat	kthread_create
#endif


static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);
	smb_makescred(&iod->iod_scred, iod->iod_p, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
/*		mtx_unlock(&Giant, MTX_DEF);*/
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}
/*	mtx_lock(&Giant, MTX_DEF);*/
	kthread_exit(0);
}

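/*
 * Create and start the iod thread for a freshly set up VC.  One iod
 * services exactly one VC.
 */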
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kthread_create_compat(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

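/*
 * Module load/unload hooks; nothing to do globally at the moment.
 */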
int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}