xref: /freebsd/sys/netsmb/smb_iod.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>


#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)


static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

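/*
 * Mark a request as processed: record the error code, advance the
 * response generation count and wake up anyone sleeping on it in
 * smb_iod_waitrq().
 */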
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

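/*
 * Tear down the transport connection, if any, and detach the
 * transport data from the VC.
 */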
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

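/*
 * Mark the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */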
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

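/*
 * (Re)establish the connection: create, optionally bind, and connect
 * the transport, then run the SMB negotiate and session setup
 * exchanges.  Any failure along the way marks the connection dead.
 */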
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;

	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
	/* vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}

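/*
 * Close the session, if one is active, and shut down the transport,
 * leaving the iod in the SMBIOD_ST_NOTCONN state.
 */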
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

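/*
 * Reconnect the VC if necessary, reissue the tree connect for the
 * given share and wake up anyone waiting on the share's VC
 * generation id.
 */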
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

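/*
 * Try to send a single request.  The TID and UID fields are filled
 * in on the first attempt; a request that still cannot be sent after
 * several attempts is failed with SMBR_RESTART set so the upper
 * layers may reissue it.
 */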
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		*rqp->sr_rqtid = htole16(ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		*rqp->sr_rquid = htole16(vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_TRYWAIT);
	error = rqp->sr_lerror = m ? SMB_TRAN_SEND(vcp, m, td) : ENOBUFS;
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets: pull complete messages off the transport,
 * match each response to its request by MID and wake up the waiter.
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("transport returned NULL mbuf without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * Now we have an entire, possibly invalid SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					m_freem(m);	/* discard the duplicate */
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %u\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * Check for interrupted requests and fail them with EINTR.
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

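/*
 * Queue an event for the iod thread.  For synchronous events
 * (SMBIOD_EV_SYNC) the caller sleeps until the iod has processed the
 * event and collects the event's error code; asynchronous events are
 * freed by the iod thread itself.
 */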
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place a request on the queue.  Requests issued by the smbiod
 * thread itself are given priority and are sent synchronously.
 */
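/*
 * A sketch of the expected calling sequence (the in-tree callers
 * live in smb_rq.c, e.g. smb_rq_enqueue() and smb_rq_reply()):
 *
 *	error = smb_iod_addrq(rqp);	queue request, wake the iod
 *	if (error == 0)
 *		error = smb_iod_waitrq(rqp);	sleep for the response
 */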
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * we don't need to lock state field here
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

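/*
 * Remove a request from the queue.  For non-internal requests this
 * also releases a mux slot and wakes up a thread waiting for one.
 */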
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

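/*
 * Wait for a response to a queued request.  Internal requests are
 * driven to completion here by calling the send and receive loops
 * directly, since the iod thread itself may be the caller.
 */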
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay on the list, reinsert it
		 * at the end of the queue so other waiters get a chance
		 * to proceed.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

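/*
 * Walk the request list: transmit requests that have not been sent
 * yet and fail with ETIMEDOUT those that have waited too long for a
 * response.
 */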
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout);	/* double the transport timeout */
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for the smbiod daemon: drain the event queue, then
 * push outgoing requests and collect incoming responses.
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/
	int error;

	SMBIODEBUG("\n");
	error = 0;

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

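/*
 * Body of the per-VC iod kthread: process events and requests until
 * a shutdown is requested.
 */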
static void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);
	/*
	 * Here we assume that the thread structure will remain the same
	 * for the entire life of the kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
/*		mtx_unlock(&Giant, MTX_DEF);*/
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}
/*	mtx_lock(&Giant, MTX_DEF);*/
	kthread_exit(0);
}

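/*
 * Allocate and initialize an iod for the given VC and start its
 * kthread.
 */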
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

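/*
 * Synchronously shut down the iod thread and release its resources.
 */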
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	return 0;
}

int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}