xref: /freebsd/sys/netsmb/smb_iod.c (revision 2ba1d4970a06a1660b46f6fd99351d154b295683)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/unistd.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

#define SMBIOD_SLEEP_TIMO	2
#define	SMBIOD_PING_TIMO	60	/* seconds */

#define	SMB_IOD_EVLOCKPTR(iod)	(&((iod)->iod_evlock))
#define	SMB_IOD_EVLOCK(iod)	smb_sl_lock(&((iod)->iod_evlock))
#define	SMB_IOD_EVUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_evlock))

#define	SMB_IOD_RQLOCKPTR(iod)	(&((iod)->iod_rqlock))
#define	SMB_IOD_RQLOCK(iod)	smb_sl_lock(&((iod)->iod_rqlock))
#define	SMB_IOD_RQUNLOCK(iod)	smb_sl_unlock(&((iod)->iod_rqlock))

#define	smb_iod_wakeup(iod)	wakeup(&(iod)->iod_flags)

static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon");

static int smb_iod_next;

static int  smb_iod_sendall(struct smbiod *iod);
static int  smb_iod_disconnect(struct smbiod *iod);
static void smb_iod_thread(void *);

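/*
 * Mark a request as processed: record the error code, bump the response
 * generation counter, flag the request as notified and wake up any waiters.
 */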
static __inline void
smb_iod_rqprocessed(struct smb_rq *rqp, int error)
{
	SMBRQ_SLOCK(rqp);
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	wakeup(&rqp->sr_state);
	SMBRQ_SUNLOCK(rqp);
}

static void
smb_iod_invrq(struct smbiod *iod)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, ENOTCONN);
	}
	SMB_IOD_RQUNLOCK(iod);
}

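/*
 * Tear down the transport attached to this VC, if any.
 */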
static void
smb_iod_closetran(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;

	if (vcp->vc_tdata == NULL)
		return;
	SMB_TRAN_DISCONNECT(vcp, td);
	SMB_TRAN_DONE(vcp, td);
	vcp->vc_tdata = NULL;
}

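/*
 * Declare the connection dead: close the transport and fail all
 * outstanding requests with ENOTCONN.
 */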
static void
smb_iod_dead(struct smbiod *iod)
{
	iod->iod_state = SMBIOD_ST_DEAD;
	smb_iod_closetran(iod);
	smb_iod_invrq(iod);
}

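/*
 * (Re)establish the connection: create, bind and connect the transport,
 * then negotiate the SMB dialect and set up the session.  Any failure
 * marks the connection dead.
 */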
static int
smb_iod_connect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	int error;

	SMBIODEBUG("%d\n", iod->iod_state);
	switch(iod->iod_state) {
	    case SMBIOD_ST_VCACTIVE:
		SMBERROR("called for already opened connection\n");
		return EISCONN;
	    case SMBIOD_ST_DEAD:
		return ENOTCONN;	/* XXX: last error code ? */
	    default:
		break;
	}
	vcp->vc_genid++;

	error = (int)SMB_TRAN_CREATE(vcp, td);
	if (error)
		goto fail;
	SMBIODEBUG("tcreate\n");
	if (vcp->vc_laddr) {
		error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td);
		if (error)
			goto fail;
	}
	SMBIODEBUG("tbind\n");
	error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td);
	if (error)
		goto fail;
	SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags);
	iod->iod_state = SMBIOD_ST_TRANACTIVE;
	SMBIODEBUG("tconnect\n");
	/* vcp->vc_mid = 0;*/
	error = (int)smb_smb_negotiate(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	SMBIODEBUG("snegotiate\n");
	error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred);
	if (error)
		goto fail;
	iod->iod_state = SMBIOD_ST_VCACTIVE;
	SMBIODEBUG("completed\n");
	smb_iod_invrq(iod);
	return (0);

 fail:
	smb_iod_dead(iod);
	return (error);
}

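/*
 * Close the SMB session (if active) and shut down the transport,
 * returning the connection to the not-connected state.
 */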
static int
smb_iod_disconnect(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;

	SMBIODEBUG("\n");
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		smb_smb_ssnclose(vcp, &iod->iod_scred);
		iod->iod_state = SMBIOD_ST_TRANACTIVE;
	}
	vcp->vc_smbuid = SMB_UID_UNKNOWN;
	smb_iod_closetran(iod);
	iod->iod_state = SMBIOD_ST_NOTCONN;
	return 0;
}

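/*
 * (Re)connect to a share, first reconnecting the VC if it has gone dead.
 */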
static int
smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp)
{
	int error;

	if (iod->iod_state != SMBIOD_ST_VCACTIVE) {
		if (iod->iod_state != SMBIOD_ST_DEAD)
			return ENOTCONN;
		iod->iod_state = SMBIOD_ST_RECONNECT;
		error = smb_iod_connect(iod);
		if (error)
			return error;
	}
	SMBIODEBUG("tree reconnect\n");
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags |= SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	error = smb_smb_treeconnect(ssp, &iod->iod_scred);
	SMBS_ST_LOCK(ssp);
	ssp->ss_flags &= ~SMBS_RECONNECTING;
	SMBS_ST_UNLOCK(ssp);
	wakeup(&ssp->ss_vcgenid);
	return error;
}

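/*
 * Try to send a single request over the transport.  The TID/UID fields
 * are filled in on the first attempt; after several failed resends the
 * request is restarted, and fatal transport errors return ENOTCONN so
 * the caller can declare the connection dead.
 */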
static int
smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp)
{
	struct thread *td = iod->iod_td;
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_share *ssp = rqp->sr_share;
	struct mbuf *m;
	int error;

	SMBIODEBUG("iod_state = %d\n", iod->iod_state);
	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		smb_iod_rqprocessed(rqp, ENOTCONN);
		return 0;
	    case SMBIOD_ST_DEAD:
		iod->iod_state = SMBIOD_ST_RECONNECT;
		return 0;
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	if (rqp->sr_sendcnt == 0) {
#ifdef movedtoanotherplace
		if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux)
			return 0;
#endif
		le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN);
		le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0);
		mb_fixhdr(&rqp->sr_rq);
		if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE)
			smb_rq_sign(rqp);
	}
	if (rqp->sr_sendcnt++ > 5) {
		rqp->sr_flags |= SMBR_RESTART;
		smb_iod_rqprocessed(rqp, rqp->sr_lerror);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return ENOTCONN;
	}
	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
	m_dumpm(rqp->sr_rq.mb_top);
	m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK);
	error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td);
	if (error == 0) {
		getnanotime(&rqp->sr_timesent);
		iod->iod_lastrqsent = rqp->sr_timesent;
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		return 0;
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		return ENOTCONN;
	}
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR);
	return 0;
}

/*
 * Process incoming packets
 */
static int
smb_iod_recvall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct thread *td = iod->iod_td;
	struct smb_rq *rqp;
	struct mbuf *m;
	u_char *hp;
	u_short mid;
	int error;

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
	    case SMBIOD_ST_DEAD:
	    case SMBIOD_ST_RECONNECT:
		return 0;
	    default:
		break;
	}
	for (;;) {
		m = NULL;
		error = SMB_TRAN_RECV(vcp, &m, td);
		if (error == EWOULDBLOCK)
			break;
		if (SMB_TRAN_FATAL(vcp, error)) {
			smb_iod_dead(iod);
			break;
		}
		if (error)
			break;
		if (m == NULL) {
			SMBERROR("tran returned NULL without error\n");
			error = EPIPE;
			continue;
		}
		m = m_pullup(m, SMB_HDRLEN);
		if (m == NULL)
			continue;	/* wait for a good packet */
		/*
		 * We now have a complete, but possibly invalid, SMB packet.
		 * Be careful while parsing it.
		 */
		m_dumpm(m);
		hp = mtod(m, u_char*);
		if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
			m_freem(m);
			continue;
		}
		mid = SMB_HDRMID(hp);
		SMBSDEBUG("mid %04x\n", (u_int)mid);
		SMB_IOD_RQLOCK(iod);
		TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
			if (rqp->sr_mid != mid)
				continue;
			SMBRQ_SLOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					SMBRQ_SUNLOCK(rqp);
					SMBERROR("duplicate response %d (ignored)\n", mid);
					m_freem(m);	/* rqp != NULL skips the free below */
					break;
				}
			}
			SMBRQ_SUNLOCK(rqp);
			smb_iod_rqprocessed(rqp, 0);
			break;
		}
		SMB_IOD_RQUNLOCK(iod);
		if (rqp == NULL) {
			SMBERROR("drop resp with mid %d\n", (u_int)mid);
/*			smb_printrqlist(vcp);*/
			m_freem(m);
		}
	}
	/*
	 * check for interrupts
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		if (smb_td_intr(rqp->sr_cred->scr_td)) {
			smb_iod_rqprocessed(rqp, EINTR);
		}
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

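/*
 * Post an event to the iod thread.  For synchronous events, sleep until
 * the iod has processed the event and return the resulting error.
 */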
int
smb_iod_request(struct smbiod *iod, int event, void *ident)
{
	struct smbiod_event *evp;
	int error;

	SMBIODEBUG("\n");
	evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK);
	evp->ev_type = event;
	evp->ev_ident = ident;
	SMB_IOD_EVLOCK(iod);
	STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link);
	if ((event & SMBIOD_EV_SYNC) == 0) {
		SMB_IOD_EVUNLOCK(iod);
		smb_iod_wakeup(iod);
		return 0;
	}
	smb_iod_wakeup(iod);
	msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0);
	error = evp->ev_error;
	free(evp, M_SMBIOD);
	return error;
}

/*
 * Place the request in the queue.  Requests from the smbiod thread
 * itself are given priority.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_cred->scr_td != NULL &&
	    rqp->sr_cred->scr_td->td_proc == iod->iod_p) {
		rqp->sr_flags |= SMBR_INTERNAL;
		SMB_IOD_RQLOCK(iod);
		TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		for (;;) {
			if (smb_iod_sendrq(iod, rqp) != 0) {
				smb_iod_dead(iod);
				break;
			}
			/*
			 * We don't need to lock the state field here.
			 */
			if (rqp->sr_state != SMBRQ_NOTSENT)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90sndw", hz);
		}
		if (rqp->sr_lerror)
			smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}

	switch (iod->iod_state) {
	    case SMBIOD_ST_NOTCONN:
		return ENOTCONN;
	    case SMBIOD_ST_DEAD:
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL);
		if (error)
			return error;
		return EXDEV;
	    default:
		break;
	}

	SMB_IOD_RQLOCK(iod);
	for (;;) {
		if (vcp->vc_maxmux == 0) {
			SMBERROR("maxmux == 0\n");
			break;
		}
		if (iod->iod_muxcnt < vcp->vc_maxmux)
			break;
		iod->iod_muxwant++;
		msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod),
		    PWAIT, "90mux", 0);
	}
	iod->iod_muxcnt++;
	TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
	SMB_IOD_RQUNLOCK(iod);
	smb_iod_wakeup(iod);
	return 0;
}

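/*
 * Remove a request from the queue, waiting out a transient send lock.
 * For non-internal requests, release the multiplex slot and wake up
 * anyone waiting for one.
 */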
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smbiod *iod = vcp->vc_iod;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
		return 0;
	}
	SMB_IOD_RQLOCK(iod);
	while (rqp->sr_flags & SMBR_XLOCK) {
		rqp->sr_flags |= SMBR_XLOCKWANT;
		msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0);
	}
	TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
	iod->iod_muxcnt--;
	if (iod->iod_muxwant) {
		iod->iod_muxwant--;
		wakeup(&iod->iod_muxwant);
	}
	SMB_IOD_RQUNLOCK(iod);
	return 0;
}

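/*
 * Wait for a request to complete.  Internal requests are driven to
 * completion from this context; all others sleep until the iod thread
 * signals the response.
 */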
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smbiod *iod = rqp->sr_vc->vc_iod;
	int error;

	SMBIODEBUG("\n");
	if (rqp->sr_flags & SMBR_INTERNAL) {
		for (;;) {
			smb_iod_sendall(iod);
			smb_iod_recvall(iod);
			if (rqp->sr_rpgen != rqp->sr_rplast)
				break;
			tsleep(&iod->iod_flags, PWAIT, "90irq", hz);
		}
		smb_iod_removerq(rqp);
		return rqp->sr_lerror;
	}
	SMBRQ_SLOCK(rqp);
	if (rqp->sr_rpgen == rqp->sr_rplast)
		msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0);
	rqp->sr_rplast++;
	SMBRQ_SUNLOCK(rqp);
	error = rqp->sr_lerror;
	if (rqp->sr_flags & SMBR_MULTIPACKET) {
		/*
		 * If the request should stay in the list, reinsert it at
		 * the end of the queue so other waiters get a chance too.
		 */
		SMB_IOD_RQLOCK(iod);
		TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link);
		TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link);
		SMB_IOD_RQUNLOCK(iod);
	} else
		smb_iod_removerq(rqp);
	return error;
}

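/*
 * Walk the request list: send anything still unsent, and time out
 * requests that have been outstanding longer than twice the transport
 * timeout.
 */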
static int
smb_iod_sendall(struct smbiod *iod)
{
	struct smb_vc *vcp = iod->iod_vc;
	struct smb_rq *rqp;
	struct timespec ts, tstimeout;
	int herror;

	herror = 0;
	/*
	 * Loop through the list of requests and send them if possible
	 */
	SMB_IOD_RQLOCK(iod);
	TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) {
		switch (rqp->sr_state) {
		    case SMBRQ_NOTSENT:
			rqp->sr_flags |= SMBR_XLOCK;
			SMB_IOD_RQUNLOCK(iod);
			herror = smb_iod_sendrq(iod, rqp);
			SMB_IOD_RQLOCK(iod);
			rqp->sr_flags &= ~SMBR_XLOCK;
			if (rqp->sr_flags & SMBR_XLOCKWANT) {
				rqp->sr_flags &= ~SMBR_XLOCKWANT;
				wakeup(rqp);
			}
			break;
		    case SMBRQ_SENT:
			SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout);
			timespecadd(&tstimeout, &tstimeout, &tstimeout);
			getnanotime(&ts);
			timespecsub(&ts, &tstimeout, &ts);
			if (timespeccmp(&ts, &rqp->sr_timesent, >)) {
				smb_iod_rqprocessed(rqp, ETIMEDOUT);
			}
			break;
		    default:
			break;
		}
		if (herror)
			break;
	}
	SMB_IOD_RQUNLOCK(iod);
	if (herror == ENOTCONN)
		smb_iod_dead(iod);
	return 0;
}

/*
 * "main" function for smbiod daemon
 */
static __inline void
smb_iod_main(struct smbiod *iod)
{
/*	struct smb_vc *vcp = iod->iod_vc;*/
	struct smbiod_event *evp;
/*	struct timespec tsnow;*/

	SMBIODEBUG("\n");

	/*
	 * Check all interesting events
	 */
	for (;;) {
		SMB_IOD_EVLOCK(iod);
		evp = STAILQ_FIRST(&iod->iod_evlist);
		if (evp == NULL) {
			SMB_IOD_EVUNLOCK(iod);
			break;
		}
		STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link);
		evp->ev_type |= SMBIOD_EV_PROCESSING;
		SMB_IOD_EVUNLOCK(iod);
		switch (evp->ev_type & SMBIOD_EV_MASK) {
		    case SMBIOD_EV_CONNECT:
			iod->iod_state = SMBIOD_ST_RECONNECT;
			evp->ev_error = smb_iod_connect(iod);
			break;
		    case SMBIOD_EV_DISCONNECT:
			evp->ev_error = smb_iod_disconnect(iod);
			break;
		    case SMBIOD_EV_TREECONNECT:
			evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident);
			break;
		    case SMBIOD_EV_SHUTDOWN:
			iod->iod_flags |= SMBIOD_SHUTDOWN;
			break;
		    case SMBIOD_EV_NEWRQ:
			break;
		}
		if (evp->ev_type & SMBIOD_EV_SYNC) {
			SMB_IOD_EVLOCK(iod);
			wakeup(evp);
			SMB_IOD_EVUNLOCK(iod);
		} else
			free(evp, M_SMBIOD);
	}
#if 0
	if (iod->iod_state == SMBIOD_ST_VCACTIVE) {
		getnanotime(&tsnow);
		timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow);
		if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) {
			smb_smb_echo(vcp, &iod->iod_scred);
		}
	}
#endif
	smb_iod_sendall(iod);
	smb_iod_recvall(iod);
	return;
}

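/*
 * Body of the smbiod kernel process: run the main loop until a shutdown
 * is requested, then free the iod and exit.
 */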
void
smb_iod_thread(void *arg)
{
	struct smbiod *iod = arg;

	mtx_lock(&Giant);

	/*
	 * Here we assume that the thread structure will be the same for
	 * the entire life of this kthread (kproc, to be more precise).
	 */
	iod->iod_td = curthread;
	smb_makescred(&iod->iod_scred, iod->iod_td, NULL);
	while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) {
		smb_iod_main(iod);
		SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo);
		if (iod->iod_flags & SMBIOD_SHUTDOWN)
			break;
		tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo);
	}

	/* We can now safely destroy the mutexes and free the iod structure. */
	smb_sl_destroy(&iod->iod_rqlock);
	smb_sl_destroy(&iod->iod_evlock);
	free(iod, M_SMBIOD);
	mtx_unlock(&Giant);
	kproc_exit(0);
}

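/*
 * Allocate and initialize an iod for the given VC and start its kernel
 * process; on failure, unwind the setup and return the error.
 */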
int
smb_iod_create(struct smb_vc *vcp)
{
	struct smbiod *iod;
	int error;

	iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK);
	iod->iod_id = smb_iod_next++;
	iod->iod_state = SMBIOD_ST_NOTCONN;
	iod->iod_vc = vcp;
	iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO;
	iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO;
	getnanotime(&iod->iod_lastrqsent);
	vcp->vc_iod = iod;
	smb_sl_init(&iod->iod_rqlock, "90rql");
	TAILQ_INIT(&iod->iod_rqlist);
	smb_sl_init(&iod->iod_evlock, "90evl");
	STAILQ_INIT(&iod->iod_evlist);
	error = kproc_create(smb_iod_thread, iod, &iod->iod_p,
	    RFNOWAIT, 0, "smbiod%d", iod->iod_id);
	if (error) {
		SMBERROR("can't start smbiod: %d\n", error);
		vcp->vc_iod = NULL;
		smb_sl_destroy(&iod->iod_rqlock);
		smb_sl_destroy(&iod->iod_evlock);
		free(iod, M_SMBIOD);
		return error;
	}
	return 0;
}

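/*
 * Ask the iod thread to shut down synchronously; the thread frees its
 * own resources on exit.
 */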
int
smb_iod_destroy(struct smbiod *iod)
{
	smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL);
	return 0;
}

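/*
 * Module-wide init/teardown hooks; nothing to do at the moment.
 */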
int
smb_iod_init(void)
{
	return 0;
}

int
smb_iod_done(void)
{
	return 0;
}