1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2000-2001 Boris Popov 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/endian.h> 35 #include <sys/proc.h> 36 #include <sys/kernel.h> 37 #include <sys/kthread.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/unistd.h> 41 42 #include <netsmb/smb.h> 43 #include <netsmb/smb_conn.h> 44 #include <netsmb/smb_rq.h> 45 #include <netsmb/smb_tran.h> 46 #include <netsmb/smb_trantcp.h> 47 48 #define SMBIOD_SLEEP_TIMO 2 49 #define SMBIOD_PING_TIMO 60 /* seconds */ 50 51 #define SMB_IOD_EVLOCKPTR(iod) (&((iod)->iod_evlock)) 52 #define SMB_IOD_EVLOCK(iod) smb_sl_lock(&((iod)->iod_evlock)) 53 #define SMB_IOD_EVUNLOCK(iod) smb_sl_unlock(&((iod)->iod_evlock)) 54 55 #define SMB_IOD_RQLOCKPTR(iod) (&((iod)->iod_rqlock)) 56 #define SMB_IOD_RQLOCK(iod) smb_sl_lock(&((iod)->iod_rqlock)) 57 #define SMB_IOD_RQUNLOCK(iod) smb_sl_unlock(&((iod)->iod_rqlock)) 58 59 #define smb_iod_wakeup(iod) wakeup(&(iod)->iod_flags) 60 61 static MALLOC_DEFINE(M_SMBIOD, "SMBIOD", "SMB network io daemon"); 62 63 static int smb_iod_next; 64 65 static int smb_iod_sendall(struct smbiod *iod); 66 static int smb_iod_disconnect(struct smbiod *iod); 67 static void smb_iod_thread(void *); 68 69 static __inline void 70 smb_iod_rqprocessed(struct smb_rq *rqp, int error) 71 { 72 SMBRQ_SLOCK(rqp); 73 rqp->sr_lerror = error; 74 rqp->sr_rpgen++; 75 rqp->sr_state = SMBRQ_NOTIFIED; 76 wakeup(&rqp->sr_state); 77 SMBRQ_SUNLOCK(rqp); 78 } 79 80 static void 81 smb_iod_invrq(struct smbiod *iod) 82 { 83 struct smb_rq *rqp; 84 85 /* 86 * Invalidate all outstanding requests for this connection 87 */ 88 SMB_IOD_RQLOCK(iod); 89 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { 90 rqp->sr_flags |= SMBR_RESTART; 91 smb_iod_rqprocessed(rqp, ENOTCONN); 92 } 93 SMB_IOD_RQUNLOCK(iod); 94 } 95 96 static void 97 smb_iod_closetran(struct smbiod *iod) 98 { 99 struct smb_vc *vcp = iod->iod_vc; 100 struct thread *td = iod->iod_td; 101 102 if (vcp->vc_tdata == NULL) 103 return; 104 SMB_TRAN_DISCONNECT(vcp, td); 105 SMB_TRAN_DONE(vcp, td); 106 vcp->vc_tdata = NULL; 107 } 108 109 static void 110 smb_iod_dead(struct smbiod *iod) 111 { 112 iod->iod_state = SMBIOD_ST_DEAD; 113 smb_iod_closetran(iod); 114 smb_iod_invrq(iod); 115 } 116 117 static int 118 smb_iod_connect(struct smbiod *iod) 119 { 120 struct smb_vc *vcp = iod->iod_vc; 121 struct thread *td = iod->iod_td; 122 int error; 123 124 SMBIODEBUG("%d\n", iod->iod_state); 125 switch(iod->iod_state) { 126 case SMBIOD_ST_VCACTIVE: 127 SMBERROR("called for already opened connection\n"); 128 return EISCONN; 129 case SMBIOD_ST_DEAD: 130 return ENOTCONN; /* XXX: last error code ? */ 131 default: 132 break; 133 } 134 vcp->vc_genid++; 135 error = 0; 136 137 error = (int)SMB_TRAN_CREATE(vcp, td); 138 if (error) 139 goto fail; 140 SMBIODEBUG("tcreate\n"); 141 if (vcp->vc_laddr) { 142 error = (int)SMB_TRAN_BIND(vcp, vcp->vc_laddr, td); 143 if (error) 144 goto fail; 145 } 146 SMBIODEBUG("tbind\n"); 147 error = (int)SMB_TRAN_CONNECT(vcp, vcp->vc_paddr, td); 148 if (error) 149 goto fail; 150 SMB_TRAN_SETPARAM(vcp, SMBTP_SELECTID, &iod->iod_flags); 151 iod->iod_state = SMBIOD_ST_TRANACTIVE; 152 SMBIODEBUG("tconnect\n"); 153 /* vcp->vc_mid = 0;*/ 154 error = (int)smb_smb_negotiate(vcp, &iod->iod_scred); 155 if (error) 156 goto fail; 157 SMBIODEBUG("snegotiate\n"); 158 error = (int)smb_smb_ssnsetup(vcp, &iod->iod_scred); 159 if (error) 160 goto fail; 161 iod->iod_state = SMBIOD_ST_VCACTIVE; 162 SMBIODEBUG("completed\n"); 163 smb_iod_invrq(iod); 164 return (0); 165 166 fail: 167 smb_iod_dead(iod); 168 return (error); 169 } 170 171 static int 172 smb_iod_disconnect(struct smbiod *iod) 173 { 174 struct smb_vc *vcp = iod->iod_vc; 175 176 SMBIODEBUG("\n"); 177 if (iod->iod_state == SMBIOD_ST_VCACTIVE) { 178 smb_smb_ssnclose(vcp, &iod->iod_scred); 179 iod->iod_state = SMBIOD_ST_TRANACTIVE; 180 } 181 vcp->vc_smbuid = SMB_UID_UNKNOWN; 182 smb_iod_closetran(iod); 183 iod->iod_state = SMBIOD_ST_NOTCONN; 184 return 0; 185 } 186 187 static int 188 smb_iod_treeconnect(struct smbiod *iod, struct smb_share *ssp) 189 { 190 int error; 191 192 if (iod->iod_state != SMBIOD_ST_VCACTIVE) { 193 if (iod->iod_state != SMBIOD_ST_DEAD) 194 return ENOTCONN; 195 iod->iod_state = SMBIOD_ST_RECONNECT; 196 error = smb_iod_connect(iod); 197 if (error) 198 return error; 199 } 200 SMBIODEBUG("tree reconnect\n"); 201 SMBS_ST_LOCK(ssp); 202 ssp->ss_flags |= SMBS_RECONNECTING; 203 SMBS_ST_UNLOCK(ssp); 204 error = smb_smb_treeconnect(ssp, &iod->iod_scred); 205 SMBS_ST_LOCK(ssp); 206 ssp->ss_flags &= ~SMBS_RECONNECTING; 207 SMBS_ST_UNLOCK(ssp); 208 wakeup(&ssp->ss_vcgenid); 209 return error; 210 } 211 212 static int 213 smb_iod_sendrq(struct smbiod *iod, struct smb_rq *rqp) 214 { 215 struct thread *td = iod->iod_td; 216 struct smb_vc *vcp = iod->iod_vc; 217 struct smb_share *ssp = rqp->sr_share; 218 struct mbuf *m; 219 int error; 220 221 SMBIODEBUG("iod_state = %d\n", iod->iod_state); 222 switch (iod->iod_state) { 223 case SMBIOD_ST_NOTCONN: 224 smb_iod_rqprocessed(rqp, ENOTCONN); 225 return 0; 226 case SMBIOD_ST_DEAD: 227 iod->iod_state = SMBIOD_ST_RECONNECT; 228 return 0; 229 case SMBIOD_ST_RECONNECT: 230 return 0; 231 default: 232 break; 233 } 234 if (rqp->sr_sendcnt == 0) { 235 #ifdef movedtoanotherplace 236 if (vcp->vc_maxmux != 0 && iod->iod_muxcnt >= vcp->vc_maxmux) 237 return 0; 238 #endif 239 le16enc(rqp->sr_rqtid, ssp ? ssp->ss_tid : SMB_TID_UNKNOWN); 240 le16enc(rqp->sr_rquid, vcp ? vcp->vc_smbuid : 0); 241 mb_fixhdr(&rqp->sr_rq); 242 if (vcp->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) 243 smb_rq_sign(rqp); 244 } 245 if (rqp->sr_sendcnt++ > 5) { 246 rqp->sr_flags |= SMBR_RESTART; 247 smb_iod_rqprocessed(rqp, rqp->sr_lerror); 248 /* 249 * If all attempts to send a request failed, then 250 * something is seriously hosed. 251 */ 252 return ENOTCONN; 253 } 254 SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0); 255 m_dumpm(rqp->sr_rq.mb_top); 256 m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, M_WAITOK); 257 error = rqp->sr_lerror = SMB_TRAN_SEND(vcp, m, td); 258 if (error == 0) { 259 getnanotime(&rqp->sr_timesent); 260 iod->iod_lastrqsent = rqp->sr_timesent; 261 rqp->sr_flags |= SMBR_SENT; 262 rqp->sr_state = SMBRQ_SENT; 263 return 0; 264 } 265 /* 266 * Check for fatal errors 267 */ 268 if (SMB_TRAN_FATAL(vcp, error)) { 269 /* 270 * No further attempts should be made 271 */ 272 return ENOTCONN; 273 } 274 if (smb_rq_intr(rqp)) 275 smb_iod_rqprocessed(rqp, EINTR); 276 return 0; 277 } 278 279 /* 280 * Process incoming packets 281 */ 282 static int 283 smb_iod_recvall(struct smbiod *iod) 284 { 285 struct smb_vc *vcp = iod->iod_vc; 286 struct thread *td = iod->iod_td; 287 struct smb_rq *rqp; 288 struct mbuf *m; 289 u_char *hp; 290 u_short mid; 291 int error; 292 293 switch (iod->iod_state) { 294 case SMBIOD_ST_NOTCONN: 295 case SMBIOD_ST_DEAD: 296 case SMBIOD_ST_RECONNECT: 297 return 0; 298 default: 299 break; 300 } 301 for (;;) { 302 m = NULL; 303 error = SMB_TRAN_RECV(vcp, &m, td); 304 if (error == EWOULDBLOCK) 305 break; 306 if (SMB_TRAN_FATAL(vcp, error)) { 307 smb_iod_dead(iod); 308 break; 309 } 310 if (error) 311 break; 312 if (m == NULL) { 313 SMBERROR("tran return NULL without error\n"); 314 error = EPIPE; 315 continue; 316 } 317 m = m_pullup(m, SMB_HDRLEN); 318 if (m == NULL) 319 continue; /* wait for a good packet */ 320 /* 321 * Now we got an entire and possibly invalid SMB packet. 322 * Be careful while parsing it. 323 */ 324 m_dumpm(m); 325 hp = mtod(m, u_char*); 326 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) { 327 m_freem(m); 328 continue; 329 } 330 mid = SMB_HDRMID(hp); 331 SMBSDEBUG("mid %04x\n", (u_int)mid); 332 SMB_IOD_RQLOCK(iod); 333 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { 334 if (rqp->sr_mid != mid) 335 continue; 336 SMBRQ_SLOCK(rqp); 337 if (rqp->sr_rp.md_top == NULL) { 338 md_initm(&rqp->sr_rp, m); 339 } else { 340 if (rqp->sr_flags & SMBR_MULTIPACKET) { 341 md_append_record(&rqp->sr_rp, m); 342 } else { 343 SMBRQ_SUNLOCK(rqp); 344 SMBERROR("duplicate response %d (ignored)\n", mid); 345 break; 346 } 347 } 348 SMBRQ_SUNLOCK(rqp); 349 smb_iod_rqprocessed(rqp, 0); 350 break; 351 } 352 SMB_IOD_RQUNLOCK(iod); 353 if (rqp == NULL) { 354 SMBERROR("drop resp with mid %d\n", (u_int)mid); 355 /* smb_printrqlist(vcp);*/ 356 m_freem(m); 357 } 358 } 359 /* 360 * check for interrupts 361 */ 362 SMB_IOD_RQLOCK(iod); 363 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { 364 if (smb_td_intr(rqp->sr_cred->scr_td)) { 365 smb_iod_rqprocessed(rqp, EINTR); 366 } 367 } 368 SMB_IOD_RQUNLOCK(iod); 369 return 0; 370 } 371 372 int 373 smb_iod_request(struct smbiod *iod, int event, void *ident) 374 { 375 struct smbiod_event *evp; 376 int error; 377 378 SMBIODEBUG("\n"); 379 evp = smb_zmalloc(sizeof(*evp), M_SMBIOD, M_WAITOK); 380 evp->ev_type = event; 381 evp->ev_ident = ident; 382 SMB_IOD_EVLOCK(iod); 383 STAILQ_INSERT_TAIL(&iod->iod_evlist, evp, ev_link); 384 if ((event & SMBIOD_EV_SYNC) == 0) { 385 SMB_IOD_EVUNLOCK(iod); 386 smb_iod_wakeup(iod); 387 return 0; 388 } 389 smb_iod_wakeup(iod); 390 msleep(evp, SMB_IOD_EVLOCKPTR(iod), PWAIT | PDROP, "90evw", 0); 391 error = evp->ev_error; 392 free(evp, M_SMBIOD); 393 return error; 394 } 395 396 /* 397 * Place request in the queue. 398 * Request from smbiod have a high priority. 399 */ 400 int 401 smb_iod_addrq(struct smb_rq *rqp) 402 { 403 struct smb_vc *vcp = rqp->sr_vc; 404 struct smbiod *iod = vcp->vc_iod; 405 int error; 406 407 SMBIODEBUG("\n"); 408 if (rqp->sr_cred->scr_td != NULL && 409 rqp->sr_cred->scr_td->td_proc == iod->iod_p) { 410 rqp->sr_flags |= SMBR_INTERNAL; 411 SMB_IOD_RQLOCK(iod); 412 TAILQ_INSERT_HEAD(&iod->iod_rqlist, rqp, sr_link); 413 SMB_IOD_RQUNLOCK(iod); 414 for (;;) { 415 if (smb_iod_sendrq(iod, rqp) != 0) { 416 smb_iod_dead(iod); 417 break; 418 } 419 /* 420 * we don't need to lock state field here 421 */ 422 if (rqp->sr_state != SMBRQ_NOTSENT) 423 break; 424 tsleep(&iod->iod_flags, PWAIT, "90sndw", hz); 425 } 426 if (rqp->sr_lerror) 427 smb_iod_removerq(rqp); 428 return rqp->sr_lerror; 429 } 430 431 switch (iod->iod_state) { 432 case SMBIOD_ST_NOTCONN: 433 return ENOTCONN; 434 case SMBIOD_ST_DEAD: 435 error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_CONNECT | SMBIOD_EV_SYNC, NULL); 436 if (error) 437 return error; 438 return EXDEV; 439 default: 440 break; 441 } 442 443 SMB_IOD_RQLOCK(iod); 444 for (;;) { 445 if (vcp->vc_maxmux == 0) { 446 SMBERROR("maxmux == 0\n"); 447 break; 448 } 449 if (iod->iod_muxcnt < vcp->vc_maxmux) 450 break; 451 iod->iod_muxwant++; 452 msleep(&iod->iod_muxwant, SMB_IOD_RQLOCKPTR(iod), 453 PWAIT, "90mux", 0); 454 } 455 iod->iod_muxcnt++; 456 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link); 457 SMB_IOD_RQUNLOCK(iod); 458 smb_iod_wakeup(iod); 459 return 0; 460 } 461 462 int 463 smb_iod_removerq(struct smb_rq *rqp) 464 { 465 struct smb_vc *vcp = rqp->sr_vc; 466 struct smbiod *iod = vcp->vc_iod; 467 468 SMBIODEBUG("\n"); 469 if (rqp->sr_flags & SMBR_INTERNAL) { 470 SMB_IOD_RQLOCK(iod); 471 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); 472 SMB_IOD_RQUNLOCK(iod); 473 return 0; 474 } 475 SMB_IOD_RQLOCK(iod); 476 while (rqp->sr_flags & SMBR_XLOCK) { 477 rqp->sr_flags |= SMBR_XLOCKWANT; 478 msleep(rqp, SMB_IOD_RQLOCKPTR(iod), PWAIT, "90xrm", 0); 479 } 480 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); 481 iod->iod_muxcnt--; 482 if (iod->iod_muxwant) { 483 iod->iod_muxwant--; 484 wakeup(&iod->iod_muxwant); 485 } 486 SMB_IOD_RQUNLOCK(iod); 487 return 0; 488 } 489 490 int 491 smb_iod_waitrq(struct smb_rq *rqp) 492 { 493 struct smbiod *iod = rqp->sr_vc->vc_iod; 494 int error; 495 496 SMBIODEBUG("\n"); 497 if (rqp->sr_flags & SMBR_INTERNAL) { 498 for (;;) { 499 smb_iod_sendall(iod); 500 smb_iod_recvall(iod); 501 if (rqp->sr_rpgen != rqp->sr_rplast) 502 break; 503 tsleep(&iod->iod_flags, PWAIT, "90irq", hz); 504 } 505 smb_iod_removerq(rqp); 506 return rqp->sr_lerror; 507 } 508 SMBRQ_SLOCK(rqp); 509 if (rqp->sr_rpgen == rqp->sr_rplast) 510 msleep(&rqp->sr_state, SMBRQ_SLOCKPTR(rqp), PWAIT, "90wrq", 0); 511 rqp->sr_rplast++; 512 SMBRQ_SUNLOCK(rqp); 513 error = rqp->sr_lerror; 514 if (rqp->sr_flags & SMBR_MULTIPACKET) { 515 /* 516 * If request should stay in the list, then reinsert it 517 * at the end of queue so other waiters have chance to concur 518 */ 519 SMB_IOD_RQLOCK(iod); 520 TAILQ_REMOVE(&iod->iod_rqlist, rqp, sr_link); 521 TAILQ_INSERT_TAIL(&iod->iod_rqlist, rqp, sr_link); 522 SMB_IOD_RQUNLOCK(iod); 523 } else 524 smb_iod_removerq(rqp); 525 return error; 526 } 527 528 static int 529 smb_iod_sendall(struct smbiod *iod) 530 { 531 struct smb_vc *vcp = iod->iod_vc; 532 struct smb_rq *rqp; 533 struct timespec ts, tstimeout; 534 int herror; 535 536 herror = 0; 537 /* 538 * Loop through the list of requests and send them if possible 539 */ 540 SMB_IOD_RQLOCK(iod); 541 TAILQ_FOREACH(rqp, &iod->iod_rqlist, sr_link) { 542 switch (rqp->sr_state) { 543 case SMBRQ_NOTSENT: 544 rqp->sr_flags |= SMBR_XLOCK; 545 SMB_IOD_RQUNLOCK(iod); 546 herror = smb_iod_sendrq(iod, rqp); 547 SMB_IOD_RQLOCK(iod); 548 rqp->sr_flags &= ~SMBR_XLOCK; 549 if (rqp->sr_flags & SMBR_XLOCKWANT) { 550 rqp->sr_flags &= ~SMBR_XLOCKWANT; 551 wakeup(rqp); 552 } 553 break; 554 case SMBRQ_SENT: 555 SMB_TRAN_GETPARAM(vcp, SMBTP_TIMEOUT, &tstimeout); 556 timespecadd(&tstimeout, &tstimeout, &tstimeout); 557 getnanotime(&ts); 558 timespecsub(&ts, &tstimeout, &ts); 559 if (timespeccmp(&ts, &rqp->sr_timesent, >)) { 560 smb_iod_rqprocessed(rqp, ETIMEDOUT); 561 } 562 break; 563 default: 564 break; 565 } 566 if (herror) 567 break; 568 } 569 SMB_IOD_RQUNLOCK(iod); 570 if (herror == ENOTCONN) 571 smb_iod_dead(iod); 572 return 0; 573 } 574 575 /* 576 * "main" function for smbiod daemon 577 */ 578 static __inline void 579 smb_iod_main(struct smbiod *iod) 580 { 581 /* struct smb_vc *vcp = iod->iod_vc;*/ 582 struct smbiod_event *evp; 583 /* struct timespec tsnow;*/ 584 585 SMBIODEBUG("\n"); 586 587 /* 588 * Check all interesting events 589 */ 590 for (;;) { 591 SMB_IOD_EVLOCK(iod); 592 evp = STAILQ_FIRST(&iod->iod_evlist); 593 if (evp == NULL) { 594 SMB_IOD_EVUNLOCK(iod); 595 break; 596 } 597 STAILQ_REMOVE_HEAD(&iod->iod_evlist, ev_link); 598 evp->ev_type |= SMBIOD_EV_PROCESSING; 599 SMB_IOD_EVUNLOCK(iod); 600 switch (evp->ev_type & SMBIOD_EV_MASK) { 601 case SMBIOD_EV_CONNECT: 602 iod->iod_state = SMBIOD_ST_RECONNECT; 603 evp->ev_error = smb_iod_connect(iod); 604 break; 605 case SMBIOD_EV_DISCONNECT: 606 evp->ev_error = smb_iod_disconnect(iod); 607 break; 608 case SMBIOD_EV_TREECONNECT: 609 evp->ev_error = smb_iod_treeconnect(iod, evp->ev_ident); 610 break; 611 case SMBIOD_EV_SHUTDOWN: 612 iod->iod_flags |= SMBIOD_SHUTDOWN; 613 break; 614 case SMBIOD_EV_NEWRQ: 615 break; 616 } 617 if (evp->ev_type & SMBIOD_EV_SYNC) { 618 SMB_IOD_EVLOCK(iod); 619 wakeup(evp); 620 SMB_IOD_EVUNLOCK(iod); 621 } else 622 free(evp, M_SMBIOD); 623 } 624 #if 0 625 if (iod->iod_state == SMBIOD_ST_VCACTIVE) { 626 getnanotime(&tsnow); 627 timespecsub(&tsnow, &iod->iod_pingtimo, &tsnow); 628 if (timespeccmp(&tsnow, &iod->iod_lastrqsent, >)) { 629 smb_smb_echo(vcp, &iod->iod_scred); 630 } 631 } 632 #endif 633 smb_iod_sendall(iod); 634 smb_iod_recvall(iod); 635 return; 636 } 637 638 void 639 smb_iod_thread(void *arg) 640 { 641 struct smbiod *iod = arg; 642 643 mtx_lock(&Giant); 644 645 /* 646 * Here we assume that the thread structure will be the same 647 * for an entire kthread (kproc, to be more precise) life. 648 */ 649 iod->iod_td = curthread; 650 smb_makescred(&iod->iod_scred, iod->iod_td, NULL); 651 while ((iod->iod_flags & SMBIOD_SHUTDOWN) == 0) { 652 smb_iod_main(iod); 653 SMBIODEBUG("going to sleep for %d ticks\n", iod->iod_sleeptimo); 654 if (iod->iod_flags & SMBIOD_SHUTDOWN) 655 break; 656 tsleep(&iod->iod_flags, PWAIT, "90idle", iod->iod_sleeptimo); 657 } 658 659 /* We can now safely destroy the mutexes and free the iod structure. */ 660 smb_sl_destroy(&iod->iod_rqlock); 661 smb_sl_destroy(&iod->iod_evlock); 662 free(iod, M_SMBIOD); 663 mtx_unlock(&Giant); 664 kproc_exit(0); 665 } 666 667 int 668 smb_iod_create(struct smb_vc *vcp) 669 { 670 struct smbiod *iod; 671 int error; 672 673 iod = smb_zmalloc(sizeof(*iod), M_SMBIOD, M_WAITOK); 674 iod->iod_id = smb_iod_next++; 675 iod->iod_state = SMBIOD_ST_NOTCONN; 676 iod->iod_vc = vcp; 677 iod->iod_sleeptimo = hz * SMBIOD_SLEEP_TIMO; 678 iod->iod_pingtimo.tv_sec = SMBIOD_PING_TIMO; 679 getnanotime(&iod->iod_lastrqsent); 680 vcp->vc_iod = iod; 681 smb_sl_init(&iod->iod_rqlock, "90rql"); 682 TAILQ_INIT(&iod->iod_rqlist); 683 smb_sl_init(&iod->iod_evlock, "90evl"); 684 STAILQ_INIT(&iod->iod_evlist); 685 error = kproc_create(smb_iod_thread, iod, &iod->iod_p, 686 RFNOWAIT, 0, "smbiod%d", iod->iod_id); 687 if (error) { 688 SMBERROR("can't start smbiod: %d", error); 689 vcp->vc_iod = NULL; 690 smb_sl_destroy(&iod->iod_rqlock); 691 smb_sl_destroy(&iod->iod_evlock); 692 free(iod, M_SMBIOD); 693 return error; 694 } 695 return 0; 696 } 697 698 int 699 smb_iod_destroy(struct smbiod *iod) 700 { 701 smb_iod_request(iod, SMBIOD_EV_SHUTDOWN | SMBIOD_EV_SYNC, NULL); 702 return 0; 703 } 704 705 int 706 smb_iod_init(void) 707 { 708 return 0; 709 } 710 711 int 712 smb_iod_done(void) 713 { 714 return 0; 715 } 716