1 /* 2 * Copyright (c) 2000-2001 Boris Popov 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Boris Popov. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $ 33 */ 34 35 /* 36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 37 * Use is subject to license terms. 38 */ 39 40 #ifdef DEBUG 41 /* See sys/queue.h */ 42 #define QUEUEDEBUG 1 43 #endif 44 45 #include <sys/param.h> 46 #include <sys/systm.h> 47 #include <sys/atomic.h> 48 #include <sys/proc.h> 49 #include <sys/thread.h> 50 #include <sys/file.h> 51 #include <sys/kmem.h> 52 #include <sys/unistd.h> 53 #include <sys/mount.h> 54 #include <sys/vnode.h> 55 #include <sys/types.h> 56 #include <sys/ddi.h> 57 #include <sys/sunddi.h> 58 #include <sys/stream.h> 59 #include <sys/strsun.h> 60 #include <sys/time.h> 61 #include <sys/class.h> 62 #include <sys/disp.h> 63 #include <sys/cmn_err.h> 64 #include <sys/zone.h> 65 #include <sys/sdt.h> 66 67 #include <netsmb/smb_osdep.h> 68 69 #include <netsmb/smb.h> 70 #include <netsmb/smb_conn.h> 71 #include <netsmb/smb_rq.h> 72 #include <netsmb/smb_subr.h> 73 #include <netsmb/smb_tran.h> 74 #include <netsmb/smb_trantcp.h> 75 76 int smb_iod_send_echo(smb_vc_t *); 77 78 /* 79 * This is set/cleared when smbfs loads/unloads 80 * No locks should be necessary, because smbfs 81 * can't unload until all the mounts are gone. 82 */ 83 static smb_fscb_t *fscb; 84 void 85 smb_fscb_set(smb_fscb_t *cb) 86 { 87 fscb = cb; 88 } 89 90 static void 91 smb_iod_share_disconnected(smb_share_t *ssp) 92 { 93 94 smb_share_invalidate(ssp); 95 96 /* smbfs_dead() */ 97 if (fscb && fscb->fscb_disconn) { 98 fscb->fscb_disconn(ssp); 99 } 100 } 101 102 /* 103 * State changes are important and infrequent. 104 * Make them easily observable via dtrace. 105 */ 106 void 107 smb_iod_newstate(struct smb_vc *vcp, int state) 108 { 109 vcp->vc_state = state; 110 } 111 112 /* Lock Held version of the next function. */ 113 static inline void 114 smb_iod_rqprocessed_LH( 115 struct smb_rq *rqp, 116 int error, 117 int flags) 118 { 119 rqp->sr_flags |= flags; 120 rqp->sr_lerror = error; 121 rqp->sr_rpgen++; 122 rqp->sr_state = SMBRQ_NOTIFIED; 123 cv_broadcast(&rqp->sr_cond); 124 } 125 126 static void 127 smb_iod_rqprocessed( 128 struct smb_rq *rqp, 129 int error, 130 int flags) 131 { 132 133 SMBRQ_LOCK(rqp); 134 smb_iod_rqprocessed_LH(rqp, error, flags); 135 SMBRQ_UNLOCK(rqp); 136 } 137 138 static void 139 smb_iod_invrq(struct smb_vc *vcp) 140 { 141 struct smb_rq *rqp; 142 143 /* 144 * Invalidate all outstanding requests for this connection 145 */ 146 rw_enter(&vcp->iod_rqlock, RW_READER); 147 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) { 148 smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART); 149 } 150 rw_exit(&vcp->iod_rqlock); 151 } 152 153 /* 154 * Called by smb_vc_rele, smb_vc_kill, and by the driver 155 * close entry point if the IOD closes its dev handle. 156 * 157 * Forcibly kill the connection and IOD. 158 */ 159 void 160 smb_iod_disconnect(struct smb_vc *vcp) 161 { 162 163 /* 164 * Inform everyone of the state change. 165 */ 166 SMB_VC_LOCK(vcp); 167 if (vcp->vc_state != SMBIOD_ST_DEAD) { 168 smb_iod_newstate(vcp, SMBIOD_ST_DEAD); 169 cv_broadcast(&vcp->vc_statechg); 170 } 171 SMB_VC_UNLOCK(vcp); 172 173 /* 174 * Let's be safe here and avoid doing any 175 * call across the network while trying to 176 * shut things down. If we just disconnect, 177 * the server will take care of the logoff. 178 */ 179 SMB_TRAN_DISCONNECT(vcp); 180 } 181 182 /* 183 * Send one request. 184 * 185 * Called by _addrq (for internal requests) 186 * and _sendall (via _addrq, _multirq, _waitrq) 187 */ 188 static int 189 smb_iod_sendrq(struct smb_rq *rqp) 190 { 191 struct smb_vc *vcp = rqp->sr_vc; 192 mblk_t *m; 193 int error; 194 195 ASSERT(vcp); 196 ASSERT(SEMA_HELD(&vcp->vc_sendlock)); 197 ASSERT(RW_READ_HELD(&vcp->iod_rqlock)); 198 199 /* 200 * Note: Anything special for SMBR_INTERNAL here? 201 */ 202 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) { 203 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state); 204 return (ENOTCONN); 205 } 206 207 208 /* 209 * On the first send, set the MID and (maybe) 210 * the signing sequence numbers. The increments 211 * here are serialized by vc_sendlock 212 */ 213 if (rqp->sr_sendcnt == 0) { 214 215 rqp->sr_mid = vcp->vc_next_mid++; 216 217 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) { 218 /* 219 * We're signing requests and verifying 220 * signatures on responses. Set the 221 * sequence numbers of the request and 222 * response here, used in smb_rq_verify. 223 */ 224 rqp->sr_seqno = vcp->vc_next_seq++; 225 rqp->sr_rseqno = vcp->vc_next_seq++; 226 } 227 228 /* Fill in UID, TID, MID, etc. */ 229 smb_rq_fillhdr(rqp); 230 231 /* 232 * Sign the message now that we're finally done 233 * filling in the SMB header fields, etc. 234 */ 235 if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) { 236 smb_rq_sign(rqp); 237 } 238 } 239 if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */ 240 smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART); 241 /* 242 * If all attempts to send a request failed, then 243 * something is seriously hosed. 244 */ 245 return (ENOTCONN); 246 } 247 248 /* 249 * Replaced m_copym() with Solaris copymsg() which does the same 250 * work when we want to do a M_COPYALL. 251 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0); 252 */ 253 m = copymsg(rqp->sr_rq.mb_top); 254 255 #ifdef DTRACE_PROBE 256 DTRACE_PROBE2(smb_iod_sendrq, 257 (smb_rq_t *), rqp, (mblk_t *), m); 258 #else 259 SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0); 260 #endif 261 m_dumpm(m); 262 263 if (m != NULL) { 264 error = SMB_TRAN_SEND(vcp, m); 265 m = 0; /* consumed by SEND */ 266 } else 267 error = ENOBUFS; 268 269 rqp->sr_lerror = error; 270 if (error == 0) { 271 SMBRQ_LOCK(rqp); 272 rqp->sr_flags |= SMBR_SENT; 273 rqp->sr_state = SMBRQ_SENT; 274 if (rqp->sr_flags & SMBR_SENDWAIT) 275 cv_broadcast(&rqp->sr_cond); 276 SMBRQ_UNLOCK(rqp); 277 return (0); 278 } 279 /* 280 * Check for fatal errors 281 */ 282 if (SMB_TRAN_FATAL(vcp, error)) { 283 /* 284 * No further attempts should be made 285 */ 286 SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error); 287 return (ENOTCONN); 288 } 289 if (error) 290 SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error); 291 292 #ifdef APPLE 293 /* If proc waiting on rqp was signaled... */ 294 if (smb_rq_intr(rqp)) 295 smb_iod_rqprocessed(rqp, EINTR, 0); 296 #endif 297 298 return (0); 299 } 300 301 static int 302 smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp) 303 { 304 mblk_t *m; 305 uchar_t *hp; 306 int error; 307 308 top: 309 m = NULL; 310 error = SMB_TRAN_RECV(vcp, &m); 311 if (error == EAGAIN) 312 goto top; 313 if (error) 314 return (error); 315 ASSERT(m); 316 317 m = m_pullup(m, SMB_HDRLEN); 318 if (m == NULL) { 319 return (ENOSR); 320 } 321 322 /* 323 * Check the SMB header 324 */ 325 hp = mtod(m, uchar_t *); 326 if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) { 327 m_freem(m); 328 return (EPROTO); 329 } 330 331 *mpp = m; 332 return (0); 333 } 334 335 /* 336 * Process incoming packets 337 * 338 * This is the "reader" loop, run by the IOD thread 339 * while in state SMBIOD_ST_VCACTIVE. The loop now 340 * simply blocks in the socket recv until either a 341 * message arrives, or a disconnect. 342 * 343 * Any non-zero error means the IOD should terminate. 344 */ 345 int 346 smb_iod_recvall(struct smb_vc *vcp) 347 { 348 struct smb_rq *rqp; 349 mblk_t *m; 350 uchar_t *hp; 351 ushort_t mid; 352 int error = 0; 353 int etime_count = 0; /* for "server not responding", etc. */ 354 355 for (;;) { 356 /* 357 * Check whether someone "killed" this VC, 358 * or is asking the IOD to terminate. 359 */ 360 361 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) { 362 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state); 363 error = 0; 364 break; 365 } 366 367 if (vcp->iod_flags & SMBIOD_SHUTDOWN) { 368 SMBIODEBUG("SHUTDOWN set\n"); 369 /* This IOD thread will terminate. */ 370 SMB_VC_LOCK(vcp); 371 smb_iod_newstate(vcp, SMBIOD_ST_DEAD); 372 cv_broadcast(&vcp->vc_statechg); 373 SMB_VC_UNLOCK(vcp); 374 error = EINTR; 375 break; 376 } 377 378 m = NULL; 379 error = smb_iod_recv1(vcp, &m); 380 381 if (error == ETIME && 382 vcp->iod_rqlist.tqh_first != NULL) { 383 /* 384 * Nothing received for 15 seconds and 385 * we have requests in the queue. 386 */ 387 etime_count++; 388 389 /* 390 * Once, at 15 sec. notify callbacks 391 * and print the warning message. 392 */ 393 if (etime_count == 1) { 394 /* Was: smb_iod_notify_down(vcp); */ 395 if (fscb && fscb->fscb_down) 396 smb_vc_walkshares(vcp, 397 fscb->fscb_down); 398 zprintf(vcp->vc_zoneid, 399 "SMB server %s not responding\n", 400 vcp->vc_srvname); 401 } 402 403 /* 404 * At 30 sec. try sending an echo, and then 405 * once a minute thereafter. 406 */ 407 if ((etime_count & 3) == 2) { 408 (void) smb_iod_send_echo(vcp); 409 } 410 411 continue; 412 } /* ETIME && requests in queue */ 413 414 if (error == ETIME) { 415 /* 416 * If the IOD thread holds the last reference 417 * to this VC, let the IOD thread terminate. 418 */ 419 if (vcp->vc_co.co_usecount > 1) 420 continue; 421 SMB_VC_LOCK(vcp); 422 if (vcp->vc_co.co_usecount == 1) { 423 smb_iod_newstate(vcp, SMBIOD_ST_DEAD); 424 SMB_VC_UNLOCK(vcp); 425 error = 0; 426 break; 427 } 428 SMB_VC_UNLOCK(vcp); 429 continue; 430 } /* error == ETIME */ 431 432 if (error) { 433 /* 434 * The recv. above returned some error 435 * we can't continue from i.e. ENOTCONN. 436 * It's dangerous to continue here. 437 * (possible infinite loop!) 438 * 439 * If we have requests enqueued, next 440 * state is reconnecting, else idle. 441 */ 442 int state; 443 SMB_VC_LOCK(vcp); 444 state = (vcp->iod_rqlist.tqh_first != NULL) ? 445 SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE; 446 smb_iod_newstate(vcp, state); 447 cv_broadcast(&vcp->vc_statechg); 448 SMB_VC_UNLOCK(vcp); 449 error = 0; 450 break; 451 } 452 453 /* 454 * Received something. Yea! 455 */ 456 if (etime_count) { 457 etime_count = 0; 458 459 zprintf(vcp->vc_zoneid, "SMB server %s OK\n", 460 vcp->vc_srvname); 461 462 /* Was: smb_iod_notify_up(vcp); */ 463 if (fscb && fscb->fscb_up) 464 smb_vc_walkshares(vcp, fscb->fscb_up); 465 } 466 467 /* 468 * Have an SMB packet. The SMB header was 469 * checked in smb_iod_recv1(). 470 * Find the request... 471 */ 472 hp = mtod(m, uchar_t *); 473 /*LINTED*/ 474 mid = letohs(SMB_HDRMID(hp)); 475 SMBIODEBUG("mid %04x\n", (uint_t)mid); 476 477 rw_enter(&vcp->iod_rqlock, RW_READER); 478 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) { 479 480 if (rqp->sr_mid != mid) 481 continue; 482 483 DTRACE_PROBE2(smb_iod_recvrq, 484 (smb_rq_t *), rqp, (mblk_t *), m); 485 m_dumpm(m); 486 487 SMBRQ_LOCK(rqp); 488 if (rqp->sr_rp.md_top == NULL) { 489 md_initm(&rqp->sr_rp, m); 490 } else { 491 if (rqp->sr_flags & SMBR_MULTIPACKET) { 492 md_append_record(&rqp->sr_rp, m); 493 } else { 494 SMBRQ_UNLOCK(rqp); 495 SMBSDEBUG("duplicate response %d " 496 "(ignored)\n", mid); 497 break; 498 } 499 } 500 smb_iod_rqprocessed_LH(rqp, 0, 0); 501 SMBRQ_UNLOCK(rqp); 502 break; 503 } 504 505 if (rqp == NULL) { 506 int cmd = SMB_HDRCMD(hp); 507 508 if (cmd != SMB_COM_ECHO) 509 SMBSDEBUG("drop resp: mid %d, cmd %d\n", 510 (uint_t)mid, cmd); 511 /* smb_printrqlist(vcp); */ 512 m_freem(m); 513 } 514 rw_exit(&vcp->iod_rqlock); 515 516 } 517 518 return (error); 519 } 520 521 /* 522 * The IOD receiver thread has requests pending and 523 * has not received anything in a while. Try to 524 * send an SMB echo request. It's tricky to do a 525 * send from the IOD thread because we can't block. 526 * 527 * Using tmo=SMBNOREPLYWAIT in the request 528 * so smb_rq_reply will skip smb_iod_waitrq. 529 * The smb_smb_echo call uses SMBR_INTERNAL 530 * to avoid calling smb_iod_sendall(). 531 */ 532 int 533 smb_iod_send_echo(smb_vc_t *vcp) 534 { 535 smb_cred_t scred; 536 int err; 537 538 smb_credinit(&scred, NULL); 539 err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT); 540 smb_credrele(&scred); 541 return (err); 542 } 543 544 /* 545 * The IOD thread is now just a "reader", 546 * so no more smb_iod_request(). Yea! 547 */ 548 549 /* 550 * Place request in the queue, and send it now if possible. 551 * Called with no locks held. 552 */ 553 int 554 smb_iod_addrq(struct smb_rq *rqp) 555 { 556 struct smb_vc *vcp = rqp->sr_vc; 557 int error, save_newrq; 558 559 ASSERT(rqp->sr_cred); 560 561 /* 562 * State should be correct after the check in 563 * smb_rq_enqueue(), but we dropped locks... 564 */ 565 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) { 566 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state); 567 return (ENOTCONN); 568 } 569 570 /* 571 * Requests from the IOD itself are marked _INTERNAL, 572 * and get some special treatment to avoid blocking 573 * the reader thread (so we don't deadlock). 574 * The request is not yet on the queue, so we can 575 * modify it's state here without locks. 576 * Only thing using this now is ECHO. 577 */ 578 rqp->sr_owner = curthread; 579 if (rqp->sr_owner == vcp->iod_thr) { 580 rqp->sr_flags |= SMBR_INTERNAL; 581 582 /* 583 * This is a request from the IOD thread. 584 * Always send directly from this thread. 585 * Note lock order: iod_rqlist, vc_sendlock 586 */ 587 rw_enter(&vcp->iod_rqlock, RW_WRITER); 588 TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link); 589 rw_downgrade(&vcp->iod_rqlock); 590 591 /* 592 * Note: iod_sendrq expects vc_sendlock, 593 * so take that here, but carefully: 594 * Never block the IOD thread here. 595 */ 596 if (sema_tryp(&vcp->vc_sendlock) == 0) { 597 SMBIODEBUG("sendlock busy\n"); 598 error = EAGAIN; 599 } else { 600 /* Have vc_sendlock */ 601 error = smb_iod_sendrq(rqp); 602 sema_v(&vcp->vc_sendlock); 603 } 604 605 rw_exit(&vcp->iod_rqlock); 606 607 /* 608 * In the non-error case, _removerq 609 * is done by either smb_rq_reply 610 * or smb_iod_waitrq. 611 */ 612 if (error) 613 smb_iod_removerq(rqp); 614 615 return (error); 616 } 617 618 rw_enter(&vcp->iod_rqlock, RW_WRITER); 619 620 TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link); 621 /* iod_rqlock/WRITER protects iod_newrq */ 622 save_newrq = vcp->iod_newrq; 623 vcp->iod_newrq++; 624 625 rw_exit(&vcp->iod_rqlock); 626 627 /* 628 * Now send any requests that need to be sent, 629 * including the one we just put on the list. 630 * Only the thread that found iod_newrq==0 631 * needs to run the send loop. 632 */ 633 if (save_newrq == 0) 634 smb_iod_sendall(vcp); 635 636 return (0); 637 } 638 639 /* 640 * Mark an SMBR_MULTIPACKET request as 641 * needing another send. Similar to the 642 * "normal" part of smb_iod_addrq. 643 */ 644 int 645 smb_iod_multirq(struct smb_rq *rqp) 646 { 647 struct smb_vc *vcp = rqp->sr_vc; 648 int save_newrq; 649 650 ASSERT(rqp->sr_flags & SMBR_MULTIPACKET); 651 652 if (rqp->sr_flags & SMBR_INTERNAL) 653 return (EINVAL); 654 655 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) { 656 SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state); 657 return (ENOTCONN); 658 } 659 660 rw_enter(&vcp->iod_rqlock, RW_WRITER); 661 662 /* Already on iod_rqlist, just reset state. */ 663 rqp->sr_state = SMBRQ_NOTSENT; 664 665 /* iod_rqlock/WRITER protects iod_newrq */ 666 save_newrq = vcp->iod_newrq; 667 vcp->iod_newrq++; 668 669 rw_exit(&vcp->iod_rqlock); 670 671 /* 672 * Now send any requests that need to be sent, 673 * including the one we just marked NOTSENT. 674 * Only the thread that found iod_newrq==0 675 * needs to run the send loop. 676 */ 677 if (save_newrq == 0) 678 smb_iod_sendall(vcp); 679 680 return (0); 681 } 682 683 684 void 685 smb_iod_removerq(struct smb_rq *rqp) 686 { 687 struct smb_vc *vcp = rqp->sr_vc; 688 689 rw_enter(&vcp->iod_rqlock, RW_WRITER); 690 #ifdef QUEUEDEBUG 691 /* 692 * Make sure we have not already removed it. 693 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE 694 * XXX: Don't like the constant 1 here... 695 */ 696 ASSERT(rqp->sr_link.tqe_next != (void *)1L); 697 #endif 698 TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link); 699 rw_exit(&vcp->iod_rqlock); 700 } 701 702 703 704 /* 705 * Wait for a request to complete. 706 * 707 * For normal requests, we need to deal with 708 * ioc_muxcnt dropping below vc_maxmux by 709 * making arrangements to send more... 710 */ 711 int 712 smb_iod_waitrq(struct smb_rq *rqp) 713 { 714 struct smb_vc *vcp = rqp->sr_vc; 715 clock_t tr, tmo1, tmo2; 716 int error, rc; 717 718 if (rqp->sr_flags & SMBR_INTERNAL) { 719 ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0); 720 smb_iod_removerq(rqp); 721 return (EAGAIN); 722 } 723 724 /* 725 * Make sure this is NOT the IOD thread, 726 * or the wait below will stop the reader. 727 */ 728 ASSERT(curthread != vcp->iod_thr); 729 730 SMBRQ_LOCK(rqp); 731 732 /* 733 * First, wait for the request to be sent. Normally the send 734 * has already happened by the time we get here. However, if 735 * we have more than maxmux entries in the request list, our 736 * request may not be sent until other requests complete. 737 * The wait in this case is due to local I/O demands, so 738 * we don't want the server response timeout to apply. 739 * 740 * If a request is allowed to interrupt this wait, then the 741 * request is cancelled and never sent OTW. Some kinds of 742 * requests should never be cancelled (i.e. close) and those 743 * are marked SMBR_NOINTR_SEND so they either go eventually, 744 * or a connection close will terminate them with ENOTCONN. 745 */ 746 while (rqp->sr_state == SMBRQ_NOTSENT) { 747 rqp->sr_flags |= SMBR_SENDWAIT; 748 if (rqp->sr_flags & SMBR_NOINTR_SEND) { 749 cv_wait(&rqp->sr_cond, &rqp->sr_lock); 750 rc = 1; 751 } else 752 rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock); 753 rqp->sr_flags &= ~SMBR_SENDWAIT; 754 if (rc == 0) { 755 SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp); 756 error = EINTR; 757 goto out; 758 } 759 } 760 761 /* 762 * The request has been sent. Now wait for the response, 763 * with the timeout specified for this request. 764 * Compute all the deadlines now, so we effectively 765 * start the timer(s) after the request is sent. 766 */ 767 if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo)) 768 tmo1 = SEC_TO_TICK(smb_timo_notice); 769 else 770 tmo1 = 0; 771 tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo); 772 773 /* 774 * As above, we don't want to allow interrupt for some 775 * requests like open, because we could miss a succesful 776 * response and therefore "leak" a FID. Such requests 777 * are marked SMBR_NOINTR_RECV to prevent that. 778 * 779 * If "slow server" warnings are enabled, wait first 780 * for the "notice" timeout, and warn if expired. 781 */ 782 if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) { 783 if (rqp->sr_flags & SMBR_NOINTR_RECV) 784 tr = cv_reltimedwait(&rqp->sr_cond, 785 &rqp->sr_lock, tmo1, TR_CLOCK_TICK); 786 else 787 tr = cv_reltimedwait_sig(&rqp->sr_cond, 788 &rqp->sr_lock, tmo1, TR_CLOCK_TICK); 789 if (tr == 0) { 790 error = EINTR; 791 goto out; 792 } 793 if (tr < 0) { 794 #ifdef DTRACE_PROBE 795 DTRACE_PROBE1(smb_iod_waitrq1, 796 (smb_rq_t *), rqp); 797 #endif 798 #ifdef NOT_YET 799 /* Want this to go ONLY to the user. */ 800 uprintf("SMB server %s has not responded" 801 " to request %d after %d seconds..." 802 " (still waiting).\n", vcp->vc_srvname, 803 rqp->sr_mid, smb_timo_notice); 804 #endif 805 } 806 } 807 808 /* 809 * Keep waiting until tmo2 is expired. 810 */ 811 while (rqp->sr_rpgen == rqp->sr_rplast) { 812 if (rqp->sr_flags & SMBR_NOINTR_RECV) 813 tr = cv_timedwait(&rqp->sr_cond, 814 &rqp->sr_lock, tmo2); 815 else 816 tr = cv_timedwait_sig(&rqp->sr_cond, 817 &rqp->sr_lock, tmo2); 818 if (tr == 0) { 819 error = EINTR; 820 goto out; 821 } 822 if (tr < 0) { 823 #ifdef DTRACE_PROBE 824 DTRACE_PROBE1(smb_iod_waitrq2, 825 (smb_rq_t *), rqp); 826 #endif 827 #ifdef NOT_YET 828 /* Want this to go ONLY to the user. */ 829 uprintf("SMB server %s has not responded" 830 " to request %d after %d seconds..." 831 " (giving up).\n", vcp->vc_srvname, 832 rqp->sr_mid, rqp->sr_timo); 833 #endif 834 error = ETIME; 835 goto out; 836 } 837 /* got wakeup */ 838 } 839 error = rqp->sr_lerror; 840 rqp->sr_rplast++; 841 842 out: 843 SMBRQ_UNLOCK(rqp); 844 845 /* 846 * MULTIPACKET request must stay in the list. 847 * They may need additional responses. 848 */ 849 if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0) 850 smb_iod_removerq(rqp); 851 852 /* 853 * Some request has been completed. 854 * If we reached the mux limit, 855 * re-run the send loop... 856 */ 857 if (vcp->iod_muxfull) 858 smb_iod_sendall(vcp); 859 860 return (error); 861 } 862 863 /* 864 * Shutdown all outstanding I/O requests on the specified share with 865 * ENXIO; used when unmounting a share. (There shouldn't be any for a 866 * non-forced unmount; if this is a forced unmount, we have to shutdown 867 * the requests as part of the unmount process.) 868 */ 869 void 870 smb_iod_shutdown_share(struct smb_share *ssp) 871 { 872 struct smb_vc *vcp = SSTOVC(ssp); 873 struct smb_rq *rqp; 874 875 /* 876 * Loop through the list of requests and shutdown the ones 877 * that are for the specified share. 878 */ 879 rw_enter(&vcp->iod_rqlock, RW_READER); 880 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) { 881 if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp) 882 smb_iod_rqprocessed(rqp, EIO, 0); 883 } 884 rw_exit(&vcp->iod_rqlock); 885 } 886 887 /* 888 * Send all requests that need sending. 889 * Called from _addrq, _multirq, _waitrq 890 */ 891 void 892 smb_iod_sendall(smb_vc_t *vcp) 893 { 894 struct smb_rq *rqp; 895 int error, muxcnt; 896 897 /* 898 * Clear "newrq" to make sure threads adding 899 * new requests will run this function again. 900 */ 901 rw_enter(&vcp->iod_rqlock, RW_WRITER); 902 vcp->iod_newrq = 0; 903 904 /* 905 * We only read iod_rqlist, so downgrade rwlock. 906 * This allows the IOD to handle responses while 907 * some requesting thread may be blocked in send. 908 */ 909 rw_downgrade(&vcp->iod_rqlock); 910 911 /* 912 * Serialize to prevent multiple senders. 913 * Note lock order: iod_rqlock, vc_sendlock 914 */ 915 sema_p(&vcp->vc_sendlock); 916 917 /* 918 * Walk the list of requests and send when possible. 919 * We avoid having more than vc_maxmux requests 920 * outstanding to the server by traversing only 921 * vc_maxmux entries into this list. Simple! 922 */ 923 ASSERT(vcp->vc_maxmux > 0); 924 error = muxcnt = 0; 925 TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) { 926 927 if (vcp->vc_state != SMBIOD_ST_VCACTIVE) { 928 error = ENOTCONN; /* stop everything! */ 929 break; 930 } 931 932 if (rqp->sr_state == SMBRQ_NOTSENT) { 933 error = smb_iod_sendrq(rqp); 934 if (error) 935 break; 936 } 937 938 if (++muxcnt == vcp->vc_maxmux) { 939 SMBIODEBUG("muxcnt == vc_maxmux\n"); 940 break; 941 } 942 943 } 944 945 /* 946 * If we have vc_maxmux requests outstanding, 947 * arrange for _waitrq to call _sendall as 948 * requests are completed. 949 */ 950 vcp->iod_muxfull = 951 (muxcnt < vcp->vc_maxmux) ? 0 : 1; 952 953 sema_v(&vcp->vc_sendlock); 954 rw_exit(&vcp->iod_rqlock); 955 } 956 957 int 958 smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr) 959 { 960 struct file *fp = NULL; 961 int err = 0; 962 963 /* 964 * This is called by the one-and-only 965 * IOD thread for this VC. 966 */ 967 ASSERT(vcp->iod_thr == curthread); 968 969 /* 970 * Get the network transport file pointer, 971 * and "loan" it to our transport module. 972 */ 973 if ((fp = getf(vcp->vc_tran_fd)) == NULL) { 974 err = EBADF; 975 goto out; 976 } 977 if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0) 978 goto out; 979 980 /* 981 * In case of reconnect, tell any enqueued requests 982 * then can GO! 983 */ 984 SMB_VC_LOCK(vcp); 985 vcp->vc_genid++; /* possibly new connection */ 986 smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE); 987 cv_broadcast(&vcp->vc_statechg); 988 SMB_VC_UNLOCK(vcp); 989 990 /* 991 * The above cv_broadcast should be sufficient to 992 * get requests going again. 993 * 994 * If we have a callback function, run it. 995 * Was: smb_iod_notify_connected() 996 */ 997 if (fscb && fscb->fscb_connect) 998 smb_vc_walkshares(vcp, fscb->fscb_connect); 999 1000 /* 1001 * Run the "reader" loop. 1002 */ 1003 err = smb_iod_recvall(vcp); 1004 1005 /* 1006 * The reader loop returned, so we must have a 1007 * new state. (disconnected or reconnecting) 1008 * 1009 * Notify shares of the disconnect. 1010 * Was: smb_iod_notify_disconnect() 1011 */ 1012 smb_vc_walkshares(vcp, smb_iod_share_disconnected); 1013 1014 /* 1015 * The reader loop function returns only when 1016 * there's been an error on the connection, or 1017 * this VC has no more references. It also 1018 * updates the state before it returns. 1019 * 1020 * Tell any requests to give up or restart. 1021 */ 1022 smb_iod_invrq(vcp); 1023 1024 out: 1025 /* Recall the file descriptor loan. */ 1026 (void) SMB_TRAN_LOAN_FP(vcp, NULL, cr); 1027 if (fp != NULL) { 1028 releasef(vcp->vc_tran_fd); 1029 } 1030 1031 return (err); 1032 } 1033 1034 /* 1035 * Wait around for someone to ask to use this VC. 1036 * If the VC has only the IOD reference, then 1037 * wait only a minute or so, then drop it. 1038 */ 1039 int 1040 smb_iod_vc_idle(struct smb_vc *vcp) 1041 { 1042 clock_t tr, delta = SEC_TO_TICK(15); 1043 int err = 0; 1044 1045 /* 1046 * This is called by the one-and-only 1047 * IOD thread for this VC. 1048 */ 1049 ASSERT(vcp->iod_thr == curthread); 1050 1051 SMB_VC_LOCK(vcp); 1052 while (vcp->vc_state == SMBIOD_ST_IDLE) { 1053 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock, 1054 delta, TR_CLOCK_TICK); 1055 if (tr == 0) { 1056 err = EINTR; 1057 break; 1058 } 1059 if (tr < 0) { 1060 /* timeout */ 1061 if (vcp->vc_co.co_usecount == 1) { 1062 /* Let this IOD terminate. */ 1063 smb_iod_newstate(vcp, SMBIOD_ST_DEAD); 1064 /* nobody to cv_broadcast */ 1065 break; 1066 } 1067 } 1068 } 1069 SMB_VC_UNLOCK(vcp); 1070 1071 return (err); 1072 } 1073 1074 /* 1075 * After a failed reconnect attempt, smbiod will 1076 * call this to make current requests error out. 1077 */ 1078 int 1079 smb_iod_vc_rcfail(struct smb_vc *vcp) 1080 { 1081 clock_t tr; 1082 int err = 0; 1083 1084 /* 1085 * This is called by the one-and-only 1086 * IOD thread for this VC. 1087 */ 1088 ASSERT(vcp->iod_thr == curthread); 1089 1090 if (vcp->vc_state != SMBIOD_ST_RECONNECT) 1091 return (EINVAL); 1092 1093 SMB_VC_LOCK(vcp); 1094 1095 smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED); 1096 cv_broadcast(&vcp->vc_statechg); 1097 1098 /* 1099 * Short wait here for two reasons: 1100 * (1) Give requests a chance to error out. 1101 * (2) Prevent immediate retry. 1102 */ 1103 tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock, 1104 SEC_TO_TICK(5), TR_CLOCK_TICK); 1105 if (tr == 0) 1106 err = EINTR; 1107 1108 smb_iod_newstate(vcp, SMBIOD_ST_IDLE); 1109 cv_broadcast(&vcp->vc_statechg); 1110 1111 SMB_VC_UNLOCK(vcp); 1112 1113 return (err); 1114 } 1115 1116 /* 1117 * Ask the IOD to reconnect (if not already underway) 1118 * then wait for the reconnect to finish. 1119 */ 1120 int 1121 smb_iod_reconnect(struct smb_vc *vcp) 1122 { 1123 int err = 0, rv; 1124 1125 SMB_VC_LOCK(vcp); 1126 again: 1127 switch (vcp->vc_state) { 1128 1129 case SMBIOD_ST_IDLE: 1130 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT); 1131 cv_signal(&vcp->iod_idle); 1132 /* FALLTHROUGH */ 1133 1134 case SMBIOD_ST_RECONNECT: 1135 rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock); 1136 if (rv == 0) { 1137 err = EINTR; 1138 break; 1139 } 1140 goto again; 1141 1142 case SMBIOD_ST_VCACTIVE: 1143 err = 0; /* success! */ 1144 break; 1145 1146 case SMBIOD_ST_RCFAILED: 1147 case SMBIOD_ST_DEAD: 1148 default: 1149 err = ENOTCONN; 1150 break; 1151 } 1152 1153 SMB_VC_UNLOCK(vcp); 1154 return (err); 1155 } 1156