1 /* 2 * Copyright (c) 2000-2001 Boris Popov 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Boris Popov. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * $Id: smb_iod.c,v 1.32 2005/02/12 00:17:09 lindak Exp $ 33 */ 34 35 /* 36 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 37 * Use is subject to license terms. 
 */

#ifdef DEBUG
/* See sys/queue.h */
#define	QUEUEDEBUG 1
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/unistd.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/time.h>
#include <sys/class.h>
#include <sys/disp.h>
#include <sys/cmn_err.h>
#include <sys/zone.h>
#include <sys/sdt.h>

#ifdef APPLE
#include <sys/smb_apple.h>
#else
#include <netsmb/smb_osdep.h>
#endif

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_trantcp.h>

int smb_iod_send_echo(smb_vc_t *);

/*
 * This is set/cleared when smbfs loads/unloads
 * No locks should be necessary, because smbfs
 * can't unload until all the mounts are gone.
 */
static smb_fscb_t *fscb;

/*
 * Install (or clear, with cb == NULL) the file-system callback
 * vector used to notify smbfs of connection state changes.
 * Always returns 0.
 */
int
smb_fscb_set(smb_fscb_t *cb)
{
	fscb = cb;
	return (0);
}

/*
 * Per-share disconnect notification: invalidate the share and,
 * if smbfs has registered a disconnect callback, run it.
 */
static void
smb_iod_share_disconnected(smb_share_t *ssp)
{

	smb_share_invalidate(ssp);

	/* smbfs_dead() */
	if (fscb && fscb->fscb_disconn) {
		fscb->fscb_disconn(ssp);
	}
}

/*
 * State changes are important and infrequent.
 * Make them easily observable via dtrace.
 */
void
smb_iod_newstate(struct smb_vc *vcp, int state)
{
	vcp->vc_state = state;
}

/*
 * Lock Held version of the next function.
 * Mark a request as processed (with the given error/flags) and wake
 * all waiters.  Caller must hold SMBRQ_LOCK(rqp); the sr_rpgen bump
 * is what smb_iod_waitrq watches for.
 */
static inline void
smb_iod_rqprocessed_LH(
	struct smb_rq *rqp,
	int error,
	int flags)
{
	rqp->sr_flags |= flags;
	rqp->sr_lerror = error;
	rqp->sr_rpgen++;
	rqp->sr_state = SMBRQ_NOTIFIED;
	cv_broadcast(&rqp->sr_cond);
}

/*
 * Unlocked wrapper: take SMBRQ_LOCK, mark the request processed,
 * then drop the lock.
 */
static void
smb_iod_rqprocessed(
	struct smb_rq *rqp,
	int error,
	int flags)
{

	SMBRQ_LOCK(rqp);
	smb_iod_rqprocessed_LH(rqp, error, flags);
	SMBRQ_UNLOCK(rqp);
}

/*
 * Fail every outstanding request on this VC with ENOTCONN and
 * ask the issuers to restart them (SMBR_RESTART).
 */
static void
smb_iod_invrq(struct smb_vc *vcp)
{
	struct smb_rq *rqp;

	/*
	 * Invalidate all outstanding requests for this connection
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		smb_iod_rqprocessed(rqp, ENOTCONN, SMBR_RESTART);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Called by smb_vc_rele, smb_vc_kill, and by the driver
 * close entry point if the IOD closes its dev handle.
 *
 * Forcibly kill the connection and IOD.
 * Always returns 0.
 */
int
smb_iod_disconnect(struct smb_vc *vcp)
{

	/*
	 * Inform everyone of the state change.
	 */
	SMB_VC_LOCK(vcp);
	if (vcp->vc_state != SMBIOD_ST_DEAD) {
		smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
		cv_broadcast(&vcp->vc_statechg);
	}
	SMB_VC_UNLOCK(vcp);

	/*
	 * Let's be safe here and avoid doing any
	 * call across the network while trying to
	 * shut things down.  If we just disconnect,
	 * the server will take care of the logoff.
	 */
	SMB_TRAN_DISCONNECT(vcp);

	/*
	 * If we have an IOD, it should immediately notice
	 * that its connection has closed.  But in case
	 * it doesn't, let's also send it a signal.
	 * (but don't shoot our own foot!)
	 * Note: the iod calls smb_iod_invrq on its way out.
	 */
	if (vcp->iod_thr != NULL &&
	    vcp->iod_thr != curthread) {
		tsignal(vcp->iod_thr, SIGKILL);
	}

	return (0);
}

/*
 * Send one request.
 *
 * Called by _addrq (for internal requests)
 * and _sendall (via _addrq, _multirq, _waitrq)
 *
 * Returns 0 on success or a non-fatal send error;
 * ENOTCONN when the VC is not active, when the request has
 * been retried too long (one minute), or on a fatal
 * transport error.  Caller must hold vc_sendlock and
 * iod_rqlock as reader (asserted below).
 */
static int
smb_iod_sendrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	mblk_t *m;
	int error;

	ASSERT(vcp);
	ASSERT(SEMA_HELD(&vcp->vc_sendlock));
	ASSERT(RW_READ_HELD(&vcp->iod_rqlock));

	/*
	 * Note: Anything special for SMBR_INTERNAL here?
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}


	/*
	 * On the first send, set the MID and (maybe)
	 * the signing sequence numbers.  The increments
	 * here are serialized by vc_sendlock
	 */
	if (rqp->sr_sendcnt == 0) {

		rqp->sr_mid = vcp->vc_next_mid++;

		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			/*
			 * We're signing requests and verifying
			 * signatures on responses.  Set the
			 * sequence numbers of the request and
			 * response here, used in smb_rq_verify.
			 */
			rqp->sr_seqno = vcp->vc_next_seq++;
			rqp->sr_rseqno = vcp->vc_next_seq++;
		}

		/* Fill in UID, TID, MID, etc. */
		smb_rq_fillhdr(rqp);

		/*
		 * Sign the message now that we're finally done
		 * filling in the SMB header fields, etc.
		 */
		if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
			smb_rq_sign(rqp);
		}
	}
	if (rqp->sr_sendcnt++ >= 60/SMBSBTIMO) { /* one minute */
		smb_iod_rqprocessed(rqp, rqp->sr_lerror, SMBR_RESTART);
		/*
		 * If all attempts to send a request failed, then
		 * something is seriously hosed.
		 */
		return (ENOTCONN);
	}

	/*
	 * Replaced m_copym() with Solaris copymsg() which does the same
	 * work when we want to do a M_COPYALL.
	 * m = m_copym(rqp->sr_rq.mb_top, 0, M_COPYALL, 0);
	 *
	 * We send a copy so the original message survives for
	 * a possible resend (sr_sendcnt retry above).
	 */
	m = copymsg(rqp->sr_rq.mb_top);

#ifdef DTRACE_PROBE
	DTRACE_PROBE2(smb_iod_sendrq,
	    (smb_rq_t *), rqp, (mblk_t *), m);
#else
	SMBIODEBUG("M:%04x, P:%04x, U:%04x, T:%04x\n", rqp->sr_mid, 0, 0, 0);
#endif
	m_dumpm(m);

	if (m != NULL) {
		error = SMB_TRAN_SEND(vcp, m);
		m = 0; /* consumed by SEND */
	} else
		error = ENOBUFS;

	rqp->sr_lerror = error;
	if (error == 0) {
		/* Sent OK: update state and wake any send-waiters. */
		SMBRQ_LOCK(rqp);
		rqp->sr_flags |= SMBR_SENT;
		rqp->sr_state = SMBRQ_SENT;
		if (rqp->sr_flags & SMBR_SENDWAIT)
			cv_broadcast(&rqp->sr_cond);
		SMBRQ_UNLOCK(rqp);
		return (0);
	}
	/*
	 * Check for fatal errors
	 */
	if (SMB_TRAN_FATAL(vcp, error)) {
		/*
		 * No further attempts should be made
		 */
		SMBSDEBUG("TRAN_SEND returned fatal error %d\n", error);
		return (ENOTCONN);
	}
	if (error)
		SMBSDEBUG("TRAN_SEND returned non-fatal error %d\n", error);

#ifdef APPLE
	/* If proc waiting on rqp was signaled... */
	if (smb_rq_intr(rqp))
		smb_iod_rqprocessed(rqp, EINTR, 0);
#endif

	return (0);
}

/*
 * Receive one SMB message from the transport, verify that it
 * begins with the SMB signature, and return it via *mpp.
 * Loops internally on EAGAIN.  Returns 0 on success, the
 * transport error on receive failure, ENOSR if the header
 * could not be pulled up, or EPROTO on a bad signature.
 */
static int
smb_iod_recv1(struct smb_vc *vcp, mblk_t **mpp)
{
	mblk_t *m;
	uchar_t *hp;
	int error;

top:
	m = NULL;
	error = SMB_TRAN_RECV(vcp, &m);
	if (error == EAGAIN)
		goto top;
	if (error)
		return (error);
	ASSERT(m);

	/* Make the fixed-size SMB header contiguous for inspection. */
	m = m_pullup(m, SMB_HDRLEN);
	if (m == NULL) {
		return (ENOSR);
	}

	/*
	 * Check the SMB header
	 */
	hp = mtod(m, uchar_t *);
	if (bcmp(hp, SMB_SIGNATURE, SMB_SIGLEN) != 0) {
		m_freem(m);
		return (EPROTO);
	}

	*mpp = m;
	return (0);
}

/*
 * Process incoming packets
 *
 * This is the "reader" loop, run by the IOD thread
 * while in state SMBIOD_ST_VCACTIVE.  The loop now
 * simply blocks in the socket recv until either a
 * message arrives, or a disconnect.
 *
 * Any non-zero error means the IOD should terminate.
 */
int
smb_iod_recvall(struct smb_vc *vcp)
{
	struct smb_rq *rqp;
	mblk_t *m;
	uchar_t *hp;
	ushort_t mid;
	int error = 0;
	int etime_count = 0; /* for "server not responding", etc. */

	for (;;) {
		/*
		 * Check whether someone "killed" this VC,
		 * or is asking the IOD to terminate.
		 */

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
			error = 0;
			break;
		}

		if (vcp->iod_flags & SMBIOD_SHUTDOWN) {
			SMBIODEBUG("SHUTDOWN set\n");
			/* This IOD thread will terminate. */
			SMB_VC_LOCK(vcp);
			smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = EINTR;
			break;
		}

		m = NULL;
		error = smb_iod_recv1(vcp, &m);

		/*
		 * Note: the ETIME handling below assumes the recv
		 * timeout is 15 seconds; etime_count counts those
		 * intervals.
		 */
		if (error == ETIME &&
		    vcp->iod_rqlist.tqh_first != NULL) {
			/*
			 * Nothing received for 15 seconds and
			 * we have requests in the queue.
			 */
			etime_count++;

			/*
			 * Once, at 15 sec. notify callbacks
			 * and print the warning message.
			 */
			if (etime_count == 1) {
				/* Was: smb_iod_notify_down(vcp); */
				if (fscb && fscb->fscb_down)
					smb_vc_walkshares(vcp,
					    fscb->fscb_down);
				zprintf(vcp->vc_zoneid,
				    "SMB server %s not responding\n",
				    vcp->vc_srvname);
			}

			/*
			 * At 30 sec. try sending an echo, and then
			 * once a minute thereafter.
			 */
			if ((etime_count & 3) == 2) {
				(void) smb_iod_send_echo(vcp);
			}

			continue;
		} /* ETIME && requests in queue */

		if (error == ETIME) {
			/*
			 * If the IOD thread holds the last reference
			 * to this VC, let the IOD thread terminate.
			 * (Re-check the use count under the VC lock.)
			 */
			if (vcp->vc_co.co_usecount > 1)
				continue;
			SMB_VC_LOCK(vcp);
			if (vcp->vc_co.co_usecount == 1) {
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				SMB_VC_UNLOCK(vcp);
				error = 0;
				break;
			}
			SMB_VC_UNLOCK(vcp);
			continue;
		} /* error == ETIME */

		if (error) {
			/*
			 * The recv. above returned some error
			 * we can't continue from i.e. ENOTCONN.
			 * It's dangerous to continue here.
			 * (possible infinite loop!)
			 *
			 * If we have requests enqueued, next
			 * state is reconnecting, else idle.
			 */
			int state;
			SMB_VC_LOCK(vcp);
			state = (vcp->iod_rqlist.tqh_first != NULL) ?
			    SMBIOD_ST_RECONNECT : SMBIOD_ST_IDLE;
			smb_iod_newstate(vcp, state);
			cv_broadcast(&vcp->vc_statechg);
			SMB_VC_UNLOCK(vcp);
			error = 0;
			break;
		}

		/*
		 * Received something.  Yea!
		 * If we had been complaining about the server,
		 * tell the world (and the callbacks) it's back.
		 */
		if (etime_count) {
			etime_count = 0;

			zprintf(vcp->vc_zoneid, "SMB server %s OK\n",
			    vcp->vc_srvname);

			/* Was: smb_iod_notify_up(vcp); */
			if (fscb && fscb->fscb_up)
				smb_vc_walkshares(vcp, fscb->fscb_up);
		}

		/*
		 * Have an SMB packet.  The SMB header was
		 * checked in smb_iod_recv1().
		 * Find the request...
		 */
		hp = mtod(m, uchar_t *);
		/*LINTED*/
		mid = letohs(SMB_HDRMID(hp));
		SMBIODEBUG("mid %04x\n", (uint_t)mid);

		rw_enter(&vcp->iod_rqlock, RW_READER);
		TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

			if (rqp->sr_mid != mid)
				continue;

			DTRACE_PROBE2(smb_iod_recvrq,
			    (smb_rq_t *), rqp, (mblk_t *), m);
			m_dumpm(m);

			SMBRQ_LOCK(rqp);
			if (rqp->sr_rp.md_top == NULL) {
				md_initm(&rqp->sr_rp, m);
			} else {
				if (rqp->sr_flags & SMBR_MULTIPACKET) {
					md_append_record(&rqp->sr_rp, m);
				} else {
					/*
					 * NOTE(review): on this duplicate-
					 * response path, m does not appear to
					 * be freed (rqp != NULL, so the
					 * m_freem below is skipped) —
					 * possible mblk leak; confirm.
					 */
					SMBRQ_UNLOCK(rqp);
					SMBSDEBUG("duplicate response %d "
					    "(ignored)\n", mid);
					break;
				}
			}
			smb_iod_rqprocessed_LH(rqp, 0, 0);
			SMBRQ_UNLOCK(rqp);
			break;
		}

		/*
		 * No matching request: an unsolicited response.
		 * Drop it (quietly, if it was an echo reply).
		 */
		if (rqp == NULL) {
			int cmd = SMB_HDRCMD(hp);

			if (cmd != SMB_COM_ECHO)
				SMBSDEBUG("drop resp: mid %d, cmd %d\n",
				    (uint_t)mid, cmd);
			/* smb_printrqlist(vcp); */
			m_freem(m);
		}
		rw_exit(&vcp->iod_rqlock);

	}

	return (error);
}

/*
 * The IOD receiver thread has requests pending and
 * has not received anything in a while.  Try to
 * send an SMB echo request.  It's tricky to do a
 * send from the IOD thread because we can't block.
 *
 * Using tmo=SMBNOREPLYWAIT in the request
 * so smb_rq_reply will skip smb_iod_waitrq.
 * The smb_smb_echo call uses SMBR_INTERNAL
 * to avoid calling smb_iod_sendall().
 */
int
smb_iod_send_echo(smb_vc_t *vcp)
{
	smb_cred_t scred;
	int err;

	smb_credinit(&scred, NULL);
	err = smb_smb_echo(vcp, &scred, SMBNOREPLYWAIT);
	smb_credrele(&scred);
	return (err);
}

/*
 * The IOD thread is now just a "reader",
 * so no more smb_iod_request().  Yea!
 */

/*
 * Place request in the queue, and send it now if possible.
 * Called with no locks held.
 */
int
smb_iod_addrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error, save_newrq;

	ASSERT(rqp->sr_cred);

	/*
	 * State should be correct after the check in
	 * smb_rq_enqueue(), but we dropped locks...
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * Requests from the IOD itself are marked _INTERNAL,
	 * and get some special treatment to avoid blocking
	 * the reader thread (so we don't deadlock).
	 * The request is not yet on the queue, so we can
	 * modify it's state here without locks.
	 * Only thing using this now is ECHO.
	 */
	rqp->sr_owner = curthread;
	if (rqp->sr_owner == vcp->iod_thr) {
		rqp->sr_flags |= SMBR_INTERNAL;

		/*
		 * This is a request from the IOD thread.
		 * Always send directly from this thread.
		 * Note lock order: iod_rqlist, vc_sendlock
		 */
		rw_enter(&vcp->iod_rqlock, RW_WRITER);
		TAILQ_INSERT_HEAD(&vcp->iod_rqlist, rqp, sr_link);
		rw_downgrade(&vcp->iod_rqlock);

		/*
		 * Note: iod_sendrq expects vc_sendlock,
		 * so take that here, but carefully:
		 * Never block the IOD thread here.
		 */
		if (sema_tryp(&vcp->vc_sendlock) == 0) {
			SMBIODEBUG("sendlock busy\n");
			error = EAGAIN;
		} else {
			/* Have vc_sendlock */
			error = smb_iod_sendrq(rqp);
			sema_v(&vcp->vc_sendlock);
		}

		rw_exit(&vcp->iod_rqlock);

		/*
		 * In the non-error case, _removerq
		 * is done by either smb_rq_reply
		 * or smb_iod_waitrq.
		 */
		if (error)
			smb_iod_removerq(rqp);

		return (error);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	TAILQ_INSERT_TAIL(&vcp->iod_rqlist, rqp, sr_link);
	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just put on the list.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}

/*
 * Mark an SMBR_MULTIPACKET request as
 * needing another send.  Similar to the
 * "normal" part of smb_iod_addrq.
 *
 * Returns EINVAL for internal requests, ENOTCONN if the
 * VC is not active, else 0.
 */
int
smb_iod_multirq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int save_newrq;

	ASSERT(rqp->sr_flags & SMBR_MULTIPACKET);

	if (rqp->sr_flags & SMBR_INTERNAL)
		return (EINVAL);

	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBIODEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	rw_enter(&vcp->iod_rqlock, RW_WRITER);

	/* Already on iod_rqlist, just reset state. */
	rqp->sr_state = SMBRQ_NOTSENT;

	/* iod_rqlock/WRITER protects iod_newrq */
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq++;

	rw_exit(&vcp->iod_rqlock);

	/*
	 * Now send any requests that need to be sent,
	 * including the one we just marked NOTSENT.
	 * Only the thread that found iod_newrq==0
	 * needs to run the send loop.
	 */
	if (save_newrq == 0)
		smb_iod_sendall(vcp);

	return (0);
}


/*
 * Remove a request from the VC's request list.
 * Always returns 0.
 */
int
smb_iod_removerq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;

	rw_enter(&vcp->iod_rqlock, RW_WRITER);
#ifdef QUEUEDEBUG
	/*
	 * Make sure we have not already removed it.
	 * See sys/queue.h QUEUEDEBUG_TAILQ_POSTREMOVE
	 * XXX: Don't like the constant 1 here...
	 */
	ASSERT(rqp->sr_link.tqe_next != (void *)1L);
#endif
	TAILQ_REMOVE(&vcp->iod_rqlist, rqp, sr_link);
	rw_exit(&vcp->iod_rqlock);

	return (0);
}



/*
 * Wait for a request to complete.
 *
 * For normal requests, we need to deal with
 * ioc_muxcnt dropping below vc_maxmux by
 * making arrangements to send more...
 *
 * Returns 0 with the response available in rqp->sr_rp;
 * EAGAIN for internal requests (no wait); EINTR if
 * interrupted; ETIME if the response timeout expires;
 * otherwise rqp->sr_lerror.
 */
int
smb_iod_waitrq(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	clock_t tr, tmo1, tmo2;
	int error, rc;

	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT((rqp->sr_flags & SMBR_MULTIPACKET) == 0);
		smb_iod_removerq(rqp);
		return (EAGAIN);
	}

	/*
	 * Make sure this is NOT the IOD thread,
	 * or the wait below will stop the reader.
	 */
	ASSERT(curthread != vcp->iod_thr);

	SMBRQ_LOCK(rqp);

	/*
	 * First, wait for the request to be sent.  Normally the send
	 * has already happened by the time we get here.  However, if
	 * we have more than maxmux entries in the request list, our
	 * request may not be sent until other requests complete.
	 * The wait in this case is due to local I/O demands, so
	 * we don't want the server response timeout to apply.
	 *
	 * If a request is allowed to interrupt this wait, then the
	 * request is cancelled and never sent OTW.  Some kinds of
	 * requests should never be cancelled (i.e. close) and those
	 * are marked SMBR_NOINTR_SEND so they either go eventually,
	 * or a connection close will terminate them with ENOTCONN.
	 */
	while (rqp->sr_state == SMBRQ_NOTSENT) {
		rqp->sr_flags |= SMBR_SENDWAIT;
		if (rqp->sr_flags & SMBR_NOINTR_SEND) {
			cv_wait(&rqp->sr_cond, &rqp->sr_lock);
			rc = 1;
		} else
			rc = cv_wait_sig(&rqp->sr_cond, &rqp->sr_lock);
		rqp->sr_flags &= ~SMBR_SENDWAIT;
		if (rc == 0) {
			SMBIODEBUG("EINTR in sendwait, rqp=%p\n", rqp);
			error = EINTR;
			goto out;
		}
	}

	/*
	 * The request has been sent.  Now wait for the response,
	 * with the timeout specified for this request.
	 * Compute all the deadlines now, so we effectively
	 * start the timer(s) after the request is sent.
	 */
	if (smb_timo_notice && (smb_timo_notice < rqp->sr_timo))
		tmo1 = SEC_TO_TICK(smb_timo_notice);
	else
		tmo1 = 0;

	/* tmo2 is an absolute deadline in lbolt ticks. */
	tmo2 = ddi_get_lbolt() + SEC_TO_TICK(rqp->sr_timo);

	/*
	 * As above, we don't want to allow interrupt for some
	 * requests like open, because we could miss a succesful
	 * response and therefore "leak" a FID.  Such requests
	 * are marked SMBR_NOINTR_RECV to prevent that.
	 *
	 * If "slow server" warnings are enabled, wait first
	 * for the "notice" timeout, and warn if expired.
	 */
	if (tmo1 && rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_reltimedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		else
			tr = cv_reltimedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo1, TR_CLOCK_TICK);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* "Notice" timeout expired (still waiting). */
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq1,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (still waiting).\n", vcp->vc_srvname,
			    rqp->sr_mid, smb_timo_notice);
#endif
		}
	}

	/*
	 * Keep waiting until tmo2 is expired.
	 */
	while (rqp->sr_rpgen == rqp->sr_rplast) {
		if (rqp->sr_flags & SMBR_NOINTR_RECV)
			tr = cv_timedwait(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		else
			tr = cv_timedwait_sig(&rqp->sr_cond,
			    &rqp->sr_lock, tmo2);
		if (tr == 0) {
			error = EINTR;
			goto out;
		}
		if (tr < 0) {
			/* Final deadline expired: give up. */
#ifdef DTRACE_PROBE
			DTRACE_PROBE1(smb_iod_waitrq2,
			    (smb_rq_t *), rqp);
#endif
#ifdef NOT_YET
			/* Want this to go ONLY to the user. */
			uprintf("SMB server %s has not responded"
			    " to request %d after %d seconds..."
			    " (giving up).\n", vcp->vc_srvname,
			    rqp->sr_mid, rqp->sr_timo);
#endif
			error = ETIME;
			goto out;
		}
		/* got wakeup */
	}
	error = rqp->sr_lerror;
	rqp->sr_rplast++;

out:
	SMBRQ_UNLOCK(rqp);

	/*
	 * MULTIPACKET request must stay in the list.
	 * They may need additional responses.
	 */
	if ((rqp->sr_flags & SMBR_MULTIPACKET) == 0)
		smb_iod_removerq(rqp);

	/*
	 * Some request has been completed.
	 * If we reached the mux limit,
	 * re-run the send loop...
	 */
	if (vcp->iod_muxfull)
		smb_iod_sendall(vcp);

	return (error);
}

/*
 * Shutdown all outstanding I/O requests on the specified share with
 * EIO; used when unmounting a share.  (There shouldn't be any for a
 * non-forced unmount; if this is a forced unmount, we have to shutdown
 * the requests as part of the unmount process.)
 */
void
smb_iod_shutdown_share(struct smb_share *ssp)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	struct smb_rq *rqp;

	/*
	 * Loop through the list of requests and shutdown the ones
	 * that are for the specified share.
	 */
	rw_enter(&vcp->iod_rqlock, RW_READER);
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {
		if (rqp->sr_state != SMBRQ_NOTIFIED && rqp->sr_share == ssp)
			smb_iod_rqprocessed(rqp, EIO, 0);
	}
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Send all requests that need sending.
 * Called from _addrq, _multirq, _waitrq
 */
void
smb_iod_sendall(smb_vc_t *vcp)
{
	struct smb_rq *rqp;
	int error, save_newrq, muxcnt;

	/*
	 * Clear "newrq" to make sure threads adding
	 * new requests will run this function again.
	 */
	rw_enter(&vcp->iod_rqlock, RW_WRITER);
	save_newrq = vcp->iod_newrq;
	vcp->iod_newrq = 0;

	/*
	 * We only read iod_rqlist, so downgrade rwlock.
	 * This allows the IOD to handle responses while
	 * some requesting thread may be blocked in send.
	 */
	rw_downgrade(&vcp->iod_rqlock);

	/* Expect to find about this many requests. */
	SMBIODEBUG("top, save_newrq=%d\n", save_newrq);

	/*
	 * Serialize to prevent multiple senders.
	 * Note lock order: iod_rqlock, vc_sendlock
	 */
	sema_p(&vcp->vc_sendlock);

	/*
	 * Walk the list of requests and send when possible.
	 * We avoid having more than vc_maxmux requests
	 * outstanding to the server by traversing only
	 * vc_maxmux entries into this list.  Simple!
	 */
	ASSERT(vcp->vc_maxmux > 0);
	error = muxcnt = 0;
	TAILQ_FOREACH(rqp, &vcp->iod_rqlist, sr_link) {

		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			error = ENOTCONN; /* stop everything! */
			break;
		}

		if (rqp->sr_state == SMBRQ_NOTSENT) {
			error = smb_iod_sendrq(rqp);
			if (error)
				break;
		}

		if (++muxcnt == vcp->vc_maxmux) {
			SMBIODEBUG("muxcnt == vc_maxmux\n");
			break;
		}

	}

	/*
	 * If we have vc_maxmux requests outstanding,
	 * arrange for _waitrq to call _sendall as
	 * requests are completed.
	 */
	vcp->iod_muxfull =
	    (muxcnt < vcp->vc_maxmux) ? 0 : 1;

	sema_v(&vcp->vc_sendlock);
	rw_exit(&vcp->iod_rqlock);
}

/*
 * Main work function for the IOD thread: loan our transport
 * file pointer to the transport module, mark the VC active,
 * run the reader loop, and on return notify shares and fail
 * outstanding requests.  Returns the reader-loop error, or
 * EBADF / a LOAN_FP error on setup failure.
 */
int
smb_iod_vc_work(struct smb_vc *vcp, cred_t *cr)
{
	struct file *fp = NULL;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	/*
	 * Get the network transport file pointer,
	 * and "loan" it to our transport module.
	 */
	if ((fp = getf(vcp->vc_tran_fd)) == NULL) {
		err = EBADF;
		goto out;
	}
	if ((err = SMB_TRAN_LOAN_FP(vcp, fp, cr)) != 0)
		goto out;

	/*
	 * In case of reconnect, tell any enqueued requests
	 * then can GO!
	 */
	SMB_VC_LOCK(vcp);
	vcp->vc_genid++;	/* possibly new connection */
	smb_iod_newstate(vcp, SMBIOD_ST_VCACTIVE);
	cv_broadcast(&vcp->vc_statechg);
	SMB_VC_UNLOCK(vcp);

	/*
	 * The above cv_broadcast should be sufficient to
	 * get requests going again.
	 *
	 * If we have a callback function, run it.
	 * Was: smb_iod_notify_connected()
	 */
	if (fscb && fscb->fscb_connect)
		smb_vc_walkshares(vcp, fscb->fscb_connect);

	/*
	 * Run the "reader" loop.
	 */
	err = smb_iod_recvall(vcp);

	/*
	 * The reader loop returned, so we must have a
	 * new state.  (disconnected or reconnecting)
	 *
	 * Notify shares of the disconnect.
	 * Was: smb_iod_notify_disconnect()
	 */
	smb_vc_walkshares(vcp, smb_iod_share_disconnected);

	/*
	 * The reader loop function returns only when
	 * there's been an error on the connection, or
	 * this VC has no more references.  It also
	 * updates the state before it returns.
	 *
	 * Tell any requests to give up or restart.
	 */
	smb_iod_invrq(vcp);

out:
	/* Recall the file descriptor loan. */
	(void) SMB_TRAN_LOAN_FP(vcp, NULL, cr);
	if (fp != NULL) {
		releasef(vcp->vc_tran_fd);
	}

	return (err);
}

/*
 * Wait around for someone to ask to use this VC.
 * If the VC has only the IOD reference, then
 * wait only a minute or so, then drop it.
 *
 * Returns 0 when the state leaves IDLE (or when this
 * IOD decides to die), EINTR if interrupted.
 */
int
smb_iod_vc_idle(struct smb_vc *vcp)
{
	clock_t tr, delta = SEC_TO_TICK(15);
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	SMB_VC_LOCK(vcp);
	while (vcp->vc_state == SMBIOD_ST_IDLE) {
		tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
		    delta, TR_CLOCK_TICK);
		if (tr == 0) {
			err = EINTR;
			break;
		}
		if (tr < 0) {
			/* timeout */
			if (vcp->vc_co.co_usecount == 1) {
				/* Let this IOD terminate. */
				smb_iod_newstate(vcp, SMBIOD_ST_DEAD);
				/* nobody to cv_broadcast */
				break;
			}
		}
	}
	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * After a failed reconnect attempt, smbiod will
 * call this to make current requests error out.
 *
 * Returns EINVAL if not in state RECONNECT, EINTR if the
 * short delay below was interrupted, else 0.  Leaves the
 * VC in state IDLE.
 */
int
smb_iod_vc_rcfail(struct smb_vc *vcp)
{
	clock_t tr;
	int err = 0;

	/*
	 * This is called by the one-and-only
	 * IOD thread for this VC.
	 */
	ASSERT(vcp->iod_thr == curthread);

	if (vcp->vc_state != SMBIOD_ST_RECONNECT)
		return (EINVAL);

	SMB_VC_LOCK(vcp);

	smb_iod_newstate(vcp, SMBIOD_ST_RCFAILED);
	cv_broadcast(&vcp->vc_statechg);

	/*
	 * Short wait here for two reasons:
	 * (1) Give requests a chance to error out.
	 * (2) Prevent immediate retry.
	 */
	tr = cv_reltimedwait_sig(&vcp->iod_idle, &vcp->vc_lock,
	    SEC_TO_TICK(5), TR_CLOCK_TICK);
	if (tr == 0)
		err = EINTR;

	smb_iod_newstate(vcp, SMBIOD_ST_IDLE);
	cv_broadcast(&vcp->vc_statechg);

	SMB_VC_UNLOCK(vcp);

	return (err);
}

/*
 * Ask the IOD to reconnect (if not already underway)
 * then wait for the reconnect to finish.
1145 */ 1146 int 1147 smb_iod_reconnect(struct smb_vc *vcp) 1148 { 1149 int err = 0, rv; 1150 1151 SMB_VC_LOCK(vcp); 1152 again: 1153 switch (vcp->vc_state) { 1154 1155 case SMBIOD_ST_IDLE: 1156 smb_iod_newstate(vcp, SMBIOD_ST_RECONNECT); 1157 cv_signal(&vcp->iod_idle); 1158 /* FALLTHROUGH */ 1159 1160 case SMBIOD_ST_RECONNECT: 1161 rv = cv_wait_sig(&vcp->vc_statechg, &vcp->vc_lock); 1162 if (rv == 0) { 1163 err = EINTR; 1164 break; 1165 } 1166 goto again; 1167 1168 case SMBIOD_ST_VCACTIVE: 1169 err = 0; /* success! */ 1170 break; 1171 1172 case SMBIOD_ST_RCFAILED: 1173 case SMBIOD_ST_DEAD: 1174 default: 1175 err = ENOTCONN; 1176 break; 1177 } 1178 1179 SMB_VC_UNLOCK(vcp); 1180 return (err); 1181 } 1182