/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/sdt.h>

#ifdef APPLE
#include <sys/smb_apple.h>
#else
#include <netsmb/smb_osdep.h>
#endif

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_rq.h>

static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);
static int  smb_nt_reply(struct smb_ntrq *ntp);


int
smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
	if (rqp == NULL)
		return (ENOMEM);
	error = smb_rq_init(rqp, layer, cmd, scred);
	if (error) {
		smb_rq_done(rqp);
		return (error);
	}
	rqp->sr_flags |= SMBR_ALLOCED;
	*rqpp = rqp;
	return (0);
}


int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* XXX no ref hold */
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	error = smb_rq_new(rqp, cmd);
	if (!error) {
		rqp->sr_flags |= SMBR_VCREF;
		smb_vc_hold(rqp->sr_vc);
	}
	return (error);
}
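/*
 * Start a new request: (re)initialize the send and reply mbuf
 * chains, then marshal the fixed 32-byte SMB header:
 *
 *	4 bytes		protocol signature (0xFF "SMB")
 *	1 byte		command
 *	4 bytes		status (always zero in requests)
 *	1 byte		flags
 *	2 bytes		flags2 (UNICODE masked off for TRANS)
 *	12 bytes	PID-high, security features, reserved (zeros)
 *	2 bytes		TID (only reserved here, via sr_rqtid)
 *	2 bytes		PID (low 16 bits)
 *	2 bytes		UID (only reserved here, via sr_rquid)
 *	2 bytes		MID
 *
 * The TID and UID slots are reserved rather than filled in;
 * the I/O daemon patches them when the request is transmitted.
 */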
static int
smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	static char tzero[12];
	caddr_t ptr;
	pid_t pid;

	ASSERT(rqp != NULL);
	ASSERT(rqp->sr_cred != NULL);
	pid = rqp->sr_cred->vc_pid;
	rqp->sr_sendcnt = 0;
	rqp->sr_cmd = cmd;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return (error);
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);
	mb_put_uint8(mbp, vcp->vc_hflags);
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		mb_put_uint16le(mbp, (vcp->vc_hflags2 & ~SMB_FLAGS2_UNICODE));
	else
		mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rqtid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, (u_int16_t)(pid));
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rquid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, rqp->sr_mid);
	return (0);
}

void
smb_rq_done(struct smb_rq *rqp)
{
	/* No locks.  Last ref. here. */
	if (rqp->sr_flags & SMBR_VCREF) {
		rqp->sr_flags &= ~SMBR_VCREF;
		smb_vc_rele(rqp->sr_vc);
	}
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	mutex_destroy(&rqp->sr_lock);
	cv_destroy(&rqp->sr_cond);
	if (rqp->sr_flags & SMBR_ALLOCED)
		kmem_free(rqp, sizeof (*rqp));
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
{
	int error = EINVAL;

	for (;;) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue.
		 */
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = timeout;	/* in seconds */
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error) {
			break;
		}
		error = smb_rq_reply(rqp);
		if (!error)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
		    SMBR_RESTART)
			break;
		if (rqp->sr_rexmit <= 0)
			break;
		SMBRQ_LOCK(rqp);
		if (rqp->sr_share && rqp->sr_share->ss_mount) {
			cv_timedwait(&rqp->sr_cond, &(rqp)->sr_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			/* delay() takes a relative tick count */
			delay(hz * SMB_RCNDELAY);
		}
		SMBRQ_UNLOCK(rqp);
		rqp->sr_rexmit--;
#ifdef XXX
		timeout *= 2;
#endif
	}
	return (error);
}


int
smb_rq_simple(struct smb_rq *rqp)
{
	return (smb_rq_simple_timed(rqp, smb_timo_default));
}

static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	/*
	 * Unfortunate special case needed for
	 * tree disconnect, which needs sr_share
	 * but should skip the reconnect check.
	 */
	if (rqp->sr_cmd == SMB_COM_TREE_DISCONNECT)
		ssp = NULL;

	/*
	 * If this is an "internal" request, bypass any
	 * wait for connection state changes, etc.
	 * This request is making those changes.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT(ssp == NULL);
		goto just_doit;
	}

	/*
	 * Wait for VC reconnect to finish...
	 * XXX: Deal with reconnect later.
	 * Just bail out for now.
	 *
	 * MacOS might check vfs_isforce() here.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * If this request has a "share" object:
	 * 1: Deny access if share is _GONE (unmounted)
	 * 2: Wait for state changes in that object,
	 *    Initiate share (re)connect if needed.
	 * XXX: Not really doing 2 yet.
	 */
	if (ssp) {
		if (ssp->ss_flags & SMBS_GONE)
			return (ENOTCONN);
		SMB_SS_LOCK(ssp);
		if (!smb_share_valid(ssp)) {
			error = smb_share_tcon(ssp);
		}
		SMB_SS_UNLOCK(ssp);
	}

	if (!error) {
just_doit:
		error = smb_iod_addrq(rqp);
	}

	return (error);
}
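/*
 * Illustrative sketch (not compiled): how a caller typically drives
 * a request through the helpers below.  The command and the word and
 * byte values here are hypothetical.
 *
 *	struct smb_rq *rqp;
 *	struct mbchain *mbp;
 *	int error;
 *
 *	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_ECHO, scred, &rqp);
 *	if (error)
 *		return (error);
 *	smb_rq_getrequest(rqp, &mbp);
 *	smb_rq_wstart(rqp);
 *	mb_put_uint16le(mbp, 1);	// word: EchoCount
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	mb_put_uint8(mbp, 0);		// one payload byte
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);
 *	smb_rq_done(rqp);
 */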
/*
 * Mark location of the word count, which is filled in later by
 * smb_rq_wend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the word count happens to be 8-bit.
 */
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	uint_t wcnt;

	if (rqp->sr_wcount == NULL) {
		SMBSDEBUG("no wcount\n");
		return;
	}
	wcnt = rqp->sr_rq.mb_count;
	if (wcnt > 0x1ff)
		SMBSDEBUG("word count too large (%d)\n", wcnt);
	if (wcnt & 1)
		SMBSDEBUG("odd word count\n");
	/* Fill in the word count (8-bits) */
	*rqp->sr_wcount = (wcnt >> 1);
}

/*
 * Mark location of the byte count, which is filled in later by
 * smb_rq_bend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the byte count happens to be 16-bit.
 */
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	uint_t bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBSDEBUG("no bcount\n");
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBSDEBUG("byte count too large (%d)\n", bcnt);
	/*
	 * Fill in the byte count (16-bits).
	 * The pointer is char * type because this
	 * 16-bit field is typically not 2-byte aligned.
	 */
	rqp->sr_bcount[0] = bcnt & 0xFF;
	rqp->sr_bcount[1] = (bcnt >> 8);
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return (EINTR);

	return (0);
#ifdef APPLE
	return (smb_sigintr(rqp->sr_cred->scr_vfsctx));
#endif
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return (0);
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return (0);
}

static int
smb_rq_getenv(struct smb_connobj *co,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = 0;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(co);
		if (co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;

	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if (co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(co->co_parent, &vcp, NULL);
		break;
	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
		error = EINVAL;
	}

out:
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}
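/*
 * Error translation note: if the server set SMB_FLAGS2_ERR_STATUS
 * in the reply flags2, sr_error holds a 32-bit NT status and is
 * mapped via smb_maperr32(); otherwise it holds a DOS-style error
 * (8-bit class, 16-bit code) and is mapped via smb_maperror().
 */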
/*
 * Wait for reply on the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	if (rqp->sr_timo == SMBNOREPLYWAIT)
		return (smb_iod_removerq(rqp));

	error = smb_iod_waitrq(rqp);
	if (error)
		return (error);
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return (error);
	error = md_get_uint8(mdp, &tb);
	error = md_get_uint32le(mdp, &rqp->sr_error);
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
		/*
		 * Do a special check for STATUS_BUFFER_OVERFLOW;
		 * it's not an error.
		 */
		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
			/*
			 * Don't report it as an error to our caller;
			 * they can look at rqp->sr_error if they
			 * need to know whether we got a
			 * STATUS_BUFFER_OVERFLOW.
			 * XXX - should we do that for all errors
			 * where (error & 0xC0000000) is 0x80000000,
			 * i.e. all warnings?
			 */
			rperror = 0;
		} else
			rperror = smb_maperr32(rqp->sr_error);
	} else {
		rqp->sr_errclass = rqp->sr_error & 0xff;
		rqp->sr_serror = rqp->sr_error >> 16;
		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	if (rperror == EMOREDATA) {
		rperror = E2BIG;
		rqp->sr_flags |= SMBR_MOREDATA;
	} else
		rqp->sr_flags &= ~SMBR_MOREDATA;

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);

	return ((error) ? error : rperror);
}


#define	ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 * TRANS implementation is in the "t2" routines
 * NT_TRANSACTION implementation is the separate "nt" stuff
 */
int
smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
	if (t2p == NULL)
		return (ENOMEM);
	error = smb_t2_init(t2p, layer, &setup, 1, scred);
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return (error);
	}
	*t2pp = t2p;
	return (0);
}

int
smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
	struct smb_ntrq **ntpp)
{
	struct smb_ntrq *ntp;
	int error;

	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
	if (ntp == NULL)
		return (ENOMEM);
	error = smb_nt_init(ntp, layer, fn, scred);
	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
	ntp->nt_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_nt_done(ntp);
		return (error);
	}
	*ntpp = ntp;
	return (0);
}
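/*
 * Life cycle of a "t2" (or "nt") transaction, as used below:
 * allocate with smb_t2_alloc(), marshal the outgoing parameter
 * and data areas into t2_tparam/t2_tdata, call smb_t2_request()
 * to run the (possibly multi-packet) exchange, then parse the
 * reply out of t2_rparam/t2_rdata and free with smb_t2_done().
 */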
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
	int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL);	/* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);
	return (0);
}

int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
	struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL);	/* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	mutex_destroy(&t2p->t2_lock);
	cv_destroy(&t2p->t2_cond);
	/* Only free heap-allocated requests (see smb_nt_done). */
	if (t2p->t2_flags & SMBT2_ALLOCED)
		kmem_free(t2p, sizeof (*t2p));
}

u_int32_t
smb_t2_err(struct smb_t2rq *t2p)
{
	/* mask off "severity" and the "component" bit */
	return (t2p->t2_sr_error & ~(0xe0000000));
}

void
smb_nt_done(struct smb_ntrq *ntp)
{
	mb_done(&ntp->nt_tsetup);
	mb_done(&ntp->nt_tparam);
	mb_done(&ntp->nt_tdata);
	md_done(&ntp->nt_rparam);
	md_done(&ntp->nt_rdata);
	cv_destroy(&ntp->nt_cond);
	mutex_destroy(&ntp->nt_lock);
	if (ntp->nt_flags & SMBT2_ALLOCED)
		kmem_free(ntp, sizeof (*ntp));
}

/*
 * Extract data [offset,count] from mtop and add to mdp.
 */
static int
smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	mblk_t *n;

	n = m_copym(mtop, offset, count, M_WAITOK);
	if (n == NULL)
		return (EBADRPC);

	if (mdp->md_top == NULL) {
		md_initm(mdp, n);
	} else
		m_cat(mdp->md_top, n);

	return (0);
}
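/*
 * Collect the reply (or replies) to a TRANS/TRANS2 request.
 * A server may return the parameter and data areas spread over
 * several response messages; each response carries the total
 * counts plus the count, offset, and displacement of the piece
 * it holds.  Pieces are appended to t2_rparam/t2_rdata until
 * the totals are reached.  Out-of-order pieces are rejected.
 */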
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
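/*
 * smb_nt_reply() is the NT_TRANSACT analog of smb_t2_reply()
 * above: same reassembly scheme, but the counts, offsets, and
 * displacements are 32-bit and the fixed reply part is larger
 * (word count 18 instead of 10).
 */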
static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses.  The CIFS
	 * specification says that they can be misordered, which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
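/*
 * The primary request carries as much of the parameter and data
 * areas as fits under the transport maximum (vc_txmax); whatever
 * is left over is sent in SMB_COM_TRANSACTION(2)_SECONDARY
 * requests.  poff/doff are offsets from the start of the SMB
 * message and are kept 4-byte aligned.
 */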
/*
 * Perform a full round of TRANS2 request
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)	/* maxvalue for ushort_t */
			return (EINVAL);
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);		/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);

	/*
	 * The packet size is now known:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1).
	 * Decide which parts go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		/*
		 * Other client traffic seems to "ALIGN2" here.  The extra
		 * 2 byte pad we use has no observed downside and may be
		 * required for some old servers(?)
		 */
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++) {
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
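	/*
	 * Send SMB_COM_TRANSACTION(2)_SECONDARY requests until the
	 * remaining parameter and data bytes have all gone out.
	 * Each secondary repeats the totals and gives the
	 * displacement of the piece it carries; a T2 secondary
	 * also carries the FID.
	 */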
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY :
		    SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known:
		 * ALIGN4(len + 7 * 2 + 2) for a T2 request, 2 less for T.
		 * Decide which parts go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_t2_reply(t2p);
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}


/*
 * Perform a full round of NT_TRANSACTION request
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * The packet size is now known:
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2).
	 * Decide which parts go into the first request.
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0);	/* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * The packet size is now known:
		 * ALIGN4(len + 6 * 4 + 2).
		 * Decide which parts go into this request.
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
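/*
 * Retry policy for the wrappers below: a transaction whose flags
 * come back with SMBT2_RESTART set (and SMBT2_NORESTART clear) is
 * re-sent up to SMBMAXRESTARTS times, pausing SMB_RCNDELAY seconds
 * between attempts.
 */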
int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_t2_request_int()
		 */
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (!error)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(t2p)->t2_lock);
		if (t2p->t2_share && t2p->t2_share->ss_mount) {
			cv_timedwait(&t2p->t2_cond, &(t2p)->t2_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			/* delay() takes a relative tick count */
			delay(hz * SMB_RCNDELAY);
		}
		mutex_exit(&(t2p)->t2_lock);
	}
	return (error);
}


int
smb_nt_request(struct smb_ntrq *ntp)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_nt_request_int()
		 */
		ntp->nt_flags &= ~SMBT2_RESTART;
		error = smb_nt_request_int(ntp);
		if (!error)
			break;
		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(ntp)->nt_lock);
		if (ntp->nt_share && ntp->nt_share->ss_mount) {
			cv_timedwait(&ntp->nt_cond, &(ntp)->nt_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			/* delay() takes a relative tick count */
			delay(hz * SMB_RCNDELAY);
		}
		mutex_exit(&(ntp)->nt_lock);
	}
	return (error);
}