/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/cmn_err.h>
#include <sys/sdt.h>

#ifdef APPLE
#include <sys/smb_apple.h>
#else
#include <netsmb/smb_osdep.h>
#endif

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_rq.h>

static int smb_rq_reply(struct smb_rq *rqp);
static int smb_rq_enqueue(struct smb_rq *rqp);
static int smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp);
static int smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
static int smb_t2_reply(struct smb_t2rq *t2p);
static int smb_nt_reply(struct smb_ntrq *ntp);


int
smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
	if (rqp == NULL)
		return (ENOMEM);
	error = smb_rq_init(rqp, layer, cmd, scred);
	if (error) {
		smb_rq_done(rqp);
		return (error);
	}
	rqp->sr_flags |= SMBR_ALLOCED;
	*rqpp = rqp;
	return (0);
}


int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* XXX no ref hold */
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	error = smb_rq_new(rqp, cmd);
	if (!error) {
		rqp->sr_flags |= SMBR_VCREF;
		smb_vc_hold(rqp->sr_vc);
	}
	return (error);
}

static int
smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	static char tzero[12];
	caddr_t ptr;
	pid_t pid;

	ASSERT(rqp != NULL);
	ASSERT(rqp->sr_cred != NULL);
	pid = rqp->sr_cred->vc_pid;
	rqp->sr_sendcnt = 0;
	rqp->sr_cmd = cmd;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return (error);
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);
	mb_put_uint8(mbp, vcp->vc_hflags);
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		mb_put_uint16le(mbp, (vcp->vc_hflags2 & ~SMB_FLAGS2_UNICODE));
	else
		mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rqtid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, (u_int16_t)(pid));
	ptr = mb_reserve(mbp, sizeof (u_int16_t));
	/*LINTED*/
	ASSERT(ptr == (caddr_t)((u_int16_t *)ptr));
	/*LINTED*/
	rqp->sr_rquid = (u_int16_t *)ptr;
	mb_put_uint16le(mbp, rqp->sr_mid);
	return (0);
}

void
smb_rq_done(struct smb_rq *rqp)
{
	/* No locks.  Last ref. here. */
	if (rqp->sr_flags & SMBR_VCREF) {
		rqp->sr_flags &= ~SMBR_VCREF;
		smb_vc_rele(rqp->sr_vc);
	}
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	mutex_destroy(&rqp->sr_lock);
	cv_destroy(&rqp->sr_cond);
	if (rqp->sr_flags & SMBR_ALLOCED)
		kmem_free(rqp, sizeof (*rqp));
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
{
	int error = EINVAL;

	for (;;) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue.
		 */
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = timeout;	/* in seconds */
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error) {
			break;
		}
		error = smb_rq_reply(rqp);
		if (!error)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
		    SMBR_RESTART)
			break;
		if (rqp->sr_rexmit <= 0)
			break;
		SMBRQ_LOCK(rqp);
		if (rqp->sr_share && rqp->sr_share->ss_mount) {
			cv_timedwait(&rqp->sr_cond, &(rqp)->sr_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			delay(lbolt + (hz * SMB_RCNDELAY));
		}
		SMBRQ_UNLOCK(rqp);
		rqp->sr_rexmit--;
#ifdef XXX
		timeout *= 2;
#endif
	}
	return (error);
}


int
smb_rq_simple(struct smb_rq *rqp)
{
	return (smb_rq_simple_timed(rqp, smb_timo_default));
}

static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	/*
	 * Unfortunate special case needed for
	 * tree disconnect, which needs sr_share
	 * but should skip the reconnect check.
	 */
	if (rqp->sr_cmd == SMB_COM_TREE_DISCONNECT)
		ssp = NULL;

	/*
	 * If this is an "internal" request, bypass any
	 * wait for connection state changes, etc.
	 * This request is making those changes.
	 */
	if (rqp->sr_flags & SMBR_INTERNAL) {
		ASSERT(ssp == NULL);
		goto just_doit;
	}

	/*
	 * Wait for VC reconnect to finish...
	 * XXX: Deal with reconnect later.
	 * Just bail out for now.
	 *
	 * MacOS might check vfs_isforce() here.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
		return (ENOTCONN);
	}

	/*
	 * If this request has a "share" object:
	 * 1: Deny access if share is _GONE (unmounted)
	 * 2: Wait for state changes in that object,
	 *    Initiate share (re)connect if needed.
	 * XXX: Not really doing 2 yet.
	 */
	if (ssp) {
		if (ssp->ss_flags & SMBS_GONE)
			return (ENOTCONN);
		SMB_SS_LOCK(ssp);
		if (!smb_share_valid(ssp)) {
			error = smb_share_tcon(ssp);
		}
		SMB_SS_UNLOCK(ssp);
	}

	if (!error) {
	just_doit:
		error = smb_iod_addrq(rqp);
	}

	return (error);
}

/*
 * Mark location of the word count, which is filled in later by
 * smb_rq_wend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the word count happens to be 8-bit.
 */
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	uint_t wcnt;

	if (rqp->sr_wcount == NULL) {
		SMBSDEBUG("no wcount\n");
		return;
	}
	wcnt = rqp->sr_rq.mb_count;
	if (wcnt > 0x1ff)
		SMBSDEBUG("word count too large (%d)\n", wcnt);
	if (wcnt & 1)
		SMBSDEBUG("odd word count\n");
	/* Fill in the word count (8-bits) */
	*rqp->sr_wcount = (wcnt >> 1);
}

/*
 * Mark location of the byte count, which is filled in later by
 * smb_rq_bend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the byte count happens to be 16-bit.
 */
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	uint_t bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBSDEBUG("no bcount\n");
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBSDEBUG("byte count too large (%d)\n", bcnt);
	/*
	 * Fill in the byte count (16-bits)
	 * The pointer is char * type due to
	 * typical off-by-one alignment.
	 */
	rqp->sr_bcount[0] = bcnt & 0xFF;
	rqp->sr_bcount[1] = (bcnt >> 8);
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return (EINTR);

#ifdef APPLE
	return (smb_sigintr(rqp->sr_cred->scr_vfsctx));
#else
	return (0);
#endif
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return (0);
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return (0);
}

static int
smb_rq_getenv(struct smb_connobj *co,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = 0;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(co);
		if (co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;

	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if (co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(co->co_parent, &vcp, NULL);
		break;
	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
		error = EINVAL;
	}

out:
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}

/*
 * Wait for reply on the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	if (rqp->sr_timo == SMBNOREPLYWAIT)
		return (smb_iod_removerq(rqp));

	error = smb_iod_waitrq(rqp);
	if (error)
		return (error);
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return (error);
	error = md_get_uint8(mdp, &tb);
	error = md_get_uint32le(mdp, &rqp->sr_error);
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
		/*
		 * Do a special check for STATUS_BUFFER_OVERFLOW;
		 * it's not an error.
		 */
		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
			/*
			 * Don't report it as an error to our caller;
			 * they can look at rqp->sr_error if they
			 * need to know whether we got a
			 * STATUS_BUFFER_OVERFLOW.
			 * XXX - should we do that for all errors
			 * where (error & 0xC0000000) is 0x80000000,
			 * i.e. all warnings?
			 */
			rperror = 0;
		} else
			rperror = smb_maperr32(rqp->sr_error);
	} else {
		rqp->sr_errclass = rqp->sr_error & 0xff;
		rqp->sr_serror = rqp->sr_error >> 16;
		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	if (rperror == EMOREDATA) {
		rperror = E2BIG;
		rqp->sr_flags |= SMBR_MOREDATA;
	} else
		rqp->sr_flags &= ~SMBR_MOREDATA;

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);

	return ((error) ? error : rperror);
}
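
/*
 * Putting the routines above together: typical simple-request usage,
 * for orientation.  This is only a sketch; the command, share object,
 * and payload are hypothetical placeholders:
 *
 *	struct smb_rq *rqp;
 *	struct mbchain *mbp;
 *
 *	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_ECHO, scred, &rqp);
 *	if (error)
 *		return (error);
 *	smb_rq_getrequest(rqp, &mbp);
 *	smb_rq_wstart(rqp);
 *	mb_put_uint16le(mbp, 1);			(parameter words)
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	mb_put_mem(mbp, data, dlen, MB_MSYSTEM);	(data bytes)
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);
 *	(on success, parse the reply from rqp->sr_rp)
 *	smb_rq_done(rqp);
 *
 * Worked framing example: one parameter word (2 bytes) and six data
 * bytes go out as WordCount = 2 >> 1 = 1 (one byte, filled in by
 * smb_rq_wend), the 2 parameter bytes, ByteCount = 6 (two bytes LE,
 * filled in by smb_rq_bend), then the 6 data bytes.  mb_count is
 * zeroed by wstart/bstart and advanced by the mb_put_* routines, so
 * callers never compute either count by hand.
 */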

#define	ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 * TRANS implementation is in the "t2" routines
 * NT_TRANSACTION implementation is the separate "nt" stuff
 */
int
smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
	if (t2p == NULL)
		return (ENOMEM);
	error = smb_t2_init(t2p, layer, &setup, 1, scred);
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return (error);
	}
	*t2pp = t2p;
	return (0);
}

int
smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
	struct smb_ntrq **ntpp)
{
	struct smb_ntrq *ntp;
	int error;

	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
	if (ntp == NULL)
		return (ENOMEM);
	error = smb_nt_init(ntp, layer, fn, scred);
	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
	ntp->nt_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_nt_done(ntp);
		return (error);
	}
	*ntpp = ntp;
	return (0);
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
	int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL);	/* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);
	return (0);
}

int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
	struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL);	/* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	mutex_destroy(&t2p->t2_lock);
	cv_destroy(&t2p->t2_cond);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		kmem_free(t2p, sizeof (*t2p));
}

u_int32_t
smb_t2_err(struct smb_t2rq *t2p)
{
	/* mask off "severity" and the "component" bit */
	return (t2p->t2_sr_error & ~(0xe0000000));
}

void
smb_nt_done(struct smb_ntrq *ntp)
{
	mb_done(&ntp->nt_tsetup);
	mb_done(&ntp->nt_tparam);
	mb_done(&ntp->nt_tdata);
	md_done(&ntp->nt_rparam);
	md_done(&ntp->nt_rdata);
	cv_destroy(&ntp->nt_cond);
	mutex_destroy(&ntp->nt_lock);
	if (ntp->nt_flags & SMBT2_ALLOCED)
		kmem_free(ntp, sizeof (*ntp));
}

/*
 * Extract data [offset,count] from mtop and add to mdp.
 */
static int
smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	mblk_t *n;

	n = m_copym(mtop, offset, count, M_WAITOK);
	if (n == NULL)
		return (EBADRPC);

	if (mdp->md_top == NULL) {
		md_initm(mdp, n);
	} else
		m_cat(mdp->md_top, n);

	return (0);
}
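
/*
 * smb_t2_reply() below reassembles a possibly multi-part transaction
 * response.  Rough shape of the loop, for orientation:
 *
 *	for (;;) {
 *		parse WordCount, the total counts, and this fragment's
 *		    param/data count, offset, and displacement;
 *		reject out-of-order fragments (displacement must equal
 *		    the number of bytes already received);
 *		smb_t2_placedata() m_copym()s the param and data ranges
 *		    at poff/doff onto t2_rparam/t2_rdata;
 *		if all promised bytes have arrived, stop;
 *		md_next_record() + smb_rq_reply() step to the next
 *		    response in the chain;
 *	}
 */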

static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);	/* SetupCount */
		md_get_uint8(mdp, NULL);	/* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
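
/*
 * Displacement check by example: a server returning 1000 parameter
 * bytes in two fragments sends pcount=600/pdisp=0, then
 * pcount=400/pdisp=600.  After the first fragment totpgot is 600, so
 * a second fragment with any other pdisp trips the "misordered" test
 * above and fails the transaction with EINVAL.
 */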

static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses.  The CIFS
	 * specification says that they can be misordered, which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM);	/* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);	/* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}

int md_get_mbuf(struct mdchain *mdp, int size, mblk_t **ret);
int mb_put_mbuf(struct mbchain *mbp, mblk_t *m);
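
/*
 * smb_t2_request_int() below sends a transaction in up to three
 * phases: a primary request carrying as much of the parameter/data
 * payload as vc_txmax allows, an interim server response
 * acknowledging that more is coming, and then SECONDARY requests for
 * the remainder via smb_iod_multirq().  Offsets (poff/doff) are
 * measured from the start of the SMB message and each payload section
 * is padded to a 4-byte boundary, which is why the sizing arithmetic
 * carries the explicit ALIGN4() terms.  smb_nt_request_int() further
 * below follows the same pattern for NT_TRANSACT, just with 32-bit
 * counts, offsets, and displacements.
 */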

/*
 * Perform a full round of TRANS2 request
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)	/* max value for a ushort_t */
			return (EINVAL);
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, 0);	/* flags */
	mb_put_uint32le(mbp, 0);	/* Timeout */
	mb_put_uint16le(mbp, 0);	/* reserved 2 */
	len = mb_fixhdr(mbp);

	/*
	 * Now we know the packet size will be
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
	 * and need to decide which parts should go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		/*
		 * Other client traffic seems to "ALIGN2" here.  The extra
		 * 2-byte pad we use has no observed downside and may be
		 * required for some old servers(?)
		 */
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++) {
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * This is an interim response; ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY :
		    SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the packet size will be
		 * ALIGN4(len + 6 * 2 + 2), plus 2 more for the FID
		 * word in the TRANS2 (no name) case.  Decide which
		 * parts should go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_t2_reply(t2p);
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}

/*
 * Perform a full round of NT_TRANSACTION request
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * Now we know the packet size will be
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
	 * and need to decide which parts should go into the first request.
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0);	/* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * This is an interim response; ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the packet size will be
		 * ALIGN4(len + 6 * 4 + 2), and need to decide
		 * which parts should go into this request.
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
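
/*
 * The exported wrappers below retry an exchange that was cut off by
 * connection loss: if the worker left SMBT2_RESTART set (and
 * SMBT2_NORESTART clear), the whole transaction is rebuilt and
 * resent, up to SMBMAXRESTARTS times, pausing SMB_RCNDELAY seconds
 * between attempts.
 */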

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_t2_request_int()
		 */
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (!error)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(t2p)->t2_lock);
		if (t2p->t2_share && t2p->t2_share->ss_mount) {
			cv_timedwait(&t2p->t2_cond, &(t2p)->t2_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			delay(lbolt + (hz * SMB_RCNDELAY));
		}
		mutex_exit(&(t2p)->t2_lock);
	}
	return (error);
}


int
smb_nt_request(struct smb_ntrq *ntp)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_nt_request_int()
		 */
		ntp->nt_flags &= ~SMBT2_RESTART;
		error = smb_nt_request_int(ntp);
		if (!error)
			break;
		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(ntp)->nt_lock);
		if (ntp->nt_share && ntp->nt_share->ss_mount) {
			cv_timedwait(&ntp->nt_cond, &(ntp)->nt_lock,
			    lbolt + (hz * SMB_RCNDELAY));
		} else {
			delay(lbolt + (hz * SMB_RCNDELAY));
		}
		mutex_exit(&(ntp)->nt_lock);
	}
	return (error);
}
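
/*
 * End-to-end TRANS2 usage, for orientation.  This is only a sketch;
 * the request code, info level, and sizes are hypothetical
 * placeholders for whatever the caller actually needs:
 *
 *	struct smb_t2rq *t2p;
 *	struct mbchain *mbp;
 *
 *	error = smb_t2_alloc(SSTOCP(ssp), SMB_TRANS2_QUERY_FS_INFORMATION,
 *	    scred, &t2p);
 *	if (error)
 *		return (error);
 *	mbp = &t2p->t2_tparam;
 *	mb_init(mbp);
 *	mb_put_uint16le(mbp, SMB_INFO_ALLOCATION);
 *	t2p->t2_maxpcount = 16;
 *	t2p->t2_maxdcount = 4096;
 *	error = smb_t2_request(t2p);
 *	(on success, parse t2p->t2_rparam and t2p->t2_rdata)
 *	smb_t2_done(t2p);
 */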