/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");

MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);

static int smb_rq_reply(struct smb_rq *rqp);
static int smb_rq_enqueue(struct smb_rq *rqp);
static int smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int smb_rq_new(struct smb_rq *rqp, u_char cmd);
static int smb_t2_reply(struct smb_t2rq *t2p);

int
smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	MALLOC(rqp, struct smb_rq *, sizeof(*rqp), M_SMBRQ, M_WAITOK);
	if (rqp == NULL)
		return ENOMEM;
	error = smb_rq_init(rqp, layer, cmd, scred);
	rqp->sr_flags |= SMBR_ALLOCED;
	if (error) {
		smb_rq_done(rqp);
		return error;
	}
	*rqpp = rqp;
	return 0;
}

static char tzero[12];

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}

/*
 * (Re)initialize the request: reset the mbuf chains and build a fresh
 * SMB header for the given command.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		mb_put_uint16le(mbp, (vcp->vc_hflags2 & ~SMB_FLAGS2_UNICODE));
	else
		mb_put_uint16le(mbp, vcp->vc_hflags2);
	mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
	rqp->sr_rqtid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = (u_int16_t*)mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}

void
smb_rq_done(struct smb_rq *rqp)
{
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	smb_sl_destroy(&rqp->sr_slock);
	if (rqp->sr_flags & SMBR_ALLOCED)
		free(rqp, M_SMBRQ);
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = vcp->vc_timo;
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error)
			return error;
		error = smb_rq_reply(rqp);
		if (error == 0)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
			break;
	}
	return error;
}

/*
 * Hand the request to the iod; if the share is being reconnected, wait
 * for the reconnect (or issue a tree connect) first.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_proc_intr(rqp->sr_cred->scr_td->td_proc))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		if (error != EXDEV)
			break;
	}
	return error;
}

void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	if (rqp->sr_wcount == NULL) {
		SMBERROR("no wcount\n");	/* actually panic */
		return;
	}
	if (rqp->sr_rq.mb_count & 1)
		SMBERROR("odd word count\n");
	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
}

void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = (u_short*)mb_reserve(&rqp->sr_rq, sizeof(u_short));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	int bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBERROR("no bcount\n");	/* actually panic */
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBERROR("byte count too large (%d)\n", bcnt);
	*rqp->sr_bcount = htole16(bcnt);
}

int
smb_rq_intr(struct smb_rq *rqp)
{
	struct proc *p = rqp->sr_cred->scr_td->td_proc;

	if (rqp->sr_flags & SMBR_INTR)
		return EINTR;
	return smb_proc_intr(p);
}

int
smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
{
	*mbpp = &rqp->sr_rq;
	return 0;
}

int
smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
{
	*mbpp = &rqp->sr_rp;
	return 0;
}

/*
 * Resolve the VC and (optionally) the share that a connection object
 * belongs to.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}

/*
 * Wait for a reply to the request and parse the SMB header of the response.
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;
	u_int8_t tb;
	int error, rperror = 0;

	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);
		error = md_get_uint8(mdp, &tb);
		error = md_get_uint16le(mdp, &rqp->sr_serror);
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	return error ? error : rperror;
}

#define ALIGN4(a)	(((a) + 3) & ~3)

/*
 * TRANS2 request implementation
 */
int
smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	MALLOC(t2p, struct smb_t2rq *, sizeof(*t2p), M_SMBRQ, M_WAITOK);
	if (t2p == NULL)
		return ENOMEM;
	error = smb_t2_init(t2p, layer, setup, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return error;
	}
	*t2pp = t2p;
	return 0;
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
	struct smb_cred *scred)
{
	int error;

	bzero(t2p, sizeof(*t2p));
	t2p->t2_source = source;
	t2p->t2_setupcount = 1;
	t2p->t2_setupdata = t2p->t2_setup;
	t2p->t2_setup[0] = setup;
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return error;
	return 0;
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		free(t2p, M_SMBRQ);
}

/*
 * Split 'count' bytes starting at 'offset' out of the reply mbuf chain
 * and append them to the parameter or data mdchain.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m, *m0;
	int len;

	m0 = m_split(mtop, offset, M_TRYWAIT);
	if (m0 == NULL)
		return EBADRPC;
	len = m_length(m0, &m);
	m->m_len -= len - count;
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}

static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subsequent responses.  The CIFS specification
	 * says that they can arrive out of order, which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
		/* tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}

/*
 * Perform a full round of TRANS2 request
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)	/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);		/* flags */
	mb_put_uint32le(mbp, 0);		/* Timeout */
	mb_put_uint16le(mbp, 0);		/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * Now we know the request size so far:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1).
	 * Decide which parts should go into the first request.
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);			/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);			/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the request size so far:
		 * ALIGN4(len + 7 * 2 + 2) for a TRANS2 request, and two bytes
		 * less for a TRANS one.  Decide which parts go into this
		 * request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);		/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);	/* free the request only after sr_flags was examined */
	return error;
}

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; i < SMB_MAXRCN; i++) {
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (error == 0)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
			break;
	}
	return error;
}
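
#if 0
/*
 * Illustrative sketch only, not compiled: a hypothetical caller driving the
 * simple request path defined above.  Word parameters are framed with
 * smb_rq_wstart()/smb_rq_wend() and the byte payload with
 * smb_rq_bstart()/smb_rq_bend(); smb_rq_simple() then sends the request and
 * waits for the reply.  The SMB_COM_ECHO command and the payload values are
 * placeholders chosen for the example, not part of this file's interface.
 */
static int
smb_rq_usage_sketch(struct smb_connobj *layer, struct smb_cred *scred)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;

	error = smb_rq_alloc(layer, SMB_COM_ECHO, scred, &rqp);
	if (error)
		return error;
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, 1);	/* one word parameter: echo count */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint16le(mbp, 0);	/* two bytes of echo data */
	smb_rq_bend(rqp);
	error = smb_rq_simple(rqp);	/* enqueue and wait for the reply */
	if (error == 0)
		smb_rq_getreply(rqp, &mdp);	/* reply body follows the parsed header */
	smb_rq_done(rqp);
	return error;
}
#endif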