1 /*
2  * Copyright (c) 2000-2001, Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/time.h>
43 #include <sys/kmem.h>
44 #include <sys/proc.h>
45 #include <sys/lock.h>
46 #include <sys/socket.h>
47 #include <sys/mount.h>
48 #include <sys/sunddi.h>
49 #include <sys/cmn_err.h>
50 #include <sys/sdt.h>
51 
52 #include <netsmb/smb_osdep.h>
53 
54 #include <netsmb/smb.h>
55 #include <netsmb/smb_conn.h>
56 #include <netsmb/smb_subr.h>
57 #include <netsmb/smb_tran.h>
58 #include <netsmb/smb_rq.h>
59 
60 /*
61  * How long to wait before restarting a request (after reconnect)
62  */
63 #define	SMB_RCNDELAY		2	/* seconds */
64 
65 /*
66  * Leave this zero - we can't second-guess server side effects of
67  * duplicate ops; this isn't NFS!
68  */
69 #define	SMBMAXRESTARTS		0
70 
71 
72 static int  smb_rq_reply(struct smb_rq *rqp);
73 static int  smb_rq_enqueue(struct smb_rq *rqp);
74 static int  smb_rq_getenv(struct smb_connobj *layer,
75 		struct smb_vc **vcpp, struct smb_share **sspp);
76 static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
77 static int  smb_t2_reply(struct smb_t2rq *t2p);
78 static int  smb_nt_reply(struct smb_ntrq *ntp);
79 
80 
81 /*
82  * Done with a request object.  Free its contents.
83  * If it was allocated (SMBR_ALLOCED) free it too.
84  * Some of these are stack locals, not allocated.
85  *
86  * No locks here - this is the last ref.
87  */
88 void
89 smb_rq_done(struct smb_rq *rqp)
90 {
91 
92 	/*
93 	 * No smb_vc_rele() here - see smb_rq_init()
94 	 */
95 	mb_done(&rqp->sr_rq);
96 	md_done(&rqp->sr_rp);
97 	mutex_destroy(&rqp->sr_lock);
98 	cv_destroy(&rqp->sr_cond);
99 	if (rqp->sr_flags & SMBR_ALLOCED)
100 		kmem_free(rqp, sizeof (*rqp));
101 }
102 
103 int
104 smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
105 	struct smb_rq **rqpp)
106 {
107 	struct smb_rq *rqp;
108 	int error;
109 
110 	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
111 	if (rqp == NULL)
112 		return (ENOMEM);
113 	error = smb_rq_init(rqp, layer, cmd, scred);
114 	if (error) {
115 		smb_rq_done(rqp);
116 		return (error);
117 	}
118 	rqp->sr_flags |= SMBR_ALLOCED;
119 	*rqpp = rqp;
120 	return (0);
121 }
122 
123 int
124 smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
125 	struct smb_cred *scred)
126 {
127 	int error;
128 
129 	bzero(rqp, sizeof (*rqp));
130 	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
131 	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);
132 
133 	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
134 	if (error)
135 		return (error);
136 
137 	/*
138 	 * We copied a VC pointer (vcp) into rqp->sr_vc,
139 	 * but we do NOT do a smb_vc_hold here.  Instead,
140 	 * the caller is responsible for the hold on the
141 	 * share or the VC as needed.  For smbfs callers,
142 	 * the hold is on the share, via the smbfs mount.
143 	 * For nsmb ioctl callers, the hold is done when
144 	 * the driver handle gets VC or share references.
145 	 * This design avoids frequent hold/rele activity
146 	 * when creating and completing requests.
147 	 */
148 
149 	rqp->sr_rexmit = SMBMAXRESTARTS;
150 	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
151 	rqp->sr_pid = (uint16_t)ddi_get_pid();
152 	error = smb_rq_new(rqp, cmd);
153 
154 	return (error);
155 }
156 
157 static int
158 smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
159 {
160 	struct mbchain *mbp = &rqp->sr_rq;
161 	struct smb_vc *vcp = rqp->sr_vc;
162 	int error;
163 
164 	ASSERT(rqp != NULL);
165 
166 	rqp->sr_sendcnt = 0;
167 	rqp->sr_cmd = cmd;
168 
169 	mb_done(mbp);
170 	md_done(&rqp->sr_rp);
171 	error = mb_init(mbp);
172 	if (error)
173 		return (error);
174 
175 	/*
176 	 * Is this the right place to save the flags?
177 	 */
178 	rqp->sr_rqflags  = vcp->vc_hflags;
179 	rqp->sr_rqflags2 = vcp->vc_hflags2;
180 
181 	/*
182 	 * The SMB header is filled in later by
183 	 * smb_rq_fillhdr (see below)
184 	 * Just reserve space here.
185 	 */
186 	mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);
187 
188 	return (0);
189 }
190 
191 /*
192  * Given a request with its body already composed,
193  * rewind to the start and fill in the SMB header.
194  * This is called after the request is enqueued,
195  * so we have the final MID, sequence number, etc.
196  */
197 void
198 smb_rq_fillhdr(struct smb_rq *rqp)
199 {
200 	struct mbchain mbtmp, *mbp = &mbtmp;
201 	mblk_t *m;
202 
203 	/*
204 	 * Fill in the SMB header using a dup of the first mblk,
205 	 * which points at the same data but has its own wptr,
206 	 * so we can rewind without trashing the message.
207 	 */
208 	m = dupb(rqp->sr_rq.mb_top);
209 	m->b_wptr = m->b_rptr;	/* rewind */
210 	mb_initm(mbp, m);
211 
212 	mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
213 	mb_put_uint8(mbp, rqp->sr_cmd);
214 	mb_put_uint32le(mbp, 0);	/* status */
215 	mb_put_uint8(mbp, rqp->sr_rqflags);
216 	mb_put_uint16le(mbp, rqp->sr_rqflags2);
217 	mb_put_uint16le(mbp, 0);	/* pid-high */
218 	mb_put_mem(mbp, NULL, 8, MB_MZERO);	/* MAC sig. (later) */
219 	mb_put_uint16le(mbp, 0);	/* reserved */
220 	mb_put_uint16le(mbp, rqp->sr_rqtid);
221 	mb_put_uint16le(mbp, rqp->sr_pid);
222 	mb_put_uint16le(mbp, rqp->sr_rquid);
223 	mb_put_uint16le(mbp, rqp->sr_mid);
224 
225 	/* This will free the mblk from dupb. */
226 	mb_done(mbp);
227 }
228 
229 int
230 smb_rq_simple(struct smb_rq *rqp)
231 {
232 	return (smb_rq_simple_timed(rqp, smb_timo_default));
233 }
234 
235 /*
236  * Simple request-reply exchange
237  */
238 int
239 smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
240 {
241 	int error = EINVAL;
242 
243 	for (;;) {
244 		/*
245 		 * Don't send any new requests if force unmount is underway.
246 		 * This check was moved into smb_rq_enqueue.
247 		 */
248 		rqp->sr_flags &= ~SMBR_RESTART;
249 		rqp->sr_timo = timeout;	/* in seconds */
250 		rqp->sr_state = SMBRQ_NOTSENT;
251 		error = smb_rq_enqueue(rqp);
252 		if (error) {
253 			break;
254 		}
255 		error = smb_rq_reply(rqp);
256 		if (!error)
257 			break;
258 		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
259 		    SMBR_RESTART)
260 			break;
261 		if (rqp->sr_rexmit <= 0)
262 			break;
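		/*
		 * Wait a bit before restarting the request.  With a
		 * share, wait on sr_cond so the sleep can be cut
		 * short; otherwise just delay.
		 */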
263 		SMBRQ_LOCK(rqp);
264 		if (rqp->sr_share) {
265 			(void) cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
266 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
267 
268 		} else {
269 			delay(SEC_TO_TICK(SMB_RCNDELAY));
270 		}
271 		SMBRQ_UNLOCK(rqp);
272 		rqp->sr_rexmit--;
273 	}
274 	return (error);
275 }
276 
277 
278 static int
279 smb_rq_enqueue(struct smb_rq *rqp)
280 {
281 	struct smb_vc *vcp = rqp->sr_vc;
282 	struct smb_share *ssp = rqp->sr_share;
283 	int error = 0;
284 
285 	/*
286 	 * Normal requests may initiate a reconnect,
287 	 * and/or wait for state changes to finish.
288 	 * Some requests set the NORECONNECT flag
289 	 * to avoid all that (i.e. tree discon)
290 	 */
291 	if (rqp->sr_flags & SMBR_NORECONNECT) {
292 		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
293 			SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
294 			return (ENOTCONN);
295 		}
296 		if (ssp != NULL &&
297 		    ((ssp->ss_flags & SMBS_CONNECTED) == 0))
298 			return (ENOTCONN);
299 		goto ok_out;
300 	}
301 
302 	/*
303 	 * If we're not connected, initiate a reconnect
304 	 * and/or wait for an existing one to finish.
305 	 */
306 	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
307 		error = smb_iod_reconnect(vcp);
308 		if (error != 0)
309 			return (error);
310 	}
311 
312 	/*
313 	 * If this request has a "share" object
314 	 * that needs a tree connect, do it now.
315 	 */
316 	if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
317 		error = smb_share_tcon(ssp, rqp->sr_cred);
318 		if (error)
319 			return (error);
320 	}
321 
322 	/*
323 	 * We now know what UID + TID to use.
324 	 * Store them in the request.
325 	 */
326 ok_out:
327 	rqp->sr_rquid = vcp->vc_smbuid;
328 	rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
329 	error = smb_iod_addrq(rqp);
330 
331 	return (error);
332 }
333 
334 /*
335  * Mark location of the word count, which is filled in later by
336  * smb_rq_wend().  Also initialize the counter that it uses
337  * to figure out what value to fill in.
338  *
339  * Note that the word count happens to be 8-bit.
340  */
341 void
342 smb_rq_wstart(struct smb_rq *rqp)
343 {
344 	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
345 	rqp->sr_rq.mb_count = 0;
346 }
347 
348 void
349 smb_rq_wend(struct smb_rq *rqp)
350 {
351 	uint_t wcnt;
352 
353 	if (rqp->sr_wcount == NULL) {
354 		SMBSDEBUG("no wcount\n");
355 		return;
356 	}
357 	wcnt = rqp->sr_rq.mb_count;
358 	if (wcnt > 0x1ff)
359 		SMBSDEBUG("word count too large (%d)\n", wcnt);
360 	if (wcnt & 1)
361 		SMBSDEBUG("odd word count\n");
362 	/* Fill in the word count (8-bits) */
363 	*rqp->sr_wcount = (wcnt >> 1);
364 }
365 
366 /*
367  * Mark location of the byte count, which is filled in later by
368  * smb_rq_bend().  Also initialize the counter that it uses
369  * to figure out what value to fill in.
370  *
371  * Note that the byte count happens to be 16-bit.
372  */
373 void
374 smb_rq_bstart(struct smb_rq *rqp)
375 {
376 	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
377 	rqp->sr_rq.mb_count = 0;
378 }
379 
380 void
381 smb_rq_bend(struct smb_rq *rqp)
382 {
383 	uint_t bcnt;
384 
385 	if (rqp->sr_bcount == NULL) {
386 		SMBSDEBUG("no bcount\n");
387 		return;
388 	}
389 	bcnt = rqp->sr_rq.mb_count;
390 	if (bcnt > 0xffff)
391 		SMBSDEBUG("byte count too large (%d)\n", bcnt);
392 	/*
393 	 * Fill in the byte count (16-bits).
394 	 * Store it a byte at a time: this location
395 	 * is typically not 16-bit aligned.
396 	 */
397 	rqp->sr_bcount[0] = bcnt & 0xFF;
398 	rqp->sr_bcount[1] = (bcnt >> 8);
399 }
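
/*
 * Typical composition sequence for the word and byte counts
 * (see smb_t2_request_int below for a real example):
 *
 *	smb_rq_wstart(rqp);
 *	... put parameter words ...
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	... put data bytes ...
 *	smb_rq_bend(rqp);
 */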
400 
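/*
 * Return EINTR if this request has been marked
 * interrupted (SMBR_INTR).
 */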
401 int
402 smb_rq_intr(struct smb_rq *rqp)
403 {
404 	if (rqp->sr_flags & SMBR_INTR)
405 		return (EINTR);
406 
407 	return (0);
408 }
409 
410 static int
411 smb_rq_getenv(struct smb_connobj *co,
412 	struct smb_vc **vcpp, struct smb_share **sspp)
413 {
414 	struct smb_vc *vcp = NULL;
415 	struct smb_share *ssp = NULL;
416 	int error = EINVAL;
417 
418 	if (co->co_flags & SMBO_GONE) {
419 		SMBSDEBUG("zombie CO\n");
420 		error = EINVAL;
421 		goto out;
422 	}
423 
424 	switch (co->co_level) {
425 	case SMBL_SHARE:
426 		ssp = CPTOSS(co);
427 		if ((co->co_flags & SMBO_GONE) ||
428 		    co->co_parent == NULL) {
429 			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
430 			break;
431 		}
432 		/* instead of recursion... */
433 		co = co->co_parent;
434 		/* FALLTHROUGH */
435 	case SMBL_VC:
436 		vcp = CPTOVC(co);
437 		if ((co->co_flags & SMBO_GONE) ||
438 		    co->co_parent == NULL) {
439 			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
440 			break;
441 		}
442 		error = 0;
443 		break;
444 
445 	default:
446 		SMBSDEBUG("invalid level %d passed\n", co->co_level);
447 	}
448 
449 out:
450 	if (!error) {
451 		if (vcpp)
452 			*vcpp = vcp;
453 		if (sspp)
454 			*sspp = ssp;
455 	}
456 
457 	return (error);
458 }
459 
460 /*
461  * Wait for reply on the request
462  */
463 static int
464 smb_rq_reply(struct smb_rq *rqp)
465 {
466 	struct mdchain *mdp = &rqp->sr_rp;
467 	u_int8_t tb;
468 	int error, rperror = 0;
469 
470 	if (rqp->sr_timo == SMBNOREPLYWAIT) {
471 		smb_iod_removerq(rqp);
472 		return (0);
473 	}
474 
475 	error = smb_iod_waitrq(rqp);
476 	if (error)
477 		return (error);
478 
479 	/*
480 	 * If the request was signed, validate the
481 	 * signature on the response.
482 	 */
483 	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
484 		error = smb_rq_verify(rqp);
485 		if (error)
486 			return (error);
487 	}
488 
489 	/*
490 	 * Parse the SMB header
491 	 */
492 	error = md_get_uint32le(mdp, NULL);	/* 0xFF,'S','M','B' */
493 	if (error)
494 		return (error);
495 	error = md_get_uint8(mdp, &tb);			/* command */
496 	error = md_get_uint32le(mdp, &rqp->sr_error);	/* status */
497 	error = md_get_uint8(mdp, &rqp->sr_rpflags);
498 	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
499 	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
500 		/*
501 		 * Do a special check for STATUS_BUFFER_OVERFLOW;
502 		 * it's not an error.
503 		 */
504 		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
505 			/*
506 			 * Don't report it as an error to our caller;
507 			 * they can look at rqp->sr_error if they
508 			 * need to know whether we got a
509 			 * STATUS_BUFFER_OVERFLOW.
510 			 * XXX - should we do that for all errors
511 			 * where (error & 0xC0000000) is 0x80000000,
512 			 * i.e. all warnings?
513 			 */
514 			rperror = 0;
515 		} else
516 			rperror = smb_maperr32(rqp->sr_error);
517 	} else {
518 		rqp->sr_errclass = rqp->sr_error & 0xff;
519 		rqp->sr_serror = rqp->sr_error >> 16;
520 		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
521 	}
522 	if (rperror == EMOREDATA) {
523 		rperror = E2BIG;
524 		rqp->sr_flags |= SMBR_MOREDATA;
525 	} else
526 		rqp->sr_flags &= ~SMBR_MOREDATA;
527 
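	/*
	 * Skip 12 bytes: PID-high (2), the 8-byte MAC signature,
	 * and 2 reserved bytes, matching the layout written by
	 * smb_rq_fillhdr above.
	 */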
528 	error = md_get_uint32le(mdp, NULL);
529 	error = md_get_uint32le(mdp, NULL);
530 	error = md_get_uint32le(mdp, NULL);
531 
532 	error = md_get_uint16le(mdp, &rqp->sr_rptid);
533 	error = md_get_uint16le(mdp, &rqp->sr_rppid);
534 	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
535 	error = md_get_uint16le(mdp, &rqp->sr_rpmid);
536 
537 	return ((error) ? error : rperror);
538 }
539 
540 
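/*
 * Round up to the 4-byte alignment used for the parameter and
 * data sections of transaction requests.
 */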
541 #define	ALIGN4(a)	(((a) + 3) & ~3)
542 
543 /*
544  * TRANS2 request implementation
545  * TRANS implementation is in the "t2" routines
546  * NT_TRANSACTION implementation is the separate "nt" stuff
547  */
548 int
549 smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
550 	struct smb_t2rq **t2pp)
551 {
552 	struct smb_t2rq *t2p;
553 	int error;
554 
555 	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
556 	if (t2p == NULL)
557 		return (ENOMEM);
558 	error = smb_t2_init(t2p, layer, &setup, 1, scred);
559 	t2p->t2_flags |= SMBT2_ALLOCED;
560 	if (error) {
561 		smb_t2_done(t2p);
562 		return (error);
563 	}
564 	*t2pp = t2p;
565 	return (0);
566 }
567 
568 int
569 smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
570 	struct smb_ntrq **ntpp)
571 {
572 	struct smb_ntrq *ntp;
573 	int error;
574 
575 	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
576 	if (ntp == NULL)
577 		return (ENOMEM);
578 	error = smb_nt_init(ntp, layer, fn, scred);
579 	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
580 	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
581 	ntp->nt_flags |= SMBT2_ALLOCED;
582 	if (error) {
583 		smb_nt_done(ntp);
584 		return (error);
585 	}
586 	*ntpp = ntp;
587 	return (0);
588 }
589 
590 int
591 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
592 	int setupcnt, struct smb_cred *scred)
593 {
594 	int i;
595 	int error;
596 
597 	bzero(t2p, sizeof (*t2p));
598 	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
599 	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);
600 
601 	t2p->t2_source = source;
602 	t2p->t2_setupcount = (u_int16_t)setupcnt;
603 	t2p->t2_setupdata = t2p->t2_setup;
604 	for (i = 0; i < setupcnt; i++)
605 		t2p->t2_setup[i] = setup[i];
606 	t2p->t2_fid = 0xffff;
607 	t2p->t2_cred = scred;
608 	t2p->t2_share = (source->co_level == SMBL_SHARE ?
609 	    CPTOSS(source) : NULL); /* for smb up/down */
610 	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
611 	if (error)
612 		return (error);
613 	return (0);
614 }
615 
616 int
617 smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
618 	struct smb_cred *scred)
619 {
620 	int error;
621 
622 	bzero(ntp, sizeof (*ntp));
623 	ntp->nt_source = source;
624 	ntp->nt_function = fn;
625 	ntp->nt_cred = scred;
626 	ntp->nt_share = (source->co_level == SMBL_SHARE ?
627 	    CPTOSS(source) : NULL); /* for smb up/down */
628 	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
629 	if (error)
630 		return (error);
631 	return (0);
632 }
633 
634 void
635 smb_t2_done(struct smb_t2rq *t2p)
636 {
637 	mb_done(&t2p->t2_tparam);
638 	mb_done(&t2p->t2_tdata);
639 	md_done(&t2p->t2_rparam);
640 	md_done(&t2p->t2_rdata);
641 	mutex_destroy(&t2p->t2_lock);
642 	cv_destroy(&t2p->t2_cond);
643 	if (t2p->t2_flags & SMBT2_ALLOCED)
644 		kmem_free(t2p, sizeof (*t2p));
645 }
646 
647 u_int32_t
648 smb_t2_err(struct smb_t2rq *t2p)
649 {
650 	/* mask off "severity" and the "component" bit */
651 	return (t2p->t2_sr_error & ~(0xe0000000));
652 }
653 
654 void
655 smb_nt_done(struct smb_ntrq *ntp)
656 {
657 	mb_done(&ntp->nt_tsetup);
658 	mb_done(&ntp->nt_tparam);
659 	mb_done(&ntp->nt_tdata);
660 	md_done(&ntp->nt_rparam);
661 	md_done(&ntp->nt_rdata);
662 	cv_destroy(&ntp->nt_cond);
663 	mutex_destroy(&ntp->nt_lock);
664 	if (ntp->nt_flags & SMBT2_ALLOCED)
665 		kmem_free(ntp, sizeof (*ntp));
666 }
667 
668 /*
669  * Extract data [offset,count] from mtop and add to mdp.
670  */
671 static int
672 smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
673 	struct mdchain *mdp)
674 {
675 	mblk_t *n;
676 
677 	n = m_copym(mtop, offset, count, M_WAITOK);
678 	if (n == NULL)
679 		return (EBADRPC);
680 
681 	if (mdp->md_top == NULL) {
682 		md_initm(mdp, n);
683 	} else
684 		m_cat(mdp->md_top, n);
685 
686 	return (0);
687 }
688 
689 static int
690 smb_t2_reply(struct smb_t2rq *t2p)
691 {
692 	struct mdchain *mdp;
693 	struct smb_rq *rqp = t2p->t2_rq;
694 	int error, error2, totpgot, totdgot;
695 	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
696 	u_int16_t tmp, bc, dcount;
697 	u_int8_t wc;
698 
699 	t2p->t2_flags &= ~SMBT2_MOREDATA;
700 
701 	error = smb_rq_reply(rqp);
702 	if (rqp->sr_flags & SMBR_MOREDATA)
703 		t2p->t2_flags |= SMBT2_MOREDATA;
704 	t2p->t2_sr_errclass = rqp->sr_errclass;
705 	t2p->t2_sr_serror = rqp->sr_serror;
706 	t2p->t2_sr_error = rqp->sr_error;
707 	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
708 	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
709 		return (error);
710 	/*
711 	 * Now we have to get all subsequent responses, if any.
712 	 * The CIFS specification says that they can be misordered,
713 	 * which is weird.
714 	 * TODO: timo
715 	 */
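	/*
	 * Running totals start at zero; the advertised totals start
	 * at the maximum and are clamped down below, so we end up
	 * with the smallest totals any response advertised.
	 */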
716 	totpgot = totdgot = 0;
717 	totpcount = totdcount = 0xffff;
718 	mdp = &rqp->sr_rp;
719 	for (;;) {
720 		DTRACE_PROBE2(smb_trans_reply,
721 		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
722 		m_dumpm(mdp->md_top);
723 
724 		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
725 			break;
726 		if (wc < 10) {	/* response has at least 10 words */
727 			error2 = ENOENT;
728 			break;
729 		}
730 		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
731 			break;
732 		if (totpcount > tmp)
733 			totpcount = tmp;
734 		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
735 			break;
736 		if (totdcount > tmp)
737 			totdcount = tmp;
738 		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
739 		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
740 		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
741 		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
742 			break;
743 		if (pcount != 0 && pdisp != totpgot) {
744 			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
745 			    pdisp, totpgot);
746 			error2 = EINVAL;
747 			break;
748 		}
749 		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
750 		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
751 		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
752 			break;
753 		if (dcount != 0 && ddisp != totdgot) {
754 			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
755 			    dcount);
756 			error2 = EINVAL;
757 			break;
758 		}
759 
760 		/* XXX: Skip setup words?  We don't save them? */
761 		md_get_uint8(mdp, &wc);  /* SetupCount */
762 		md_get_uint8(mdp, NULL); /* Reserved2 */
763 		tmp = wc;
764 		while (tmp--)
765 			md_get_uint16le(mdp, NULL);
766 
767 		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
768 			break;
769 
770 		/*
771 		 * There may be pad bytes here; the poff and doff values
772 		 * say exactly where the parameters and data begin, so
773 		 * there is no need to guess at the padding size.
774 		 */
775 		if (pcount) {
776 			error2 = smb_t2_placedata(mdp->md_top, poff,
777 			    pcount, &t2p->t2_rparam);
778 			if (error2)
779 				break;
780 		}
781 		totpgot += pcount;
782 
783 		if (dcount) {
784 			error2 = smb_t2_placedata(mdp->md_top, doff,
785 			    dcount, &t2p->t2_rdata);
786 			if (error2)
787 				break;
788 		}
789 		totdgot += dcount;
790 
791 		if (totpgot >= totpcount && totdgot >= totdcount) {
792 			error2 = 0;
793 			t2p->t2_flags |= SMBT2_ALLRECV;
794 			break;
795 		}
796 		/*
797 		 * We're done with this reply, look for the next one.
798 		 */
799 		SMBRQ_LOCK(rqp);
800 		md_next_record(&rqp->sr_rp);
801 		SMBRQ_UNLOCK(rqp);
802 		error2 = smb_rq_reply(rqp);
803 		if (rqp->sr_flags & SMBR_MOREDATA)
804 			t2p->t2_flags |= SMBT2_MOREDATA;
805 		if (!error2)
806 			continue;
807 		t2p->t2_sr_errclass = rqp->sr_errclass;
808 		t2p->t2_sr_serror = rqp->sr_serror;
809 		t2p->t2_sr_error = rqp->sr_error;
810 		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
811 		error = error2;
812 		if (!(rqp->sr_flags & SMBR_MOREDATA))
813 			break;
814 	}
815 	return (error ? error : error2);
816 }
817 
818 static int
819 smb_nt_reply(struct smb_ntrq *ntp)
820 {
821 	struct mdchain *mdp;
822 	struct smb_rq *rqp = ntp->nt_rq;
823 	int error, error2;
824 	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
825 	u_int32_t tmp, dcount, totpgot, totdgot;
826 	u_int16_t bc;
827 	u_int8_t wc;
828 
829 	ntp->nt_flags &= ~SMBT2_MOREDATA;
830 
831 	error = smb_rq_reply(rqp);
832 	if (rqp->sr_flags & SMBR_MOREDATA)
833 		ntp->nt_flags |= SMBT2_MOREDATA;
834 	ntp->nt_sr_error = rqp->sr_error;
835 	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
836 	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
837 		return (error);
838 	/*
839 	 * Now we have to get all subsequent responses. The CIFS specification
840 	 * says that they can be misordered, which is weird.
841 	 * TODO: timo
842 	 */
843 	totpgot = totdgot = 0;
844 	totpcount = totdcount = 0xffffffff;
845 	mdp = &rqp->sr_rp;
846 	for (;;) {
847 		DTRACE_PROBE2(smb_trans_reply,
848 		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
849 		m_dumpm(mdp->md_top);
850 
851 		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
852 			break;
853 		if (wc < 18) {	/* response has at least 18 words */
854 			error2 = ENOENT;
855 			break;
856 		}
857 		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
858 		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
859 			break;
860 		if (totpcount > tmp)
861 			totpcount = tmp;
862 		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
863 			break;
864 		if (totdcount > tmp)
865 			totdcount = tmp;
866 		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
867 		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
868 		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
869 			break;
870 		if (pcount != 0 && pdisp != totpgot) {
871 			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
872 			    pdisp, totpgot);
873 			error2 = EINVAL;
874 			break;
875 		}
876 		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
877 		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
878 		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
879 			break;
880 		if (dcount != 0 && ddisp != totdgot) {
881 			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
882 			    dcount);
883 			error2 = EINVAL;
884 			break;
885 		}
886 
887 		/* XXX: Skip setup words?  We don't save them? */
888 		md_get_uint8(mdp, &wc);  /* SetupCount */
889 		tmp = wc;
890 		while (tmp--)
891 			md_get_uint16le(mdp, NULL);
892 
893 		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
894 			break;
895 
896 		/*
897 		 * There may be pad bytes here; the poff and doff values
898 		 * say exactly where the parameters and data begin, so
899 		 * there is no need to guess at the padding size.
900 		 */
901 		if (pcount) {
902 			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
903 			    &ntp->nt_rparam);
904 			if (error2)
905 				break;
906 		}
907 		totpgot += pcount;
908 
909 		if (dcount) {
910 			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
911 			    &ntp->nt_rdata);
912 			if (error2)
913 				break;
914 		}
915 		totdgot += dcount;
916 
917 		if (totpgot >= totpcount && totdgot >= totdcount) {
918 			error2 = 0;
919 			ntp->nt_flags |= SMBT2_ALLRECV;
920 			break;
921 		}
922 		/*
923 		 * We're done with this reply, look for the next one.
924 		 */
925 		SMBRQ_LOCK(rqp);
926 		md_next_record(&rqp->sr_rp);
927 		SMBRQ_UNLOCK(rqp);
928 		error2 = smb_rq_reply(rqp);
929 		if (rqp->sr_flags & SMBR_MOREDATA)
930 			ntp->nt_flags |= SMBT2_MOREDATA;
931 		if (!error2)
932 			continue;
933 		ntp->nt_sr_error = rqp->sr_error;
934 		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
935 		error = error2;
936 		if (!(rqp->sr_flags & SMBR_MOREDATA))
937 			break;
938 	}
939 	return (error ? error : error2);
940 }
941 
942 /*
943  * Perform a full round of TRANS2 request
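 * The outgoing parameter and data areas are split across the
 * initial request and as many SECONDARY requests as needed to
 * fit under vc_txmax; smb_t2_reply() then collects the reply,
 * which may span multiple response messages as well.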
944  */
945 static int
946 smb_t2_request_int(struct smb_t2rq *t2p)
947 {
948 	struct smb_vc *vcp = t2p->t2_vc;
949 	struct smb_cred *scred = t2p->t2_cred;
950 	struct mbchain *mbp;
951 	struct mdchain *mdp, mbparam, mbdata;
952 	mblk_t *m;
953 	struct smb_rq *rqp;
954 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
955 	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;
956 
957 	m = t2p->t2_tparam.mb_top;
958 	if (m) {
959 		md_initm(&mbparam, m);	/* do not free it! */
960 		totpcount = m_fixhdr(m);
961 		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
962 			return (EINVAL);
963 	} else
964 		totpcount = 0;
965 	m = t2p->t2_tdata.mb_top;
966 	if (m) {
967 		md_initm(&mbdata, m);	/* do not free it! */
968 		totdcount = m_fixhdr(m);
969 		if (totdcount > 0xffff)
970 			return (EINVAL);
971 	} else
972 		totdcount = 0;
973 	leftdcount = totdcount;
974 	leftpcount = totpcount;
975 	txmax = vcp->vc_txmax;
976 	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
977 	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
978 	if (error)
979 		return (error);
980 	rqp->sr_timo = smb_timo_default;
981 	rqp->sr_flags |= SMBR_MULTIPACKET;
982 	t2p->t2_rq = rqp;
983 	mbp = &rqp->sr_rq;
984 	smb_rq_wstart(rqp);
985 	mb_put_uint16le(mbp, totpcount);
986 	mb_put_uint16le(mbp, totdcount);
987 	mb_put_uint16le(mbp, t2p->t2_maxpcount);
988 	mb_put_uint16le(mbp, t2p->t2_maxdcount);
989 	mb_put_uint8(mbp, t2p->t2_maxscount);
990 	mb_put_uint8(mbp, 0);			/* reserved */
991 	mb_put_uint16le(mbp, 0);			/* flags */
992 	mb_put_uint32le(mbp, 0);			/* Timeout */
993 	mb_put_uint16le(mbp, 0);			/* reserved 2 */
994 	len = mb_fixhdr(mbp);
995 
996 	/*
997 	 * Now we know the size of the trans overhead stuff:
998 	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
999 	 * where nmsize is the OTW size of the name, including
1000 	 * the unicode null terminator and any alignment.
1001 	 * Use this to decide which parts (and how much)
1002 	 * can go into this request: params, data
1003 	 */
1004 	nmlen = t2p->t_name ? t2p->t_name_len : 0;
1005 	nmsize = nmlen + 1; /* null term. */
1006 	if (SMB_UNICODE_STRINGS(vcp)) {
1007 		nmsize *= 2;
1008 		/* we know put_dmem will need to align */
1009 		nmsize += 1;
1010 	}
1011 	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
1012 	if (len + leftpcount > txmax) {
1013 		txpcount = min(leftpcount, txmax - len);
1014 		poff = len;
1015 		txdcount = 0;
1016 		doff = 0;
1017 	} else {
1018 		txpcount = leftpcount;
1019 		poff = txpcount ? len : 0;
1020 		/*
1021 		 * Other client traffic seems to "ALIGN2" here.  The extra
1022 		 * 2 byte pad we use has no observed downside and may be
1023 		 * required for some old servers(?)
1024 		 */
1025 		len = ALIGN4(len + txpcount);
1026 		txdcount = min(leftdcount, txmax - len);
1027 		doff = txdcount ? len : 0;
1028 	}
1029 	leftpcount -= txpcount;
1030 	leftdcount -= txdcount;
1031 	mb_put_uint16le(mbp, txpcount);
1032 	mb_put_uint16le(mbp, poff);
1033 	mb_put_uint16le(mbp, txdcount);
1034 	mb_put_uint16le(mbp, doff);
1035 	mb_put_uint8(mbp, t2p->t2_setupcount);
1036 	mb_put_uint8(mbp, 0);
1037 	for (i = 0; i < t2p->t2_setupcount; i++) {
1038 		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
1039 	}
1040 	smb_rq_wend(rqp);
1041 	smb_rq_bstart(rqp);
1042 	if (t2p->t_name) {
1043 		/* Put the string and terminating null. */
1044 		error = smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
1045 		    SMB_CS_NONE, NULL);
1046 	} else {
1047 		/* nmsize accounts for padding, char size. */
1048 		error = mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
1049 	}
1050 	if (error)
1051 		goto freerq;
1052 	len = mb_fixhdr(mbp);
1053 	if (txpcount) {
1054 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1055 		error = md_get_mbuf(&mbparam, txpcount, &m);
1056 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1057 		if (error)
1058 			goto freerq;
1059 		mb_put_mbuf(mbp, m);
1060 	}
1061 	len = mb_fixhdr(mbp);
1062 	if (txdcount) {
1063 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1064 		error = md_get_mbuf(&mbdata, txdcount, &m);
1065 		if (error)
1066 			goto freerq;
1067 		mb_put_mbuf(mbp, m);
1068 	}
1069 	smb_rq_bend(rqp);	/* incredible, but that's it... */
1070 	error = smb_rq_enqueue(rqp);
1071 	if (error)
1072 		goto freerq;
1073 	if (leftpcount || leftdcount) {
1074 		error = smb_rq_reply(rqp);
1075 		if (error)
1076 			goto bad;
1077 		/*
1078 		 * this is an interim response, ignore it.
1079 		 */
1080 		SMBRQ_LOCK(rqp);
1081 		md_next_record(&rqp->sr_rp);
1082 		SMBRQ_UNLOCK(rqp);
1083 	}
1084 	while (leftpcount || leftdcount) {
1085 		error = smb_rq_new(rqp, t2p->t_name ?
1086 		    SMB_COM_TRANSACTION_SECONDARY :
1087 		    SMB_COM_TRANSACTION2_SECONDARY);
1088 		if (error)
1089 			goto bad;
1090 		mbp = &rqp->sr_rq;
1091 		smb_rq_wstart(rqp);
1092 		mb_put_uint16le(mbp, totpcount);
1093 		mb_put_uint16le(mbp, totdcount);
1094 		len = mb_fixhdr(mbp);
1095 		/*
1096 		 * Now the fixed overhead is known: ALIGN4(len + 7 * 2 + 2)
1097 		 * for a TRANS2 request, two bytes less for TRANS, so decide
1098 		 * which parts should go into this request.
1099 		 */
1100 		len = ALIGN4(len + 6 * 2 + 2);
1101 		if (t2p->t_name == NULL)
1102 			len += 2;
1103 		if (len + leftpcount > txmax) {
1104 			txpcount = min(leftpcount, txmax - len);
1105 			poff = len;
1106 			txdcount = 0;
1107 			doff = 0;
1108 		} else {
1109 			txpcount = leftpcount;
1110 			poff = txpcount ? len : 0;
1111 			len = ALIGN4(len + txpcount);
1112 			txdcount = min(leftdcount, txmax - len);
1113 			doff = txdcount ? len : 0;
1114 		}
1115 		mb_put_uint16le(mbp, txpcount);
1116 		mb_put_uint16le(mbp, poff);
1117 		mb_put_uint16le(mbp, totpcount - leftpcount);
1118 		mb_put_uint16le(mbp, txdcount);
1119 		mb_put_uint16le(mbp, doff);
1120 		mb_put_uint16le(mbp, totdcount - leftdcount);
1121 		leftpcount -= txpcount;
1122 		leftdcount -= txdcount;
1123 		if (t2p->t_name == NULL)
1124 			mb_put_uint16le(mbp, t2p->t2_fid);
1125 		smb_rq_wend(rqp);
1126 		smb_rq_bstart(rqp);
1127 		mb_put_uint8(mbp, 0);	/* name */
1128 		len = mb_fixhdr(mbp);
1129 		if (txpcount) {
1130 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1131 			error = md_get_mbuf(&mbparam, txpcount, &m);
1132 			if (error)
1133 				goto bad;
1134 			mb_put_mbuf(mbp, m);
1135 		}
1136 		len = mb_fixhdr(mbp);
1137 		if (txdcount) {
1138 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1139 			error = md_get_mbuf(&mbdata, txdcount, &m);
1140 			if (error)
1141 				goto bad;
1142 			mb_put_mbuf(mbp, m);
1143 		}
1144 		smb_rq_bend(rqp);
1145 		error = smb_iod_multirq(rqp);
1146 		if (error)
1147 			goto bad;
1148 	}	/* while left params or data */
1149 	error = smb_t2_reply(t2p);
1150 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
1151 		goto bad;
1152 	mdp = &t2p->t2_rdata;
1153 	if (mdp->md_top) {
1154 		md_initm(mdp, mdp->md_top);
1155 	}
1156 	mdp = &t2p->t2_rparam;
1157 	if (mdp->md_top) {
1158 		md_initm(mdp, mdp->md_top);
1159 	}
1160 bad:
1161 	smb_iod_removerq(rqp);
1162 freerq:
1163 	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
1164 		if (rqp->sr_flags & SMBR_RESTART)
1165 			t2p->t2_flags |= SMBT2_RESTART;
1166 		md_done(&t2p->t2_rparam);
1167 		md_done(&t2p->t2_rdata);
1168 	}
1169 	smb_rq_done(rqp);
1170 	return (error);
1171 }
1172 
1173 
1174 /*
1175  * Perform a full round of NT_TRANSACTION request
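 * Same splitting scheme as smb_t2_request_int above, but with
 * the 32-bit NT_TRANSACT counts and offsets.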
1176  */
1177 static int
1178 smb_nt_request_int(struct smb_ntrq *ntp)
1179 {
1180 	struct smb_vc *vcp = ntp->nt_vc;
1181 	struct smb_cred *scred = ntp->nt_cred;
1182 	struct mbchain *mbp;
1183 	struct mdchain *mdp, mbsetup, mbparam, mbdata;
1184 	mblk_t *m;
1185 	struct smb_rq *rqp;
1186 	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
1187 	int error, doff, poff, txdcount, txpcount;
1188 	int totscount;
1189 
1190 	m = ntp->nt_tsetup.mb_top;
1191 	if (m) {
1192 		md_initm(&mbsetup, m);	/* do not free it! */
1193 		totscount = m_fixhdr(m);
1194 		if (totscount > 2 * 0xff)
1195 			return (EINVAL);
1196 	} else
1197 		totscount = 0;
1198 	m = ntp->nt_tparam.mb_top;
1199 	if (m) {
1200 		md_initm(&mbparam, m);	/* do not free it! */
1201 		totpcount = m_fixhdr(m);
1202 		if (totpcount > 0x7fffffff)
1203 			return (EINVAL);
1204 	} else
1205 		totpcount = 0;
1206 	m = ntp->nt_tdata.mb_top;
1207 	if (m) {
1208 		md_initm(&mbdata, m);	/* do not free it! */
1209 		totdcount = m_fixhdr(m);
1210 		if (totdcount > 0x7fffffff)
1211 			return (EINVAL);
1212 	} else
1213 		totdcount = 0;
1214 	leftdcount = totdcount;
1215 	leftpcount = totpcount;
1216 	txmax = vcp->vc_txmax;
1217 	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
1218 	if (error)
1219 		return (error);
1220 	rqp->sr_timo = smb_timo_default;
1221 	rqp->sr_flags |= SMBR_MULTIPACKET;
1222 	ntp->nt_rq = rqp;
1223 	mbp = &rqp->sr_rq;
1224 	smb_rq_wstart(rqp);
1225 	mb_put_uint8(mbp, ntp->nt_maxscount);
1226 	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
1227 	mb_put_uint32le(mbp, totpcount);
1228 	mb_put_uint32le(mbp, totdcount);
1229 	mb_put_uint32le(mbp, ntp->nt_maxpcount);
1230 	mb_put_uint32le(mbp, ntp->nt_maxdcount);
1231 	len = mb_fixhdr(mbp);
1232 	/*
1233 	 * Now the fixed overhead is known:
1234 	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2),
1235 	 * so decide which parts should go into the first request.
1236 	 */
1237 	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
1238 	if (len + leftpcount > txmax) {
1239 		txpcount = min(leftpcount, txmax - len);
1240 		poff = len;
1241 		txdcount = 0;
1242 		doff = 0;
1243 	} else {
1244 		txpcount = leftpcount;
1245 		poff = txpcount ? len : 0;
1246 		len = ALIGN4(len + txpcount);
1247 		txdcount = min(leftdcount, txmax - len);
1248 		doff = txdcount ? len : 0;
1249 	}
1250 	leftpcount -= txpcount;
1251 	leftdcount -= txdcount;
1252 	mb_put_uint32le(mbp, txpcount);
1253 	mb_put_uint32le(mbp, poff);
1254 	mb_put_uint32le(mbp, txdcount);
1255 	mb_put_uint32le(mbp, doff);
1256 	mb_put_uint8(mbp, (totscount+1)/2);
1257 	mb_put_uint16le(mbp, ntp->nt_function);
1258 	if (totscount) {
1259 		error = md_get_mbuf(&mbsetup, totscount, &m);
1260 		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
1261 		if (error)
1262 			goto freerq;
1263 		mb_put_mbuf(mbp, m);
1264 		if (totscount & 1)
1265 			mb_put_uint8(mbp, 0); /* setup is in words */
1266 	}
1267 	smb_rq_wend(rqp);
1268 	smb_rq_bstart(rqp);
1269 	len = mb_fixhdr(mbp);
1270 	if (txpcount) {
1271 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1272 		error = md_get_mbuf(&mbparam, txpcount, &m);
1273 		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
1274 		if (error)
1275 			goto freerq;
1276 		mb_put_mbuf(mbp, m);
1277 	}
1278 	len = mb_fixhdr(mbp);
1279 	if (txdcount) {
1280 		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1281 		error = md_get_mbuf(&mbdata, txdcount, &m);
1282 		if (error)
1283 			goto freerq;
1284 		mb_put_mbuf(mbp, m);
1285 	}
1286 	smb_rq_bend(rqp);	/* incredible, but that's it... */
1287 	error = smb_rq_enqueue(rqp);
1288 	if (error)
1289 		goto freerq;
1290 	if (leftpcount || leftdcount) {
1291 		error = smb_rq_reply(rqp);
1292 		if (error)
1293 			goto bad;
1294 		/*
1295 		 * this is an interim response, ignore it.
1296 		 */
1297 		SMBRQ_LOCK(rqp);
1298 		md_next_record(&rqp->sr_rp);
1299 		SMBRQ_UNLOCK(rqp);
1300 	}
1301 	while (leftpcount || leftdcount) {
1302 		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
1303 		if (error)
1304 			goto bad;
1305 		mbp = &rqp->sr_rq;
1306 		smb_rq_wstart(rqp);
1307 		mb_put_mem(mbp, NULL, 3, MB_MZERO);
1308 		mb_put_uint32le(mbp, totpcount);
1309 		mb_put_uint32le(mbp, totdcount);
1310 		len = mb_fixhdr(mbp);
1311 		/*
1312 		 * Now the fixed overhead is known:
1313 		 * ALIGN4(len + 6 * 4 + 2),
1314 		 * so decide which parts should go into this request.
1315 		 */
1316 		len = ALIGN4(len + 6 * 4 + 2);
1317 		if (len + leftpcount > txmax) {
1318 			txpcount = min(leftpcount, txmax - len);
1319 			poff = len;
1320 			txdcount = 0;
1321 			doff = 0;
1322 		} else {
1323 			txpcount = leftpcount;
1324 			poff = txpcount ? len : 0;
1325 			len = ALIGN4(len + txpcount);
1326 			txdcount = min(leftdcount, txmax - len);
1327 			doff = txdcount ? len : 0;
1328 		}
1329 		mb_put_uint32le(mbp, txpcount);
1330 		mb_put_uint32le(mbp, poff);
1331 		mb_put_uint32le(mbp, totpcount - leftpcount);
1332 		mb_put_uint32le(mbp, txdcount);
1333 		mb_put_uint32le(mbp, doff);
1334 		mb_put_uint32le(mbp, totdcount - leftdcount);
1335 		leftpcount -= txpcount;
1336 		leftdcount -= txdcount;
1337 		smb_rq_wend(rqp);
1338 		smb_rq_bstart(rqp);
1339 		len = mb_fixhdr(mbp);
1340 		if (txpcount) {
1341 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1342 			error = md_get_mbuf(&mbparam, txpcount, &m);
1343 			if (error)
1344 				goto bad;
1345 			mb_put_mbuf(mbp, m);
1346 		}
1347 		len = mb_fixhdr(mbp);
1348 		if (txdcount) {
1349 			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
1350 			error = md_get_mbuf(&mbdata, txdcount, &m);
1351 			if (error)
1352 				goto bad;
1353 			mb_put_mbuf(mbp, m);
1354 		}
1355 		smb_rq_bend(rqp);
1356 		error = smb_iod_multirq(rqp);
1357 		if (error)
1358 			goto bad;
1359 	}	/* while left params or data */
1360 	error = smb_nt_reply(ntp);
1361 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
1362 		goto bad;
1363 	mdp = &ntp->nt_rdata;
1364 	if (mdp->md_top) {
1365 		md_initm(mdp, mdp->md_top);
1366 	}
1367 	mdp = &ntp->nt_rparam;
1368 	if (mdp->md_top) {
1369 		md_initm(mdp, mdp->md_top);
1370 	}
1371 bad:
1372 	smb_iod_removerq(rqp);
1373 freerq:
1374 	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
1375 		if (rqp->sr_flags & SMBR_RESTART)
1376 			ntp->nt_flags |= SMBT2_RESTART;
1377 		md_done(&ntp->nt_rparam);
1378 		md_done(&ntp->nt_rdata);
1379 	}
1380 	smb_rq_done(rqp);
1381 	return (error);
1382 }
1383 
1384 int
1385 smb_t2_request(struct smb_t2rq *t2p)
1386 {
1387 	int error = EINVAL, i;
1388 
1389 	for (i = 0; ; ) {
1390 		/*
1391 		 * Don't send any new requests if force unmount is underway.
1392 		 * This check was moved into smb_rq_enqueue, called by
1393 		 * smb_t2_request_int()
1394 		 */
1395 		t2p->t2_flags &= ~SMBT2_RESTART;
1396 		error = smb_t2_request_int(t2p);
1397 		if (!error)
1398 			break;
1399 		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1400 		    SMBT2_RESTART)
1401 			break;
1402 		if (++i > SMBMAXRESTARTS)
1403 			break;
1404 		mutex_enter(&(t2p)->t2_lock);
1405 		if (t2p->t2_share) {
1406 			(void) cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
1407 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1408 		} else {
1409 			delay(SEC_TO_TICK(SMB_RCNDELAY));
1410 		}
1411 		mutex_exit(&(t2p)->t2_lock);
1412 	}
1413 	return (error);
1414 }
1415 
1416 
1417 int
1418 smb_nt_request(struct smb_ntrq *ntp)
1419 {
1420 	int error = EINVAL, i;
1421 
1422 	for (i = 0; ; ) {
1423 		/*
1424 		 * Don't send any new requests if force unmount is underway.
1425 		 * This check was moved into smb_rq_enqueue, called by
1426 		 * smb_nt_request_int()
1427 		 */
1428 		ntp->nt_flags &= ~SMBT2_RESTART;
1429 		error = smb_nt_request_int(ntp);
1430 		if (!error)
1431 			break;
1432 		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
1433 		    SMBT2_RESTART)
1434 			break;
1435 		if (++i > SMBMAXRESTARTS)
1436 			break;
1437 		mutex_enter(&(ntp)->nt_lock);
1438 		if (ntp->nt_share) {
1439 			(void) cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
1440 			    SEC_TO_TICK(SMB_RCNDELAY), TR_CLOCK_TICK);
1441 
1442 		} else {
1443 			delay(SEC_TO_TICK(SMB_RCNDELAY));
1444 		}
1445 		mutex_exit(&(ntp)->nt_lock);
1446 	}
1447 	return (error);
1448 }
1449