/*
 * Copyright (c) 2000-2001, Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_rq.c,v 1.29 2005/02/11 01:44:17 lindak Exp $
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/cmn_err.h>
#include <sys/sdt.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>
#include <netsmb/smb_rq.h>

/*
 * How long to wait before restarting a request (after reconnect)
 */
#define	SMB_RCNDELAY		2	/* seconds */

/*
 * Leave this zero - we can't second-guess server side effects of
 * duplicate ops; this isn't NFS!
 */
#define	SMBMAXRESTARTS		0


static int  smb_rq_reply(struct smb_rq *rqp);
static int  smb_rq_enqueue(struct smb_rq *rqp);
static int  smb_rq_getenv(struct smb_connobj *layer,
		struct smb_vc **vcpp, struct smb_share **sspp);
static int  smb_rq_new(struct smb_rq *rqp, uchar_t cmd);
static int  smb_t2_reply(struct smb_t2rq *t2p);
static int  smb_nt_reply(struct smb_ntrq *ntp);


/*
 * Done with a request object.  Free its contents.
 * If it was allocated (SMBR_ALLOCED) free it too.
 * Some of these are stack locals, not allocated.
 *
 * No locks here - this is the last ref.
 */
void
smb_rq_done(struct smb_rq *rqp)
{

	/*
	 * No smb_vc_rele() here - see smb_rq_init()
	 */
	mb_done(&rqp->sr_rq);
	md_done(&rqp->sr_rp);
	mutex_destroy(&rqp->sr_lock);
	cv_destroy(&rqp->sr_cond);
	if (rqp->sr_flags & SMBR_ALLOCED)
		kmem_free(rqp, sizeof (*rqp));
}

int
smb_rq_alloc(struct smb_connobj *layer, uchar_t cmd, struct smb_cred *scred,
	struct smb_rq **rqpp)
{
	struct smb_rq *rqp;
	int error;

	rqp = (struct smb_rq *)kmem_alloc(sizeof (struct smb_rq), KM_SLEEP);
	if (rqp == NULL)
		return (ENOMEM);
	error = smb_rq_init(rqp, layer, cmd, scred);
	if (error) {
		smb_rq_done(rqp);
		return (error);
	}
	rqp->sr_flags |= SMBR_ALLOCED;
	*rqpp = rqp;
	return (0);
}
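
/*
 * A rough usage sketch (not from this file) of the request life cycle,
 * assuming a hypothetical caller that already holds a share reference:
 *
 *	struct smb_rq *rqp;
 *	int error;
 *
 *	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_FLUSH, scred, &rqp);
 *	if (error)
 *		return (error);
 *	smb_rq_wstart(rqp);
 *	mb_put_uint16le(&rqp->sr_rq, 0xffff);	// hypothetical FID
 *	smb_rq_wend(rqp);
 *	smb_rq_bstart(rqp);
 *	smb_rq_bend(rqp);
 *	error = smb_rq_simple(rqp);	// enqueue, wait, parse the reply
 *	smb_rq_done(rqp);		// free contents (and rqp itself)
 */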

int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}

static int
smb_rq_new(struct smb_rq *rqp, uchar_t cmd)
{
	struct mbchain *mbp = &rqp->sr_rq;
	struct smb_vc *vcp = rqp->sr_vc;
	int error;

	ASSERT(rqp != NULL);

	rqp->sr_sendcnt = 0;
	rqp->sr_cmd = cmd;

	mb_done(mbp);
	md_done(&rqp->sr_rp);
	error = mb_init(mbp);
	if (error)
		return (error);

	/*
	 * Is this the right place to save the flags?
	 */
	rqp->sr_rqflags  = vcp->vc_hflags;
	rqp->sr_rqflags2 = vcp->vc_hflags2;

	/*
	 * The SMB header is filled in later by
	 * smb_rq_fillhdr (see below)
	 * Just reserve space here.
	 */
	mb_put_mem(mbp, NULL, SMB_HDRLEN, MB_MZERO);

	return (0);
}

/*
 * Given a request with its body already composed,
 * rewind to the start and fill in the SMB header.
 * This is called after the request is enqueued,
 * so we have the final MID, sequence number, etc.
 */
void
smb_rq_fillhdr(struct smb_rq *rqp)
{
	struct mbchain mbtmp, *mbp = &mbtmp;
	mblk_t *m;

	/*
	 * Fill in the SMB header using a dup of the first mblk,
	 * which points at the same data but has its own wptr,
	 * so we can rewind without trashing the message.
	 */
	m = dupb(rqp->sr_rq.mb_top);
	m->b_wptr = m->b_rptr;	/* rewind */
	mb_initm(mbp, m);

	mb_put_mem(mbp, SMB_SIGNATURE, 4, MB_MSYSTEM);
	mb_put_uint8(mbp, rqp->sr_cmd);
	mb_put_uint32le(mbp, 0);	/* status */
	mb_put_uint8(mbp, rqp->sr_rqflags);
	mb_put_uint16le(mbp, rqp->sr_rqflags2);
	mb_put_uint16le(mbp, 0);	/* pid-high */
	mb_put_mem(mbp, NULL, 8, MB_MZERO);	/* MAC sig. (later) */
	mb_put_uint16le(mbp, 0);	/* reserved */
	mb_put_uint16le(mbp, rqp->sr_rqtid);
	mb_put_uint16le(mbp, rqp->sr_pid);
	mb_put_uint16le(mbp, rqp->sr_rquid);
	mb_put_uint16le(mbp, rqp->sr_mid);

	/* This will free the mblk from dupb. */
	mb_done(mbp);
}
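
/*
 * For reference, the 32-byte SMB header laid down above, in wire
 * order (multi-byte fields are little-endian):
 *
 *	offset	size	field
 *	0	4	signature: 0xFF 'S' 'M' 'B'
 *	4	1	command
 *	5	4	status
 *	9	1	flags
 *	10	2	flags2
 *	12	2	PID high
 *	14	8	MAC signature (zeroed here, signed later)
 *	22	2	reserved
 *	24	2	TID
 *	26	2	PID low
 *	28	2	UID
 *	30	2	MID
 */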

int
smb_rq_simple(struct smb_rq *rqp)
{
	return (smb_rq_simple_timed(rqp, smb_timo_default));
}

/*
 * Simple request-reply exchange
 */
int
smb_rq_simple_timed(struct smb_rq *rqp, int timeout)
{
	int error = EINVAL;

	for (;;) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue.
		 */
		rqp->sr_flags &= ~SMBR_RESTART;
		rqp->sr_timo = timeout;	/* in seconds */
		rqp->sr_state = SMBRQ_NOTSENT;
		error = smb_rq_enqueue(rqp);
		if (error) {
			break;
		}
		error = smb_rq_reply(rqp);
		if (!error)
			break;
		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) !=
		    SMBR_RESTART)
			break;
		if (rqp->sr_rexmit <= 0)
			break;
		SMBRQ_LOCK(rqp);
		if (rqp->sr_share) {
			cv_reltimedwait(&rqp->sr_cond, &(rqp)->sr_lock,
			    (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
		} else {
			delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
		}
		SMBRQ_UNLOCK(rqp);
		rqp->sr_rexmit--;
#ifdef XXX
		timeout *= 2;
#endif
	}
	return (error);
}
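
/*
 * Note on the loop above: a request is retried only when the iod
 * marked it SMBR_RESTART (e.g. after a reconnect), SMBR_NORESTART
 * is clear, and sr_rexmit is still positive.  With SMBMAXRESTARTS
 * at zero, sr_rexmit starts at zero, so by default a request gets
 * exactly one trip to the server.
 */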


static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct smb_share *ssp = rqp->sr_share;
	int error = 0;

	/*
	 * Normal requests may initiate a reconnect,
	 * and/or wait for state changes to finish.
	 * Some requests set the NORECONNECT flag
	 * to avoid all that (i.e. tree discon)
	 */
	if (rqp->sr_flags & SMBR_NORECONNECT) {
		if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
			SMBSDEBUG("bad vc_state=%d\n", vcp->vc_state);
			return (ENOTCONN);
		}
		if (ssp != NULL &&
		    ((ssp->ss_flags & SMBS_CONNECTED) == 0))
			return (ENOTCONN);
		goto ok_out;
	}

	/*
	 * If we're not connected, initiate a reconnect
	 * and/or wait for an existing one to finish.
	 */
	if (vcp->vc_state != SMBIOD_ST_VCACTIVE) {
		error = smb_iod_reconnect(vcp);
		if (error != 0)
			return (error);
	}

	/*
	 * If this request has a "share" object
	 * that needs a tree connect, do it now.
	 */
	if (ssp != NULL && (ssp->ss_flags & SMBS_CONNECTED) == 0) {
		error = smb_share_tcon(ssp, rqp->sr_cred);
		if (error)
			return (error);
	}

	/*
	 * We now know what UID + TID to use.
	 * Store them in the request.
	 */
ok_out:
	rqp->sr_rquid = vcp->vc_smbuid;
	rqp->sr_rqtid = ssp ? ssp->ss_tid : SMB_TID_UNKNOWN;
	error = smb_iod_addrq(rqp);

	return (error);
}

/*
 * Mark the location of the word count, which is filled in later by
 * smb_rq_wend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the word count happens to be 8-bit.
 */
void
smb_rq_wstart(struct smb_rq *rqp)
{
	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof (uint8_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_wend(struct smb_rq *rqp)
{
	uint_t wcnt;

	if (rqp->sr_wcount == NULL) {
		SMBSDEBUG("no wcount\n");
		return;
	}
	wcnt = rqp->sr_rq.mb_count;
	if (wcnt > 0x1ff)
		SMBSDEBUG("word count too large (%d)\n", wcnt);
	if (wcnt & 1)
		SMBSDEBUG("odd word count\n");
	/* Fill in the word count (8-bits) */
	*rqp->sr_wcount = (wcnt >> 1);
}

/*
 * Mark the location of the byte count, which is filled in later by
 * smb_rq_bend().  Also initialize the counter that it uses
 * to figure out what value to fill in.
 *
 * Note that the byte count happens to be 16-bit.
 */
void
smb_rq_bstart(struct smb_rq *rqp)
{
	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof (uint16_t));
	rqp->sr_rq.mb_count = 0;
}

void
smb_rq_bend(struct smb_rq *rqp)
{
	uint_t bcnt;

	if (rqp->sr_bcount == NULL) {
		SMBSDEBUG("no bcount\n");
		return;
	}
	bcnt = rqp->sr_rq.mb_count;
	if (bcnt > 0xffff)
		SMBSDEBUG("byte count too large (%d)\n", bcnt);
	/*
	 * Fill in the byte count (16-bits)
	 * The pointer is char * type due to
	 * typical off-by-one alignment.
	 */
	rqp->sr_bcount[0] = bcnt & 0xFF;
	rqp->sr_bcount[1] = (bcnt >> 8);
}
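
/*
 * The wstart/wend and bstart/bend pairs bracket the word and byte
 * sections of an SMB1 request.  A minimal sketch of the pattern,
 * with a hypothetical request carrying one parameter word and a
 * short data string:
 *
 *	struct mbchain *mbp = &rqp->sr_rq;
 *
 *	smb_rq_wstart(rqp);			// reserve 8-bit word count
 *	mb_put_uint16le(mbp, 1);		// parameter words...
 *	smb_rq_wend(rqp);			// word count = bytes / 2
 *	smb_rq_bstart(rqp);			// reserve 16-bit byte count
 *	mb_put_mem(mbp, "data", 4, MB_MSYSTEM);	// byte section...
 *	smb_rq_bend(rqp);			// fill in the byte count
 */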

int
smb_rq_intr(struct smb_rq *rqp)
{
	if (rqp->sr_flags & SMBR_INTR)
		return (EINTR);

	return (0);
}

static int
smb_rq_getenv(struct smb_connobj *co,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	int error = EINVAL;

	if (co->co_flags & SMBO_GONE) {
		SMBSDEBUG("zombie CO\n");
		error = EINVAL;
		goto out;
	}

	switch (co->co_level) {
	case SMBL_SHARE:
		ssp = CPTOSS(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie share %s\n", ssp->ss_name);
			break;
		}
		/* instead of recursion... */
		co = co->co_parent;
		/* FALLTHROUGH */
	case SMBL_VC:
		vcp = CPTOVC(co);
		if ((co->co_flags & SMBO_GONE) ||
		    co->co_parent == NULL) {
			SMBSDEBUG("zombie VC %s\n", vcp->vc_srvname);
			break;
		}
		error = 0;
		break;

	default:
		SMBSDEBUG("invalid level %d passed\n", co->co_level);
	}

out:
	if (!error) {
		if (vcpp)
			*vcpp = vcp;
		if (sspp)
			*sspp = ssp;
	}

	return (error);
}
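
/*
 * In other words: pass a share-level object and smb_rq_getenv()
 * resolves both the share and (via co_parent) its VC; pass a
 * VC-level object and only *vcpp is set.  Callers such as
 * smb_t2_init() pass a NULL sspp when they only need the VC.
 */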

/*
 * Wait for reply on the request
 */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int8_t tb;
	int error, rperror = 0;

	if (rqp->sr_timo == SMBNOREPLYWAIT)
		return (smb_iod_removerq(rqp));

	error = smb_iod_waitrq(rqp);
	if (error)
		return (error);

	/*
	 * If the request was signed, validate the
	 * signature on the response.
	 */
	if (rqp->sr_rqflags2 & SMB_FLAGS2_SECURITY_SIGNATURE) {
		error = smb_rq_verify(rqp);
		if (error)
			return (error);
	}

	/*
	 * Parse the SMB header
	 */
	error = md_get_uint32le(mdp, NULL);
	if (error)
		return (error);
	error = md_get_uint8(mdp, &tb);
	error = md_get_uint32le(mdp, &rqp->sr_error);
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);
	if (rqp->sr_rpflags2 & SMB_FLAGS2_ERR_STATUS) {
		/*
		 * Do a special check for STATUS_BUFFER_OVERFLOW;
		 * it's not an error.
		 */
		if (rqp->sr_error == NT_STATUS_BUFFER_OVERFLOW) {
			/*
			 * Don't report it as an error to our caller;
			 * they can look at rqp->sr_error if they
			 * need to know whether we got a
			 * STATUS_BUFFER_OVERFLOW.
			 * XXX - should we do that for all errors
			 * where (error & 0xC0000000) is 0x80000000,
			 * i.e. all warnings?
			 */
			rperror = 0;
		} else
			rperror = smb_maperr32(rqp->sr_error);
	} else {
		rqp->sr_errclass = rqp->sr_error & 0xff;
		rqp->sr_serror = rqp->sr_error >> 16;
		rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	if (rperror == EMOREDATA) {
		rperror = E2BIG;
		rqp->sr_flags |= SMBR_MOREDATA;
	} else
		rqp->sr_flags &= ~SMBR_MOREDATA;

	error = md_get_uint32le(mdp, NULL);
	error = md_get_uint32le(mdp, NULL);
	error = md_get_uint32le(mdp, NULL);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);

	return ((error) ? error : rperror);
}


#define	ALIGN4(a)	(((a) + 3) & ~3)
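
/*
 * ALIGN4 rounds up to the next multiple of four, e.g.
 * ALIGN4(5) == 8, ALIGN4(8) == 8, ALIGN4(0) == 0.
 */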

/*
 * TRANS2 request implementation
 * TRANS implementation is in the "t2" routines
 * NT_TRANSACTION implementation is the separate "nt" stuff
 */
int
smb_t2_alloc(struct smb_connobj *layer, ushort_t setup, struct smb_cred *scred,
	struct smb_t2rq **t2pp)
{
	struct smb_t2rq *t2p;
	int error;

	t2p = (struct smb_t2rq *)kmem_alloc(sizeof (*t2p), KM_SLEEP);
	if (t2p == NULL)
		return (ENOMEM);
	error = smb_t2_init(t2p, layer, &setup, 1, scred);
	t2p->t2_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_t2_done(t2p);
		return (error);
	}
	*t2pp = t2p;
	return (0);
}

int
smb_nt_alloc(struct smb_connobj *layer, ushort_t fn, struct smb_cred *scred,
	struct smb_ntrq **ntpp)
{
	struct smb_ntrq *ntp;
	int error;

	ntp = (struct smb_ntrq *)kmem_alloc(sizeof (*ntp), KM_SLEEP);
	if (ntp == NULL)
		return (ENOMEM);
	error = smb_nt_init(ntp, layer, fn, scred);
	mutex_init(&ntp->nt_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&ntp->nt_cond, NULL, CV_DEFAULT, NULL);
	ntp->nt_flags |= SMBT2_ALLOCED;
	if (error) {
		smb_nt_done(ntp);
		return (error);
	}
	*ntpp = ntp;
	return (0);
}

int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
	int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);
	return (0);
}

int
smb_nt_init(struct smb_ntrq *ntp, struct smb_connobj *source, ushort_t fn,
	struct smb_cred *scred)
{
	int error;

	bzero(ntp, sizeof (*ntp));
	ntp->nt_source = source;
	ntp->nt_function = fn;
	ntp->nt_cred = scred;
	ntp->nt_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL); /* for smb up/down */
	error = smb_rq_getenv(source, &ntp->nt_vc, NULL);
	if (error)
		return (error);
	return (0);
}

void
smb_t2_done(struct smb_t2rq *t2p)
{
	mb_done(&t2p->t2_tparam);
	mb_done(&t2p->t2_tdata);
	md_done(&t2p->t2_rparam);
	md_done(&t2p->t2_rdata);
	mutex_destroy(&t2p->t2_lock);
	cv_destroy(&t2p->t2_cond);
	if (t2p->t2_flags & SMBT2_ALLOCED)
		kmem_free(t2p, sizeof (*t2p));
}

u_int32_t
smb_t2_err(struct smb_t2rq *t2p)
{
	/* mask off "severity" and the "component" bit */
	return (t2p->t2_sr_error & ~(0xe0000000));
}
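
/*
 * For example, NT_STATUS_INVALID_PARAMETER (0xC000000D) comes back
 * as 0x0000000D: the 0xe0000000 mask strips the two severity bits
 * (31-30) and bit 29.
 */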

void
smb_nt_done(struct smb_ntrq *ntp)
{
	mb_done(&ntp->nt_tsetup);
	mb_done(&ntp->nt_tparam);
	mb_done(&ntp->nt_tdata);
	md_done(&ntp->nt_rparam);
	md_done(&ntp->nt_rdata);
	cv_destroy(&ntp->nt_cond);
	mutex_destroy(&ntp->nt_lock);
	if (ntp->nt_flags & SMBT2_ALLOCED)
		kmem_free(ntp, sizeof (*ntp));
}

/*
 * Extract data [offset,count] from mtop and add to mdp.
 */
static int
smb_t2_placedata(mblk_t *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	mblk_t *n;

	n = m_copym(mtop, offset, count, M_WAITOK);
	if (n == NULL)
		return (EBADRPC);

	if (mdp->md_top == NULL) {
		md_initm(mdp, n);
	} else
		m_cat(mdp->md_top, n);

	return (0);
}

static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, error2, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	t2p->t2_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		t2p->t2_flags |= SMBT2_MOREDATA;
	t2p->t2_sr_errclass = rqp->sr_errclass;
	t2p->t2_sr_serror = rqp->sr_serror;
	t2p->t2_sr_error = rqp->sr_error;
	t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses, if any.
	 * The CIFS specification says that they can be misordered,
	 * which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {
			error2 = ENOENT;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error2 = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		md_get_uint8(mdp, NULL); /* Reserved2 */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff,
			    pcount, &t2p->t2_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff,
			    dcount, &t2p->t2_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			t2p->t2_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		t2p->t2_sr_errclass = rqp->sr_errclass;
		t2p->t2_sr_serror = rqp->sr_serror;
		t2p->t2_sr_error = rqp->sr_error;
		t2p->t2_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}
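
/*
 * A concrete example of the reassembly above: a server answering a
 * FIND-style TRANS2 with 8000 data bytes under a 4 KB transmit limit
 * might send three responses with (dcount, ddisp) of (3000, 0),
 * (3000, 3000) and (2000, 6000).  Each pass copies [doff, dcount)
 * into t2_rdata, advances totdgot, and loops until totdgot reaches
 * the total count the server advertised.
 */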

static int
smb_nt_reply(struct smb_ntrq *ntp)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = ntp->nt_rq;
	int error, error2;
	u_int32_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int32_t tmp, dcount, totpgot, totdgot;
	u_int16_t bc;
	u_int8_t wc;

	ntp->nt_flags &= ~SMBT2_MOREDATA;

	error = smb_rq_reply(rqp);
	if (rqp->sr_flags & SMBR_MOREDATA)
		ntp->nt_flags |= SMBT2_MOREDATA;
	ntp->nt_sr_error = rqp->sr_error;
	ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
	if (error && !(rqp->sr_flags & SMBR_MOREDATA))
		return (error);
	/*
	 * Now we have to get all subsequent responses.  The CIFS
	 * specification says that they can be misordered, which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;
	totpcount = totdcount = 0xffffffff;
	mdp = &rqp->sr_rp;
	for (;;) {
		DTRACE_PROBE2(smb_trans_reply,
		    (smb_rq_t *), rqp, (mblk_t *), mdp->md_top);
		m_dumpm(mdp->md_top);

		if ((error2 = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 18) {
			error2 = ENOENT;
			break;
		}
		md_get_mem(mdp, NULL, 3, MB_MSYSTEM); /* reserved */
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totpcount > tmp)
			totpcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &tmp)) != 0)
			break;
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error2 = md_get_uint32le(mdp, &pcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &poff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &pdisp)) != 0)
			break;
		if (pcount != 0 && pdisp != totpgot) {
			SMBSDEBUG("Can't handle misordered parameters %d:%d\n",
			    pdisp, totpgot);
			error2 = EINVAL;
			break;
		}
		if ((error2 = md_get_uint32le(mdp, &dcount)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &doff)) != 0 ||
		    (error2 = md_get_uint32le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBSDEBUG("Can't handle misordered data: dcount %d\n",
			    dcount);
			error2 = EINVAL;
			break;
		}

		/* XXX: Skip setup words?  We don't save them? */
		md_get_uint8(mdp, &wc);  /* SetupCount */
		tmp = wc;
		while (tmp--)
			md_get_uint16le(mdp, NULL);

		if ((error2 = md_get_uint16le(mdp, &bc)) != 0)
			break;

		/*
		 * There are pad bytes here, and the poff value
		 * indicates where the next data are found.
		 * No need to guess at the padding size.
		 */
		if (pcount) {
			error2 = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &ntp->nt_rparam);
			if (error2)
				break;
		}
		totpgot += pcount;

		if (dcount) {
			error2 = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &ntp->nt_rdata);
			if (error2)
				break;
		}
		totdgot += dcount;

		if (totpgot >= totpcount && totdgot >= totdcount) {
			error2 = 0;
			ntp->nt_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
		error2 = smb_rq_reply(rqp);
		if (rqp->sr_flags & SMBR_MOREDATA)
			ntp->nt_flags |= SMBT2_MOREDATA;
		if (!error2)
			continue;
		ntp->nt_sr_error = rqp->sr_error;
		ntp->nt_sr_rpflags2 = rqp->sr_rpflags2;
		error = error2;
		if (!(rqp->sr_flags & SMBR_MOREDATA))
			break;
	}
	return (error ? error : error2);
}

/*
 * Perform a full round of TRANS2 request
 */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen, nmsize;

	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for ushort_t */
			return (EINVAL);
	} else
		totpcount = 0;
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0xffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);		/* flags */
	mb_put_uint32le(mbp, 0);		/* Timeout */
	mb_put_uint16le(mbp, 0);		/* reserved 2 */
	len = mb_fixhdr(mbp);

	/*
	 * Now we know the size of the trans overhead stuff:
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + nmsize),
	 * where nmsize is the OTW size of the name, including
	 * the unicode null terminator and any alignment.
	 * Use this to decide which parts (and how much)
	 * can go into this request: params, data
	 */
	nmlen = t2p->t_name ? t2p->t_name_len : 0;
	nmsize = nmlen + 1; /* null term. */
	if (SMB_UNICODE_STRINGS(vcp)) {
		nmsize *= 2;
		/* we know put_dmem will need to align */
		nmsize += 1;
	}
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmsize);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		/*
		 * Other client traffic seems to "ALIGN2" here.  The extra
		 * 2 byte pad we use has no observed downside and may be
		 * required for some old servers(?)
		 */
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++) {
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	if (t2p->t_name) {
		/* Put the string and terminating null. */
		smb_put_dmem(mbp, vcp, t2p->t_name, nmlen + 1,
		    SMB_CS_NONE, NULL);
	} else {
		/* nmsize accounts for padding, char size. */
		mb_put_mem(mbp, NULL, nmsize, MB_MZERO);
	}
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY :
		    SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the packet size:
		 * ALIGN4(len + 7 * 2 + 2) for a TRANS2 request, and two
		 * bytes less for a TRANS request.  Decide which parts
		 * should go into this request.
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_t2_reply(t2p);
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(t2p->t2_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
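
/*
 * A rough sketch (not from this file) of how callers drive a TRANS2
 * exchange; the setup word and parameter marshalling here are
 * hypothetical:
 *
 *	struct smb_t2rq *t2p;
 *	int error;
 *
 *	error = smb_t2_alloc(SSTOCP(ssp),
 *	    SMB_TRANS2_QUERY_PATH_INFORMATION, scred, &t2p);
 *	if (error)
 *		return (error);
 *	mb_init(&t2p->t2_tparam);
 *	mb_put_uint16le(&t2p->t2_tparam, infolevel);	// request params
 *	t2p->t2_maxpcount = 2;
 *	t2p->t2_maxdcount = vcp->vc_txmax;
 *	error = smb_t2_request(t2p);	// may span several packets
 *	if (error == 0) {
 *		// parse t2p->t2_rparam / t2p->t2_rdata with md_get_*()
 *	}
 *	smb_t2_done(t2p);
 */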


/*
 * Perform a full round of NT_TRANSACTION request
 */
static int
smb_nt_request_int(struct smb_ntrq *ntp)
{
	struct smb_vc *vcp = ntp->nt_vc;
	struct smb_cred *scred = ntp->nt_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbsetup, mbparam, mbdata;
	mblk_t *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax;
	int error, doff, poff, txdcount, txpcount;
	int totscount;

	m = ntp->nt_tsetup.mb_top;
	if (m) {
		md_initm(&mbsetup, m);	/* do not free it! */
		totscount = m_fixhdr(m);
		if (totscount > 2 * 0xff)
			return (EINVAL);
	} else
		totscount = 0;
	m = ntp->nt_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0x7fffffff)
			return (EINVAL);
	} else
		totpcount = 0;
	m = ntp->nt_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount = m_fixhdr(m);
		if (totdcount > 0x7fffffff)
			return (EINVAL);
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	error = smb_rq_alloc(ntp->nt_source, SMB_COM_NT_TRANSACT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_timo = smb_timo_default;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	ntp->nt_rq = rqp;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, ntp->nt_maxscount);
	mb_put_uint16le(mbp, 0);	/* reserved (flags?) */
	mb_put_uint32le(mbp, totpcount);
	mb_put_uint32le(mbp, totdcount);
	mb_put_uint32le(mbp, ntp->nt_maxpcount);
	mb_put_uint32le(mbp, ntp->nt_maxdcount);
	len = mb_fixhdr(mbp);
	/*
	 * Now we know the packet size:
	 * ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2).
	 * Decide which parts should go into the first request.
	 */
	len = ALIGN4(len + 4 * 4 + 1 + 2 + ((totscount+1)&~1) + 2);
	if (len + leftpcount > txmax) {
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint32le(mbp, txpcount);
	mb_put_uint32le(mbp, poff);
	mb_put_uint32le(mbp, txdcount);
	mb_put_uint32le(mbp, doff);
	mb_put_uint8(mbp, (totscount+1)/2);
	mb_put_uint16le(mbp, ntp->nt_function);
	if (totscount) {
		error = md_get_mbuf(&mbsetup, totscount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, totscount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
		if (totscount & 1)
			mb_put_uint8(mbp, 0); /* setup is in words */
	}
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	len = mb_fixhdr(mbp);
	if (txpcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount || leftdcount) {
		error = smb_rq_reply(rqp);
		if (error)
			goto bad;
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_LOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_UNLOCK(rqp);
	}
	while (leftpcount || leftdcount) {
		error = smb_rq_new(rqp, SMB_COM_NT_TRANSACT_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_mem(mbp, NULL, 3, MB_MZERO);
		mb_put_uint32le(mbp, totpcount);
		mb_put_uint32le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * Now we know the packet size:
		 * ALIGN4(len + 6 * 4 + 2).
		 * Decide which parts should go into this request.
		 */
		len = ALIGN4(len + 6 * 4 + 2);
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint32le(mbp, txpcount);
		mb_put_uint32le(mbp, poff);
		mb_put_uint32le(mbp, totpcount - leftpcount);
		mb_put_uint32le(mbp, txdcount);
		mb_put_uint32le(mbp, doff);
		mb_put_uint32le(mbp, totdcount - leftdcount);
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		error = smb_iod_multirq(rqp);
		if (error)
			goto bad;
	}	/* while left params or data */
	error = smb_nt_reply(ntp);
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA))
		goto bad;
	mdp = &ntp->nt_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &ntp->nt_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error && !(ntp->nt_flags & SMBT2_MOREDATA)) {
		if (rqp->sr_flags & SMBR_RESTART)
			ntp->nt_flags |= SMBT2_RESTART;
		md_done(&ntp->nt_rparam);
		md_done(&ntp->nt_rdata);
	}
	smb_rq_done(rqp);
	return (error);
}
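
/*
 * A rough usage sketch for NT_TRANSACTION, parallel to the TRANS2
 * sketch above; the function code and parameter marshalling are
 * hypothetical:
 *
 *	struct smb_ntrq *ntp;
 *	int error;
 *
 *	error = smb_nt_alloc(SSTOCP(ssp),
 *	    NT_TRANSACT_QUERY_SECURITY_DESC, scred, &ntp);
 *	if (error)
 *		return (error);
 *	mb_init(&ntp->nt_tparam);
 *	mb_put_uint16le(&ntp->nt_tparam, fid);	// params for the function
 *	ntp->nt_maxpcount = 4;
 *	ntp->nt_maxdcount = vcp->vc_txmax;
 *	error = smb_nt_request(ntp);
 *	if (error == 0) {
 *		// parse ntp->nt_rparam / ntp->nt_rdata with md_get_*()
 *	}
 *	smb_nt_done(ntp);
 */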

int
smb_t2_request(struct smb_t2rq *t2p)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_t2_request_int()
		 */
		t2p->t2_flags &= ~SMBT2_RESTART;
		error = smb_t2_request_int(t2p);
		if (!error)
			break;
		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(t2p)->t2_lock);
		if (t2p->t2_share) {
			cv_reltimedwait(&t2p->t2_cond, &(t2p)->t2_lock,
			    (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
		} else {
			delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
		}
		mutex_exit(&(t2p)->t2_lock);
	}
	return (error);
}


int
smb_nt_request(struct smb_ntrq *ntp)
{
	int error = EINVAL, i;

	for (i = 0; ; ) {
		/*
		 * Don't send any new requests if force unmount is underway.
		 * This check was moved into smb_rq_enqueue, called by
		 * smb_nt_request_int()
		 */
		ntp->nt_flags &= ~SMBT2_RESTART;
		error = smb_nt_request_int(ntp);
		if (!error)
			break;
		if ((ntp->nt_flags & (SMBT2_RESTART | SMBT2_NORESTART)) !=
		    SMBT2_RESTART)
			break;
		if (++i > SMBMAXRESTARTS)
			break;
		mutex_enter(&(ntp)->nt_lock);
		if (ntp->nt_share) {
			cv_reltimedwait(&ntp->nt_cond, &(ntp)->nt_lock,
			    (hz * SMB_RCNDELAY), TR_CLOCK_TICK);
		} else {
			delay(ddi_get_lbolt() + (hz * SMB_RCNDELAY));
		}
		mutex_exit(&(ntp)->nt_lock);
	}
	return (error);
}