xref: /freebsd/sys/netsmb/smb_rq.c (revision d0b2dbfa0ecf2bbc9709efc5e20baf8e4b44bbbf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2000-2001 Boris Popov
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/endian.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/module.h>
36 #include <sys/proc.h>
37 #include <sys/lock.h>
38 #include <sys/sysctl.h>
39 #include <sys/socket.h>
40 #include <sys/socketvar.h>
41 #include <sys/mbuf.h>
42 
43 #include <netsmb/smb.h>
44 #include <netsmb/smb_conn.h>
45 #include <netsmb/smb_rq.h>
46 #include <netsmb/smb_subr.h>
47 #include <netsmb/smb_tran.h>
48 
49 static MALLOC_DEFINE(M_SMBRQ, "SMBRQ", "SMB request");
50 
51 MODULE_DEPEND(netsmb, libmchain, 1, 1, 1);
52 
53 static int  smb_rq_reply(struct smb_rq *rqp);
54 static int  smb_rq_enqueue(struct smb_rq *rqp);
55 static int  smb_rq_getenv(struct smb_connobj *layer,
56 		struct smb_vc **vcpp, struct smb_share **sspp);
57 static int  smb_rq_new(struct smb_rq *rqp, u_char cmd);
58 static int  smb_t2_reply(struct smb_t2rq *t2p);
59 
60 int
61 smb_rq_alloc(struct smb_connobj *layer, u_char cmd, struct smb_cred *scred,
62 	struct smb_rq **rqpp)
63 {
64 	struct smb_rq *rqp;
65 	int error;
66 
67 	rqp = malloc(sizeof(*rqp), M_SMBRQ, M_WAITOK);
68 	if (rqp == NULL)
69 		return ENOMEM;
70 	error = smb_rq_init(rqp, layer, cmd, scred);
71 	rqp->sr_flags |= SMBR_ALLOCED;
72 	if (error) {
73 		smb_rq_done(rqp);
74 		return error;
75 	}
76 	*rqpp = rqp;
77 	return 0;
78 }

/* Zero filler for the SMB header's pid-high/signature/reserved area
 * (12 bytes) when message signing is not in effect. */
static char tzero[12];
81 
/*
 * Initialize a caller-provided request: zero it, create its lock,
 * resolve the owning VC and (optional) share from `layer', verify that
 * `scred' has execute access to both, assign a fresh multiplex ID and
 * build the SMB header for `cmd'.
 *
 * NOTE(review): the sr_slock created below is not destroyed on the
 * early error returns; callers are expected to dispose of the request
 * with smb_rq_done() in that case (smb_rq_alloc() does exactly that).
 */
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *layer, u_char cmd,
	struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof(*rqp));
	smb_sl_init(&rqp->sr_slock, "srslock");
	error = smb_rq_getenv(layer, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return error;
	/* the caller needs execute rights on the VC ... */
	error = smb_vc_access(rqp->sr_vc, scred, SMBM_EXEC);
	if (error)
		return error;
	/* ... and on the share, when the request targets one */
	if (rqp->sr_share) {
		error = smb_share_access(rqp->sr_share, scred, SMBM_EXEC);
		if (error)
			return error;
	}
	rqp->sr_cred = scred;
	rqp->sr_mid = smb_vc_nextmid(rqp->sr_vc);
	return smb_rq_new(rqp, cmd);
}
105 
/*
 * (Re)build the SMB header for `cmd' in the request's send chain.
 * Any previously accumulated request/reply data is discarded first,
 * so this is also used to recycle a request for TRANSACTION
 * secondary packets.
 */
static int
smb_rq_new(struct smb_rq *rqp, u_char cmd)
{
	struct smb_vc *vcp = rqp->sr_vc;
	struct mbchain *mbp = &rqp->sr_rq;
	int error;
	u_int16_t flags2;

	rqp->sr_sendcnt = 0;
	mb_done(mbp);		/* discard any previous request bytes */
	md_done(&rqp->sr_rp);	/* ... and any previous reply bytes */
	error = mb_init(mbp);
	if (error)
		return error;
	mb_put_mem(mbp, SMB_SIGNATURE, SMB_SIGLEN, MB_MSYSTEM);
	mb_put_uint8(mbp, cmd);
	mb_put_uint32le(mbp, 0);		/* DosError */
	mb_put_uint8(mbp, vcp->vc_hflags);
	flags2 = vcp->vc_hflags2;
	/* TRANSACTION requests carry their payload in ASCII, not Unicode */
	if (cmd == SMB_COM_TRANSACTION || cmd == SMB_COM_TRANSACTION_SECONDARY)
		flags2 &= ~SMB_FLAGS2_UNICODE;
	/* NEGOTIATE can never be signed: no session key exists yet */
	if (cmd == SMB_COM_NEGOTIATE)
		flags2 &= ~SMB_FLAGS2_SECURITY_SIGNATURE;
	mb_put_uint16le(mbp, flags2);
	if ((flags2 & SMB_FLAGS2_SECURITY_SIGNATURE) == 0) {
		/* pid-high, signature and reserved fields are all zero */
		mb_put_mem(mbp, tzero, 12, MB_MSYSTEM);
		rqp->sr_rqsig = NULL;
	} else {
		mb_put_uint16le(mbp, 0 /*scred->sc_p->p_pid >> 16*/);
		/* reserve room for the signature; computed when sent */
		rqp->sr_rqsig = (u_int8_t *)mb_reserve(mbp, 8);
		mb_put_uint16le(mbp, 0);
	}
	/* TID and UID are patched in later, once known */
	rqp->sr_rqtid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, 1 /*scred->sc_p->p_pid & 0xffff*/);
	rqp->sr_rquid = mb_reserve(mbp, sizeof(u_int16_t));
	mb_put_uint16le(mbp, rqp->sr_mid);
	return 0;
}
144 
145 void
146 smb_rq_done(struct smb_rq *rqp)
147 {
148 	mb_done(&rqp->sr_rq);
149 	md_done(&rqp->sr_rp);
150 	smb_sl_destroy(&rqp->sr_slock);
151 	if (rqp->sr_flags & SMBR_ALLOCED)
152 		free(rqp, M_SMBRQ);
153 }
154 
155 /*
156  * Simple request-reply exchange
157  */
158 int
159 smb_rq_simple(struct smb_rq *rqp)
160 {
161 	struct smb_vc *vcp = rqp->sr_vc;
162 	int error = EINVAL, i;
163 
164 	for (i = 0; i < SMB_MAXRCN; i++) {
165 		rqp->sr_flags &= ~SMBR_RESTART;
166 		rqp->sr_timo = vcp->vc_timo;
167 		rqp->sr_state = SMBRQ_NOTSENT;
168 		error = smb_rq_enqueue(rqp);
169 		if (error)
170 			return error;
171 		error = smb_rq_reply(rqp);
172 		if (error == 0)
173 			break;
174 		if ((rqp->sr_flags & (SMBR_RESTART | SMBR_NORESTART)) != SMBR_RESTART)
175 			break;
176 	}
177 	return error;
178 }
179 
/*
 * Hand the request to the iod thread for transmission.  Share-level
 * requests first wait out any tree reconnect in progress and trigger
 * one when the share needs it; the add is retried while the iod
 * reports EXDEV.
 */
static int
smb_rq_enqueue(struct smb_rq *rqp)
{
	struct smb_share *ssp = rqp->sr_share;
	int error;

	/* VC-level and iod-internal requests skip the share state dance */
	if (ssp == NULL || rqp->sr_cred == &rqp->sr_vc->vc_iod->iod_scred) {
		return smb_iod_addrq(rqp);
	}
	for (;;) {
		SMBS_ST_LOCK(ssp);
		if (ssp->ss_flags & SMBS_RECONNECTING) {
			/*
			 * PDROP releases the share lock across the sleep;
			 * wake on the generation channel or after 1 second
			 * and re-check, bailing out if signalled.
			 */
			msleep(&ssp->ss_vcgenid, SMBS_ST_LOCKPTR(ssp),
			    PWAIT | PDROP, "90trcn", hz);
			if (smb_td_intr(rqp->sr_cred->scr_td))
				return EINTR;
			continue;
		}
		if (smb_share_valid(ssp) || (ssp->ss_flags & SMBS_CONNECTED) == 0) {
			SMBS_ST_UNLOCK(ssp);
		} else {
			SMBS_ST_UNLOCK(ssp);
			/* share is stale: synchronously redo TREE_CONNECT */
			error = smb_iod_request(rqp->sr_vc->vc_iod,
			    SMBIOD_EV_TREECONNECT | SMBIOD_EV_SYNC, ssp);
			if (error)
				return error;
		}
		error = smb_iod_addrq(rqp);
		/*
		 * NOTE(review): EXDEV appears to indicate a VC generation
		 * mismatch in smb_iod_addrq — loop to retry; confirm against
		 * smb_iod.c.
		 */
		if (error != EXDEV)
			break;
	}
	return error;
}
213 
214 void
215 smb_rq_wstart(struct smb_rq *rqp)
216 {
217 	rqp->sr_wcount = mb_reserve(&rqp->sr_rq, sizeof(u_int8_t));
218 	rqp->sr_rq.mb_count = 0;
219 }
220 
221 void
222 smb_rq_wend(struct smb_rq *rqp)
223 {
224 	if (rqp->sr_wcount == NULL) {
225 		SMBERROR("no wcount\n");	/* actually panic */
226 		return;
227 	}
228 	if (rqp->sr_rq.mb_count & 1)
229 		SMBERROR("odd word count\n");
230 	*rqp->sr_wcount = rqp->sr_rq.mb_count / 2;
231 }
232 
233 void
234 smb_rq_bstart(struct smb_rq *rqp)
235 {
236 	rqp->sr_bcount = mb_reserve(&rqp->sr_rq, sizeof(u_short));
237 	rqp->sr_rq.mb_count = 0;
238 }
239 
240 void
241 smb_rq_bend(struct smb_rq *rqp)
242 {
243 	int bcnt;
244 
245 	if (rqp->sr_bcount == NULL) {
246 		SMBERROR("no bcount\n");	/* actually panic */
247 		return;
248 	}
249 	bcnt = rqp->sr_rq.mb_count;
250 	if (bcnt > 0xffff)
251 		SMBERROR("byte count too large (%d)\n", bcnt);
252 	le16enc(rqp->sr_bcount, bcnt);
253 }
254 
255 int
256 smb_rq_intr(struct smb_rq *rqp)
257 {
258 	if (rqp->sr_flags & SMBR_INTR)
259 		return EINTR;
260 	return smb_td_intr(rqp->sr_cred->scr_td);
261 }
262 
263 int
264 smb_rq_getrequest(struct smb_rq *rqp, struct mbchain **mbpp)
265 {
266 	*mbpp = &rqp->sr_rq;
267 	return 0;
268 }
269 
270 int
271 smb_rq_getreply(struct smb_rq *rqp, struct mdchain **mbpp)
272 {
273 	*mbpp = &rqp->sr_rp;
274 	return 0;
275 }
276 
/*
 * Resolve the VC and (when `layer' is a share) the share that a
 * connection object belongs to.  Either output pointer may be NULL if
 * the caller is not interested.  Returns EINVAL for an orphaned
 * ("zombie") object or an unexpected layer level.
 */
static int
smb_rq_getenv(struct smb_connobj *layer,
	struct smb_vc **vcpp, struct smb_share **sspp)
{
	struct smb_vc *vcp = NULL;
	struct smb_share *ssp = NULL;
	struct smb_connobj *cp;
	int error = 0;

	switch (layer->co_level) {
	    case SMBL_VC:
		vcp = CPTOVC(layer);
		if (layer->co_parent == NULL) {
			/* detached from the connection hierarchy */
			SMBERROR("zombie VC %s\n", vcp->vc_srvname);
			error = EINVAL;
			break;
		}
		break;
	    case SMBL_SHARE:
		ssp = CPTOSS(layer);
		cp = layer->co_parent;
		if (cp == NULL) {
			SMBERROR("zombie share %s\n", ssp->ss_name);
			error = EINVAL;
			break;
		}
		/* recurse one level up to fetch the owning VC */
		error = smb_rq_getenv(cp, &vcp, NULL);
		if (error)
			break;
		break;
	    default:
		SMBERROR("invalid layer %d passed\n", layer->co_level);
		error = EINVAL;
	}
	if (vcpp)
		*vcpp = vcp;
	if (sspp)
		*sspp = ssp;
	return error;
}
317 
318 /*
319  * Wait for reply on the request
320  */
static int
smb_rq_reply(struct smb_rq *rqp)
{
	struct mdchain *mdp = &rqp->sr_rp;
	u_int32_t tdw;	/* scratch for discarded 32-bit header fields */
	u_int8_t tb;	/* scratch for discarded 8-bit header fields */
	int error, rperror = 0;

	/* block until the iod has received a complete response */
	error = smb_iod_waitrq(rqp);
	if (error)
		return error;
	error = md_get_uint32(mdp, &tdw);	/* 4-byte SMB signature */
	if (error)
		return error;
	error = md_get_uint8(mdp, &tb);		/* command */
	/*
	 * The status field is either a 32-bit NT status or a
	 * class/reserved/code triple, depending on negotiation.
	 */
	if (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_ERR_STATUS) {
		error = md_get_uint32le(mdp, &rqp->sr_error);
	} else {
		error = md_get_uint8(mdp, &rqp->sr_errclass);
		error = md_get_uint8(mdp, &tb);		/* reserved */
		error = md_get_uint16le(mdp, &rqp->sr_serror);
		if (!error)
			rperror = smb_maperror(rqp->sr_errclass, rqp->sr_serror);
	}
	/*
	 * NOTE(review): the intermediate errors below are overwritten on
	 * every call; only the final read (sr_rpmid) is effectively
	 * checked.  A truncated packet is still detected because once a
	 * read fails, every subsequent read fails too.
	 */
	error = md_get_uint8(mdp, &rqp->sr_rpflags);
	error = md_get_uint16le(mdp, &rqp->sr_rpflags2);

	/* skip 12 bytes: pid-high/signature/reserved header area */
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);
	error = md_get_uint32(mdp, &tdw);

	error = md_get_uint16le(mdp, &rqp->sr_rptid);
	error = md_get_uint16le(mdp, &rqp->sr_rppid);
	error = md_get_uint16le(mdp, &rqp->sr_rpuid);
	error = md_get_uint16le(mdp, &rqp->sr_rpmid);

	/* verify the server's message signature when signing is active */
	if (error == 0 &&
	    (rqp->sr_vc->vc_hflags2 & SMB_FLAGS2_SECURITY_SIGNATURE))
		error = smb_rq_verify(rqp);

	SMBSDEBUG("M:%04x, P:%04x, U:%04x, T:%04x, E: %d:%d\n",
	    rqp->sr_rpmid, rqp->sr_rppid, rqp->sr_rpuid, rqp->sr_rptid,
	    rqp->sr_errclass, rqp->sr_serror);
	/* transport/parse errors take precedence over server errors */
	return error ? error : rperror;
}
366 
367 #define ALIGN4(a)	(((a) + 3) & ~3)
368 
369 /*
370  * TRANS2 request implementation
371  */
372 int
373 smb_t2_alloc(struct smb_connobj *layer, u_short setup, struct smb_cred *scred,
374 	struct smb_t2rq **t2pp)
375 {
376 	struct smb_t2rq *t2p;
377 	int error;
378 
379 	t2p = malloc(sizeof(*t2p), M_SMBRQ, M_WAITOK);
380 	if (t2p == NULL)
381 		return ENOMEM;
382 	error = smb_t2_init(t2p, layer, setup, scred);
383 	t2p->t2_flags |= SMBT2_ALLOCED;
384 	if (error) {
385 		smb_t2_done(t2p);
386 		return error;
387 	}
388 	*t2pp = t2p;
389 	return 0;
390 }
391 
392 int
393 smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, u_short setup,
394 	struct smb_cred *scred)
395 {
396 	int error;
397 
398 	bzero(t2p, sizeof(*t2p));
399 	t2p->t2_source = source;
400 	t2p->t2_setupcount = 1;
401 	t2p->t2_setupdata = t2p->t2_setup;
402 	t2p->t2_setup[0] = setup;
403 	t2p->t2_fid = 0xffff;
404 	t2p->t2_cred = scred;
405 	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
406 	if (error)
407 		return error;
408 	return 0;
409 }
410 
411 void
412 smb_t2_done(struct smb_t2rq *t2p)
413 {
414 	mb_done(&t2p->t2_tparam);
415 	mb_done(&t2p->t2_tdata);
416 	md_done(&t2p->t2_rparam);
417 	md_done(&t2p->t2_rdata);
418 	if (t2p->t2_flags & SMBT2_ALLOCED)
419 		free(t2p, M_SMBRQ);
420 }
421 
/*
 * Carve `count' bytes starting at `offset' out of the reply packet
 * `mtop' and append them to `mdp' (initializing it on first use).
 * Returns EPROTO when the advertised range runs past the end of the
 * packet.
 */
static int
smb_t2_placedata(struct mbuf *mtop, u_int16_t offset, u_int16_t count,
	struct mdchain *mdp)
{
	struct mbuf *m0;
	int len;

	len = m_length(mtop, NULL);
	if (offset + count > len)
		return (EPROTO);

	/* m0 is everything from `offset' to the end of the packet */
	m0 = m_split(mtop, offset, M_WAITOK);
	if (len != offset + count) {
		/* trim whatever follows the region of interest */
		len -= offset + count;
		m_adj(m0, -len);
	}
	if (mdp->md_top == NULL) {
		md_initm(mdp, m0);
	} else
		m_cat(mdp->md_top, m0);
	return 0;
}
444 
/*
 * Collect the reply (or replies) to a TRANS/TRANS2 exchange, steering
 * the returned parameter and data bytes into t2_rparam and t2_rdata.
 * Responses are consumed until the advertised totals have arrived;
 * out-of-order fragments are rejected with EINVAL.
 */
static int
smb_t2_reply(struct smb_t2rq *t2p)
{
	struct mdchain *mdp;
	struct smb_rq *rqp = t2p->t2_rq;
	int error, totpgot, totdgot;
	u_int16_t totpcount, totdcount, pcount, poff, doff, pdisp, ddisp;
	u_int16_t tmp, bc, dcount;
	u_int8_t wc;

	error = smb_rq_reply(rqp);
	if (error)
		return error;
	if ((t2p->t2_flags & SMBT2_ALLSENT) == 0) {
		/*
		 * this is an interim response, ignore it.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		return 0;
	}
	/*
	 * Now we have to get all subsequent responses. The CIFS specification
	 * says that they can be disordered which is weird.
	 * TODO: timo
	 */
	totpgot = totdgot = 0;	/* bytes of params/data received so far */
	totpcount = totdcount = 0xffff;	/* totals; revised from each response */
	mdp = &rqp->sr_rp;
	for (;;) {
		m_dumpm(mdp->md_top);
		if ((error = md_get_uint8(mdp, &wc)) != 0)
			break;
		if (wc < 10) {	/* a T2 response carries at least 10 words */
			error = ENOENT;
			break;
		}
		if ((error = md_get_uint16le(mdp, &tmp)) != 0)
			break;
		/* the server may lower the totals in any response */
		if (totpcount > tmp)
			totpcount = tmp;
		md_get_uint16le(mdp, &tmp);
		if (totdcount > tmp)
			totdcount = tmp;
		if ((error = md_get_uint16le(mdp, &tmp)) != 0 || /* reserved */
		    (error = md_get_uint16le(mdp, &pcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &poff)) != 0 ||
		    (error = md_get_uint16le(mdp, &pdisp)) != 0)
			break;
		/* fragments must arrive in order: displacement == received */
		if (pcount != 0 && pdisp != totpgot) {
			SMBERROR("Can't handle disordered parameters %d:%d\n",
			    pdisp, totpgot);
			error = EINVAL;
			break;
		}
		if ((error = md_get_uint16le(mdp, &dcount)) != 0 ||
		    (error = md_get_uint16le(mdp, &doff)) != 0 ||
		    (error = md_get_uint16le(mdp, &ddisp)) != 0)
			break;
		if (dcount != 0 && ddisp != totdgot) {
			SMBERROR("Can't handle disordered data\n");
			error = EINVAL;
			break;
		}
		/* skip the setup count/reserved byte, setup words, byte count */
		md_get_uint8(mdp, &wc);
		md_get_uint8(mdp, NULL);
		tmp = wc;
		while (tmp--)
			md_get_uint16(mdp, NULL);
		if ((error = md_get_uint16le(mdp, &bc)) != 0)
			break;
/*		tmp = SMB_HDRLEN + 1 + 10 * 2 + 2 * wc + 2;*/
		/* offsets are from the packet start; extract the payloads */
		if (dcount) {
			error = smb_t2_placedata(mdp->md_top, doff, dcount,
			    &t2p->t2_rdata);
			if (error)
				break;
		}
		if (pcount) {
			error = smb_t2_placedata(mdp->md_top, poff, pcount,
			    &t2p->t2_rparam);
			if (error)
				break;
		}
		totpgot += pcount;
		totdgot += dcount;
		if (totpgot >= totpcount && totdgot >= totdcount) {
			error = 0;
			t2p->t2_flags |= SMBT2_ALLRECV;
			break;
		}
		/*
		 * We're done with this reply, look for the next one.
		 */
		SMBRQ_SLOCK(rqp);
		md_next_record(&rqp->sr_rp);
		SMBRQ_SUNLOCK(rqp);
		error = smb_rq_reply(rqp);
		if (error)
			break;
	}
	return error;
}
549 
550 /*
551  * Perform a full round of TRANS2 request
552  */
static int
smb_t2_request_int(struct smb_t2rq *t2p)
{
	struct smb_vc *vcp = t2p->t2_vc;
	struct smb_cred *scred = t2p->t2_cred;
	struct mbchain *mbp;
	struct mdchain *mdp, mbparam, mbdata;
	struct mbuf *m;
	struct smb_rq *rqp;
	int totpcount, leftpcount, totdcount, leftdcount, len, txmax, i;
	int error, doff, poff, txdcount, txpcount, nmlen;

	/* wrap the caller-built parameter bytes for piecemeal extraction */
	m = t2p->t2_tparam.mb_top;
	if (m) {
		md_initm(&mbparam, m);	/* do not free it! */
		totpcount = m_fixhdr(m);
		if (totpcount > 0xffff)		/* maxvalue for u_short */
			return EINVAL;
	} else
		totpcount = 0;
	/* ... and the same for the data bytes */
	m = t2p->t2_tdata.mb_top;
	if (m) {
		md_initm(&mbdata, m);	/* do not free it! */
		totdcount =  m_fixhdr(m);
		if (totdcount > 0xffff)
			return EINVAL;
	} else
		totdcount = 0;
	leftdcount = totdcount;
	leftpcount = totpcount;
	txmax = vcp->vc_txmax;
	/* named transactions use SMB_COM_TRANSACTION, unnamed use TRANS2 */
	error = smb_rq_alloc(t2p->t2_source, t2p->t_name ?
	    SMB_COM_TRANSACTION : SMB_COM_TRANSACTION2, scred, &rqp);
	if (error)
		return error;
	rqp->sr_flags |= SMBR_MULTIPACKET;
	t2p->t2_rq = rqp;
	rqp->sr_t2 = t2p;
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, totpcount);
	mb_put_uint16le(mbp, totdcount);
	mb_put_uint16le(mbp, t2p->t2_maxpcount);
	mb_put_uint16le(mbp, t2p->t2_maxdcount);
	mb_put_uint8(mbp, t2p->t2_maxscount);
	mb_put_uint8(mbp, 0);			/* reserved */
	mb_put_uint16le(mbp, 0);			/* flags */
	mb_put_uint32le(mbp, 0);			/* Timeout */
	mb_put_uint16le(mbp, 0);			/* reserved 2 */
	len = mb_fixhdr(mbp);
	/*
	 * now we have known packet size as
	 * ALIGN4(len + 5 * 2 + setupcount * 2 + 2 + strlen(name) + 1),
	 * and need to decide which parts should go into the first request
	 */
	nmlen = t2p->t_name ? strlen(t2p->t_name) : 0;
	len = ALIGN4(len + 5 * 2 + t2p->t2_setupcount * 2 + 2 + nmlen + 1);
	if (len + leftpcount > txmax) {
		/* parameters alone exceed the transmit max: defer the data */
		txpcount = min(leftpcount, txmax - len);
		poff = len;
		txdcount = 0;
		doff = 0;
	} else {
		txpcount = leftpcount;
		poff = txpcount ? len : 0;
		len = ALIGN4(len + txpcount);
		txdcount = min(leftdcount, txmax - len);
		doff = txdcount ? len : 0;
	}
	leftpcount -= txpcount;
	leftdcount -= txdcount;
	mb_put_uint16le(mbp, txpcount);
	mb_put_uint16le(mbp, poff);
	mb_put_uint16le(mbp, txdcount);
	mb_put_uint16le(mbp, doff);
	mb_put_uint8(mbp, t2p->t2_setupcount);
	mb_put_uint8(mbp, 0);
	for (i = 0; i < t2p->t2_setupcount; i++)
		mb_put_uint16le(mbp, t2p->t2_setupdata[i]);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	/* TDUNICODE */
	if (t2p->t_name)
		mb_put_mem(mbp, t2p->t_name, nmlen, MB_MSYSTEM);
	mb_put_uint8(mbp, 0);	/* terminating zero */
	len = mb_fixhdr(mbp);
	if (txpcount) {
		/* zero-pad to a 4-byte boundary, then the first param chunk */
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbparam, txpcount, &m);
		SMBSDEBUG("%d:%d:%d\n", error, txpcount, txmax);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	len = mb_fixhdr(mbp);
	if (txdcount) {
		mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
		error = md_get_mbuf(&mbdata, txdcount, &m);
		if (error)
			goto freerq;
		mb_put_mbuf(mbp, m);
	}
	smb_rq_bend(rqp);	/* incredible, but that's it... */
	error = smb_rq_enqueue(rqp);
	if (error)
		goto freerq;
	if (leftpcount == 0 && leftdcount == 0)
		t2p->t2_flags |= SMBT2_ALLSENT;
	error = smb_t2_reply(t2p);
	if (error)
		goto bad;
	/* leftovers didn't fit in the first packet: send secondaries */
	while (leftpcount || leftdcount) {
		t2p->t2_flags |= SMBT2_SECONDARY;
		error = smb_rq_new(rqp, t2p->t_name ?
		    SMB_COM_TRANSACTION_SECONDARY : SMB_COM_TRANSACTION2_SECONDARY);
		if (error)
			goto bad;
		mbp = &rqp->sr_rq;
		smb_rq_wstart(rqp);
		mb_put_uint16le(mbp, totpcount);
		mb_put_uint16le(mbp, totdcount);
		len = mb_fixhdr(mbp);
		/*
		 * now we have known packet size as
		 * ALIGN4(len + 7 * 2 + 2) for T2 request, and -2 for T one,
		 * and need to decide which parts should go into request
		 */
		len = ALIGN4(len + 6 * 2 + 2);
		if (t2p->t_name == NULL)
			len += 2;	/* room for the FID word put below */
		if (len + leftpcount > txmax) {
			txpcount = min(leftpcount, txmax - len);
			poff = len;
			txdcount = 0;
			doff = 0;
		} else {
			txpcount = leftpcount;
			poff = txpcount ? len : 0;
			len = ALIGN4(len + txpcount);
			txdcount = min(leftdcount, txmax - len);
			doff = txdcount ? len : 0;
		}
		mb_put_uint16le(mbp, txpcount);
		mb_put_uint16le(mbp, poff);
		mb_put_uint16le(mbp, totpcount - leftpcount);	/* param displacement */
		mb_put_uint16le(mbp, txdcount);
		mb_put_uint16le(mbp, doff);
		mb_put_uint16le(mbp, totdcount - leftdcount);	/* data displacement */
		leftpcount -= txpcount;
		leftdcount -= txdcount;
		if (t2p->t_name == NULL)
			mb_put_uint16le(mbp, t2p->t2_fid);
		smb_rq_wend(rqp);
		smb_rq_bstart(rqp);
		mb_put_uint8(mbp, 0);	/* name */
		len = mb_fixhdr(mbp);
		if (txpcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbparam, txpcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		len = mb_fixhdr(mbp);
		if (txdcount) {
			mb_put_mem(mbp, NULL, ALIGN4(len) - len, MB_MZERO);
			error = md_get_mbuf(&mbdata, txdcount, &m);
			if (error)
				goto bad;
			mb_put_mbuf(mbp, m);
		}
		smb_rq_bend(rqp);
		rqp->sr_state = SMBRQ_NOTSENT;
		/* poke the iod to transmit the rebuilt request */
		error = smb_iod_request(vcp->vc_iod, SMBIOD_EV_NEWRQ, NULL);
		if (error)
			goto bad;
	}	/* while left params or data */
	t2p->t2_flags |= SMBT2_ALLSENT;
	/* normalize the collected reply chains for the caller's parsing */
	mdp = &t2p->t2_rdata;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
	mdp = &t2p->t2_rparam;
	if (mdp->md_top) {
		m_fixhdr(mdp->md_top);
		md_initm(mdp, mdp->md_top);
	}
bad:
	smb_iod_removerq(rqp);
freerq:
	if (error) {
		/* propagate a restart request up to smb_t2_request() */
		if (rqp->sr_flags & SMBR_RESTART)
			t2p->t2_flags |= SMBT2_RESTART;
		md_done(&t2p->t2_rparam);
		md_done(&t2p->t2_rdata);
	}
	smb_rq_done(rqp);
	return error;
}
753 
754 int
755 smb_t2_request(struct smb_t2rq *t2p)
756 {
757 	int error = EINVAL, i;
758 
759 	for (i = 0; i < SMB_MAXRCN; i++) {
760 		t2p->t2_flags &= ~SMBR_RESTART;
761 		error = smb_t2_request_int(t2p);
762 		if (error == 0)
763 			break;
764 		if ((t2p->t2_flags & (SMBT2_RESTART | SMBT2_NORESTART)) != SMBT2_RESTART)
765 			break;
766 	}
767 	return error;
768 }
769