/*
 * Copyright (c) 2000-2001 Boris Popov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Boris Popov.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: smb_smb.c,v 1.35.100.2 2005/06/02 00:55:39 lindak Exp $
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Various SMB requests.  Most of these routines merely pack data into mbufs.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/random.h>
#include <sys/note.h>
#include <sys/cmn_err.h>

#include <netsmb/smb_osdep.h>

#include <netsmb/smb.h>
#include <netsmb/smb_conn.h>
#include <netsmb/smb_rq.h>
#include <netsmb/smb_subr.h>
#include <netsmb/smb_tran.h>

/*
 * Largest size to use with LARGE_READ/LARGE_WRITE.
 * Specs say up to 64k data bytes, but Windows traffic
 * uses 60k... no doubt for some good reason.
 * (Probably to keep 4k block alignment.)
 * XXX: Move to smb.h maybe?
 */
#define	SMB_MAX_LARGE_RW_SIZE (60*1024)
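
/*
 * (For reference: 60 KB is 61440 bytes, i.e. 15 * 4096, so a full-size
 * transfer covers a whole number of 4 KB blocks, consistent with the
 * alignment guess above.)
 */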

/*
 * Default timeout values, all in seconds.
 * Make these tunable (only via mdb for now).
 */
int smb_timo_notice = 15;
int smb_timo_default = 30;	/* was SMB_DEFRQTIMO */
int smb_timo_open = 45;
int smb_timo_read = 45;
int smb_timo_write = 60;	/* was SMBWRTTIMO */
int smb_timo_append = 90;
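
/*
 * For example (hypothetical value), the write timeout could be raised
 * on a live system with something like:
 *
 *	echo 'smb_timo_write/W 0t120' | mdb -kw
 *
 * where 0t120 is mdb notation for decimal 120.
 */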

static int smb_smb_read(struct smb_share *ssp, uint16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
static int smb_smb_write(struct smb_share *ssp, uint16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);

static int smb_smb_readx(struct smb_share *ssp, uint16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
static int smb_smb_writex(struct smb_share *ssp, uint16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);

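/*
 * Send a TREE_CONNECT_ANDX for this share and, on success, record the
 * returned TID, option bits and share type in the smb_share.  The
 * request carries the pre-computed tree connect password, the UNC
 * resource name ("\\server\share"), and the requested service type.
 */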
int
smb_smb_treeconnect(struct smb_share *ssp, struct smb_cred *scred)
{
	struct smb_vc *vcp;
	struct smb_rq *rqp = NULL;
	struct mbchain *mbp;
	struct mdchain *mdp;
	char *pbuf, *unc_name = NULL;
	int error, tlen, plen, unc_len;
	uint16_t bcnt, options;
	uint8_t wc;

	vcp = SSTOVC(ssp);

	/*
	 * Make this a "VC-level" request, so it will have
	 * rqp->sr_share == NULL, and smb_iod_sendrq()
	 * will send it with TID = SMB_TID_UNKNOWN
	 *
	 * This also serves to bypass the wait for
	 * share state changes, which this call is
	 * trying to carry out.
	 */
	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_CONNECT_ANDX,
	    scred, &rqp);
	if (error)
		return (error);

	/*
	 * Build the UNC name, i.e. "//server/share"
	 * but with backslashes of course.
	 * size math: three slashes, one null.
	 */
	unc_len = 4 + strlen(vcp->vc_srvname) + strlen(ssp->ss_name);
	unc_name = kmem_alloc(unc_len, KM_SLEEP);
	(void) snprintf(unc_name, unc_len, "\\\\%s\\%s",
	    vcp->vc_srvname, ssp->ss_name);
	SMBSDEBUG("unc_name: \"%s\"", unc_name);

	/*
	 * The password is now pre-computed in the
	 * user-space helper process.
	 */
	plen = ssp->ss_pwlen;
	pbuf = ssp->ss_pass;

	/*
	 * Build the request.
	 */
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);
	mb_put_uint8(mbp, 0);
	mb_put_uint16le(mbp, 0);
	mb_put_uint16le(mbp, 0);		/* Flags */
	mb_put_uint16le(mbp, plen);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);

	/* Tree connect password, if any */
	error = mb_put_mem(mbp, pbuf, plen, MB_MSYSTEM);
	if (error)
		goto out;

	/* UNC resource name */
	error = smb_put_dstring(mbp, vcp, unc_name, SMB_CS_NONE);
	if (error)
		goto out;

	/*
	 * Put the type string (always ASCII),
	 * including the null.
	 */
	tlen = strlen(ssp->ss_type_req) + 1;
	error = mb_put_mem(mbp, ssp->ss_type_req, tlen, MB_MSYSTEM);
	if (error)
		goto out;

	smb_rq_bend(rqp);

	/*
	 * Run the request.
	 *
	 * Using NOINTR_RECV because we don't want to risk
	 * missing a successful tree connect response,
	 * which would "leak" Tree IDs.
	 */
	rqp->sr_flags |= SMBR_NOINTR_RECV;
	error = smb_rq_simple(rqp);
	SMBSDEBUG("%d\n", error);
	if (error) {
		/*
		 * If we get the server name wrong, i.e. due to
		 * mis-configured name services, this will be
		 * NT_STATUS_DUPLICATE_NAME.  Log this error.
		 */
		SMBERROR("(%s) failed, status=0x%x",
		    unc_name, rqp->sr_error);
		goto out;
	}

	/*
	 * Parse the TCON response
	 */
	smb_rq_getreply(rqp, &mdp);
	md_get_uint8(mdp, &wc);
	if (wc != 3 && wc != 7) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint16le(mdp, NULL);		/* AndX cmd */
	md_get_uint16le(mdp, NULL);		/* AndX off */
	md_get_uint16le(mdp, &options);		/* option bits (DFS, search) */
	if (wc == 7) {
		md_get_uint32le(mdp, NULL);	/* MaximalShareAccessRights */
		md_get_uint32le(mdp, NULL);	/* GuestMaximalShareAcc... */
	}
	error = md_get_uint16le(mdp, &bcnt);	/* byte count */
	if (error)
		goto out;

	/*
	 * Get the returned share type string,
	 * i.e. "IPC" or whatever.   Don't care
	 * if we get an error reading the type.
	 */
	tlen = sizeof (ssp->ss_type_ret);
	bzero(ssp->ss_type_ret, tlen--);
	if (tlen > bcnt)
		tlen = bcnt;
	md_get_mem(mdp, ssp->ss_type_ret, tlen, MB_MSYSTEM);

	/* Success! */
	SMB_SS_LOCK(ssp);
	ssp->ss_tid = rqp->sr_rptid;
	ssp->ss_vcgenid = vcp->vc_genid;
	ssp->ss_options = options;
	ssp->ss_flags |= SMBS_CONNECTED;
	SMB_SS_UNLOCK(ssp);

out:
	if (unc_name)
		kmem_free(unc_name, unc_len);
	smb_rq_done(rqp);
	return (error);
}

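/*
 * Send a TREE_DISCONNECT for this share (if we think we hold a valid
 * TID) and mark the TID unknown.  Errors are not fatal here; the main
 * concern is not leaking active tree IDs on the server.
 */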
int
smb_smb_treedisconnect(struct smb_share *ssp, struct smb_cred *scred)
{
	struct smb_vc *vcp;
	struct smb_rq *rqp;
	int error;

	if (ssp->ss_tid == SMB_TID_UNKNOWN)
		return (0);

	/*
	 * Build this as a "VC-level" request, so it will
	 * avoid testing the _GONE flag on the share,
	 * which has already been set at this point.
	 * Add the share pointer "by hand" below, so
	 * smb_iod_sendrq will plug in the TID.
	 */
	vcp = SSTOVC(ssp);
	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_DISCONNECT, scred, &rqp);
	if (error)
		return (error);
	rqp->sr_share = ssp; /* by hand */

	smb_rq_wstart(rqp);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	/*
	 * Run this with a relatively short timeout. (5 sec.)
	 * We don't really care about the result here, but we
	 * do need to make sure we send this out, or we could
	 * "leak" active tree IDs on interrupt or timeout.
	 * The NOINTR_SEND flag makes this request immune to
	 * interrupt or timeout until the send is done.
	 * Also, don't reconnect for this, of course!
	 */
	rqp->sr_flags |= (SMBR_NOINTR_SEND | SMBR_NORECONNECT);
	error = smb_rq_simple_timed(rqp, 5);
	SMBSDEBUG("%d\n", error);
	smb_rq_done(rqp);
	ssp->ss_tid = SMB_TID_UNKNOWN;
	return (error);
}

/*
 * Common function for read/write with UIO.
 * Called by netsmb smb_usr_rw,
 *  smbfs_readvnode, smbfs_writevnode
 */
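/*
 * Illustrative caller sketch (hypothetical; "myfid", "mybuf" etc. are
 * not names from this file), showing how a kernel caller might set up
 * the uio for a read through this path:
 *
 *	iovec_t iov;
 *	uio_t uio;
 *
 *	bzero(&uio, sizeof (uio));
 *	iov.iov_base = (caddr_t)mybuf;
 *	iov.iov_len = mylen;
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_loffset = myoff;
 *	uio.uio_resid = mylen;
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	error = smb_rwuio(ssp, myfid, UIO_READ, &uio, scred, 0);
 *
 * Passing timo == 0 lets the first transfer pick up the default
 * read/write timeouts defined above.
 */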
int
smb_rwuio(struct smb_share *ssp, uint16_t fid, uio_rw_t rw,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_vc *vcp = SSTOVC(ssp);
	ssize_t  save_resid;
	uint32_t len, rlen, maxlen;
	int error = 0;
	int (*iofun)(struct smb_share *, uint16_t, uint32_t *,
	    uio_t *, smb_cred_t *, int);

	/*
	 * Determine which function to use,
	 * and the transfer size per call.
	 */
	if (SMB_DIALECT(vcp) >= SMB_DIALECT_NTLM0_12) {
		/*
		 * Using NT LM 0.12, so readx, writex.
		 * Make sure we can represent the offset.
		 */
		if ((vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES) == 0 &&
		    (uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
			return (EFBIG);

		if (rw == UIO_READ) {
			iofun = smb_smb_readx;
			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_READX)
				maxlen = SMB_MAX_LARGE_RW_SIZE;
			else
				maxlen = vcp->vc_rxmax;
		} else { /* UIO_WRITE */
			iofun = smb_smb_writex;
			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_WRITEX)
				maxlen = SMB_MAX_LARGE_RW_SIZE;
			else
				maxlen = vcp->vc_wxmax;
		}
	} else {
		/*
		 * Using the old SMB_READ and SMB_WRITE so
		 * we're limited to 32-bit offsets, etc.
		 * XXX: Someday, punt the old dialects.
		 */
		if ((uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
			return (EFBIG);

		if (rw == UIO_READ) {
			iofun = smb_smb_read;
			maxlen = vcp->vc_rxmax;
		} else { /* UIO_WRITE */
			iofun = smb_smb_write;
			maxlen = vcp->vc_wxmax;
		}
	}

	save_resid = uiop->uio_resid;
	while (uiop->uio_resid > 0) {
		/* Lint: uio_resid may be 64-bits */
		rlen = len = (uint32_t)min(maxlen, uiop->uio_resid);
		error = (*iofun)(ssp, fid, &rlen, uiop, scred, timo);

		/*
		 * Note: the iofun has already called uio_update(),
		 * so we don't do that here as one might expect.
		 *
		 * Quit the loop either on error, or if we
		 * transferred less than requested.
		 */
		if (error || (rlen < len))
			break;

		timo = 0; /* only first I/O should wait */
	}
	if (error && (save_resid != uiop->uio_resid)) {
		/*
		 * Stopped on an error after having
		 * successfully transferred data.
		 * Suppress this error.
		 */
		SMBSDEBUG("error %d suppressed\n", error);
		error = 0;
	}

	return (error);
}

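/*
 * READ_ANDX: read up to *lenp bytes at the current uio offset, using
 * the "large read" form with a 64-bit offset and the length split
 * into 16-bit high/low parts.  On success, *lenp is updated to the
 * number of bytes actually read.
 */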
static int
smb_smb_readx(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo, off, doff;
	uint8_t wc;

	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint16le(mbp, lenlo);	/* MaxCount */
	mb_put_uint16le(mbp, 1);	/* MinCount */
					/* (only indicates blocking) */
	mb_put_uint32le(mbp, lenhi);	/* MaxCountHigh */
	mb_put_uint16le(mbp, lenlo);	/* Remaining ("obsolete") */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	if (timo == 0)
		timo = smb_timo_read;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;

	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 12) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint8(mdp, NULL);	/* AndX cmd */
	md_get_uint8(mdp, NULL);	/* AndX reserved */
	md_get_uint16le(mdp, NULL);	/* AndX offset */
	md_get_uint16le(mdp, NULL);	/* remaining */
	md_get_uint16le(mdp, NULL);	/* data compaction mode */
	md_get_uint16le(mdp, NULL);	/* reserved */
	md_get_uint16le(mdp, &lenlo);	/* data len ret. */
	md_get_uint16le(mdp, &doff);	/* data offset */
	md_get_uint16le(mdp, &lenhi);
	rlen = (lenhi << 16) | lenlo;
	md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);
	error = md_get_uint16le(mdp, NULL);	/* ByteCount */
	if (error)
		goto out;
	/*
	 * Does the data offset indicate padding?
	 * The current offset is a constant, found by counting the
	 * md_get_ calls above: 32-byte SMB header, word-count byte
	 * plus 2-byte byte count (3), and 12 parameter words (24),
	 * so 32 + 3 + 24 = 59.
	 */
	off = SMB_HDRLEN + 3 + (12 * 2); /* =59 */
	if (doff > off)	/* pad byte(s)? */
		md_get_mem(mdp, NULL, doff - off, MB_MSYSTEM);
	if (rlen == 0) {
		*lenp = rlen;
		goto out;
	}
	/* paranoid */
	if (rlen > *lenp) {
		SMBSDEBUG("bad server! rlen %d, len %d\n",
		    rlen, *lenp);
		rlen = *lenp;
	}
	error = md_get_uio(mdp, uiop, rlen);
	if (error)
		goto out;

	/* Success */
	*lenp = rlen;

out:
	smb_rq_done(rqp);
	return (error);
}

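/*
 * WRITE_ANDX: write *lenp bytes from the uio at the current offset,
 * again with a 64-bit offset and the length split into 16-bit
 * high/low parts.  On success, *lenp is updated to the count the
 * server reports as written.
 */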
static int
smb_smb_writex(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo;
	uint8_t wc;

	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint32le(mbp, 0);	/* MBZ (timeout) */
	mb_put_uint16le(mbp, 0);	/* !write-thru */
	mb_put_uint16le(mbp, 0);	/* remaining */
	mb_put_uint16le(mbp, lenhi);	/* data len (high part) */
	mb_put_uint16le(mbp, lenlo);	/* data len (low part) */
	mb_put_uint16le(mbp, 64);	/* data offset from header start */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);

	mb_put_uint8(mbp, 0);	/* pad byte */
	error = mb_put_uio(mbp, uiop, *lenp);
	if (error)
		goto out;
	smb_rq_bend(rqp);
	if (timo == 0)
		timo = smb_timo_write;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 6) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint8(mdp, NULL);	/* andx cmd */
	md_get_uint8(mdp, NULL);	/* reserved */
	md_get_uint16le(mdp, NULL);	/* andx offset */
	md_get_uint16le(mdp, &lenlo);	/* data len ret. */
	md_get_uint16le(mdp, NULL);	/* remaining */
	error = md_get_uint16le(mdp, &lenhi);
	if (error)
		goto out;

	/* Success */
	rlen = (lenhi << 16) | lenlo;
	*lenp = rlen;

out:
	smb_rq_done(rqp);
	return (error);
}

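/*
 * Old-style SMB_COM_READ, used with pre-NT dialects.  Offsets are
 * limited to 32 bits and lengths to 16 bits, which the caller
 * guarantees (see the ASSERTs below).
 */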
static int
smb_smb_read(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t bc, cnt, dlen, rcnt, todo;
	uint8_t wc;

	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned reads. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);
	mb_put_uint32le(mbp, off32);
	mb_put_uint16le(mbp, todo);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	if (timo == 0)
		timo = smb_timo_read;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 5) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint16le(mdp, &rcnt);		/* ret. count */
	md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);  /* res. */
	md_get_uint16le(mdp, &bc);		/* byte count */
	md_get_uint8(mdp, NULL);		/* buffer format */
	error = md_get_uint16le(mdp, &dlen);	/* data len */
	if (error)
		goto out;
	if (dlen < rcnt) {
		SMBSDEBUG("oops: dlen=%d rcnt=%d\n",
		    (int)dlen, (int)rcnt);
		rcnt = dlen;
	}
	if (rcnt == 0) {
		*lenp = 0;
		goto out;
	}
	/* paranoid */
	if (rcnt > cnt) {
		SMBSDEBUG("bad server! rcnt %d, cnt %d\n",
		    (int)rcnt, (int)cnt);
		rcnt = cnt;
	}
	error = md_get_uio(mdp, uiop, (int)rcnt);
	if (error)
		goto out;

	/* success */
	*lenp = (int)rcnt;

out:
	smb_rq_done(rqp);
	return (error);
}

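/*
 * Old-style SMB_COM_WRITE, the write-side counterpart of smb_smb_read
 * above, with the same 32-bit offset and 16-bit length limits.
 */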
static int
smb_smb_write(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t cnt, rcnt, todo;
	uint8_t wc;

	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned writes. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);
	mb_put_uint32le(mbp, off32);
	mb_put_uint16le(mbp, todo);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint8(mbp, SMB_DT_DATA);
	mb_put_uint16le(mbp, cnt);

	error = mb_put_uio(mbp, uiop, *lenp);
	if (error)
		goto out;
	smb_rq_bend(rqp);
	if (timo == 0)
		timo = smb_timo_write;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 1) {
		error = EBADRPC;
		goto out;
	}
	error = md_get_uint16le(mdp, &rcnt);
	if (error)
		goto out;
	*lenp = rcnt;

out:
	smb_rq_done(rqp);
	return (error);
}


static u_int32_t	smbechoes = 0;

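/*
 * Send an SMB_COM_ECHO as a connection "ping".  The IOD uses this, so
 * the request must not wait for reconnection (hence SMBR_NORECONNECT
 * below).
 */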
int
smb_smb_echo(struct smb_vc *vcp, struct smb_cred *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	int error;

	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_ECHO, scred, &rqp);
	if (error)
		return (error);
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, 1); /* echo count */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint32le(mbp, atomic_inc_32_nv(&smbechoes));
	smb_rq_bend(rqp);
	/*
	 * Note: the IOD calls this, so
	 * this request must not wait for
	 * connection state changes, etc.
	 */
	rqp->sr_flags |= SMBR_NORECONNECT;
	error = smb_rq_simple_timed(rqp, timo);
	SMBSDEBUG("%d\n", error);
	smb_rq_done(rqp);
	return (error);
}