xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_smb.c (revision b1d7ec75953cd517f5b7c3d9cb427ff8ec5d7d07)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_smb.c,v 1.35.100.2 2005/06/02 00:55:39 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
/*
 * Various SMB requests.  Most of these routines merely pack data into mbufs.
 */
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kmem.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/socket.h>
49 #include <sys/uio.h>
50 #include <sys/random.h>
51 #include <sys/note.h>
52 #include <sys/cmn_err.h>
53 
54 #include <netsmb/smb_osdep.h>
55 
56 #include <netsmb/smb.h>
57 #include <netsmb/smb_conn.h>
58 #include <netsmb/smb_rq.h>
59 #include <netsmb/smb_subr.h>
60 #include <netsmb/smb_tran.h>
61 
62 /*
63  * Largest size to use with LARGE_READ/LARGE_WRITE.
64  * Specs say up to 64k data bytes, but Windows traffic
65  * uses 60k... no doubt for some good reason.
66  * (Probably to keep 4k block alignment.)
67  * XXX: Move to smb.h maybe?
68  */
69 #define	SMB_MAX_LARGE_RW_SIZE (60*1024)
70 
71 /*
72  * Default timeout values, all in seconds.
73  * Make these tunable (only via mdb for now).
74  */
75 int smb_timo_notice = 15;
76 int smb_timo_default = 30;	/* was SMB_DEFRQTIMO */
77 int smb_timo_open = 45;
78 int smb_timo_read = 45;
79 int smb_timo_write = 60;	/* was SMBWRTTIMO */
80 int smb_timo_append = 90;
81 
82 static int smb_smb_read(struct smb_share *ssp, uint16_t fid,
83 	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
84 static int smb_smb_write(struct smb_share *ssp, uint16_t fid,
85 	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
86 
87 static int smb_smb_readx(struct smb_share *ssp, uint16_t fid,
88 	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
89 static int smb_smb_writex(struct smb_share *ssp, uint16_t fid,
90 	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
91 
/*
 * Send a TREE_CONNECT_ANDX request for this share (ssp) on its
 * VC, and parse the response.  On success, stores the new tree
 * ID, VC generation, and option bits in the share and sets
 * SMBS_CONNECTED.
 *
 * Returns zero or an errno value (EBADRPC on a malformed reply).
 */
int
smb_smb_treeconnect(struct smb_share *ssp, struct smb_cred *scred)
{
	struct smb_vc *vcp;
	struct smb_rq *rqp = NULL;
	struct mbchain *mbp;
	struct mdchain *mdp;
	char *pbuf, *unc_name = NULL;
	int error, tlen, plen, unc_len;
	uint16_t bcnt, options;
	uint8_t wc;

	vcp = SSTOVC(ssp);

	/*
	 * Make this a "VC-level" request, so it will have
	 * rqp->sr_share == NULL, and smb_iod_sendrq()
	 * will send it with TID = SMB_TID_UNKNOWN
	 *
	 * This also serves to bypass the wait for
	 * share state changes, which this call is
	 * trying to carry out.
	 */
	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_CONNECT_ANDX,
	    scred, &rqp);
	if (error)
		return (error);

	/*
	 * Build the UNC name, i.e. "//server/share"
	 * but with backslashes of course.
	 * size math: three slashes, one null.
	 */
	unc_len = 4 + strlen(vcp->vc_srvname) + strlen(ssp->ss_name);
	unc_name = kmem_alloc(unc_len, KM_SLEEP);
	(void) snprintf(unc_name, unc_len, "\\\\%s\\%s",
	    vcp->vc_srvname, ssp->ss_name);

	/*
	 * The password is now pre-computed in the
	 * user-space helper process.
	 */
	plen = ssp->ss_pwlen;
	pbuf = ssp->ss_pass;

	/*
	 * Build the request.  Word area: AndX chain fields,
	 * flags, and the password length.
	 */
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);		/* AndXCommand: none */
	mb_put_uint8(mbp, 0);			/* AndXReserved */
	mb_put_uint16le(mbp, 0);		/* AndXOffset */
	mb_put_uint16le(mbp, 0);		/* Flags */
	mb_put_uint16le(mbp, plen);		/* PasswordLength */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);

	/* Tree connect password, if any */
	error = mb_put_mem(mbp, pbuf, plen, MB_MSYSTEM);
	if (error)
		goto out;

	/* UNC resource name */
	error = smb_put_dstring(mbp, vcp, unc_name, SMB_CS_NONE);
	if (error)
		goto out;

	/*
	 * Put the type string (always ASCII),
	 * including the null.
	 */
	tlen = strlen(ssp->ss_type_req) + 1;
	error = mb_put_mem(mbp, ssp->ss_type_req, tlen, MB_MSYSTEM);
	if (error)
		goto out;

	smb_rq_bend(rqp);

	/*
	 * Run the request.
	 *
	 * Using NOINTR_RECV because we don't want to risk
	 * missing a successful tree connect response,
	 * which would "leak" Tree IDs.
	 */
	rqp->sr_flags |= SMBR_NOINTR_RECV;
	error = smb_rq_simple(rqp);
	SMBSDEBUG("%d\n", error);
	if (error)
		goto out;

	/*
	 * Parse the TCON response.  Expect word count 3:
	 * AndX command, AndX offset, option bits.
	 */
	smb_rq_getreply(rqp, &mdp);
	md_get_uint8(mdp, &wc);
	if (wc != 3) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint16le(mdp, NULL);		/* AndX cmd */
	md_get_uint16le(mdp, NULL);		/* AndX off */
	md_get_uint16le(mdp, &options);		/* option bits (DFS, search) */
	error = md_get_uint16le(mdp, &bcnt);	/* byte count */
	if (error)
		goto out;

	/*
	 * Get the returned share type string,
	 * i.e. "IPC" or whatever.   Don't care
	 * if we get an error reading the type.
	 * Note: the tlen-- after the bzero keeps
	 * the last byte zero as a null terminator.
	 */
	tlen = sizeof (ssp->ss_type_ret);
	bzero(ssp->ss_type_ret, tlen--);
	if (tlen > bcnt)
		tlen = bcnt;
	md_get_mem(mdp, ssp->ss_type_ret, tlen, MB_MSYSTEM);

	/* Success!  Record the new tree under the share lock. */
	SMB_SS_LOCK(ssp);
	ssp->ss_tid = rqp->sr_rptid;
	ssp->ss_vcgenid = vcp->vc_genid;
	ssp->ss_options = options;
	ssp->ss_flags |= SMBS_CONNECTED;
	SMB_SS_UNLOCK(ssp);

out:
	if (unc_name)
		kmem_free(unc_name, unc_len);
	smb_rq_done(rqp);
	return (error);
}
225 
226 int
227 smb_smb_treedisconnect(struct smb_share *ssp, struct smb_cred *scred)
228 {
229 	struct smb_vc *vcp;
230 	struct smb_rq *rqp;
231 	int error;
232 
233 	if (ssp->ss_tid == SMB_TID_UNKNOWN)
234 		return (0);
235 
236 	/*
237 	 * Build this as a "VC-level" request, so it will
238 	 * avoid testing the _GONE flag on the share,
239 	 * which has already been set at this point.
240 	 * Add the share pointer "by hand" below, so
241 	 * smb_iod_sendrq will plug in the TID.
242 	 */
243 	vcp = SSTOVC(ssp);
244 	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_DISCONNECT, scred, &rqp);
245 	if (error)
246 		return (error);
247 	rqp->sr_share = ssp; /* by hand */
248 
249 	smb_rq_wstart(rqp);
250 	smb_rq_wend(rqp);
251 	smb_rq_bstart(rqp);
252 	smb_rq_bend(rqp);
253 
254 	/*
255 	 * Run this with a relatively short timeout. (5 sec.)
256 	 * We don't really care about the result here, but we
257 	 * do need to make sure we send this out, or we could
258 	 * "leak" active tree IDs on interrupt or timeout.
259 	 * The NOINTR_SEND flag makes this request immune to
260 	 * interrupt or timeout until the send is done.
261 	 * Also, don't reconnect for this, of course!
262 	 */
263 	rqp->sr_flags |= (SMBR_NOINTR_SEND | SMBR_NORECONNECT);
264 	error = smb_rq_simple_timed(rqp, 5);
265 	SMBSDEBUG("%d\n", error);
266 	smb_rq_done(rqp);
267 	ssp->ss_tid = SMB_TID_UNKNOWN;
268 	return (error);
269 }
270 
271 /*
272  * Common function for read/write with UIO.
273  * Called by netsmb smb_usr_rw,
274  *  smbfs_readvnode, smbfs_writevnode
275  */
276 int
277 smb_rwuio(struct smb_share *ssp, uint16_t fid, uio_rw_t rw,
278 	uio_t *uiop, smb_cred_t *scred, int timo)
279 {
280 	struct smb_vc *vcp = SSTOVC(ssp);
281 	ssize_t  save_resid;
282 	uint32_t len, rlen, maxlen;
283 	int error = 0;
284 	int (*iofun)(struct smb_share *, uint16_t, uint32_t *,
285 	    uio_t *, smb_cred_t *, int);
286 
287 	/*
288 	 * Determine which function to use,
289 	 * and the transfer size per call.
290 	 */
291 	if (SMB_DIALECT(vcp) >= SMB_DIALECT_NTLM0_12) {
292 		/*
293 		 * Using NT LM 0.12, so readx, writex.
294 		 * Make sure we can represent the offset.
295 		 */
296 		if ((vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES) == 0 &&
297 		    (uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
298 			return (EFBIG);
299 
300 		if (rw == UIO_READ) {
301 			iofun = smb_smb_readx;
302 			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_READX)
303 				maxlen = SMB_MAX_LARGE_RW_SIZE;
304 			else
305 				maxlen = vcp->vc_rxmax;
306 		} else { /* UIO_WRITE */
307 			iofun = smb_smb_writex;
308 			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_WRITEX)
309 				maxlen = SMB_MAX_LARGE_RW_SIZE;
310 			else
311 				maxlen = vcp->vc_wxmax;
312 		}
313 	} else {
314 		/*
315 		 * Using the old SMB_READ and SMB_WRITE so
316 		 * we're limited to 32-bit offsets, etc.
317 		 * XXX: Someday, punt the old dialects.
318 		 */
319 		if ((uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
320 			return (EFBIG);
321 
322 		if (rw == UIO_READ) {
323 			iofun = smb_smb_read;
324 			maxlen = vcp->vc_rxmax;
325 		} else { /* UIO_WRITE */
326 			iofun = smb_smb_write;
327 			maxlen = vcp->vc_wxmax;
328 		}
329 	}
330 
331 	save_resid = uiop->uio_resid;
332 	while (uiop->uio_resid > 0) {
333 		/* Lint: uio_resid may be 64-bits */
334 		rlen = len = (uint32_t)min(maxlen, uiop->uio_resid);
335 		error = (*iofun)(ssp, fid, &rlen, uiop, scred, timo);
336 
337 		/*
338 		 * Note: the iofun called uio_update, so
339 		 * not doing that here as one might expect.
340 		 *
341 		 * Quit the loop either on error, or if we
342 		 * transferred less then requested.
343 		 */
344 		if (error || (rlen < len))
345 			break;
346 
347 		timo = 0; /* only first I/O should wait */
348 	}
349 	if (error && (save_resid != uiop->uio_resid)) {
350 		/*
351 		 * Stopped on an error after having
352 		 * successfully transferred data.
353 		 * Suppress this error.
354 		 */
355 		SMBSDEBUG("error %d suppressed\n", error);
356 		error = 0;
357 	}
358 
359 	return (error);
360 }
361 
/*
 * Send a READ_ANDX request (NT LM 0.12 dialect) for up to
 * *lenp bytes at the current uio offset, and copy the
 * returned data into the uio.  On success, *lenp is set to
 * the number of bytes actually read (may be zero at EOF).
 *
 * timo == 0 means use the default read timeout.
 */
static int
smb_smb_readx(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo, off, doff;
	uint8_t wc;

	/* Split the length and 64-bit offset into wire-format parts. */
	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint16le(mbp, lenlo);	/* MaxCount */
	mb_put_uint16le(mbp, 1);	/* MinCount */
					/* (only indicates blocking) */
	mb_put_uint32le(mbp, lenhi);	/* MaxCountHigh */
	mb_put_uint16le(mbp, lenlo);	/* Remaining ("obsolete") */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	if (timo == 0)
		timo = smb_timo_read;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;

	/* Parse the response; word count must be 12. */
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 12) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint8(mdp, NULL);	/* andx cmd */
	md_get_uint8(mdp, NULL);	/* reserved */
	md_get_uint16le(mdp, NULL);	/* andx offset */
	md_get_uint16le(mdp, NULL);	/* remaining */
	md_get_uint16le(mdp, NULL);	/* data compaction mode */
	md_get_uint16le(mdp, NULL);	/* reserved */
	md_get_uint16le(mdp, &lenlo);	/* data len ret. */
	md_get_uint16le(mdp, &doff);	/* data offset */
	md_get_uint16le(mdp, &lenhi);	/* data len ret. (high part) */
	rlen = (lenhi << 16) | lenlo;	/* recombine 32-bit length */
	md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);	/* skip 4 words */
	error = md_get_uint16le(mdp, NULL);	/* ByteCount */
	if (error)
		goto out;
	/*
	 * Does the data offset indicate padding?
	 * The current offset is a constant, found
	 * by counting the md_get_ calls above.
	 */
	off = SMB_HDRLEN + 3 + (12 * 2); /* =59 */
	if (doff > off)	/* pad byte(s)? */
		md_get_mem(mdp, NULL, doff - off, MB_MSYSTEM);
	if (rlen == 0) {
		*lenp = rlen;
		goto out;
	}
	/* paranoid: don't let a bad server overrun the request. */
	if (rlen > *lenp) {
		SMBSDEBUG("bad server! rlen %d, len %d\n",
		    rlen, *lenp);
		rlen = *lenp;
	}
	error = md_get_uio(mdp, uiop, rlen);
	if (error)
		goto out;

	/* Success */
	*lenp = rlen;

out:
	smb_rq_done(rqp);
	return (error);
}
456 
/*
 * Send a WRITE_ANDX request (NT LM 0.12 dialect), writing
 * *lenp bytes from the uio at the current uio offset.
 * On success, *lenp is set to the count of bytes the server
 * reports having written.
 *
 * timo == 0 means use the default write timeout.
 */
static int
smb_smb_writex(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo;
	uint8_t wc;

	/* Split the length and 64-bit offset into wire-format parts. */
	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint32le(mbp, 0);	/* MBZ (timeout) */
	mb_put_uint16le(mbp, 0);	/* !write-thru */
	mb_put_uint16le(mbp, 0);	/* remaining */
	mb_put_uint16le(mbp, lenhi);	/* data len (high part) */
	mb_put_uint16le(mbp, lenlo);	/* data len (low part) */
	mb_put_uint16le(mbp, 64);	/* data offset from header start */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);

	mb_put_uint8(mbp, 0);	/* pad byte */
	error = mb_put_uio(mbp, uiop, *lenp);
	if (error)
		goto out;
	smb_rq_bend(rqp);
	if (timo == 0)
		timo = smb_timo_write;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;

	/* Parse the response; word count must be 6. */
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 6) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint8(mdp, NULL);	/* andx cmd */
	md_get_uint8(mdp, NULL);	/* reserved */
	md_get_uint16le(mdp, NULL);	/* andx offset */
	md_get_uint16le(mdp, &lenlo);	/* data len ret. */
	md_get_uint16le(mdp, NULL);	/* remaining */
	error = md_get_uint16le(mdp, &lenhi);
	if (error)
		goto out;

	/* Success: recombine the 32-bit returned length. */
	rlen = (lenhi << 16) | lenlo;
	*lenp = rlen;

out:
	smb_rq_done(rqp);
	return (error);
}
529 
/*
 * Send an old-style SMB_COM_READ request (pre-NT dialects:
 * 32-bit offsets, 16-bit lengths) and copy the returned data
 * into the uio.  On success, *lenp is set to the number of
 * bytes actually read (may be zero at EOF).
 *
 * timo == 0 means use the default read timeout.
 */
static int
smb_smb_read(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t bc, cnt, dlen, rcnt, todo;
	uint8_t wc;

	/* Caller (smb_rwuio) has already enforced these limits. */
	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned reads. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);	/* count of bytes to read */
	mb_put_uint32le(mbp, off32);	/* read offset */
	mb_put_uint16le(mbp, todo);	/* estimate of remaining reads */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);

	if (timo == 0)
		timo = smb_timo_read;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;

	/* Parse the response; word count must be 5. */
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 5) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint16le(mdp, &rcnt);		/* ret. count */
	md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);  /* res. */
	md_get_uint16le(mdp, &bc);		/* byte count */
	md_get_uint8(mdp, NULL);		/* buffer format */
	error = md_get_uint16le(mdp, &dlen);	/* data len */
	if (error)
		goto out;
	/* Believe the smaller of returned count and data length. */
	if (dlen < rcnt) {
		SMBSDEBUG("oops: dlen=%d rcnt=%d\n",
		    (int)dlen, (int)rcnt);
		rcnt = dlen;
	}
	if (rcnt == 0) {
		*lenp = 0;
		goto out;
	}
	/* paranoid: don't let a bad server overrun the request. */
	if (rcnt > cnt) {
		SMBSDEBUG("bad server! rcnt %d, cnt %d\n",
		    (int)rcnt, (int)cnt);
		rcnt = cnt;
	}
	error = md_get_uio(mdp, uiop, (int)rcnt);
	if (error)
		goto out;

	/* success */
	*lenp = (int)rcnt;

out:
	smb_rq_done(rqp);
	return (error);
}
608 
/*
 * Send an old-style SMB_COM_WRITE request (pre-NT dialects:
 * 32-bit offsets, 16-bit lengths), writing *lenp bytes from
 * the uio at the current uio offset.  On success, *lenp is
 * set to the count of bytes the server reports written.
 *
 * timo == 0 means use the default write timeout.
 */
static int
smb_smb_write(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t cnt, rcnt, todo;
	uint8_t wc;

	/* Caller (smb_rwuio) has already enforced these limits. */
	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned writes. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);	/* count of bytes to write */
	mb_put_uint32le(mbp, off32);	/* write offset */
	mb_put_uint16le(mbp, todo);	/* estimate of remaining writes */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint8(mbp, SMB_DT_DATA);	/* buffer format */
	mb_put_uint16le(mbp, cnt);	/* data length */

	error = mb_put_uio(mbp, uiop, *lenp);
	if (error)
		goto out;
	smb_rq_bend(rqp);
	if (timo == 0)
		timo = smb_timo_write;
	error = smb_rq_simple_timed(rqp, timo);
	if (error)
		goto out;

	/* Parse the response; word count must be 1 (count written). */
	smb_rq_getreply(rqp, &mdp);
	error = md_get_uint8(mdp, &wc);
	if (error)
		goto out;
	if (wc != 1) {
		error = EBADRPC;
		goto out;
	}
	error = md_get_uint16le(mdp, &rcnt);
	if (error)
		goto out;
	*lenp = rcnt;

out:
	smb_rq_done(rqp);
	return (error);
}
668 
669 
670 static u_int32_t	smbechoes = 0;
671 
672 int
673 smb_smb_echo(struct smb_vc *vcp, struct smb_cred *scred, int timo)
674 {
675 	struct smb_rq *rqp;
676 	struct mbchain *mbp;
677 	int error;
678 
679 	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_ECHO, scred, &rqp);
680 	if (error)
681 		return (error);
682 	mbp = &rqp->sr_rq;
683 	smb_rq_wstart(rqp);
684 	mb_put_uint16le(mbp, 1); /* echo count */
685 	smb_rq_wend(rqp);
686 	smb_rq_bstart(rqp);
687 	mb_put_uint32le(mbp, atomic_inc_32_nv(&smbechoes));
688 	smb_rq_bend(rqp);
689 	/*
690 	 * Note: the IOD calls this, so
691 	 * this request must not wait for
692 	 * connection state changes, etc.
693 	 */
694 	rqp->sr_flags |= SMBR_NORECONNECT;
695 	error = smb_rq_simple_timed(rqp, timo);
696 	SMBSDEBUG("%d\n", error);
697 	smb_rq_done(rqp);
698 	return (error);
699 }
700