xref: /illumos-gate/usr/src/uts/common/fs/smbclnt/netsmb/smb_smb.c (revision 07a48826732249fcd3aa8dd53c8389595e9f1fbc)
1 /*
2  * Copyright (c) 2000-2001 Boris Popov
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *    This product includes software developed by Boris Popov.
16  * 4. Neither the name of the author nor the names of any co-contributors
17  *    may be used to endorse or promote products derived from this software
18  *    without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30  * SUCH DAMAGE.
31  *
32  * $Id: smb_smb.c,v 1.35.100.2 2005/06/02 00:55:39 lindak Exp $
33  */
34 
35 /*
36  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
37  * Use is subject to license terms.
38  */
39 
40 /*
41  * Various SMB requests.  Most of these routines merely pack
42  * data into mbufs.
43  */
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/kmem.h>
46 #include <sys/proc.h>
47 #include <sys/lock.h>
48 #include <sys/socket.h>
49 #include <sys/uio.h>
50 #include <sys/random.h>
51 #include <sys/note.h>
52 #include <sys/cmn_err.h>
53 
54 #ifdef APPLE
55 #include <sys/smb_apple.h>
56 #include <sys/utfconv.h>
57 #else
58 #include <netsmb/smb_osdep.h>
59 #endif
60 
61 #include <netsmb/smb.h>
62 #include <netsmb/smb_conn.h>
63 #include <netsmb/smb_rq.h>
64 #include <netsmb/smb_subr.h>
65 #include <netsmb/smb_tran.h>
66 
67 /*
68  * Largest size to use with LARGE_READ/LARGE_WRITE.
69  * Specs say up to 64k data bytes, but Windows traffic
70  * uses 60k... no doubt for some good reason.
71  * (Probably to keep 4k block alignment.)
72  * XXX: Move to smb.h maybe?
73  */
#define	SMB_MAX_LARGE_RW_SIZE (60*1024)	/* 60 KiB; see note above */
75 
76 /*
77  * Default timeout values, all in seconds.
78  * Make these tunable (only via mdb for now).
79  */
int smb_timo_notice = 15;	/* "server not responding" notice */
int smb_timo_default = 30;	/* was SMB_DEFRQTIMO */
int smb_timo_open = 45;
int smb_timo_read = 45;
int smb_timo_write = 60;	/* was SMBWRTTIMO */
int smb_timo_append = 90;

/*
 * Debug/test feature to disable NTLMv2.
 * Set this to zero to skip NTLMv2
 */
int nsmb_enable_ntlmv2 = 1;
92 
/*
 * Old-dialect (core SMB_READ/SMB_WRITE) transfer functions,
 * used by smb_rwuio() when the negotiated dialect pre-dates
 * NT LM 0.12.  Limited to 32-bit offsets and 16-bit lengths.
 */
static int smb_smb_read(struct smb_share *ssp, u_int16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
static int smb_smb_write(struct smb_share *ssp, u_int16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);

/*
 * NT-dialect (READ_ANDX/WRITE_ANDX) transfer functions,
 * capable of 64-bit offsets and large transfers.
 */
static int smb_smb_readx(struct smb_share *ssp, u_int16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
static int smb_smb_writex(struct smb_share *ssp, u_int16_t fid,
	uint32_t *lenp, uio_t *uiop, smb_cred_t *scred, int timo);
102 
/*
 * Protocol dialect id/name pair.
 * NOTE(review): dialect negotiation moved to the user-space
 * helper; nothing in this file appears to use this type any
 * more - confirm remaining users before removing it.
 */
struct smb_dialect {
	int		d_id;
	const char	*d_name;
};
107 
108 
/*
 * Number of seconds between the Unix epoch (1970) and the
 * NT/SMB epoch (1601), used when converting NT times.
 */
const u_int64_t DIFF1970TO1601 = 11644473600ULL;
113 
void
smb_time_local2server(struct timespec *tsp, int tzoff, long *seconds)
{
	/*
	 * Convert local (client) time to the server's notion of
	 * time by removing the server's time-zone offset, which
	 * is expressed in minutes.
	 *
	 * XXX - the offset learned at connect time may be stale
	 * if the server has since crossed a daylight-saving
	 * (summer time) boundary.
	 */
	*seconds = tsp->tv_sec - (tzoff * 60);
}
126 
127 void
128 smb_time_server2local(ulong_t seconds, int tzoff, struct timespec *tsp)
129 {
130 	/*
131 	 * XXX - what if we connected to the server when it was in
132 	 * daylight savings/summer time and we've subsequently switched
133 	 * to standard time, or vice versa, so that the time zone
134 	 * offset we got from the server is now wrong?
135 	 */
136 	tsp->tv_sec = seconds + tzoff * 60;
137 	    /* + tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0); */
138 	tsp->tv_nsec = 0;
139 }
140 
141 /*
142  * Time from server comes as UTC, so no need to use tz
143  */
144 /*ARGSUSED*/
145 void
146 smb_time_NT2local(u_int64_t nsec, int tzoff, struct timespec *tsp)
147 {
148 	smb_time_server2local(nsec / 10000000 - DIFF1970TO1601, 0, tsp);
149 }
150 
151 /*ARGSUSED*/
152 void
153 smb_time_local2NT(struct timespec *tsp, int tzoff, u_int64_t *nsec)
154 {
155 	long seconds;
156 
157 	smb_time_local2server(tsp, 0, &seconds);
158 	*nsec = (((u_int64_t)(seconds) & ~1) + DIFF1970TO1601) *
159 	    (u_int64_t)10000000;
160 }
161 
162 #if defined(NOICONVSUPPORT) || defined(lint)
163 extern int iconv_open(const char *to, const char *from, void **handle);
164 extern int iconv_close(void *handle);
165 #endif
166 
167 /*
168  * Moved to user space helper:
169  *   smb_smb_negotiate()
170  *   smb_smb_ssnsetup()
171  *   smb_smb_ssnclose()
172  *   smb_share_typename()
173  */
174 
175 
/*
 * SMB_COM_TREE_CONNECT_ANDX: connect this share on its server,
 * i.e. "\\server\share".  On success, records the returned tree
 * ID and share option bits in *ssp and marks the share connected.
 * Returns zero or an errno value.
 */
int
smb_smb_treeconnect(struct smb_share *ssp, struct smb_cred *scred)
{
	struct smb_vc *vcp;
	struct smb_rq *rqp = NULL;
	struct mbchain *mbp;
	struct mdchain *mdp;
	char *pbuf, *unc_name = NULL;
	int error, tlen, plen, unc_len;
	uint16_t bcnt, options;
	uint8_t wc;

	vcp = SSTOVC(ssp);

	/*
	 * Make this a "VC-level" request, so it will have
	 * rqp->sr_share == NULL, and smb_iod_sendrq()
	 * will send it with TID = SMB_TID_UNKNOWN
	 *
	 * This also serves to bypass the wait for
	 * share state changes, which this call is
	 * trying to carry out.
	 */
	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_CONNECT_ANDX,
	    scred, &rqp);
	if (error)
		return (error);

	/*
	 * Build the UNC name, i.e. "//server/share"
	 * but with backslashes of course.
	 * size math: three slashes, one null.
	 */
	unc_len = 4 + strlen(vcp->vc_srvname) + strlen(ssp->ss_name);
	unc_name = kmem_alloc(unc_len, KM_SLEEP);
	snprintf(unc_name, unc_len, "\\\\%s\\%s",
	    vcp->vc_srvname, ssp->ss_name);

	/*
	 * The password is now pre-computed in the
	 * user-space helper process.
	 */
	plen = ssp->ss_pwlen;
	pbuf = ssp->ss_pass;

	/*
	 * Build the request.  Word section is: AndXCommand,
	 * AndXReserved, AndXOffset, Flags, PasswordLength.
	 */
	mbp = &rqp->sr_rq;
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);		/* no secondary command */
	mb_put_uint8(mbp, 0);			/* AndX reserved (MBZ) */
	mb_put_uint16le(mbp, 0);		/* AndX offset */
	mb_put_uint16le(mbp, 0);		/* Flags */
	mb_put_uint16le(mbp, plen);
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);

	/* Tree connect password, if any */
	error = mb_put_mem(mbp, pbuf, plen, MB_MSYSTEM);
	if (error)
		goto out;

	/* UNC resource name */
	error = smb_put_dstring(mbp, vcp, unc_name, SMB_CS_NONE);
	if (error)
		goto out;

	/*
	 * Put the type string (always ASCII),
	 * including the null.
	 */
	tlen = strlen(ssp->ss_type_req) + 1;
	error = mb_put_mem(mbp, ssp->ss_type_req, tlen, MB_MSYSTEM);
	if (error)
		goto out;

	smb_rq_bend(rqp);

	/*
	 * Run the request.
	 *
	 * Using NOINTR_RECV because we don't want to risk
	 * missing a successful tree connect response,
	 * which would "leak" Tree IDs.
	 */
	rqp->sr_flags |= SMBR_NOINTR_RECV;
	error = smb_rq_simple(rqp);
	SMBSDEBUG("%d\n", error);
	if (error)
		goto out;

	/*
	 * Parse the TCON response
	 */
	smb_rq_getreply(rqp, &mdp);
	md_get_uint8(mdp, &wc);
	if (wc != 3) {
		error = EBADRPC;
		goto out;
	}
	md_get_uint16le(mdp, NULL);	/* AndX cmd */
	md_get_uint16le(mdp, NULL);	/* AndX off */
	md_get_uint16le(mdp, &options);	/* option bits (DFS, search) */
	md_get_uint16le(mdp, &bcnt);	/* byte count */

	/*
	 * Get the returned share type string,
	 * i.e. "IPC" or whatever.  Copy at most the smaller of
	 * (buffer size - 1) and the reply's byte count; the bzero
	 * guarantees the result stays NUL-terminated.
	 */
	tlen = sizeof (ssp->ss_type_ret);
	bzero(ssp->ss_type_ret, tlen--);
	if (tlen > bcnt)
		tlen = bcnt;
	md_get_mem(mdp, ssp->ss_type_ret, tlen, MB_MSYSTEM);

	/* Success!  Record the new tree state under the share lock. */
	SMB_SS_LOCK(ssp);
	ssp->ss_tid = rqp->sr_rptid;
	ssp->ss_vcgenid = vcp->vc_genid;
	ssp->ss_options = options;
	ssp->ss_flags |= SMBS_CONNECTED;
	SMB_SS_UNLOCK(ssp);

out:
	if (unc_name)
		kmem_free(unc_name, unc_len);
	smb_rq_done(rqp);
	return (error);
}
306 
307 int
308 smb_smb_treedisconnect(struct smb_share *ssp, struct smb_cred *scred)
309 {
310 	struct smb_vc *vcp;
311 	struct smb_rq *rqp;
312 	int error;
313 
314 	if (ssp->ss_tid == SMB_TID_UNKNOWN)
315 		return (0);
316 
317 	/*
318 	 * Build this as a "VC-level" request, so it will
319 	 * avoid testing the _GONE flag on the share,
320 	 * which has already been set at this point.
321 	 * Add the share pointer "by hand" below, so
322 	 * smb_iod_sendrq will plug in the TID.
323 	 */
324 	vcp = SSTOVC(ssp);
325 	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_TREE_DISCONNECT, scred, &rqp);
326 	if (error)
327 		return (error);
328 	rqp->sr_share = ssp; /* by hand */
329 
330 	smb_rq_wstart(rqp);
331 	smb_rq_wend(rqp);
332 	smb_rq_bstart(rqp);
333 	smb_rq_bend(rqp);
334 
335 	/*
336 	 * Run this with a relatively short timeout. (5 sec.)
337 	 * We don't really care about the result here, but we
338 	 * do need to make sure we send this out, or we could
339 	 * "leak" active tree IDs on interrupt or timeout.
340 	 * The NOINTR_SEND flag makes this request immune to
341 	 * interrupt or timeout until the send is done.
342 	 * Also, don't reconnect for this, of course!
343 	 */
344 	rqp->sr_flags |= (SMBR_NOINTR_SEND | SMBR_NORECONNECT);
345 	error = smb_rq_simple_timed(rqp, 5);
346 	SMBSDEBUG("%d\n", error);
347 	smb_rq_done(rqp);
348 	ssp->ss_tid = SMB_TID_UNKNOWN;
349 	return (error);
350 }
351 
352 /*
353  * Common function for read/write with UIO.
354  * Called by netsmb smb_usr_rw,
355  *  smbfs_readvnode, smbfs_writevnode
356  */
357 int
358 smb_rwuio(struct smb_share *ssp, uint16_t fid, uio_rw_t rw,
359 	uio_t *uiop, smb_cred_t *scred, int timo)
360 {
361 	struct smb_vc *vcp = SSTOVC(ssp);
362 	ssize_t  save_resid;
363 	uint32_t len, rlen, maxlen;
364 	int error = 0;
365 	int (*iofun)(struct smb_share *, uint16_t, uint32_t *,
366 	    uio_t *, smb_cred_t *, int);
367 
368 	/*
369 	 * Determine which function to use,
370 	 * and the transfer size per call.
371 	 */
372 	if (SMB_DIALECT(vcp) >= SMB_DIALECT_NTLM0_12) {
373 		/*
374 		 * Using NT LM 0.12, so readx, writex.
375 		 * Make sure we can represent the offset.
376 		 */
377 		if ((vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_FILES) == 0 &&
378 		    (uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
379 			return (EFBIG);
380 
381 		if (rw == UIO_READ) {
382 			iofun = smb_smb_readx;
383 			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_READX)
384 				maxlen = SMB_MAX_LARGE_RW_SIZE;
385 			else
386 				maxlen = vcp->vc_rxmax;
387 		} else { /* UIO_WRITE */
388 			iofun = smb_smb_writex;
389 			if (vcp->vc_sopt.sv_caps & SMB_CAP_LARGE_WRITEX)
390 				maxlen = SMB_MAX_LARGE_RW_SIZE;
391 			else
392 				maxlen = vcp->vc_wxmax;
393 		}
394 	} else {
395 		/*
396 		 * Using the old SMB_READ and SMB_WRITE so
397 		 * we're limited to 32-bit offsets, etc.
398 		 * XXX: Someday, punt the old dialects.
399 		 */
400 		if ((uiop->uio_loffset + uiop->uio_resid) > UINT32_MAX)
401 			return (EFBIG);
402 
403 		if (rw == UIO_READ) {
404 			iofun = smb_smb_read;
405 			maxlen = vcp->vc_rxmax;
406 		} else { /* UIO_WRITE */
407 			iofun = smb_smb_write;
408 			maxlen = vcp->vc_wxmax;
409 		}
410 	}
411 
412 	save_resid = uiop->uio_resid;
413 	while (uiop->uio_resid > 0) {
414 		/* Lint: uio_resid may be 64-bits */
415 		rlen = len = (uint32_t)min(maxlen, uiop->uio_resid);
416 		error = (*iofun)(ssp, fid, &rlen, uiop, scred, timo);
417 
418 		/*
419 		 * Note: the iofun called uio_update, so
420 		 * not doing that here as one might expect.
421 		 *
422 		 * Quit the loop either on error, or if we
423 		 * transferred less then requested.
424 		 */
425 		if (error || (rlen < len))
426 			break;
427 
428 		timo = 0; /* only first I/O should wait */
429 	}
430 	if (error && (save_resid != uiop->uio_resid)) {
431 		/*
432 		 * Stopped on an error after having
433 		 * successfully transferred data.
434 		 * Suppress this error.
435 		 */
436 		SMBSDEBUG("error %d suppressed\n", error);
437 		error = 0;
438 	}
439 
440 	return (error);
441 }
442 
/*
 * Issue one SMB_COM_READ_ANDX request for up to *lenp bytes at
 * uiop's current (64-bit) offset.  On success, copies the data
 * into the uio (advancing it) and sets *lenp to the number of
 * bytes actually read.  Returns zero or an errno value.
 */
static int
smb_smb_readx(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo, off, doff;
	uint8_t wc;

	/* Split the 32-bit length and 64-bit offset into halves. */
	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint16le(mbp, lenlo);	/* MaxCount */
	mb_put_uint16le(mbp, 1);	/* MinCount */
					/* (only indicates blocking) */
	mb_put_uint32le(mbp, lenhi);	/* MaxCountHigh */
	mb_put_uint16le(mbp, lenlo);	/* Remaining ("obsolete") */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);
	do {
		if (timo == 0)
			timo = smb_timo_read;
		error = smb_rq_simple_timed(rqp, timo);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 12) {		/* READ_ANDX reply word count */
			error = EBADRPC;
			break;
		}
		md_get_uint8(mdp, NULL);	/* AndX cmd */
		md_get_uint8(mdp, NULL);	/* AndX reserved */
		md_get_uint16le(mdp, NULL);	/* AndX offset */
		md_get_uint16le(mdp, NULL);	/* remaining */
		md_get_uint16le(mdp, NULL);	/* data compaction mode */
		md_get_uint16le(mdp, NULL);	/* reserved */
		md_get_uint16le(mdp, &lenlo);	/* data len ret. */
		md_get_uint16le(mdp, &doff);	/* data offset */
		md_get_uint16le(mdp, &lenhi);	/* data len (high part) */
		rlen = (lenhi << 16) | lenlo;
		md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);	/* reserved */
		md_get_uint16le(mdp, NULL);	/* ByteCount */
		/*
		 * Does the data offset indicate padding?
		 * Add up the gets above, we have:
		 */
		off = SMB_HDRLEN + 3 + (12 * 2); /* =59 */
		if (doff > off)	/* pad byte(s)? */
			md_get_mem(mdp, NULL, doff - off, MB_MSYSTEM);
		if (rlen == 0) {
			/* EOF: nothing more to copy. */
			*lenp = rlen;
			break;
		}
		/* paranoid: never copy more than we asked for */
		if (rlen > *lenp) {
			SMBSDEBUG("bad server! rlen %d, len %d\n",
			    rlen, *lenp);
			rlen = *lenp;
		}
		error = md_get_uio(mdp, uiop, rlen);
		if (error)
			break;
		*lenp = rlen;
		/*LINTED*/
	} while (0);
	smb_rq_done(rqp);
	return (error);
}
529 
/*
 * Issue one SMB_COM_WRITE_ANDX request for *lenp bytes taken from
 * the uio at its current (64-bit) offset.  On success, sets *lenp
 * to the number of bytes the server accepted.  Returns zero or an
 * errno value.
 */
static int
smb_smb_writex(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t offlo, offhi, rlen;
	uint16_t lenhi, lenlo;
	uint8_t wc;

	/* Split the 32-bit length and 64-bit offset into halves. */
	lenhi = (uint16_t)(*lenp >> 16);
	lenlo = (uint16_t)*lenp;
	offhi = (uint32_t)(uiop->uio_loffset >> 32);
	offlo = (uint32_t)uiop->uio_loffset;

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE_ANDX, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint8(mbp, 0xff);	/* no secondary command */
	mb_put_uint8(mbp, 0);		/* MBZ */
	mb_put_uint16le(mbp, 0);	/* offset to secondary */
	mb_put_uint16le(mbp, fid);
	mb_put_uint32le(mbp, offlo);	/* offset (low part) */
	mb_put_uint32le(mbp, 0);	/* MBZ (timeout) */
	mb_put_uint16le(mbp, 0);	/* !write-thru */
	mb_put_uint16le(mbp, 0);	/* remaining (unused) */
	mb_put_uint16le(mbp, lenhi);	/* DataLengthHigh */
	mb_put_uint16le(mbp, lenlo);	/* DataLength */
	mb_put_uint16le(mbp, 64);	/* data offset from header start */
	mb_put_uint32le(mbp, offhi);	/* offset (high part) */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	do {
		/* Pad byte brings the data to offset 64 (above). */
		mb_put_uint8(mbp, 0);	/* pad byte */
		error = mb_put_uio(mbp, uiop, *lenp);
		if (error)
			break;
		smb_rq_bend(rqp);
		if (timo == 0)
			timo = smb_timo_write;
		error = smb_rq_simple_timed(rqp, timo);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 6) {		/* WRITE_ANDX reply word count */
			error = EBADRPC;
			break;
		}
		md_get_uint8(mdp, NULL);	/* andx cmd */
		md_get_uint8(mdp, NULL);	/* reserved */
		md_get_uint16le(mdp, NULL);	/* andx offset */
		md_get_uint16le(mdp, &lenlo);	/* data len ret. */
		md_get_uint16le(mdp, NULL);	/* remaining */
		md_get_uint16le(mdp, &lenhi);	/* data len (high part) */
		rlen = (lenhi << 16) | lenlo;
		*lenp = rlen;
		/*LINTED*/
	} while (0);

	smb_rq_done(rqp);
	return (error);
}
597 
/*
 * Issue one old-dialect SMB_COM_READ request for up to *lenp bytes
 * at uiop's current offset (which must fit in 32 bits; *lenp must
 * fit in 16 bits).  On success, copies the data into the uio and
 * sets *lenp to the number of bytes read.  Returns zero or errno.
 */
static int
smb_smb_read(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t bc, cnt, dlen, rcnt, todo;
	uint8_t wc;

	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned reads. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_READ, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);	/* count of bytes to read */
	mb_put_uint32le(mbp, off32);	/* read offset */
	mb_put_uint16le(mbp, todo);	/* "remaining" hint */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	smb_rq_bend(rqp);
	do {
		if (timo == 0)
			timo = smb_timo_read;
		error = smb_rq_simple_timed(rqp, timo);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 5) {		/* READ reply word count */
			error = EBADRPC;
			break;
		}
		md_get_uint16le(mdp, &rcnt);	/* ret. count */
		md_get_mem(mdp, NULL, 4 * 2, MB_MSYSTEM);  /* res. */
		md_get_uint16le(mdp, &bc);	/* byte count */
		md_get_uint8(mdp, NULL);	/* buffer format */
		md_get_uint16le(mdp, &dlen);	/* data len */
		/* Trust the in-band data length over the count. */
		if (dlen < rcnt) {
			SMBSDEBUG("oops: dlen=%d rcnt=%d\n",
			    (int)dlen, (int)rcnt);
			rcnt = dlen;
		}
		if (rcnt == 0) {
			/* EOF: nothing more to copy. */
			*lenp = 0;
			break;
		}
		/* paranoid: never copy more than we asked for */
		if (rcnt > cnt) {
			SMBSDEBUG("bad server! rcnt %d, cnt %d\n",
			    (int)rcnt, (int)cnt);
			rcnt = cnt;
		}
		error = md_get_uio(mdp, uiop, (int)rcnt);
		if (error)
			break;
		*lenp = (int)rcnt;
		/*LINTED*/
	} while (0);
	smb_rq_done(rqp);
	return (error);
}
670 
/*
 * Issue one old-dialect SMB_COM_WRITE request for *lenp bytes from
 * the uio at its current offset (which must fit in 32 bits; *lenp
 * must fit in 16 bits).  On success, sets *lenp to the number of
 * bytes the server accepted.  Returns zero or an errno value.
 */
static int
smb_smb_write(struct smb_share *ssp, uint16_t fid, uint32_t *lenp,
	uio_t *uiop, smb_cred_t *scred, int timo)
{
	struct smb_rq *rqp;
	struct mbchain *mbp;
	struct mdchain *mdp;
	int error;
	uint32_t off32;
	uint16_t cnt, rcnt, todo;
	uint8_t wc;

	ASSERT(uiop->uio_loffset <= UINT32_MAX);
	off32 = (uint32_t)uiop->uio_loffset;
	ASSERT(*lenp <= UINT16_MAX);
	cnt = (uint16_t)*lenp;
	/* This next is an "estimate" of planned writes. */
	todo = (uint16_t)min(uiop->uio_resid, UINT16_MAX);

	error = smb_rq_alloc(SSTOCP(ssp), SMB_COM_WRITE, scred, &rqp);
	if (error)
		return (error);
	smb_rq_getrequest(rqp, &mbp);
	smb_rq_wstart(rqp);
	mb_put_uint16le(mbp, fid);
	mb_put_uint16le(mbp, cnt);	/* count of bytes to write */
	mb_put_uint32le(mbp, off32);	/* write offset */
	mb_put_uint16le(mbp, todo);	/* "remaining" hint */
	smb_rq_wend(rqp);
	smb_rq_bstart(rqp);
	mb_put_uint8(mbp, SMB_DT_DATA);	/* buffer format */
	mb_put_uint16le(mbp, cnt);	/* data length */
	do {
		error = mb_put_uio(mbp, uiop, *lenp);
		if (error)
			break;
		smb_rq_bend(rqp);
		if (timo == 0)
			timo = smb_timo_write;
		error = smb_rq_simple_timed(rqp, timo);
		if (error)
			break;
		smb_rq_getreply(rqp, &mdp);
		md_get_uint8(mdp, &wc);
		if (wc != 1) {		/* WRITE reply word count */
			error = EBADRPC;
			break;
		}
		md_get_uint16le(mdp, &rcnt);	/* count written */
		*lenp = rcnt;
		/*LINTED*/
	} while (0);
	smb_rq_done(rqp);
	return (error);
}
726 
727 
/* Monotonically increasing payload for SMB echo requests. */
static u_int32_t	smbechoes = 0;
729 
730 int
731 smb_smb_echo(struct smb_vc *vcp, struct smb_cred *scred, int timo)
732 {
733 	struct smb_rq *rqp;
734 	struct mbchain *mbp;
735 	int error;
736 
737 	error = smb_rq_alloc(VCTOCP(vcp), SMB_COM_ECHO, scred, &rqp);
738 	if (error)
739 		return (error);
740 	mbp = &rqp->sr_rq;
741 	smb_rq_wstart(rqp);
742 	mb_put_uint16le(mbp, 1); /* echo count */
743 	smb_rq_wend(rqp);
744 	smb_rq_bstart(rqp);
745 	mb_put_uint32le(mbp, atomic_inc_32_nv(&smbechoes));
746 	smb_rq_bend(rqp);
747 	/*
748 	 * Note: the IOD calls this, so
749 	 * this request must not wait for
750 	 * connection state changes, etc.
751 	 */
752 	rqp->sr_flags |= SMBR_NORECONNECT;
753 	error = smb_rq_simple_timed(rqp, timo);
754 	SMBSDEBUG("%d\n", error);
755 	smb_rq_done(rqp);
756 	return (error);
757 }
758