xref: /linux/fs/smb/client/smb2pdu.c (revision 8a848efd482be65d488e888f96812d8729ea64ea)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2009, 2013
5  *                 Etersoft, 2012
6  *   Author(s): Steve French (sfrench@us.ibm.com)
7  *              Pavel Shilovsky (pshilovsky@samba.org) 2012
8  *
9  *   Contains the routines for constructing the SMB2 PDUs themselves
10  *
11  */
12 
13  /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
14  /* Note that there are handle based routines which must be		      */
15  /* treated slightly differently for reconnection purposes since we never     */
16  /* want to reuse a stale file handle and only the caller knows the file info */
17 
18 #include <linux/fs.h>
19 #include <linux/kernel.h>
20 #include <linux/vfs.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/uaccess.h>
23 #include <linux/uuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/xattr.h>
26 #include <linux/netfs.h>
27 #include <trace/events/netfs.h>
28 #include "cifsglob.h"
29 #include "cifsproto.h"
30 #include "cifsacl.h"
31 #include "smb2proto.h"
32 #include "cifs_unicode.h"
33 #include "cifs_debug.h"
34 #include "ntlmssp.h"
35 #include "../common/smbfsctl.h"
36 #include "../common/smb2status.h"
37 #include "smb2glob.h"
38 #include "cifs_spnego.h"
39 #include "../common/smbdirect/smbdirect.h"
40 #include "smbdirect.h"
41 #include "trace.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
44 #endif
45 #include "cached_dir.h"
46 #include "compress.h"
47 #include "fs_context.h"
48 
49 /*
50  *  The following table defines the expected "StructureSize" of SMB2 requests
51  *  in order by SMB2 command.  This is similar to "wct" in SMB/CIFS requests.
52  *
53  *  Note that commands are defined in smb2pdu.h in le16 but the array below is
54  *  indexed by command in host byte order.
55  */
56 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
57 	/* SMB2_NEGOTIATE */ 36,
58 	/* SMB2_SESSION_SETUP */ 25,
59 	/* SMB2_LOGOFF */ 4,
60 	/* SMB2_TREE_CONNECT */	9,
61 	/* SMB2_TREE_DISCONNECT */ 4,
62 	/* SMB2_CREATE */ 57,
63 	/* SMB2_CLOSE */ 24,
64 	/* SMB2_FLUSH */ 24,
65 	/* SMB2_READ */	49,
66 	/* SMB2_WRITE */ 49,
67 	/* SMB2_LOCK */	48,
68 	/* SMB2_IOCTL */ 57,
69 	/* SMB2_CANCEL */ 4,
70 	/* SMB2_ECHO */ 4,
71 	/* SMB2_QUERY_DIRECTORY */ 33,
72 	/* SMB2_CHANGE_NOTIFY */ 32,
73 	/* SMB2_QUERY_INFO */ 41,
74 	/* SMB2_SET_INFO */ 33,
75 	/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
76 };
77 
78 int smb3_encryption_required(const struct cifs_tcon *tcon)
79 {
80 	if (!tcon || !tcon->ses)
81 		return 0;
82 	if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
83 	    (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
84 		return 1;
85 	if (tcon->seal &&
86 	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
87 		return 1;
88 	if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
89 	    (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
90 		return 1;
91 	return 0;
92 }
93 
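/*
 * Fill in the fixed 64-byte SMB2 header of a request: protocol id, command,
 * credit request, ChannelSequence (SMB3 and later), process id, tree id,
 * session id and, when applicable, the signed flag.
 */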
94 static void
95 smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
96 		  const struct cifs_tcon *tcon,
97 		  struct TCP_Server_Info *server)
98 {
99 	struct smb3_hdr_req *smb3_hdr;
100 
101 	shdr->ProtocolId = SMB2_PROTO_NUMBER;
102 	shdr->StructureSize = cpu_to_le16(64);
103 	shdr->Command = smb2_cmd;
104 
105 	if (server) {
106 		/* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
107 		if (server->dialect >= SMB30_PROT_ID) {
108 			smb3_hdr = (struct smb3_hdr_req *)shdr;
109 			/*
110 			 * if this is a secondary channel, use the primary
111 			 * channel's sequence number for the ChannelSequence
112 			 */
113 			if (SERVER_IS_CHAN(server))
114 				smb3_hdr->ChannelSequence =
115 					cpu_to_le16(server->primary_server->channel_sequence_num);
116 			else
117 				smb3_hdr->ChannelSequence =
118 					cpu_to_le16(server->channel_sequence_num);
119 		}
120 		spin_lock(&server->req_lock);
121 		/* Request up to 10 credits but don't go over the limit. */
122 		if (server->credits >= server->max_credits)
123 			shdr->CreditRequest = cpu_to_le16(0);
124 		else
125 			shdr->CreditRequest = cpu_to_le16(
126 				min_t(int, server->max_credits -
127 						server->credits, 10));
128 		spin_unlock(&server->req_lock);
129 	} else {
130 		shdr->CreditRequest = cpu_to_le16(2);
131 	}
132 	shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
133 
134 	if (!tcon)
135 		goto out;
136 
137 	/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
138 	/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
139 	if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
140 		shdr->CreditCharge = cpu_to_le16(1);
141 	/* else CreditCharge MBZ */
142 
143 	shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
144 	/* Uid is not converted */
145 	if (tcon->ses)
146 		shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
147 
148 	/*
149 	 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
150 	 * to pass the path on the Open SMB prefixed by \\server\share.
151 	 * Not sure when we would need to do the augmented path (if ever) and
152 	 * setting this flag breaks the SMB2 open operation since it is
153 	 * illegal to send an empty path name (without \\server\share prefix)
154 	 * when the DFS flag is set in the SMB open header. We could
155 	 * consider setting the flag on all operations other than open
156 	 * but it is safer not to set it for now.
157 	 */
158 /*	if (tcon->share_flags & SHI1005_FLAGS_DFS)
159 		shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
160 
161 	if (server && server->sign && !smb3_encryption_required(tcon))
162 		shdr->Flags |= SMB2_FLAGS_SIGNED;
163 out:
164 	return;
165 }
166 
167 /* Skip (terminate) this secondary channel, or scale down the secondary channels, when the server no longer supports multichannel */
168 static int
169 cifs_chan_skip_or_disable(struct cifs_ses *ses,
170 			  struct TCP_Server_Info *server,
171 			  bool from_reconnect, bool disable_mchan)
172 {
173 	struct TCP_Server_Info *pserver;
174 	unsigned int chan_index;
175 
176 	if (SERVER_IS_CHAN(server)) {
177 		cifs_dbg(VFS,
178 			"server %s does not support multichannel anymore. Skip secondary channel\n",
179 			 ses->server->hostname);
180 
181 		spin_lock(&ses->chan_lock);
182 		chan_index = cifs_ses_get_chan_index(ses, server);
183 		if (chan_index == CIFS_INVAL_CHAN_INDEX) {
184 			spin_unlock(&ses->chan_lock);
185 			goto skip_terminate;
186 		}
187 
188 		ses->chans[chan_index].server = NULL;
189 		server->terminate = true;
190 		spin_unlock(&ses->chan_lock);
191 
192 		/*
193 		 * the reference on the server taken above by the channel
194 		 * needs to be dropped without holding chan_lock, as
195 		 * cifs_put_tcp_session takes a higher lock,
196 		 * i.e. cifs_tcp_ses_lock
197 		 */
198 		cifs_put_tcp_session(server, from_reconnect);
199 
200 		cifs_signal_cifsd_for_reconnect(server, false);
201 
202 		/* mark primary server as needing reconnect */
203 		pserver = server->primary_server;
204 		cifs_signal_cifsd_for_reconnect(pserver, false);
205 skip_terminate:
206 		return -EHOSTDOWN;
207 	}
208 
209 	cifs_decrease_secondary_channels(ses, disable_mchan);
210 
211 	return 0;
212 }
213 
214 /*
215  * smb3_update_ses_channels - Synchronize session channels with new configuration
216  * @ses: pointer to the CIFS session structure
217  * @server: pointer to the TCP server info structure
218  * @from_reconnect: indicates if called from reconnect context
219  * @disable_mchan: true when multichannel should be disabled (e.g. on reconnect)
220  *
221  * Returns 0 on success or error code on failure.
222  *
223  * Besides remount (reconfigure), this function is called from cifs_mount()
224  * during mount and from reconnect paths to adjust the channel count when
225  * the server's multichannel support changes.
226  */
227 int smb3_update_ses_channels(struct cifs_ses *ses, struct TCP_Server_Info *server,
228 			bool from_reconnect, bool disable_mchan)
229 {
230 	int rc = 0;
231 	/*
232 	 * Manage session channels based on current count vs max:
233 	 * - If disable requested, skip or disable the channel
234 	 * - If below max channels, attempt to add more
235 	 * - If above max channels, skip or disable excess channels
236 	 */
237 	if (disable_mchan)
238 		rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan);
239 	else {
240 		if (ses->chan_count < ses->chan_max)
241 			rc = cifs_try_adding_channels(ses);
242 		else if (ses->chan_count > ses->chan_max)
243 			rc = cifs_chan_skip_or_disable(ses, server, from_reconnect, disable_mchan);
244 	}
245 
246 	return rc;
247 }
248 
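/*
 * Reconnect the transport, session and/or tree connection as needed before
 * sending the given SMB2 command. When a reconnect was performed,
 * handle-based commands return -EAGAIN so the caller can reopen the file
 * handle first.
 */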
249 static int
250 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
251 	       struct TCP_Server_Info *server, bool from_reconnect)
252 {
253 	struct cifs_ses *ses;
254 	int xid;
255 	int rc = 0;
256 
257 	/*
258 	 * SMB2 NegProt, SessSetup and Logoff do not have a tcon yet, so the
259 	 * TCP and SMB session status checks for those three are done
260 	 * differently - in the calling routine.
261 	 */
262 	if (tcon == NULL)
263 		return 0;
264 
265 	if (smb2_command == SMB2_TREE_CONNECT)
266 		return 0;
267 
268 	spin_lock(&tcon->tc_lock);
269 	if (tcon->status == TID_EXITING) {
270 		/*
271 		 * only tree disconnect allowed when disconnecting ...
272 		 */
273 		if (smb2_command != SMB2_TREE_DISCONNECT) {
274 			spin_unlock(&tcon->tc_lock);
275 			cifs_tcon_dbg(FYI, "can not send cmd %d while umounting\n",
276 				      smb2_command);
277 			return -ENODEV;
278 		}
279 	}
280 	spin_unlock(&tcon->tc_lock);
281 
282 	ses = tcon->ses;
283 	if (!ses)
284 		return smb_EIO(smb_eio_trace_null_pointers);
285 	spin_lock(&ses->ses_lock);
286 	if (ses->ses_status == SES_EXITING) {
287 		spin_unlock(&ses->ses_lock);
288 		return smb_EIO(smb_eio_trace_sess_exiting);
289 	}
290 	spin_unlock(&ses->ses_lock);
291 	if (!ses->server || !server)
292 		return smb_EIO(smb_eio_trace_null_pointers);
293 
294 	spin_lock(&server->srv_lock);
295 	if (server->tcpStatus == CifsNeedReconnect) {
296 		/*
297 		 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
298 		 * here since they are implicitly done when session drops.
299 		 */
300 		switch (smb2_command) {
301 		/*
302 		 * BB Should we keep oplock break and add flush to exceptions?
303 		 */
304 		case SMB2_TREE_DISCONNECT:
305 		case SMB2_CANCEL:
306 		case SMB2_CLOSE:
307 		case SMB2_OPLOCK_BREAK:
308 			spin_unlock(&server->srv_lock);
309 			return -EAGAIN;
310 		}
311 	}
312 
313 	/* if the server is marked for termination, cifsd will clean up */
314 	if (server->terminate) {
315 		spin_unlock(&server->srv_lock);
316 		return -EHOSTDOWN;
317 	}
318 	spin_unlock(&server->srv_lock);
319 
320 again:
321 	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
322 	if (rc)
323 		return rc;
324 
325 	spin_lock(&ses->chan_lock);
326 	if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
327 		spin_unlock(&ses->chan_lock);
328 		return 0;
329 	}
330 	spin_unlock(&ses->chan_lock);
331 	cifs_tcon_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d\n",
332 		      tcon->ses->chans_need_reconnect,
333 		      tcon->need_reconnect);
334 
335 	mutex_lock(&ses->session_mutex);
336 	/*
337 	 * Handle the case where a concurrent thread failed to negotiate or
338 	 * killed a channel.
339 	 */
340 	spin_lock(&server->srv_lock);
341 	switch (server->tcpStatus) {
342 	case CifsExiting:
343 		spin_unlock(&server->srv_lock);
344 		mutex_unlock(&ses->session_mutex);
345 		return -EHOSTDOWN;
346 	case CifsNeedReconnect:
347 		spin_unlock(&server->srv_lock);
348 		mutex_unlock(&ses->session_mutex);
349 		if (!tcon->retry)
350 			return -EHOSTDOWN;
351 		goto again;
352 	default:
353 		break;
354 	}
355 	spin_unlock(&server->srv_lock);
356 
357 	/*
358 	 * need to prevent multiple threads trying to simultaneously
359 	 * reconnect the same SMB session
360 	 */
361 	spin_lock(&ses->ses_lock);
362 	spin_lock(&ses->chan_lock);
363 	if (!cifs_chan_needs_reconnect(ses, server) &&
364 	    ses->ses_status == SES_GOOD) {
365 		spin_unlock(&ses->chan_lock);
366 		spin_unlock(&ses->ses_lock);
367 		/* this means that we only need to tree connect */
368 		if (tcon->need_reconnect)
369 			goto skip_sess_setup;
370 
371 		mutex_unlock(&ses->session_mutex);
372 		goto out;
373 	}
374 	spin_unlock(&ses->chan_lock);
375 	spin_unlock(&ses->ses_lock);
376 
377 	rc = cifs_negotiate_protocol(0, ses, server);
378 	if (rc) {
379 		mutex_unlock(&ses->session_mutex);
380 		if (!tcon->retry)
381 			return -EHOSTDOWN;
382 		goto again;
383 	}
384 	/*
385 	 * if server stopped supporting multichannel
386 	 * and the first channel reconnected, disable all the others.
387 	 */
388 	if (ses->chan_count > 1 &&
389 	    !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
390 		rc = smb3_update_ses_channels(ses, server,
391 					       from_reconnect, true /* disable_mchan */);
392 		if (rc) {
393 			mutex_unlock(&ses->session_mutex);
394 			goto out;
395 		}
396 	}
397 
398 	rc = cifs_setup_session(0, ses, server, ses->local_nls);
399 	if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
400 		/*
401 		 * Try the alternate password for the next reconnect (e.g. key
402 		 * rotation could be enabled on the server) if an alternate
403 		 * password is available and the current password has expired,
404 		 * but do not swap on non-password-related errors like host down
405 		 */
406 		if (ses->password2)
407 			swap(ses->password2, ses->password);
408 	}
409 	if (rc) {
410 		mutex_unlock(&ses->session_mutex);
411 		if (rc == -EACCES && !tcon->retry)
412 			return -EHOSTDOWN;
413 		goto out;
414 	}
415 
416 skip_sess_setup:
417 	if (!tcon->need_reconnect) {
418 		mutex_unlock(&ses->session_mutex);
419 		goto out;
420 	}
421 	cifs_mark_open_files_invalid(tcon);
422 	if (tcon->use_persistent)
423 		tcon->need_reopen_files = true;
424 
425 	rc = cifs_tree_connect(0, tcon);
426 
427 	cifs_tcon_dbg(FYI, "reconnect tcon rc = %d\n", rc);
428 	if (rc) {
429 		/* If sess reconnected but tcon didn't, something strange ... */
430 		mutex_unlock(&ses->session_mutex);
431 		cifs_tcon_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
432 		goto out;
433 	}
434 
435 	spin_lock(&ses->ses_lock);
436 	if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
437 		spin_unlock(&ses->ses_lock);
438 		mutex_unlock(&ses->session_mutex);
439 		goto skip_add_channels;
440 	}
441 	ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
442 	spin_unlock(&ses->ses_lock);
443 
444 	if (!rc &&
445 	    (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
446 	    server->ops->query_server_interfaces) {
447 		/*
448 		 * query server network interfaces, in case they change.
449 		 * Also mark the session as pending this update while the query
450 		 * is in progress. This will be used to avoid calling
451 		 * smb2_reconnect recursively.
452 		 */
453 		ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
454 		xid = get_xid();
455 		rc = server->ops->query_server_interfaces(xid, tcon, false);
456 		free_xid(xid);
457 		ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
458 
459 		if (!tcon->ipc && !tcon->dummy)
460 			queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
461 					   (SMB_INTERFACE_POLL_INTERVAL * HZ));
462 
463 		mutex_unlock(&ses->session_mutex);
464 
465 		if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
466 			/*
467 			 * Some servers, like the Azure SMB server, do not advertise
468 			 * that multichannel has been disabled via the server
469 			 * capabilities; instead they return STATUS_NOT_IMPLEMENTED.
470 			 * Treat this as the server not supporting multichannel.
471 			 */
472 
473 			rc = smb3_update_ses_channels(ses, server,
474 						       from_reconnect,
475 						       true /* disable_mchan */);
476 			goto skip_add_channels;
477 		} else if (rc)
478 			cifs_tcon_dbg(FYI, "%s: failed to query server interfaces: %d\n",
479 				      __func__, rc);
480 
481 		if (ses->chan_max > ses->chan_count &&
482 		    ses->iface_count &&
483 		    !SERVER_IS_CHAN(server)) {
484 			if (ses->chan_count == 1)
485 				cifs_server_dbg(VFS, "supports multichannel now\n");
486 
487 			smb3_update_ses_channels(ses, server, from_reconnect,
488 						  false /* disable_mchan */);
489 		}
490 	} else {
491 		mutex_unlock(&ses->session_mutex);
492 	}
493 
494 skip_add_channels:
495 	spin_lock(&ses->ses_lock);
496 	ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
497 	spin_unlock(&ses->ses_lock);
498 
499 	if (smb2_command != SMB2_INTERNAL_CMD)
500 		cifs_queue_server_reconn(server);
501 
502 	atomic_inc(&tconInfoReconnectCount);
503 out:
504 	/*
505 	 * Check whether this is a handle-based operation, so we know whether we
506 	 * can continue or must return to the caller to reset the file handle.
507 	 */
508 	/*
509 	 * BB Is flush done by server on drop of tcp session? Should we special
510 	 * case it and skip above?
511 	 */
512 	switch (smb2_command) {
513 	case SMB2_FLUSH:
514 	case SMB2_READ:
515 	case SMB2_WRITE:
516 	case SMB2_LOCK:
517 	case SMB2_QUERY_DIRECTORY:
518 	case SMB2_CHANGE_NOTIFY:
519 	case SMB2_QUERY_INFO:
520 	case SMB2_SET_INFO:
521 	case SMB2_IOCTL:
522 		rc = -EAGAIN;
523 	}
524 	return rc;
525 }
526 
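/*
 * Zero the request buffer, assemble the SMB2 header and set StructureSize2
 * from the per-command table above; the length of the fixed request area is
 * returned via *total_len.
 */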
527 static void
528 fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
529 	       struct TCP_Server_Info *server,
530 	       void *buf,
531 	       unsigned int *total_len)
532 {
533 	struct smb2_pdu *spdu = buf;
534 	/* look up the word count, i.e. StructureSize, from the table */
535 	__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
536 
537 	/*
538 	 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
539 	 * largest operations (Create)
540 	 */
541 	memset(buf, 0, 256);
542 
543 	smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
544 	spdu->StructureSize2 = cpu_to_le16(parmsize);
545 
546 	*total_len = parmsize + sizeof(struct smb2_hdr);
547 }
548 
549 /*
550  * Allocate and return a pointer to an SMB request header, and set basic
551  * SMB information in that header. If the return code is zero, this
552  * function is guaranteed to have filled in the request_buf pointer.
553  */
554 static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
555 				 struct TCP_Server_Info *server,
556 				 void **request_buf, unsigned int *total_len)
557 {
558 	/* BB eventually switch this to SMB2 specific small buf size */
559 	switch (smb2_command) {
560 	case SMB2_SET_INFO:
561 	case SMB2_QUERY_INFO:
562 		*request_buf = cifs_buf_get();
563 		break;
564 	default:
565 		*request_buf = cifs_small_buf_get();
566 		break;
567 	}
568 	if (*request_buf == NULL) {
569 		/* BB should we add a retry in here if not a writepage? */
570 		return -ENOMEM;
571 	}
572 
573 	fill_small_buf(smb2_command, tcon, server,
574 		       (struct smb2_hdr *)(*request_buf),
575 		       total_len);
576 
577 	if (tcon != NULL) {
578 		uint16_t com_code = le16_to_cpu(smb2_command);
579 		cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
580 		cifs_stats_inc(&tcon->num_smbs_sent);
581 	}
582 
583 	return 0;
584 }
585 
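/*
 * Same as __smb2_plain_req_init() but reconnects the session and/or tree
 * connection first if needed.
 */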
586 static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
587 			       struct TCP_Server_Info *server,
588 			       void **request_buf, unsigned int *total_len)
589 {
590 	int rc;
591 
592 	rc = smb2_reconnect(smb2_command, tcon, server, false);
593 	if (rc)
594 		return rc;
595 
596 	return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
597 				     total_len);
598 }
599 
600 static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
601 			       struct TCP_Server_Info *server,
602 			       void **request_buf, unsigned int *total_len)
603 {
604 	/*
605 	 * Skip reconnect in one of the following cases:
606 	 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
607 	 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
608 	 * smb2_reconnect (indicated by the CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES ses flag)
609 	 */
610 	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
611 	    (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
612 	     (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
613 		return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
614 					     request_buf, total_len);
615 
616 	return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
617 				   request_buf, total_len);
618 }
619 
620 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
621 
622 static void
623 build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
624 {
625 	pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
626 	pneg_ctxt->DataLength = cpu_to_le16(38);
627 	pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
628 	pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
629 	get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
630 	pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
631 }
632 
633 static void
634 build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
635 {
636 	pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
637 	pneg_ctxt->DataLength =
638 		cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
639 			  - sizeof(struct smb2_neg_context));
640 	pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
641 	pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
642 	pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
643 	pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
644 }
645 
646 static unsigned int
647 build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
648 {
649 	unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
650 	unsigned short num_algs = 1; /* number of signing algorithms sent */
651 
652 	pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
653 	/*
654 	 * Context Data length must be rounded to a multiple of 8 for some servers
655 	 */
656 	pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
657 					    sizeof(struct smb2_neg_context) +
658 					    (num_algs * sizeof(u16)), 8));
659 	pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
660 	pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
661 
662 	ctxt_len += sizeof(__le16) * num_algs;
663 	ctxt_len = ALIGN(ctxt_len, 8);
664 	return ctxt_len;
665 	/* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
666 }
667 
668 static void
669 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
670 {
671 	pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
672 	if (require_gcm_256) {
673 		pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
674 		pneg_ctxt->CipherCount = cpu_to_le16(1);
675 		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
676 	} else if (enable_gcm_256) {
677 		pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
678 		pneg_ctxt->CipherCount = cpu_to_le16(3);
679 		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
680 		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
681 		pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
682 	} else {
683 		pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
684 		pneg_ctxt->CipherCount = cpu_to_le16(2);
685 		pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
686 		pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
687 	}
688 }
689 
690 static unsigned int
691 build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
692 {
693 	struct nls_table *cp = load_nls_default();
694 
695 	pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
696 
697 	/* copy up to the first 100 bytes of the server name into the NetName field */
698 	pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
699 	/* context size is DataLength + minimal smb2_neg_context */
700 	return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
701 }
702 
703 static void
704 build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
705 {
706 	pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
707 	pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
708 	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
709 	pneg_ctxt->Name[0] = 0x93;
710 	pneg_ctxt->Name[1] = 0xAD;
711 	pneg_ctxt->Name[2] = 0x25;
712 	pneg_ctxt->Name[3] = 0x50;
713 	pneg_ctxt->Name[4] = 0x9C;
714 	pneg_ctxt->Name[5] = 0xB4;
715 	pneg_ctxt->Name[6] = 0x11;
716 	pneg_ctxt->Name[7] = 0xE7;
717 	pneg_ctxt->Name[8] = 0xB4;
718 	pneg_ctxt->Name[9] = 0x23;
719 	pneg_ctxt->Name[10] = 0x83;
720 	pneg_ctxt->Name[11] = 0xDE;
721 	pneg_ctxt->Name[12] = 0x96;
722 	pneg_ctxt->Name[13] = 0x8B;
723 	pneg_ctxt->Name[14] = 0xCD;
724 	pneg_ctxt->Name[15] = 0x7C;
725 }
726 
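/*
 * Append the SMB3.1.1 negotiate contexts (preauth integrity, encryption,
 * netname when a hostname is known, POSIX extensions, and optionally
 * compression and signing capabilities) after the fixed part of the
 * negotiate request, updating NegotiateContextOffset and
 * NegotiateContextCount accordingly.
 */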
727 static void
728 assemble_neg_contexts(struct smb2_negotiate_req *req,
729 		      struct TCP_Server_Info *server, unsigned int *total_len)
730 {
731 	unsigned int ctxt_len, neg_context_count;
732 	struct TCP_Server_Info *pserver;
733 	char *pneg_ctxt;
734 	char *hostname;
735 
736 	if (*total_len > 200) {
737 		/* In case the length is corrupted, don't overrun the SMB buffer */
738 		cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
739 		return;
740 	}
741 
742 	/*
743 	 * round up total_len of fixed part of SMB3 negotiate request to 8
744 	 * byte boundary before adding negotiate contexts
745 	 */
746 	*total_len = ALIGN(*total_len, 8);
747 
748 	pneg_ctxt = (*total_len) + (char *)req;
749 	req->NegotiateContextOffset = cpu_to_le32(*total_len);
750 
751 	build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
752 	ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
753 	*total_len += ctxt_len;
754 	pneg_ctxt += ctxt_len;
755 
756 	build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
757 	ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
758 	*total_len += ctxt_len;
759 	pneg_ctxt += ctxt_len;
760 
761 	/*
762 	 * secondary channels don't have the hostname field populated;
763 	 * use the hostname field of the primary channel instead
764 	 */
765 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
766 	cifs_server_lock(pserver);
767 	hostname = pserver->hostname;
768 	if (hostname && (hostname[0] != 0)) {
769 		ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
770 					      hostname);
771 		*total_len += ctxt_len;
772 		pneg_ctxt += ctxt_len;
773 		neg_context_count = 3;
774 	} else
775 		neg_context_count = 2;
776 	cifs_server_unlock(pserver);
777 
778 	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
779 	*total_len += sizeof(struct smb2_posix_neg_context);
780 	pneg_ctxt += sizeof(struct smb2_posix_neg_context);
781 	neg_context_count++;
782 
783 	if (server->compression.requested) {
784 		build_compression_ctxt((struct smb2_compression_capabilities_context *)
785 				pneg_ctxt);
786 		ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
787 		*total_len += ctxt_len;
788 		pneg_ctxt += ctxt_len;
789 		neg_context_count++;
790 	}
791 
792 	if (enable_negotiate_signing) {
793 		ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
794 				pneg_ctxt);
795 		*total_len += ctxt_len;
796 		pneg_ctxt += ctxt_len;
797 		neg_context_count++;
798 	}
799 
800 	/* check for and add transport_capabilities and signing capabilities */
801 	req->NegotiateContextCount = cpu_to_le16(neg_context_count);
802 
803 }
804 
805 /* If the preauth context is invalid, warn but use what we requested (SHA-512) */
806 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
807 {
808 	unsigned int len = le16_to_cpu(ctxt->DataLength);
809 
810 	/*
811 	 * Caller checked that DataLength remains within SMB boundary. We still
812 	 * need to confirm that one HashAlgorithms member is accounted for.
813 	 */
814 	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
815 		pr_warn_once("server sent bad preauth context\n");
816 		return;
817 	} else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
818 		pr_warn_once("server sent invalid SaltLength\n");
819 		return;
820 	}
821 	if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
822 		pr_warn_once("Invalid SMB3 hash algorithm count\n");
823 	if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
824 		pr_warn_once("unknown SMB3 hash algorithm\n");
825 }
826 
827 static void decode_compress_ctx(struct TCP_Server_Info *server,
828 			 struct smb2_compression_capabilities_context *ctxt)
829 {
830 	unsigned int len = le16_to_cpu(ctxt->DataLength);
831 	__le16 alg;
832 
833 	server->compression.enabled = false;
834 
835 	/*
836 	 * Caller checked that DataLength remains within SMB boundary. We still
837 	 * need to confirm that one CompressionAlgorithms member is accounted
838 	 * for.
839 	 */
840 	if (len < 10) {
841 		pr_warn_once("server sent bad compression cntxt\n");
842 		return;
843 	}
844 
845 	if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
846 		pr_warn_once("invalid SMB3 compress algorithm count\n");
847 		return;
848 	}
849 
850 	alg = ctxt->CompressionAlgorithms[0];
851 
852 	/* 'NONE' (0) compressor type is never negotiated */
853 	if (alg == 0 || le16_to_cpu(alg) > 3) {
854 		pr_warn_once("invalid compression algorithm '%u'\n", le16_to_cpu(alg));
855 		return;
856 	}
857 
858 	server->compression.alg = alg;
859 	server->compression.enabled = true;
860 }
861 
862 static int decode_encrypt_ctx(struct TCP_Server_Info *server,
863 			      struct smb2_encryption_neg_context *ctxt)
864 {
865 	unsigned int len = le16_to_cpu(ctxt->DataLength);
866 
867 	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
868 	/*
869 	 * Caller checked that DataLength remains within SMB boundary. We still
870 	 * need to confirm that one Cipher flexible array member is accounted
871 	 * for.
872 	 */
873 	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
874 		pr_warn_once("server sent bad crypto ctxt len\n");
875 		return -EINVAL;
876 	}
877 
878 	if (le16_to_cpu(ctxt->CipherCount) != 1) {
879 		pr_warn_once("Invalid SMB3.11 cipher count\n");
880 		return -EINVAL;
881 	}
882 	cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
883 	if (require_gcm_256) {
884 		if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
885 			cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
886 			return -EOPNOTSUPP;
887 		}
888 	} else if (ctxt->Ciphers[0] == 0) {
889 		/*
890 		 * e.g. if the server only supported AES256_CCM (very unlikely),
891 		 * or supported no encryption types, or had them all disabled.
892 		 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
893 		 * requested encryption ("seal") the checks later on during
894 		 * tree connect will return the proper rc. But if seal was not
895 		 * requested by the client, we can't fail here, since the server
896 		 * is allowed to return 0 to indicate no supported cipher.
897 		 */
898 		server->cipher_type = 0;
899 		server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
900 		pr_warn_once("Server does not support requested encryption types\n");
901 		return 0;
902 	} else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
903 		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
904 		   (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
905 		/* server returned a cipher we didn't ask for */
906 		pr_warn_once("Invalid SMB3.11 cipher returned\n");
907 		return -EINVAL;
908 	}
909 	server->cipher_type = ctxt->Ciphers[0];
910 	server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
911 	return 0;
912 }
913 
914 static void decode_signing_ctx(struct TCP_Server_Info *server,
915 			       struct smb2_signing_capabilities *pctxt)
916 {
917 	unsigned int len = le16_to_cpu(pctxt->DataLength);
918 
919 	/*
920 	 * Caller checked that DataLength remains within SMB boundary. We still
921 	 * need to confirm that one SigningAlgorithms flexible array member is
922 	 * accounted for.
923 	 */
924 	if ((len < 4) || (len > 16)) {
925 		pr_warn_once("server sent bad signing negcontext\n");
926 		return;
927 	}
928 	if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
929 		pr_warn_once("Invalid signing algorithm count\n");
930 		return;
931 	}
932 	if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
933 		pr_warn_once("unknown signing algorithm\n");
934 		return;
935 	}
936 
937 	server->signing_negotiated = true;
938 	server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
939 	cifs_dbg(FYI, "signing algorithm %d chosen\n",
940 		     server->signing_algorithm);
941 }
942 
943 
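/*
 * Walk the negotiate contexts returned by an SMB3.1.1 server, checking that
 * each one fits within the response, and dispatch it to the matching decoder.
 */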
944 static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
945 				     struct TCP_Server_Info *server,
946 				     unsigned int len_of_smb)
947 {
948 	struct smb2_neg_context *pctx;
949 	unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
950 	unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
951 	unsigned int len_of_ctxts, i;
952 	int rc = 0;
953 
954 	cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
955 	if (len_of_smb <= offset) {
956 		cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
957 		return -EINVAL;
958 	}
959 
960 	len_of_ctxts = len_of_smb - offset;
961 
962 	for (i = 0; i < ctxt_cnt; i++) {
963 		int clen;
964 		/* check that offset is not beyond end of SMB */
965 		if (len_of_ctxts < sizeof(struct smb2_neg_context))
966 			break;
967 
968 		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
969 		clen = sizeof(struct smb2_neg_context)
970 			+ le16_to_cpu(pctx->DataLength);
971 		/*
972 		 * 2.2.4 SMB2 NEGOTIATE Response
973 		 * Subsequent negotiate contexts MUST appear at the first 8-byte
974 		 * aligned offset following the previous negotiate context.
975 		 */
976 		if (i + 1 != ctxt_cnt)
977 			clen = ALIGN(clen, 8);
978 		if (clen > len_of_ctxts)
979 			break;
980 
981 		if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
982 			decode_preauth_context(
983 				(struct smb2_preauth_neg_context *)pctx);
984 		else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
985 			rc = decode_encrypt_ctx(server,
986 				(struct smb2_encryption_neg_context *)pctx);
987 		else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
988 			decode_compress_ctx(server,
989 				(struct smb2_compression_capabilities_context *)pctx);
990 		else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
991 			server->posix_ext_supported = true;
992 		else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
993 			decode_signing_ctx(server,
994 				(struct smb2_signing_capabilities *)pctx);
995 		else
996 			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
997 				le16_to_cpu(pctx->ContextType));
998 		if (rc)
999 			break;
1000 
1001 		offset += clen;
1002 		len_of_ctxts -= clen;
1003 	}
1004 	return rc;
1005 }
1006 
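/*
 * Build the SMB2_CREATE_TAG_POSIX create context used to pass the POSIX mode
 * bits on open/create.
 */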
1007 static struct create_posix *
1008 create_posix_buf(umode_t mode)
1009 {
1010 	struct create_posix *buf;
1011 
1012 	buf = kzalloc(sizeof(struct create_posix),
1013 			GFP_KERNEL);
1014 	if (!buf)
1015 		return NULL;
1016 
1017 	buf->ccontext.DataOffset =
1018 		cpu_to_le16(offsetof(struct create_posix, Mode));
1019 	buf->ccontext.DataLength = cpu_to_le32(4);
1020 	buf->ccontext.NameOffset =
1021 		cpu_to_le16(offsetof(struct create_posix, Name));
1022 	buf->ccontext.NameLength = cpu_to_le16(16);
1023 
1024 	/* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
1025 	buf->Name[0] = 0x93;
1026 	buf->Name[1] = 0xAD;
1027 	buf->Name[2] = 0x25;
1028 	buf->Name[3] = 0x50;
1029 	buf->Name[4] = 0x9C;
1030 	buf->Name[5] = 0xB4;
1031 	buf->Name[6] = 0x11;
1032 	buf->Name[7] = 0xE7;
1033 	buf->Name[8] = 0xB4;
1034 	buf->Name[9] = 0x23;
1035 	buf->Name[10] = 0x83;
1036 	buf->Name[11] = 0xDE;
1037 	buf->Name[12] = 0x96;
1038 	buf->Name[13] = 0x8B;
1039 	buf->Name[14] = 0xCD;
1040 	buf->Name[15] = 0x7C;
1041 	buf->Mode = cpu_to_le32(mode);
1042 	cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
1043 	return buf;
1044 }
1045 
1046 static int
1047 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
1048 {
1049 	unsigned int num = *num_iovec;
1050 
1051 	iov[num].iov_base = create_posix_buf(mode);
1052 	if (mode == ACL_NO_MODE)
1053 		cifs_dbg(FYI, "%s: no mode\n", __func__);
1054 	if (iov[num].iov_base == NULL)
1055 		return -ENOMEM;
1056 	iov[num].iov_len = sizeof(struct create_posix);
1057 	*num_iovec = num + 1;
1058 	return 0;
1059 }
1060 
1061 
1062 /*
1063  *
1064  *	SMB2 Worker functions follow:
1065  *
1066  *	The general structure of the worker functions is:
1067  *	1) Call smb2_init (assembles SMB2 header)
1068  *	2) Initialize SMB2 command specific fields in fixed length area of SMB
1069  *	3) Call smb_sendrcv2 (sends request on socket and waits for response)
1070  *	4) Decode SMB2 command specific fields in the fixed length area
1071  *	5) Decode variable length data area (if any for this SMB2 command type)
1072  *	6) Call free smb buffer
1073  *	7) return
1074  *
1075  */
1076 
1077 int
1078 SMB2_negotiate(const unsigned int xid,
1079 	       struct cifs_ses *ses,
1080 	       struct TCP_Server_Info *server)
1081 {
1082 	struct smb_rqst rqst;
1083 	struct smb2_negotiate_req *req;
1084 	struct smb2_negotiate_rsp *rsp;
1085 	struct kvec iov[1];
1086 	struct kvec rsp_iov;
1087 	int rc;
1088 	int resp_buftype;
1089 	int blob_offset, blob_length;
1090 	char *security_blob;
1091 	int flags = CIFS_NEG_OP;
1092 	unsigned int total_len;
1093 
1094 	cifs_dbg(FYI, "Negotiate protocol\n");
1095 
1096 	if (!server) {
1097 		WARN(1, "%s: server is NULL!\n", __func__);
1098 		return smb_EIO(smb_eio_trace_null_pointers);
1099 	}
1100 
1101 	rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
1102 				 (void **) &req, &total_len);
1103 	if (rc)
1104 		return rc;
1105 
1106 	req->hdr.SessionId = 0;
1107 
1108 	memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1109 	memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1110 
1111 	if (strcmp(server->vals->version_string,
1112 		   SMB3ANY_VERSION_STRING) == 0) {
1113 		req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1114 		req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1115 		req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1116 		req->DialectCount = cpu_to_le16(3);
1117 		total_len += 6;
1118 	} else if (strcmp(server->vals->version_string,
1119 		   SMBDEFAULT_VERSION_STRING) == 0) {
1120 		req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1121 		req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1122 		req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1123 		req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1124 		req->DialectCount = cpu_to_le16(4);
1125 		total_len += 8;
1126 	} else {
1127 		/* otherwise send specific dialect */
1128 		req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
1129 		req->DialectCount = cpu_to_le16(1);
1130 		total_len += 2;
1131 	}
1132 
1133 	/* only one of SMB2 signing flags may be set in SMB2 request */
1134 	if (ses->sign)
1135 		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1136 	else if (global_secflags & CIFSSEC_MAY_SIGN)
1137 		req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1138 	else
1139 		req->SecurityMode = 0;
1140 
1141 	req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
1142 	req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1143 
1144 	/* ClientGUID must be zero for SMB2.02 dialect */
1145 	if (server->vals->protocol_id == SMB20_PROT_ID)
1146 		memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
1147 	else {
1148 		memcpy(req->ClientGUID, server->client_guid,
1149 			SMB2_CLIENT_GUID_SIZE);
1150 		if ((server->vals->protocol_id == SMB311_PROT_ID) ||
1151 		    (strcmp(server->vals->version_string,
1152 		     SMB3ANY_VERSION_STRING) == 0) ||
1153 		    (strcmp(server->vals->version_string,
1154 		     SMBDEFAULT_VERSION_STRING) == 0))
1155 			assemble_neg_contexts(req, server, &total_len);
1156 	}
1157 	iov[0].iov_base = (char *)req;
1158 	iov[0].iov_len = total_len;
1159 
1160 	memset(&rqst, 0, sizeof(struct smb_rqst));
1161 	rqst.rq_iov = iov;
1162 	rqst.rq_nvec = 1;
1163 
1164 	rc = cifs_send_recv(xid, ses, server,
1165 			    &rqst, &resp_buftype, flags, &rsp_iov);
1166 	cifs_small_buf_release(req);
1167 	rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
1168 	/*
1169 	 * No tcon so can't do
1170 	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1171 	 */
1172 	if (rc == -EOPNOTSUPP) {
1173 		cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
1174 		goto neg_exit;
1175 	} else if (rc != 0)
1176 		goto neg_exit;
1177 
1178 	u16 dialect = le16_to_cpu(rsp->DialectRevision);
1179 	if (strcmp(server->vals->version_string,
1180 		   SMB3ANY_VERSION_STRING) == 0) {
1181 		switch (dialect) {
1182 		case SMB20_PROT_ID:
1183 			cifs_server_dbg(VFS,
1184 				"SMB2 dialect returned but not requested\n");
1185 			rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3);
1186 			goto neg_exit;
1187 		case SMB21_PROT_ID:
1188 			cifs_server_dbg(VFS,
1189 				"SMB2.1 dialect returned but not requested\n");
1190 			rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 3);
1191 			goto neg_exit;
1192 		case SMB311_PROT_ID:
1193 			/* ops were set to SMB3.0 by default, so update */
1194 			server->ops = &smb311_operations;
1195 			server->vals = &smb311_values;
1196 			break;
1197 		default:
1198 			break;
1199 		}
1200 	} else if (strcmp(server->vals->version_string,
1201 			  SMBDEFAULT_VERSION_STRING) == 0) {
1202 		switch (dialect) {
1203 		case SMB20_PROT_ID:
1204 			cifs_server_dbg(VFS,
1205 				"SMB2 dialect returned but not requested\n");
1206 			rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect, dialect, 0);
1207 			goto neg_exit;
1208 		case SMB21_PROT_ID:
1209 			/* ops were set to SMB3.0 by default, so update */
1210 			server->ops = &smb21_operations;
1211 			server->vals = &smb21_values;
1212 			break;
1213 		case SMB311_PROT_ID:
1214 			server->ops = &smb311_operations;
1215 			server->vals = &smb311_values;
1216 			break;
1217 		default:
1218 			break;
1219 		}
1220 	} else if (dialect != server->vals->protocol_id) {
1221 		/* if requested single dialect ensure returned dialect matched */
1222 		cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
1223 				dialect);
1224 		rc = smb_EIO2(smb_eio_trace_neg_unreq_dialect,
1225 			      dialect, server->vals->protocol_id);
1226 		goto neg_exit;
1227 	}
1228 
1229 	cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
1230 
1231 	switch (dialect) {
1232 	case SMB20_PROT_ID:
1233 		cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
1234 		break;
1235 	case SMB21_PROT_ID:
1236 		cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
1237 		break;
1238 	case SMB30_PROT_ID:
1239 		cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
1240 		break;
1241 	case SMB302_PROT_ID:
1242 		cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
1243 		break;
1244 	case SMB311_PROT_ID:
1245 		cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
1246 		break;
1247 	default:
1248 		cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
1249 				dialect);
1250 		rc = smb_EIO1(smb_eio_trace_neg_inval_dialect, dialect);
1251 		goto neg_exit;
1252 	}
1253 
1254 	rc = 0;
1255 	server->dialect = dialect;
1256 
1257 	/*
1258 	 * Keep a copy of the hash after negprot. This hash will be
1259 	 * the starting hash value for all sessions made from this
1260 	 * server.
1261 	 */
1262 	memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
1263 	       SMB2_PREAUTH_HASH_SIZE);
1264 
1265 	/* SMB2 only has an extended negflavor */
1266 	server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
1267 	/* set it to the maximum buffer size value we can send with 1 credit */
1268 	server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
1269 			       SMB2_MAX_BUFFER_SIZE);
1270 	server->max_read = le32_to_cpu(rsp->MaxReadSize);
1271 	server->max_write = le32_to_cpu(rsp->MaxWriteSize);
1272 	server->sec_mode = le16_to_cpu(rsp->SecurityMode);
1273 	if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
1274 		cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
1275 				server->sec_mode);
1276 	server->capabilities = le32_to_cpu(rsp->Capabilities);
1277 	/* Internal types */
1278 	server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
1279 
1280 	/*
1281 	 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context.
1282 	 * Set the cipher type manually.
1283 	 */
1284 	if ((server->dialect == SMB30_PROT_ID ||
1285 	     server->dialect == SMB302_PROT_ID) &&
1286 	    (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1287 		server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
1288 
1289 	security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
1290 					       (struct smb2_hdr *)rsp);
1291 	/*
1292 	 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
1293 	 * for us will be
1294 	 *	ses->sectype = RawNTLMSSP;
1295 	 * but for the time being this is our only auth choice so it doesn't matter.
1296 	 * We just found a server which sets the blob length to zero, expecting raw.
1297 	 */
1298 	if (blob_length == 0) {
1299 		cifs_dbg(FYI, "missing security blob on negprot\n");
1300 		server->sec_ntlmssp = true;
1301 	}
1302 
1303 	rc = cifs_enable_signing(server, ses->sign);
1304 	if (rc)
1305 		goto neg_exit;
1306 	if (blob_length) {
1307 		rc = decode_negTokenInit(security_blob, blob_length, server);
1308 		if (rc == 1)
1309 			rc = 0;
1310 		else if (rc == 0)
1311 			rc = smb_EIO1(smb_eio_trace_neg_decode_token, rc);
1312 	}
1313 
1314 	if (server->dialect == SMB311_PROT_ID) {
1315 		if (rsp->NegotiateContextCount)
1316 			rc = smb311_decode_neg_context(rsp, server,
1317 						       rsp_iov.iov_len);
1318 		else
1319 			cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
1320 	}
1321 
1322 	if (server->cipher_type && !rc)
1323 		rc = smb3_crypto_aead_allocate(server);
1324 neg_exit:
1325 	free_rsp_buf(resp_buftype, rsp);
1326 	return rc;
1327 }
1328 
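/*
 * Issue FSCTL_VALIDATE_NEGOTIATE_INFO and check that the dialect, security
 * mode and capabilities reported by the server match what was negotiated,
 * to detect tampering with the original negotiate exchange. Not needed for
 * SMB3.1.1, where preauth integrity supersedes validate negotiate.
 */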
1329 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1330 {
1331 	int rc;
1332 	struct validate_negotiate_info_req *pneg_inbuf;
1333 	struct validate_negotiate_info_rsp *pneg_rsp = NULL;
1334 	u32 rsplen;
1335 	u32 inbuflen; /* max of 4 dialects */
1336 	struct TCP_Server_Info *server = tcon->ses->server;
1337 
1338 	cifs_dbg(FYI, "validate negotiate\n");
1339 
1340 	/* In SMB3.11 preauth integrity supersedes validate negotiate */
1341 	if (server->dialect == SMB311_PROT_ID)
1342 		return 0;
1343 
1344 	/*
1345 	 * The validation ioctl must be signed, so there is no point sending this
1346 	 * if we cannot sign it (i.e. are not a known user). Even if signing is not
1347 	 * required (enabled but not negotiated), in those cases we selectively
1348 	 * sign just this, the first and only signed request on a connection.
1349 	 * Validating the negotiate info helps reduce attack vectors.
1350 	 */
1351 	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1352 		return 0; /* validation requires signing */
1353 
1354 	if (tcon->ses->user_name == NULL) {
1355 		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1356 		return 0; /* validation requires signing */
1357 	}
1358 
1359 	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1360 		cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1361 
1362 	pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1363 	if (!pneg_inbuf)
1364 		return -ENOMEM;
1365 
1366 	pneg_inbuf->Capabilities =
1367 			cpu_to_le32(server->vals->req_capabilities);
1368 	pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1369 
1370 	memcpy(pneg_inbuf->Guid, server->client_guid,
1371 					SMB2_CLIENT_GUID_SIZE);
1372 
1373 	if (tcon->ses->sign)
1374 		pneg_inbuf->SecurityMode =
1375 			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1376 	else if (global_secflags & CIFSSEC_MAY_SIGN)
1377 		pneg_inbuf->SecurityMode =
1378 			cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1379 	else
1380 		pneg_inbuf->SecurityMode = 0;
1381 
1382 
1383 	if (strcmp(server->vals->version_string,
1384 		SMB3ANY_VERSION_STRING) == 0) {
1385 		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1386 		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1387 		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1388 		pneg_inbuf->DialectCount = cpu_to_le16(3);
1389 		/* SMB 2.1 not included so subtract one dialect from len */
1390 		inbuflen = sizeof(*pneg_inbuf) -
1391 				(sizeof(pneg_inbuf->Dialects[0]));
1392 	} else if (strcmp(server->vals->version_string,
1393 		SMBDEFAULT_VERSION_STRING) == 0) {
1394 		pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1395 		pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1396 		pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1397 		pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1398 		pneg_inbuf->DialectCount = cpu_to_le16(4);
1399 		/* structure is big enough for 4 dialects */
1400 		inbuflen = sizeof(*pneg_inbuf);
1401 	} else {
1402 		/* otherwise specific dialect was requested */
1403 		pneg_inbuf->Dialects[0] =
1404 			cpu_to_le16(server->vals->protocol_id);
1405 		pneg_inbuf->DialectCount = cpu_to_le16(1);
1406 		/* structure is big enough for 4 dialects, sending only 1 */
1407 		inbuflen = sizeof(*pneg_inbuf) -
1408 				sizeof(pneg_inbuf->Dialects[0]) * 3;
1409 	}
1410 
1411 	rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1412 		FSCTL_VALIDATE_NEGOTIATE_INFO,
1413 		(char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1414 		(char **)&pneg_rsp, &rsplen);
1415 	if (rc == -EOPNOTSUPP) {
1416 		/*
1417 		 * Old Windows versions or the NetApp SMB server can return a
1418 		 * "not supported" error. The client should accept it.
1419 		 */
1420 		cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
1421 		rc = 0;
1422 		goto out_free_inbuf;
1423 	} else if (rc != 0) {
1424 		cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1425 			      rc);
1426 		rc = smb_EIO1(smb_eio_trace_neg_info_fail, rc);
1427 		goto out_free_inbuf;
1428 	}
1429 
1430 	if (rsplen != sizeof(*pneg_rsp)) {
1431 		cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1432 			      rsplen);
1433 
1434 		/* relax check since Mac returns max bufsize allowed on ioctl */
1435 		if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp)) {
1436 			rc = smb_EIO1(smb_eio_trace_neg_bad_rsplen, rsplen);
1437 			goto out_free_rsp;
1438 		}
1439 	}
1440 
1441 	/* check that the validate negotiate info response matches what we got earlier */
1442 	u16 dialect = le16_to_cpu(pneg_rsp->Dialect);
1443 
1444 	if (dialect != server->dialect) {
1445 		rc = smb_EIO2(smb_eio_trace_neg_info_dialect,
1446 			      dialect, server->dialect);
1447 		goto vneg_out;
1448 	}
1449 
1450 	u16 sec_mode = le16_to_cpu(pneg_rsp->SecurityMode);
1451 
1452 	if (sec_mode != server->sec_mode) {
1453 		rc = smb_EIO2(smb_eio_trace_neg_info_sec_mode,
1454 			      sec_mode, server->sec_mode);
1455 		goto vneg_out;
1456 	}
1457 
1458 	/* do not validate the server guid because it is not saved at negprot time yet */
1459 	u32 caps = le32_to_cpu(pneg_rsp->Capabilities);
1460 
1461 	if ((caps | SMB2_NT_FIND |
1462 	     SMB2_LARGE_FILES) != server->capabilities) {
1463 		rc = smb_EIO2(smb_eio_trace_neg_info_caps,
1464 			      caps, server->capabilities);
1465 		goto vneg_out;
1466 	}
1467 
1468 	/* validate negotiate successful */
1469 	rc = 0;
1470 	cifs_dbg(FYI, "validate negotiate info successful\n");
1471 	goto out_free_rsp;
1472 
1473 vneg_out:
1474 	cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
1475 out_free_rsp:
1476 	kfree(pneg_rsp);
1477 out_free_inbuf:
1478 	kfree(pneg_inbuf);
1479 	return rc;
1480 }
1481 
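/*
 * Pick the authentication mechanism to use for session setup, based on what
 * was requested and what the server advertised during negotiate.
 */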
1482 enum securityEnum
1483 smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1484 {
1485 	switch (requested) {
1486 	case Kerberos:
1487 	case RawNTLMSSP:
1488 		return requested;
1489 	case NTLMv2:
1490 		return RawNTLMSSP;
1491 	case Unspecified:
1492 		if (server->sec_ntlmssp &&
1493 			(global_secflags & CIFSSEC_MAY_NTLMSSP))
1494 			return RawNTLMSSP;
1495 		if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
1496 			(global_secflags & CIFSSEC_MAY_KRB5))
1497 			return Kerberos;
1498 		fallthrough;
1499 	default:
1500 		return Unspecified;
1501 	}
1502 }
1503 
1504 struct SMB2_sess_data {
1505 	unsigned int xid;
1506 	struct cifs_ses *ses;
1507 	struct TCP_Server_Info *server;
1508 	struct nls_table *nls_cp;
1509 	void (*func)(struct SMB2_sess_data *);
1510 	int result;
1511 	u64 previous_session;
1512 
1513 	/* we will send the SMB in three pieces:
1514 	 * a fixed length beginning part, an optional
1515 	 * SPNEGO blob (which can be zero length), and a
1516 	 * last part which will include the strings
1517 	 * and the rest of the bcc area. This allows us to avoid
1518 	 * a large 17K buffer allocation
1519 	 */
1520 	int buf0_type;
1521 	struct kvec iov[2];
1522 };
1523 
1524 static int
1525 SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1526 {
1527 	int rc;
1528 	struct cifs_ses *ses = sess_data->ses;
1529 	struct TCP_Server_Info *server = sess_data->server;
1530 	struct smb2_sess_setup_req *req;
1531 	unsigned int total_len;
1532 	bool is_binding = false;
1533 
1534 	rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1535 				 (void **) &req,
1536 				 &total_len);
1537 	if (rc)
1538 		return rc;
1539 
1540 	spin_lock(&ses->ses_lock);
1541 	is_binding = (ses->ses_status == SES_GOOD);
1542 	spin_unlock(&ses->ses_lock);
1543 
1544 	if (is_binding) {
1545 		req->hdr.SessionId = cpu_to_le64(ses->Suid);
1546 		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1547 		req->PreviousSessionId = 0;
1548 		req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1549 		cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
1550 	} else {
1551 		/* First session, not a reauthenticate */
1552 		req->hdr.SessionId = 0;
1553 		/*
1554 		 * if reconnecting, we need to send the previous session id;
1555 		 * otherwise it is 0
1556 		 */
1557 		req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
1558 		req->Flags = 0; /* MBZ */
1559 		cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
1560 			 sess_data->previous_session);
1561 	}
1562 
1563 	/* enough to enable echos and oplocks and one max size write */
1564 	if (server->credits >= server->max_credits)
1565 		req->hdr.CreditRequest = cpu_to_le16(0);
1566 	else
1567 		req->hdr.CreditRequest = cpu_to_le16(
1568 			min_t(int, server->max_credits -
1569 			      server->credits, 130));
1570 
1571 	/* only one of SMB2 signing flags may be set in SMB2 request */
1572 	if (server->sign)
1573 		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1574 	else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1575 		req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1576 	else
1577 		req->SecurityMode = 0;
1578 
1579 #ifdef CONFIG_CIFS_DFS_UPCALL
1580 	req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1581 #else
1582 	req->Capabilities = 0;
1583 #endif /* DFS_UPCALL */
1584 
1585 	req->Channel = 0; /* MBZ */
1586 
1587 	sess_data->iov[0].iov_base = (char *)req;
1588 	/* 1 for pad */
1589 	sess_data->iov[0].iov_len = total_len - 1;
1590 	/*
1591 	 * This variable will be used to clear the buffer
1592 	 * allocated above in case of any error in the calling function.
1593 	 */
1594 	sess_data->buf0_type = CIFS_SMALL_BUFFER;
1595 
1596 	return 0;
1597 }
1598 
1599 static void
1600 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1601 {
1602 	struct kvec *iov = sess_data->iov;
1603 
1604 	/* iov[1] is already freed by caller */
1605 	if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
1606 		memzero_explicit(iov[0].iov_base, iov[0].iov_len);
1607 
1608 	free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
1609 	sess_data->buf0_type = CIFS_NO_BUFFER;
1610 }
1611 
1612 static int
1613 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1614 {
1615 	int rc;
1616 	struct smb_rqst rqst;
1617 	struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1618 	struct kvec rsp_iov = { NULL, 0 };
1619 
1620 	/* Testing shows that buffer offset must be at location of Buffer[0] */
1621 	req->SecurityBufferOffset =
1622 		cpu_to_le16(sizeof(struct smb2_sess_setup_req));
1623 	req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1624 
1625 	memset(&rqst, 0, sizeof(struct smb_rqst));
1626 	rqst.rq_iov = sess_data->iov;
1627 	rqst.rq_nvec = 2;
1628 
1629 	/* BB add code to build os and lm fields */
1630 	rc = cifs_send_recv(sess_data->xid, sess_data->ses,
1631 			    sess_data->server,
1632 			    &rqst,
1633 			    &sess_data->buf0_type,
1634 			    CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
1635 	cifs_small_buf_release(sess_data->iov[0].iov_base);
1636 	if (rc == 0)
1637 		sess_data->ses->expired_pwd = false;
1638 	else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
1639 		if (!sess_data->ses->expired_pwd)
1640 			trace_smb3_key_expired(sess_data->server->hostname,
1641 					       sess_data->ses->user_name,
1642 					       sess_data->server->conn_id,
1643 					       &sess_data->server->dstaddr, rc);
1644 		sess_data->ses->expired_pwd = true;
1645 	}
1646 
1647 	memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
1648 
1649 	return rc;
1650 }
1651 
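/*
 * Final step of session setup: derive the signing key for this session (or
 * for this channel, when binding) via the dialect's generate_signingkey op
 * and mark the connection as having an established session.
 */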
1652 static int
1653 SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1654 {
1655 	int rc = 0;
1656 	struct cifs_ses *ses = sess_data->ses;
1657 	struct TCP_Server_Info *server = sess_data->server;
1658 
1659 	cifs_server_lock(server);
1660 	if (server->ops->generate_signingkey) {
1661 		rc = server->ops->generate_signingkey(ses, server);
1662 		if (rc) {
1663 			cifs_dbg(FYI,
1664 				"SMB3 session key generation failed\n");
1665 			cifs_server_unlock(server);
1666 			return rc;
1667 		}
1668 	}
1669 	if (!server->session_estab) {
1670 		server->sequence_number = 0x2;
1671 		server->session_estab = true;
1672 	}
1673 	cifs_server_unlock(server);
1674 
1675 	cifs_dbg(FYI, "SMB2/3 session established successfully\n");
1676 	return rc;
1677 }
1678 
1679 #ifdef CONFIG_CIFS_UPCALL
1680 static void
1681 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1682 {
1683 	int rc;
1684 	struct cifs_ses *ses = sess_data->ses;
1685 	struct TCP_Server_Info *server = sess_data->server;
1686 	struct cifs_spnego_msg *msg;
1687 	struct key *spnego_key = NULL;
1688 	struct smb2_sess_setup_rsp *rsp = NULL;
1689 	bool is_binding = false;
1690 
1691 	rc = SMB2_sess_alloc_buffer(sess_data);
1692 	if (rc)
1693 		goto out;
1694 
1695 	spnego_key = cifs_get_spnego_key(ses, server);
1696 	if (IS_ERR(spnego_key)) {
1697 		rc = PTR_ERR(spnego_key);
1698 		spnego_key = NULL;
1699 		goto out;
1700 	}
1701 
1702 	msg = spnego_key->payload.data[0];
1703 	/*
1704 	 * check version field to make sure that cifs.upcall is
1705 	 * sending us a response in an expected form
1706 	 */
1707 	if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
1708 		cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1709 			 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
1710 		rc = -EKEYREJECTED;
1711 		goto out_put_spnego_key;
1712 	}
1713 
1714 	spin_lock(&ses->ses_lock);
1715 	is_binding = (ses->ses_status == SES_GOOD);
1716 	spin_unlock(&ses->ses_lock);
1717 
1718 	/* keep session key if binding */
1719 	if (!is_binding) {
1720 		kfree_sensitive(ses->auth_key.response);
1721 		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1722 						 GFP_KERNEL);
1723 		if (!ses->auth_key.response) {
1724 			cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
1725 				 msg->sesskey_len);
1726 			rc = -ENOMEM;
1727 			goto out_put_spnego_key;
1728 		}
1729 		ses->auth_key.len = msg->sesskey_len;
1730 	}
1731 
1732 	sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1733 	sess_data->iov[1].iov_len = msg->secblob_len;
1734 
1735 	rc = SMB2_sess_sendreceive(sess_data);
1736 	if (rc)
1737 		goto out_put_spnego_key;
1738 
1739 	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1740 	/* keep session id and flags if binding */
1741 	if (!is_binding) {
1742 		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1743 		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1744 	}
1745 
1746 	rc = SMB2_sess_establish_session(sess_data);
1747 out_put_spnego_key:
1748 	key_invalidate(spnego_key);
1749 	key_put(spnego_key);
1750 	if (rc) {
1751 		kfree_sensitive(ses->auth_key.response);
1752 		ses->auth_key.response = NULL;
1753 		ses->auth_key.len = 0;
1754 	}
1755 out:
1756 	sess_data->result = rc;
1757 	sess_data->func = NULL;
1758 	SMB2_sess_free_buffer(sess_data);
1759 }
1760 #else
1761 static void
1762 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1763 {
1764 	cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1765 	sess_data->result = -EOPNOTSUPP;
1766 	sess_data->func = NULL;
1767 }
1768 #endif
1769 
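/*
 * Raw NTLMSSP session setup takes two round trips: the negotiate step below
 * sends the NTLMSSP NEGOTIATE blob and expects STATUS_MORE_PROCESSING_REQUIRED
 * plus a server challenge back, then the authenticate step sends the NTLMSSP
 * AUTHENTICATE blob in a second SESSION_SETUP request.
 */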
1770 static void
1771 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1772 
1773 static void
1774 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1775 {
1776 	int rc;
1777 	struct cifs_ses *ses = sess_data->ses;
1778 	struct TCP_Server_Info *server = sess_data->server;
1779 	struct smb2_sess_setup_rsp *rsp = NULL;
1780 	unsigned char *ntlmssp_blob = NULL;
1781 	bool use_spnego = false; /* else use raw ntlmssp */
1782 	u16 blob_length = 0;
1783 	bool is_binding = false;
1784 
1785 	/*
1786 	 * If the allocation succeeds, the caller of this function is
1787 	 * responsible for freeing ses->ntlmssp.
1788 	 */
1789 	ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1790 	if (!ses->ntlmssp) {
1791 		rc = -ENOMEM;
1792 		goto out_err;
1793 	}
1794 	ses->ntlmssp->sesskey_per_smbsess = true;
1795 
1796 	rc = SMB2_sess_alloc_buffer(sess_data);
1797 	if (rc)
1798 		goto out_err;
1799 
1800 	rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
1801 					  &blob_length, ses, server,
1802 					  sess_data->nls_cp);
1803 	if (rc)
1804 		goto out;
1805 
1806 	if (use_spnego) {
1807 		/* BB eventually need to add this */
1808 		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1809 		rc = -EOPNOTSUPP;
1810 		goto out;
1811 	}
1812 	sess_data->iov[1].iov_base = ntlmssp_blob;
1813 	sess_data->iov[1].iov_len = blob_length;
1814 
1815 	rc = SMB2_sess_sendreceive(sess_data);
1816 	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1817 
1818 	/* If true, rc here is expected and not an error */
1819 	if (sess_data->buf0_type != CIFS_NO_BUFFER &&
1820 		rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
1821 		rc = 0;
1822 
1823 	if (rc)
1824 		goto out;
1825 
1826 	u16 boff = le16_to_cpu(rsp->SecurityBufferOffset);
1827 
1828 	if (offsetof(struct smb2_sess_setup_rsp, Buffer) != boff) {
1829 		cifs_dbg(VFS, "Invalid security buffer offset %d\n", boff);
1830 		rc = smb_EIO1(smb_eio_trace_sess_buf_off, boff);
1831 		goto out;
1832 	}
1833 	rc = decode_ntlmssp_challenge(rsp->Buffer,
1834 			le16_to_cpu(rsp->SecurityBufferLength), ses);
1835 	if (rc)
1836 		goto out;
1837 
1838 	cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1839 
1840 	spin_lock(&ses->ses_lock);
1841 	is_binding = (ses->ses_status == SES_GOOD);
1842 	spin_unlock(&ses->ses_lock);
1843 
1844 	/* keep existing ses id and flags if binding */
1845 	if (!is_binding) {
1846 		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1847 		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1848 	}
1849 
1850 out:
1851 	kfree_sensitive(ntlmssp_blob);
1852 	SMB2_sess_free_buffer(sess_data);
1853 	if (!rc) {
1854 		sess_data->result = 0;
1855 		sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1856 		return;
1857 	}
1858 out_err:
1859 	kfree_sensitive(ses->ntlmssp);
1860 	ses->ntlmssp = NULL;
1861 	sess_data->result = rc;
1862 	sess_data->func = NULL;
1863 }
1864 
1865 static void
1866 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1867 {
1868 	int rc;
1869 	struct cifs_ses *ses = sess_data->ses;
1870 	struct TCP_Server_Info *server = sess_data->server;
1871 	struct smb2_sess_setup_req *req;
1872 	struct smb2_sess_setup_rsp *rsp = NULL;
1873 	unsigned char *ntlmssp_blob = NULL;
1874 	bool use_spnego = false; /* else use raw ntlmssp */
1875 	u16 blob_length = 0;
1876 	bool is_binding = false;
1877 
1878 	rc = SMB2_sess_alloc_buffer(sess_data);
1879 	if (rc)
1880 		goto out;
1881 
1882 	req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1883 	req->hdr.SessionId = cpu_to_le64(ses->Suid);
1884 
1885 	rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
1886 				     ses, server,
1887 				     sess_data->nls_cp);
1888 	if (rc) {
1889 		cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1890 		goto out;
1891 	}
1892 
1893 	if (use_spnego) {
1894 		/* BB eventually need to add this */
1895 		cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1896 		rc = -EOPNOTSUPP;
1897 		goto out;
1898 	}
1899 	sess_data->iov[1].iov_base = ntlmssp_blob;
1900 	sess_data->iov[1].iov_len = blob_length;
1901 
1902 	rc = SMB2_sess_sendreceive(sess_data);
1903 	if (rc)
1904 		goto out;
1905 
1906 	rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1907 
1908 	spin_lock(&ses->ses_lock);
1909 	is_binding = (ses->ses_status == SES_GOOD);
1910 	spin_unlock(&ses->ses_lock);
1911 
1912 	/* keep existing ses id and flags if binding */
1913 	if (!is_binding) {
1914 		ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1915 		ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1916 	}
1917 
1918 	rc = SMB2_sess_establish_session(sess_data);
1919 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1920 	if (ses->server->dialect < SMB30_PROT_ID) {
1921 		cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1922 		/*
1923 		 * The session id is opaque in terms of endianness, so we can't
1924 		 * print it as a long long. We dump it as we got it on the wire.
1925 		 */
1926 		cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
1927 			 &ses->Suid);
1928 		cifs_dbg(VFS, "Session Key   %*ph\n",
1929 			 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1930 		cifs_dbg(VFS, "Signing Key   %*ph\n",
1931 			 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1932 	}
1933 #endif
1934 out:
1935 	kfree_sensitive(ntlmssp_blob);
1936 	SMB2_sess_free_buffer(sess_data);
1937 	kfree_sensitive(ses->ntlmssp);
1938 	ses->ntlmssp = NULL;
1939 	sess_data->result = rc;
1940 	sess_data->func = NULL;
1941 }
1942 
1943 static int
1944 SMB2_select_sec(struct SMB2_sess_data *sess_data)
1945 {
1946 	int type;
1947 	struct cifs_ses *ses = sess_data->ses;
1948 	struct TCP_Server_Info *server = sess_data->server;
1949 
1950 	type = smb2_select_sectype(server, ses->sectype);
1951 	cifs_dbg(FYI, "sess setup type %d\n", type);
1952 	if (type == Unspecified) {
1953 		cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
1954 		return -EINVAL;
1955 	}
1956 
1957 	switch (type) {
1958 	case Kerberos:
1959 		sess_data->func = SMB2_auth_kerberos;
1960 		break;
1961 	case RawNTLMSSP:
1962 		sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1963 		break;
1964 	default:
1965 		cifs_dbg(VFS, "secType %d not supported!\n", type);
1966 		return -EOPNOTSUPP;
1967 	}
1968 
1969 	return 0;
1970 }
1971 
1972 int
1973 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1974 		struct TCP_Server_Info *server,
1975 		const struct nls_table *nls_cp)
1976 {
1977 	int rc = 0;
1978 	struct SMB2_sess_data *sess_data;
1979 
1980 	cifs_dbg(FYI, "Session Setup\n");
1981 
1982 	if (!server) {
1983 		WARN(1, "%s: server is NULL!\n", __func__);
1984 		return smb_EIO(smb_eio_trace_null_pointers);
1985 	}
1986 
1987 	sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1988 	if (!sess_data)
1989 		return -ENOMEM;
1990 
1991 	sess_data->xid = xid;
1992 	sess_data->ses = ses;
1993 	sess_data->server = server;
1994 	sess_data->buf0_type = CIFS_NO_BUFFER;
1995 	sess_data->nls_cp = (struct nls_table *) nls_cp;
1996 	sess_data->previous_session = ses->Suid;
1997 
1998 	rc = SMB2_select_sec(sess_data);
1999 	if (rc)
2000 		goto out;
2001 
2002 	/*
2003 	 * Initialize the session hash with the server one.
2004 	 */
2005 	memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
2006 	       SMB2_PREAUTH_HASH_SIZE);
2007 
2008 	while (sess_data->func)
2009 		sess_data->func(sess_data);
2010 
2011 	if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
2012 		cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
2013 	rc = sess_data->result;
2014 out:
2015 	kfree_sensitive(sess_data);
2016 	return rc;
2017 }
2018 
2019 int
2020 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
2021 {
2022 	struct smb_rqst rqst;
2023 	struct smb2_logoff_req *req; /* response is also trivial struct */
2024 	int rc = 0;
2025 	struct TCP_Server_Info *server;
2026 	int flags = 0;
2027 	unsigned int total_len;
2028 	struct kvec iov[1];
2029 	struct kvec rsp_iov;
2030 	int resp_buf_type;
2031 
2032 	cifs_dbg(FYI, "disconnect session %p\n", ses);
2033 
2034 	if (!ses || !ses->server)
2035 		return smb_EIO(smb_eio_trace_null_pointers);
2036 	server = ses->server;
2037 
2038 	/* no need to send SMB logoff if uid already closed due to reconnect */
2039 	spin_lock(&ses->chan_lock);
2040 	if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
2041 		spin_unlock(&ses->chan_lock);
2042 		goto smb2_session_already_dead;
2043 	}
2044 	spin_unlock(&ses->chan_lock);
2045 
2046 	rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
2047 				 (void **) &req, &total_len);
2048 	if (rc)
2049 		return rc;
2050 
2051 	/* since there is no tcon, smb2_plain_req_init() cannot set the session id, so do it here */
2052 	req->hdr.SessionId = cpu_to_le64(ses->Suid);
2053 
2054 	if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
2055 		flags |= CIFS_TRANSFORM_REQ;
2056 	else if (server->sign)
2057 		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2058 
2059 	flags |= CIFS_NO_RSP_BUF;
2060 
2061 	iov[0].iov_base = (char *)req;
2062 	iov[0].iov_len = total_len;
2063 
2064 	memset(&rqst, 0, sizeof(struct smb_rqst));
2065 	rqst.rq_iov = iov;
2066 	rqst.rq_nvec = 1;
2067 
2068 	rc = cifs_send_recv(xid, ses, ses->server,
2069 			    &rqst, &resp_buf_type, flags, &rsp_iov);
2070 	cifs_small_buf_release(req);
2071 	/*
2072 	 * No tcon so can't do
2073 	 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
2074 	 */
2075 
2076 smb2_session_already_dead:
2077 	return rc;
2078 }
2079 
2080 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
2081 {
2082 	cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
2083 }
2084 
2085 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
2086 
2087 /* These are similar values to what Windows uses */
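/*
 * These limits apply to server side copy (copychunk) requests: at most 256
 * chunks of up to 1MB each, and at most 16MB total per request.
 */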
2088 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
2089 {
2090 	tcon->max_chunks = 256;
2091 	tcon->max_bytes_chunk = 1048576;
2092 	tcon->max_bytes_copy = 16777216;
2093 }
2094 
2095 int
2096 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
2097 	  struct cifs_tcon *tcon, const struct nls_table *cp)
2098 {
2099 	struct smb_rqst rqst;
2100 	struct smb2_tree_connect_req *req;
2101 	struct smb2_tree_connect_rsp *rsp = NULL;
2102 	struct kvec iov[2];
2103 	struct kvec rsp_iov = { NULL, 0 };
2104 	int rc = 0;
2105 	int resp_buftype;
2106 	int unc_path_len;
2107 	__le16 *unc_path = NULL;
2108 	int flags = 0;
2109 	unsigned int total_len;
2110 	struct TCP_Server_Info *server = cifs_pick_channel(ses);
2111 
2112 	cifs_dbg(FYI, "TCON\n");
2113 
2114 	if (!server || !tree)
2115 		return smb_EIO(smb_eio_trace_null_pointers);
2116 
2117 	unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
2118 	if (unc_path == NULL)
2119 		return -ENOMEM;
2120 
2121 	unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp);
2122 	if (unc_path_len <= 0) {
2123 		kfree(unc_path);
2124 		return -EINVAL;
2125 	}
2126 	unc_path_len *= 2;
2127 
2128 	/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
2129 	tcon->tid = 0;
2130 	atomic_set(&tcon->num_remote_opens, 0);
2131 	rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
2132 				 (void **) &req, &total_len);
2133 	if (rc) {
2134 		kfree(unc_path);
2135 		return rc;
2136 	}
2137 
2138 	if (smb3_encryption_required(tcon))
2139 		flags |= CIFS_TRANSFORM_REQ;
2140 
2141 	iov[0].iov_base = (char *)req;
2142 	/* 1 for pad */
2143 	iov[0].iov_len = total_len - 1;
2144 
2145 	/* Testing shows that buffer offset must be at location of Buffer[0] */
2146 	req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req));
2147 	req->PathLength = cpu_to_le16(unc_path_len);
2148 	iov[1].iov_base = unc_path;
2149 	iov[1].iov_len = unc_path_len;
2150 
2151 	/*
2152 	 * A 3.1.1 tcon req must be signed if not encrypted (see MS-SMB2 3.2.4.1.1),
2153 	 * unless it is a guest or anonymous user (see MS-SMB2 3.2.5.3.1).
2154 	 * (Samba servers don't always set the flag, so also check for a null user)
2155 	 */
2156 	if ((server->dialect == SMB311_PROT_ID) &&
2157 	    !smb3_encryption_required(tcon) &&
2158 	    !(ses->session_flags &
2159 		    (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
2160 	    ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
2161 		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2162 
2163 	memset(&rqst, 0, sizeof(struct smb_rqst));
2164 	rqst.rq_iov = iov;
2165 	rqst.rq_nvec = 2;
2166 
2167 	/* Need 64 credits for a max size write, so ask for more in case we do not have them yet */
2168 	if (server->credits >= server->max_credits)
2169 		req->hdr.CreditRequest = cpu_to_le16(0);
2170 	else
2171 		req->hdr.CreditRequest = cpu_to_le16(
2172 			min_t(int, server->max_credits -
2173 			      server->credits, 64));
2174 
2175 	rc = cifs_send_recv(xid, ses, server,
2176 			    &rqst, &resp_buftype, flags, &rsp_iov);
2177 	cifs_small_buf_release(req);
2178 	rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
2179 	trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
2180 	if ((rc != 0) || (rsp == NULL)) {
2181 		cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
2182 		tcon->need_reconnect = true;
2183 		goto tcon_error_exit;
2184 	}
2185 
2186 	switch (rsp->ShareType) {
2187 	case SMB2_SHARE_TYPE_DISK:
2188 		cifs_dbg(FYI, "connection to disk share\n");
2189 		break;
2190 	case SMB2_SHARE_TYPE_PIPE:
2191 		tcon->pipe = true;
2192 		cifs_dbg(FYI, "connection to pipe share\n");
2193 		break;
2194 	case SMB2_SHARE_TYPE_PRINT:
2195 		tcon->print = true;
2196 		cifs_dbg(FYI, "connection to printer\n");
2197 		break;
2198 	default:
2199 		cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
2200 		rc = -EOPNOTSUPP;
2201 		goto tcon_error_exit;
2202 	}
2203 
2204 	tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
2205 	tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
2206 	tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
2207 	tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
2208 	strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
2209 
2210 	if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
2211 	    ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
2212 		cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
2213 
2214 	if (tcon->seal &&
2215 	    !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
2216 		cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
2217 
2218 	init_copy_chunk_defaults(tcon);
2219 	if (server->ops->validate_negotiate)
2220 		rc = server->ops->validate_negotiate(xid, tcon);
2221 	if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */
2222 		if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT)
2223 			server->nosharesock = true;
2224 tcon_exit:
2225 
2226 	free_rsp_buf(resp_buftype, rsp);
2227 	kfree(unc_path);
2228 	return rc;
2229 
2230 tcon_error_exit:
2231 	if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
2232 		cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree);
2233 	goto tcon_exit;
2234 }
2235 
2236 int
2237 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
2238 {
2239 	struct smb_rqst rqst;
2240 	struct smb2_tree_disconnect_req *req; /* response is trivial */
2241 	int rc = 0;
2242 	struct cifs_ses *ses = tcon->ses;
2243 	struct TCP_Server_Info *server = cifs_pick_channel(ses);
2244 	int flags = 0;
2245 	unsigned int total_len;
2246 	struct kvec iov[1];
2247 	struct kvec rsp_iov;
2248 	int resp_buf_type;
2249 
2250 	cifs_dbg(FYI, "Tree Disconnect\n");
2251 
2252 	if (!ses || !(ses->server))
2253 		return smb_EIO(smb_eio_trace_null_pointers);
2254 
2255 	trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
2256 	spin_lock(&ses->chan_lock);
2257 	if ((tcon->need_reconnect) ||
2258 	    (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
2259 		spin_unlock(&ses->chan_lock);
2260 		return 0;
2261 	}
2262 	spin_unlock(&ses->chan_lock);
2263 
2264 	invalidate_all_cached_dirs(tcon);
2265 
2266 	rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
2267 				 (void **) &req,
2268 				 &total_len);
2269 	if (rc)
2270 		return rc;
2271 
2272 	if (smb3_encryption_required(tcon))
2273 		flags |= CIFS_TRANSFORM_REQ;
2274 
2275 	flags |= CIFS_NO_RSP_BUF;
2276 
2277 	iov[0].iov_base = (char *)req;
2278 	iov[0].iov_len = total_len;
2279 
2280 	memset(&rqst, 0, sizeof(struct smb_rqst));
2281 	rqst.rq_iov = iov;
2282 	rqst.rq_nvec = 1;
2283 
2284 	rc = cifs_send_recv(xid, ses, server,
2285 			    &rqst, &resp_buf_type, flags, &rsp_iov);
2286 	cifs_small_buf_release(req);
2287 	if (rc) {
2288 		cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
2289 		trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
2290 	}
2291 	trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
2292 
2293 	return rc;
2294 }
2295 
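/*
 * Durable handle (v1) create contexts: "DHnQ" requests a durable handle on
 * open and "DHnC" (below) reconnects to one after a network outage using the
 * saved persistent/volatile file ids.  The v2 ("DH2Q"/"DH2C") variants used
 * for persistent handles follow later in this file.
 */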
2296 static create_durable_req_t *
2297 create_durable_buf(void)
2298 {
2299 	create_durable_req_t *buf;
2300 
2301 	buf = kzalloc(sizeof(create_durable_req_t), GFP_KERNEL);
2302 	if (!buf)
2303 		return NULL;
2304 
2305 	buf->ccontext.DataOffset = cpu_to_le16(offsetof
2306 					(create_durable_req_t, Data));
2307 	buf->ccontext.DataLength = cpu_to_le32(16);
2308 	buf->ccontext.NameOffset = cpu_to_le16(offsetof
2309 				(create_durable_req_t, Name));
2310 	buf->ccontext.NameLength = cpu_to_le16(4);
2311 	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
2312 	buf->Name[0] = 'D';
2313 	buf->Name[1] = 'H';
2314 	buf->Name[2] = 'n';
2315 	buf->Name[3] = 'Q';
2316 	return buf;
2317 }
2318 
2319 static create_durable_req_t *
2320 create_reconnect_durable_buf(struct cifs_fid *fid)
2321 {
2322 	create_durable_req_t *buf;
2323 
2324 	buf = kzalloc(sizeof(create_durable_req_t), GFP_KERNEL);
2325 	if (!buf)
2326 		return NULL;
2327 
2328 	buf->ccontext.DataOffset = cpu_to_le16(offsetof
2329 					(create_durable_req_t, Data));
2330 	buf->ccontext.DataLength = cpu_to_le32(16);
2331 	buf->ccontext.NameOffset = cpu_to_le16(offsetof
2332 				(create_durable_req_t, Name));
2333 	buf->ccontext.NameLength = cpu_to_le16(4);
2334 	buf->Data.Fid.PersistentFileId = fid->persistent_fid;
2335 	buf->Data.Fid.VolatileFileId = fid->volatile_fid;
2336 	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
2337 	buf->Name[0] = 'D';
2338 	buf->Name[1] = 'H';
2339 	buf->Name[2] = 'n';
2340 	buf->Name[3] = 'C';
2341 	return buf;
2342 }
2343 
2344 static void
2345 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
2346 {
2347 	struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc;
2348 
2349 	cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2350 		pdisk_id->DiskFileId, pdisk_id->VolumeId);
2351 	buf->IndexNumber = pdisk_id->DiskFileId;
2352 }
2353 
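/*
 * The SMB3.1.1 POSIX create context response data starts with three 32 bit
 * fields (nlink, reparse tag, mode) followed by variable length owner and
 * group SIDs, parsed below.
 */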
2354 static void
2355 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2356 		 struct create_posix_rsp *posix)
2357 {
2358 	int sid_len;
2359 	u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2360 	u8 *end = beg + le32_to_cpu(cc->DataLength);
2361 	u8 *sid;
2362 
2363 	memset(posix, 0, sizeof(*posix));
2364 
2365 	posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2366 	posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2367 	posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2368 
2369 	sid = beg + 12;
2370 	sid_len = posix_info_sid_size(sid, end);
2371 	if (sid_len < 0) {
2372 		cifs_dbg(VFS, "bad owner sid in posix create response\n");
2373 		return;
2374 	}
2375 	memcpy(&posix->owner, sid, sid_len);
2376 
2377 	sid = sid + sid_len;
2378 	sid_len = posix_info_sid_size(sid, end);
2379 	if (sid_len < 0) {
2380 		cifs_dbg(VFS, "bad group sid in posix create response\n");
2381 		return;
2382 	}
2383 	memcpy(&posix->group, sid, sid_len);
2384 
2385 	cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2386 		 posix->nlink, posix->mode, posix->reparse_tag);
2387 }
2388 
2389 int smb2_parse_contexts(struct TCP_Server_Info *server,
2390 			struct kvec *rsp_iov,
2391 			__u16 *epoch,
2392 			char *lease_key, __u8 *oplock,
2393 			struct smb2_file_all_info *buf,
2394 			struct create_posix_rsp *posix)
2395 {
2396 	struct smb2_create_rsp *rsp = rsp_iov->iov_base;
2397 	struct create_context *cc;
2398 	size_t rem, off, len;
2399 	size_t doff, dlen;
2400 	size_t noff, nlen;
2401 	char *name;
2402 	static const char smb3_create_tag_posix[] = {
2403 		0x93, 0xAD, 0x25, 0x50, 0x9C,
2404 		0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2405 		0xDE, 0x96, 0x8B, 0xCD, 0x7C
2406 	};
2407 
2408 	*oplock = 0;
2409 
2410 	off = le32_to_cpu(rsp->CreateContextsOffset);
2411 	rem = le32_to_cpu(rsp->CreateContextsLength);
2412 	if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
2413 		return -EINVAL;
2414 	cc = (struct create_context *)((u8 *)rsp + off);
2415 
2416 	/* Initialize inode number to 0 in case no valid data in qfid context */
2417 	if (buf)
2418 		buf->IndexNumber = 0;
2419 
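	/*
	 * Walk the chain of create contexts in the response.  Each entry is
	 * bounds checked (name and data must fit in the remaining length)
	 * and then dispatched by name length: 4 byte tags for the lease
	 * ("RqLs") and on disk id ("QFid") contexts, a 16 byte tag for the
	 * POSIX context.
	 */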
2420 	while (rem >= sizeof(*cc)) {
2421 		doff = le16_to_cpu(cc->DataOffset);
2422 		dlen = le32_to_cpu(cc->DataLength);
2423 		if (check_add_overflow(doff, dlen, &len) || len > rem)
2424 			return -EINVAL;
2425 
2426 		noff = le16_to_cpu(cc->NameOffset);
2427 		nlen = le16_to_cpu(cc->NameLength);
2428 		if (noff + nlen > doff)
2429 			return -EINVAL;
2430 
2431 		name = (char *)cc + noff;
2432 		switch (nlen) {
2433 		case 4:
2434 			if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
2435 				*oplock = server->ops->parse_lease_buf(cc, epoch,
2436 								       lease_key);
2437 			} else if (buf &&
2438 				   !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
2439 				parse_query_id_ctxt(cc, buf);
2440 			}
2441 			break;
2442 		case 16:
2443 			if (posix && !memcmp(name, smb3_create_tag_posix, 16))
2444 				parse_posix_ctxt(cc, buf, posix);
2445 			break;
2446 		default:
2447 			cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
2448 				 __func__, nlen, dlen);
2449 			if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
2450 				cifs_dump_mem("context data: ", cc, dlen);
2451 			break;
2452 		}
2453 
2454 		off = le32_to_cpu(cc->Next);
2455 		if (!off)
2456 			break;
2457 		if (check_sub_overflow(rem, off, &rem))
2458 			return -EINVAL;
2459 		cc = (struct create_context *)((u8 *)cc + off);
2460 	}
2461 
2462 	if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2463 		*oplock = rsp->OplockLevel;
2464 
2465 	return 0;
2466 }
2467 
2468 static int
2469 add_lease_context(struct TCP_Server_Info *server,
2470 		  struct smb2_create_req *req,
2471 		  struct kvec *iov,
2472 		  unsigned int *num_iovec,
2473 		  u8 *lease_key,
2474 		  __u8 *oplock,
2475 		  u8 *parent_lease_key,
2476 		  __le32 flags)
2477 {
2478 	unsigned int num = *num_iovec;
2479 
2480 	iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock,
2481 							  parent_lease_key, flags);
2482 	if (iov[num].iov_base == NULL)
2483 		return -ENOMEM;
2484 	iov[num].iov_len = server->vals->create_lease_size;
2485 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2486 	*num_iovec = num + 1;
2487 	return 0;
2488 }
2489 
2490 static struct create_durable_req_v2 *
2491 create_durable_v2_buf(struct cifs_open_parms *oparms)
2492 {
2493 	struct cifs_fid *pfid = oparms->fid;
2494 	struct create_durable_req_v2 *buf;
2495 
2496 	buf = kzalloc(sizeof(struct create_durable_req_v2), GFP_KERNEL);
2497 	if (!buf)
2498 		return NULL;
2499 
2500 	buf->ccontext.DataOffset = cpu_to_le16(offsetof
2501 					(struct create_durable_req_v2, dcontext));
2502 	buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2_req));
2503 	buf->ccontext.NameOffset = cpu_to_le16(offsetof
2504 				(struct create_durable_req_v2, Name));
2505 	buf->ccontext.NameLength = cpu_to_le16(4);
2506 
2507 	/*
2508 	 * NB: Handle timeout defaults to 0, which lets the server choose
2509 	 * (most servers default to 120 seconds); most clients also default to 0.
2510 	 * This can be overridden at mount ("handletimeout=") if the user wants
2511 	 * a different persistent (or resilient) handle timeout for all opens
2512 	 * on a particular SMB3 mount.
2513 	 */
2514 	buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2515 	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2516 
2517 	/* for replay, we should not overwrite the existing create guid */
2518 	if (!oparms->replay) {
2519 		generate_random_uuid(buf->dcontext.CreateGuid);
2520 		memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2521 	} else
2522 		memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
2523 
2524 	/* SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2 is "DH2Q" */
2525 	buf->Name[0] = 'D';
2526 	buf->Name[1] = 'H';
2527 	buf->Name[2] = '2';
2528 	buf->Name[3] = 'Q';
2529 	return buf;
2530 }
2531 
2532 static struct create_durable_handle_reconnect_v2 *
2533 create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2534 {
2535 	struct create_durable_handle_reconnect_v2 *buf;
2536 
2537 	buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2538 			GFP_KERNEL);
2539 	if (!buf)
2540 		return NULL;
2541 
2542 	buf->ccontext.DataOffset =
2543 		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2544 				     dcontext));
2545 	buf->ccontext.DataLength =
2546 		cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2547 	buf->ccontext.NameOffset =
2548 		cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2549 			    Name));
2550 	buf->ccontext.NameLength = cpu_to_le16(4);
2551 
2552 	buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2553 	buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2554 	buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2555 	memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2556 
2557 	/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2558 	buf->Name[0] = 'D';
2559 	buf->Name[1] = 'H';
2560 	buf->Name[2] = '2';
2561 	buf->Name[3] = 'C';
2562 	return buf;
2563 }
2564 
2565 static int
2566 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2567 		    struct cifs_open_parms *oparms)
2568 {
2569 	unsigned int num = *num_iovec;
2570 
2571 	iov[num].iov_base = create_durable_v2_buf(oparms);
2572 	if (iov[num].iov_base == NULL)
2573 		return -ENOMEM;
2574 	iov[num].iov_len = sizeof(struct create_durable_req_v2);
2575 	*num_iovec = num + 1;
2576 	return 0;
2577 }
2578 
2579 static int
2580 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2581 		    struct cifs_open_parms *oparms)
2582 {
2583 	unsigned int num = *num_iovec;
2584 
2585 	/* indicate that we don't need to relock the file */
2586 	oparms->reconnect = false;
2587 
2588 	iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2589 	if (iov[num].iov_base == NULL)
2590 		return -ENOMEM;
2591 	iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2592 	*num_iovec = num + 1;
2593 	return 0;
2594 }
2595 
2596 static int
2597 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2598 		    struct cifs_open_parms *oparms, bool use_persistent)
2599 {
2600 	unsigned int num = *num_iovec;
2601 
2602 	if (use_persistent) {
2603 		if (oparms->reconnect)
2604 			return add_durable_reconnect_v2_context(iov, num_iovec,
2605 								oparms);
2606 		else
2607 			return add_durable_v2_context(iov, num_iovec, oparms);
2608 	}
2609 
2610 	if (oparms->reconnect) {
2611 		iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2612 		/* indicate that we don't need to relock the file */
2613 		oparms->reconnect = false;
2614 	} else
2615 		iov[num].iov_base = create_durable_buf();
2616 	if (iov[num].iov_base == NULL)
2617 		return -ENOMEM;
2618 	iov[num].iov_len = sizeof(create_durable_req_t);
2619 	*num_iovec = num + 1;
2620 	return 0;
2621 }
2622 
2623 /* See MS-SMB2 2.2.13.2.7 */
2624 static struct crt_twarp_ctxt *
2625 create_twarp_buf(__u64 timewarp)
2626 {
2627 	struct crt_twarp_ctxt *buf;
2628 
2629 	buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2630 	if (!buf)
2631 		return NULL;
2632 
2633 	buf->ccontext.DataOffset = cpu_to_le16(offsetof
2634 					(struct crt_twarp_ctxt, Timestamp));
2635 	buf->ccontext.DataLength = cpu_to_le32(8);
2636 	buf->ccontext.NameOffset = cpu_to_le16(offsetof
2637 				(struct crt_twarp_ctxt, Name));
2638 	buf->ccontext.NameLength = cpu_to_le16(4);
2639 	/* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2640 	buf->Name[0] = 'T';
2641 	buf->Name[1] = 'W';
2642 	buf->Name[2] = 'r';
2643 	buf->Name[3] = 'p';
2644 	buf->Timestamp = cpu_to_le64(timewarp);
2645 	return buf;
2646 }
2647 
2648 /* See MS-SMB2 2.2.13.2.7 */
2649 static int
2650 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2651 {
2652 	unsigned int num = *num_iovec;
2653 
2654 	iov[num].iov_base = create_twarp_buf(timewarp);
2655 	if (iov[num].iov_base == NULL)
2656 		return -ENOMEM;
2657 	iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2658 	*num_iovec = num + 1;
2659 	return 0;
2660 }
2661 
2662 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2663 static void setup_owner_group_sids(char *buf)
2664 {
2665 	struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2666 
2667 	/* Populate the user ownership fields S-1-5-88-1 */
2668 	sids->owner.Revision = 1;
2669 	sids->owner.NumAuth = 3;
2670 	sids->owner.Authority[5] = 5;
2671 	sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2672 	sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2673 	sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2674 
2675 	/* Populate the group ownership fields S-1-5-88-2 */
2676 	sids->group.Revision = 1;
2677 	sids->group.NumAuth = 3;
2678 	sids->group.Authority[5] = 5;
2679 	sids->group.SubAuthorities[0] = cpu_to_le32(88);
2680 	sids->group.SubAuthorities[1] = cpu_to_le32(2);
2681 	sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
2682 
2683 	cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
2684 }
2685 
2686 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2687 static struct crt_sd_ctxt *
2688 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
2689 {
2690 	struct crt_sd_ctxt *buf;
2691 	__u8 *ptr, *aclptr;
2692 	unsigned int acelen, acl_size, ace_count;
2693 	unsigned int owner_offset = 0;
2694 	unsigned int group_offset = 0;
2695 	struct smb3_acl acl = {};
2696 
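	/*
	 * Initial worst case length estimate: the fixed part of the context
	 * plus room for up to four ACEs, rounded up to 8 bytes.  The exact
	 * length is recomputed at the end once the ACEs have been written.
	 */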
2697 	*len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
2698 
2699 	if (set_owner) {
2700 		/* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2701 		*len += sizeof(struct owner_group_sids);
2702 	}
2703 
2704 	buf = kzalloc(*len, GFP_KERNEL);
2705 	if (buf == NULL)
2706 		return buf;
2707 
2708 	ptr = (__u8 *)&buf[1];
2709 	if (set_owner) {
2710 		/* offset fields are from beginning of security descriptor not of create context */
2711 		owner_offset = ptr - (__u8 *)&buf->sd;
2712 		buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
2713 		group_offset = owner_offset + offsetof(struct owner_group_sids, group);
2714 		buf->sd.OffsetGroup = cpu_to_le32(group_offset);
2715 
2716 		setup_owner_group_sids(ptr);
2717 		ptr += sizeof(struct owner_group_sids);
2718 	} else {
2719 		buf->sd.OffsetOwner = 0;
2720 		buf->sd.OffsetGroup = 0;
2721 	}
2722 
2723 	buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
2724 	buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
2725 	buf->ccontext.NameLength = cpu_to_le16(4);
2726 	/* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2727 	buf->Name[0] = 'S';
2728 	buf->Name[1] = 'e';
2729 	buf->Name[2] = 'c';
2730 	buf->Name[3] = 'D';
2731 	buf->sd.Revision = 1;  /* Must be one see MS-DTYP 2.4.6 */
2732 
2733 	/*
2734 	 * "SR" ie the security descriptor is self relative (stored in one
2735 	 * contiguous block of memory) and "DP" ie a DACL is present
2736 	 */
2737 	buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2738 
2739 	/* Sbz1 and the SACL offset are left zero (owner/group offsets were set above only if set_owner) */
2740 	buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2741 	/* Skip over the ACL header for now; we will copy it into buf later. */
2742 	aclptr = ptr;
2743 	ptr += sizeof(struct smb3_acl);
2744 
2745 	/* create one ACE to hold the mode embedded in reserved special SID */
2746 	acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
2747 	ptr += acelen;
2748 	acl_size = acelen + sizeof(struct smb3_acl);
2749 	ace_count = 1;
2750 
2751 	if (set_owner) {
2752 		/* no need to reallocate the buffer to add the two extra ACEs; there is plenty of space */
2753 		acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
2754 		ptr += acelen;
2755 		acl_size += acelen;
2756 		ace_count += 1;
2757 	}
2758 
2759 	/* and one more ACE to allow access for authenticated users */
2760 	acelen = setup_authusers_ACE((struct smb_ace *)ptr);
2761 	ptr += acelen;
2762 	acl_size += acelen;
2763 	ace_count += 1;
2764 
2765 	acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2766 	acl.AclSize = cpu_to_le16(acl_size);
2767 	acl.AceCount = cpu_to_le16(ace_count);
2768 	/* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */
2769 	memcpy(aclptr, &acl, sizeof(struct smb3_acl));
2770 
2771 	buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2772 	*len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
2773 
2774 	return buf;
2775 }
2776 
2777 static int
2778 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2779 {
2780 	unsigned int num = *num_iovec;
2781 	unsigned int len = 0;
2782 
2783 	iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2784 	if (iov[num].iov_base == NULL)
2785 		return -ENOMEM;
2786 	iov[num].iov_len = len;
2787 	*num_iovec = num + 1;
2788 	return 0;
2789 }
2790 
2791 static struct crt_query_id_ctxt *
2792 create_query_id_buf(void)
2793 {
2794 	struct crt_query_id_ctxt *buf;
2795 
2796 	buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2797 	if (!buf)
2798 		return NULL;
2799 
2800 	buf->ccontext.DataOffset = cpu_to_le16(0);
2801 	buf->ccontext.DataLength = cpu_to_le32(0);
2802 	buf->ccontext.NameOffset = cpu_to_le16(offsetof
2803 				(struct crt_query_id_ctxt, Name));
2804 	buf->ccontext.NameLength = cpu_to_le16(4);
2805 	/* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2806 	buf->Name[0] = 'Q';
2807 	buf->Name[1] = 'F';
2808 	buf->Name[2] = 'i';
2809 	buf->Name[3] = 'd';
2810 	return buf;
2811 }
2812 
2813 /* See MS-SMB2 2.2.13.2.9 */
2814 static int
2815 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2816 {
2817 	unsigned int num = *num_iovec;
2818 
2819 	iov[num].iov_base = create_query_id_buf();
2820 	if (iov[num].iov_base == NULL)
2821 		return -ENOMEM;
2822 	iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2823 	*num_iovec = num + 1;
2824 	return 0;
2825 }
2826 
2827 static void add_ea_context(struct cifs_open_parms *oparms,
2828 			   struct kvec *rq_iov, unsigned int *num_iovs)
2829 {
2830 	struct kvec *iov = oparms->ea_cctx;
2831 
2832 	if (iov && iov->iov_base && iov->iov_len) {
2833 		rq_iov[(*num_iovs)++] = *iov;
2834 		memset(iov, 0, sizeof(*iov));
2835 	}
2836 }
2837 
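/*
 * Build the DFS form of the path by prefixing @path with the tree name minus
 * its leading "\\", e.g. tree "\\srv\share" and path "dir\file" yield
 * "srv\share\dir\file" (in UTF-16), with the output buffer rounded up to the
 * 8 byte alignment required for the SMB2 CREATE name.
 */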
2838 static int
2839 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2840 			    const char *treename, const __le16 *path)
2841 {
2842 	int treename_len, path_len;
2843 	struct nls_table *cp;
2844 	const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2845 
2846 	/*
2847 	 * skip leading "\\"
2848 	 */
2849 	treename_len = strlen(treename);
2850 	if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2851 		return -EINVAL;
2852 
2853 	treename += 2;
2854 	treename_len -= 2;
2855 
2856 	path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2857 
2858 	/* make room for one path separator only if @path isn't empty */
2859 	*out_len = treename_len + (path[0] ? 1 : 0) + path_len;
2860 
2861 	/*
2862 	 * final path needs to be 8-byte aligned as specified in
2863 	 * MS-SMB2 2.2.13 SMB2 CREATE Request.
2864 	 */
2865 	*out_size = round_up(*out_len * sizeof(__le16), 8);
2866 	*out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
2867 	if (!*out_path)
2868 		return -ENOMEM;
2869 
2870 	cp = load_nls_default();
2871 	cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2872 
2873 	/* Do not append the separator if the path is empty */
2874 	if (path[0] != cpu_to_le16(0x0000)) {
2875 		UniStrcat((wchar_t *)*out_path, (wchar_t *)sep);
2876 		UniStrcat((wchar_t *)*out_path, (wchar_t *)path);
2877 	}
2878 
2879 	unload_nls(cp);
2880 
2881 	return 0;
2882 }
2883 
2884 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2885 			       umode_t mode, struct cifs_tcon *tcon,
2886 			       const char *full_path,
2887 			       struct cifs_sb_info *cifs_sb)
2888 {
2889 	struct smb_rqst rqst;
2890 	struct smb2_create_req *req;
2891 	struct smb2_create_rsp *rsp = NULL;
2892 	struct cifs_ses *ses = tcon->ses;
2893 	struct kvec iov[3]; /* make sure at least one for each open context */
2894 	struct kvec rsp_iov = {NULL, 0};
2895 	int resp_buftype;
2896 	int uni_path_len;
2897 	__le16 *copy_path = NULL;
2898 	int copy_size;
2899 	int rc = 0;
2900 	unsigned int n_iov = 2;
2901 	__u32 file_attributes = 0;
2902 	char *pc_buf = NULL;
2903 	int flags = 0;
2904 	unsigned int total_len;
2905 	__le16 *utf16_path = NULL;
2906 	struct TCP_Server_Info *server;
2907 	int retries = 0, cur_sleep = 0;
2908 
2909 replay_again:
2910 	/* reinitialize for possible replay */
2911 	flags = 0;
2912 	n_iov = 2;
2913 	server = cifs_pick_channel(ses);
2914 
2915 	cifs_dbg(FYI, "mkdir\n");
2916 
2917 	/* resource #1: path allocation */
2918 	utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2919 	if (!utf16_path)
2920 		return -ENOMEM;
2921 
2922 	if (!ses || !server) {
2923 		rc = smb_EIO(smb_eio_trace_null_pointers);
2924 		goto err_free_path;
2925 	}
2926 
2927 	/* resource #2: request */
2928 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2929 				 (void **) &req, &total_len);
2930 	if (rc)
2931 		goto err_free_path;
2932 
2933 
2934 	if (smb3_encryption_required(tcon))
2935 		flags |= CIFS_TRANSFORM_REQ;
2936 
2937 	req->ImpersonationLevel = IL_IMPERSONATION;
2938 	req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2939 	/* File attributes ignored on open (used in create though) */
2940 	req->FileAttributes = cpu_to_le32(file_attributes);
2941 	req->ShareAccess = FILE_SHARE_ALL_LE;
2942 	req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2943 	req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2944 
2945 	iov[0].iov_base = (char *)req;
2946 	/* -1 since last byte is buf[0] which is sent below (path) */
2947 	iov[0].iov_len = total_len - 1;
2948 
2949 	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2950 
2951 	/* [MS-SMB2] 2.2.13 NameOffset:
2952 	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2953 	 * the SMB2 header, the file name includes a prefix that will
2954 	 * be processed during DFS name normalization as specified in
2955 	 * section 3.3.5.9. Otherwise, the file name is relative to
2956 	 * the share that is identified by the TreeId in the SMB2
2957 	 * header.
2958 	 */
2959 	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2960 		int name_len;
2961 
2962 		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2963 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2964 						 &name_len,
2965 						 tcon->tree_name, utf16_path);
2966 		if (rc)
2967 			goto err_free_req;
2968 
2969 		req->NameLength = cpu_to_le16(name_len * 2);
2970 		uni_path_len = copy_size;
2971 		/* free before overwriting resource */
2972 		kfree(utf16_path);
2973 		utf16_path = copy_path;
2974 	} else {
2975 		uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2976 		/* MUST set path len (NameLength) to 0 when opening the root of the share */
2977 		req->NameLength = cpu_to_le16(uni_path_len - 2);
2978 		if (uni_path_len % 8 != 0) {
2979 			copy_size = roundup(uni_path_len, 8);
2980 			copy_path = kzalloc(copy_size, GFP_KERNEL);
2981 			if (!copy_path) {
2982 				rc = -ENOMEM;
2983 				goto err_free_req;
2984 			}
2985 			memcpy((char *)copy_path, (const char *)utf16_path,
2986 			       uni_path_len);
2987 			uni_path_len = copy_size;
2988 			/* free before overwriting resource */
2989 			kfree(utf16_path);
2990 			utf16_path = copy_path;
2991 		}
2992 	}
2993 
2994 	iov[1].iov_len = uni_path_len;
2995 	iov[1].iov_base = utf16_path;
2996 	req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2997 
2998 	if (tcon->posix_extensions) {
2999 		/* resource #3: posix buf */
3000 		rc = add_posix_context(iov, &n_iov, mode);
3001 		if (rc)
3002 			goto err_free_req;
3003 		req->CreateContextsOffset = cpu_to_le32(
3004 			sizeof(struct smb2_create_req) +
3005 			iov[1].iov_len);
3006 		le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
3007 		pc_buf = iov[n_iov-1].iov_base;
3008 	}
3009 
3010 
3011 	memset(&rqst, 0, sizeof(struct smb_rqst));
3012 	rqst.rq_iov = iov;
3013 	rqst.rq_nvec = n_iov;
3014 
3015 	/* no need to inc num_remote_opens because we close it just below */
3016 	trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
3017 				    FILE_WRITE_ATTRIBUTES);
3018 
3019 	if (retries) {
3020 		/* Back-off before retry */
3021 		if (cur_sleep)
3022 			msleep(cur_sleep);
3023 		smb2_set_replay(server, &rqst);
3024 	}
3025 
3026 	/* resource #4: response buffer */
3027 	rc = cifs_send_recv(xid, ses, server,
3028 			    &rqst, &resp_buftype, flags, &rsp_iov);
3029 	if (rc) {
3030 		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
3031 		trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
3032 					   CREATE_NOT_FILE,
3033 					   FILE_WRITE_ATTRIBUTES, rc);
3034 		goto err_free_rsp_buf;
3035 	}
3036 
3037 	/*
3038 	 * Although it is unlikely for rsp to be NULL when rc is not set,
3039 	 * adding the check below is slightly safer long term (and quiets a
3040 	 * Coverity warning)
3041 	 */
3042 	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3043 	if (rsp == NULL) {
3044 		rc = smb_EIO(smb_eio_trace_mkdir_no_rsp);
3045 		kfree(pc_buf);
3046 		goto err_free_req;
3047 	}
3048 
3049 	trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3050 				    CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
3051 
3052 	SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
3053 
3054 	/* Eventually save off posix specific response info and timestamps */
3055 
3056 err_free_rsp_buf:
3057 	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3058 	kfree(pc_buf);
3059 err_free_req:
3060 	cifs_small_buf_release(req);
3061 err_free_path:
3062 	kfree(utf16_path);
3063 
3064 	if (is_replayable_error(rc) &&
3065 	    smb2_should_replay(tcon, &retries, &cur_sleep))
3066 		goto replay_again;
3067 
3068 	return rc;
3069 }
3070 
3071 int
3072 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3073 	       struct smb_rqst *rqst, __u8 *oplock,
3074 	       struct cifs_open_parms *oparms, __le16 *path)
3075 {
3076 	struct smb2_create_req *req;
3077 	unsigned int n_iov = 2;
3078 	__u32 file_attributes = 0;
3079 	int copy_size;
3080 	int uni_path_len;
3081 	unsigned int total_len;
3082 	struct kvec *iov = rqst->rq_iov;
3083 	__le16 *copy_path;
3084 	int rc;
3085 
3086 	rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
3087 				 (void **) &req, &total_len);
3088 	if (rc)
3089 		return rc;
3090 
3091 	iov[0].iov_base = (char *)req;
3092 	/* -1 since last byte is buf[0] which is sent below (path) */
3093 	iov[0].iov_len = total_len - 1;
3094 
3095 	if (oparms->create_options & CREATE_OPTION_READONLY)
3096 		file_attributes |= ATTR_READONLY;
3097 	if (oparms->create_options & CREATE_OPTION_SPECIAL)
3098 		file_attributes |= ATTR_SYSTEM;
3099 
3100 	req->ImpersonationLevel = IL_IMPERSONATION;
3101 	req->DesiredAccess = cpu_to_le32(oparms->desired_access);
3102 	/* File attributes ignored on open (used in create though) */
3103 	req->FileAttributes = cpu_to_le32(file_attributes);
3104 	req->ShareAccess = FILE_SHARE_ALL_LE;
3105 
3106 	req->CreateDisposition = cpu_to_le32(oparms->disposition);
3107 	req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
3108 	req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
3109 
3110 	/* [MS-SMB2] 2.2.13 NameOffset:
3111 	 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
3112 	 * the SMB2 header, the file name includes a prefix that will
3113 	 * be processed during DFS name normalization as specified in
3114 	 * section 3.3.5.9. Otherwise, the file name is relative to
3115 	 * the share that is identified by the TreeId in the SMB2
3116 	 * header.
3117 	 */
3118 	if (tcon->share_flags & SHI1005_FLAGS_DFS) {
3119 		int name_len;
3120 
3121 		req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
3122 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
3123 						 &name_len,
3124 						 tcon->tree_name, path);
3125 		if (rc)
3126 			return rc;
3127 		req->NameLength = cpu_to_le16(name_len * 2);
3128 		uni_path_len = copy_size;
3129 		path = copy_path;
3130 	} else {
3131 		uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
3132 		/* MUST set path len (NameLength) to 0 opening root of share */
3133 		/* MUST set path len (NameLength) to 0 when opening the root of the share */
3134 		copy_size = round_up(uni_path_len, 8);
3135 		copy_path = kzalloc(copy_size, GFP_KERNEL);
3136 		if (!copy_path)
3137 			return -ENOMEM;
3138 		memcpy((char *)copy_path, (const char *)path,
3139 		       uni_path_len);
3140 		uni_path_len = copy_size;
3141 		path = copy_path;
3142 	}
3143 
3144 	iov[1].iov_len = uni_path_len;
3145 	iov[1].iov_base = path;
3146 
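	/*
	 * Choose between a classic oplock and a lease create context: fall
	 * back to the plain RequestedOplockLevel when leasing is unsupported
	 * or no oplock was requested, or for directory opens when the server
	 * lacks directory leasing; otherwise add a lease context.
	 */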
3147 	if ((!server->oplocks) || (tcon->no_lease))
3148 		*oplock = SMB2_OPLOCK_LEVEL_NONE;
3149 
3150 	if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3151 	    *oplock == SMB2_OPLOCK_LEVEL_NONE)
3152 		req->RequestedOplockLevel = *oplock;
3153 	else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3154 		  (oparms->create_options & CREATE_NOT_FILE))
3155 		req->RequestedOplockLevel = *oplock; /* no srv lease support */
3156 	else {
3157 		rc = add_lease_context(server, req, iov, &n_iov,
3158 				       oparms->fid->lease_key, oplock,
3159 				       oparms->fid->parent_lease_key,
3160 				       oparms->lease_flags);
3161 		if (rc)
3162 			return rc;
3163 	}
3164 
3165 	if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3166 		rc = add_durable_context(iov, &n_iov, oparms,
3167 					tcon->use_persistent);
3168 		if (rc)
3169 			return rc;
3170 	}
3171 
3172 	if (tcon->posix_extensions) {
3173 		rc = add_posix_context(iov, &n_iov, oparms->mode);
3174 		if (rc)
3175 			return rc;
3176 	}
3177 
3178 	if (tcon->snapshot_time) {
3179 		cifs_dbg(FYI, "adding snapshot context\n");
3180 		rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
3181 		if (rc)
3182 			return rc;
3183 	}
3184 
3185 	if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
3186 		bool set_mode;
3187 		bool set_owner;
3188 
3189 		if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
3190 		    (oparms->mode != ACL_NO_MODE))
3191 			set_mode = true;
3192 		else {
3193 			set_mode = false;
3194 			oparms->mode = ACL_NO_MODE;
3195 		}
3196 
3197 		if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
3198 			set_owner = true;
3199 		else
3200 			set_owner = false;
3201 
3202 		if (set_owner || set_mode) {
3203 			cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
3204 			rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
3205 			if (rc)
3206 				return rc;
3207 		}
3208 	}
3209 
3210 	add_query_id_context(iov, &n_iov);
3211 	add_ea_context(oparms, iov, &n_iov);
3212 
3213 	if (n_iov > 2) {
3214 		/*
3215 		 * We have create contexts behind iov[1] (the file
3216 		 * name), point at them from the main create request
3217 		 */
3218 		req->CreateContextsOffset = cpu_to_le32(
3219 			sizeof(struct smb2_create_req) +
3220 			iov[1].iov_len);
3221 		req->CreateContextsLength = 0;
3222 
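		/*
		 * Chain the contexts together: each context's Next field holds
		 * the offset to the following one; the last context keeps
		 * Next == 0 (its buffer was zero allocated).
		 */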
3223 		for (unsigned int i = 2; i < (n_iov-1); i++) {
3224 			struct kvec *v = &iov[i];
3225 			size_t len = v->iov_len;
3226 			struct create_context *cctx =
3227 				(struct create_context *)v->iov_base;
3228 
3229 			cctx->Next = cpu_to_le32(len);
3230 			le32_add_cpu(&req->CreateContextsLength, len);
3231 		}
3232 		le32_add_cpu(&req->CreateContextsLength,
3233 			     iov[n_iov-1].iov_len);
3234 	}
3235 
3236 	rqst->rq_nvec = n_iov;
3237 	return 0;
3238 }
3239 
3240 /* rq_iov[0] is the request and is released by cifs_small_buf_release().
3241  * All other vectors are freed by kfree().
3242  */
3243 void
3244 SMB2_open_free(struct smb_rqst *rqst)
3245 {
3246 	int i;
3247 
3248 	if (rqst && rqst->rq_iov) {
3249 		cifs_small_buf_release(rqst->rq_iov[0].iov_base);
3250 		for (i = 1; i < rqst->rq_nvec; i++)
3251 			if (rqst->rq_iov[i].iov_base != smb2_padding)
3252 				kfree(rqst->rq_iov[i].iov_base);
3253 	}
3254 }
3255 
3256 int
3257 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3258 	  __u8 *oplock, struct smb2_file_all_info *buf,
3259 	  struct create_posix_rsp *posix,
3260 	  struct kvec *err_iov, int *buftype)
3261 {
3262 	struct smb_rqst rqst;
3263 	struct smb2_create_rsp *rsp = NULL;
3264 	struct cifs_tcon *tcon = oparms->tcon;
3265 	struct cifs_ses *ses = tcon->ses;
3266 	struct TCP_Server_Info *server;
3267 	struct kvec iov[SMB2_CREATE_IOV_SIZE];
3268 	struct kvec rsp_iov = {NULL, 0};
3269 	int resp_buftype = CIFS_NO_BUFFER;
3270 	int rc = 0;
3271 	int flags = 0;
3272 	int retries = 0, cur_sleep = 0;
3273 
3274 replay_again:
3275 	/* reinitialize for possible replay */
3276 	flags = 0;
3277 	server = cifs_pick_channel(ses);
3278 	oparms->replay = !!(retries);
3279 
3280 	cifs_dbg(FYI, "create/open\n");
3281 	if (!ses || !server)
3282 		return smb_EIO(smb_eio_trace_null_pointers);
3283 
3284 	if (smb3_encryption_required(tcon))
3285 		flags |= CIFS_TRANSFORM_REQ;
3286 
3287 	memset(&rqst, 0, sizeof(struct smb_rqst));
3288 	memset(&iov, 0, sizeof(iov));
3289 	rqst.rq_iov = iov;
3290 	rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
3291 
3292 	rc = SMB2_open_init(tcon, server,
3293 			    &rqst, oplock, oparms, path);
3294 	if (rc)
3295 		goto creat_exit;
3296 
3297 	trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
3298 		oparms->create_options, oparms->desired_access);
3299 
3300 	if (retries) {
3301 		/* Back-off before retry */
3302 		if (cur_sleep)
3303 			msleep(cur_sleep);
3304 		smb2_set_replay(server, &rqst);
3305 	}
3306 
3307 	rc = cifs_send_recv(xid, ses, server,
3308 			    &rqst, &resp_buftype, flags,
3309 			    &rsp_iov);
3310 	rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3311 
3312 	if (rc != 0) {
3313 		cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
3314 		if (err_iov && rsp) {
3315 			*err_iov = rsp_iov;
3316 			*buftype = resp_buftype;
3317 			resp_buftype = CIFS_NO_BUFFER;
3318 			rsp = NULL;
3319 		}
3320 		trace_smb3_open_err(xid, tcon->tid, ses->Suid,
3321 				    oparms->create_options, oparms->desired_access, rc);
3322 		if (rc == -EREMCHG) {
3323 			pr_warn_once("server share %s deleted\n",
3324 				     tcon->tree_name);
3325 			tcon->need_reconnect = true;
3326 		}
3327 		goto creat_exit;
3328 	} else if (rsp == NULL) /* unlikely to happen, but safer to check */
3329 		goto creat_exit;
3330 	else
3331 		trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3332 				     oparms->create_options, oparms->desired_access);
3333 
3334 	atomic_inc(&tcon->num_remote_opens);
3335 	oparms->fid->persistent_fid = rsp->PersistentFileId;
3336 	oparms->fid->volatile_fid = rsp->VolatileFileId;
3337 	oparms->fid->access = oparms->desired_access;
3338 #ifdef CONFIG_CIFS_DEBUG2
3339 	oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
3340 #endif /* CIFS_DEBUG2 */
3341 
3342 	if (buf) {
3343 		buf->CreationTime = rsp->CreationTime;
3344 		buf->LastAccessTime = rsp->LastAccessTime;
3345 		buf->LastWriteTime = rsp->LastWriteTime;
3346 		buf->ChangeTime = rsp->ChangeTime;
3347 		buf->AllocationSize = rsp->AllocationSize;
3348 		buf->EndOfFile = rsp->EndofFile;
3349 		buf->Attributes = rsp->FileAttributes;
3350 		buf->NumberOfLinks = cpu_to_le32(1);
3351 		buf->DeletePending = 0; /* successful open = not delete pending */
3352 	}
3353 
3354 
3355 	rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
3356 				 oparms->fid->lease_key, oplock, buf, posix);
3357 creat_exit:
3358 	SMB2_open_free(&rqst);
3359 	free_rsp_buf(resp_buftype, rsp);
3360 
3361 	if (is_replayable_error(rc) &&
3362 	    smb2_should_replay(tcon, &retries, &cur_sleep))
3363 		goto replay_again;
3364 
3365 	return rc;
3366 }
3367 
3368 int
3369 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3370 		struct smb_rqst *rqst,
3371 		u64 persistent_fid, u64 volatile_fid, u32 opcode,
3372 		char *in_data, u32 indatalen,
3373 		__u32 max_response_size)
3374 {
3375 	struct smb2_ioctl_req *req;
3376 	struct kvec *iov = rqst->rq_iov;
3377 	unsigned int total_len;
3378 	int rc;
3379 	char *in_data_buf;
3380 
3381 	rc = smb2_ioctl_req_init(opcode, tcon, server,
3382 				 (void **) &req, &total_len);
3383 	if (rc)
3384 		return rc;
3385 
3386 	if (indatalen) {
3387 		/*
3388		 * indatalen is usually small, a couple of bytes at most, so
3389		 * just allocate it through the generic pool
3390 		 */
3391 		in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
3392 		if (!in_data_buf) {
3393 			cifs_small_buf_release(req);
3394 			return -ENOMEM;
3395 		}
3396 	}
3397 
3398 	req->CtlCode = cpu_to_le32(opcode);
3399 	req->PersistentFileId = persistent_fid;
3400 	req->VolatileFileId = volatile_fid;
3401 
3402 	iov[0].iov_base = (char *)req;
3403	/*
3404	 * If there is no input data, the size of the ioctl struct in the
3405	 * protocol spec still includes a one byte data buffer. But if
3406	 * input data is passed to the ioctl, we do not want to count
3407	 * that byte twice, so we do not send the dummy one byte of
3408	 * data in iovec[0] when we are also sending the input data
3409	 * (in iovec[1]).
3410	 */
3411 	if (indatalen) {
3412 		req->InputCount = cpu_to_le32(indatalen);
3413 		/* do not set InputOffset if no input data */
3414 		req->InputOffset =
3415 		       cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
3416 		rqst->rq_nvec = 2;
3417 		iov[0].iov_len = total_len - 1;
3418 		iov[1].iov_base = in_data_buf;
3419 		iov[1].iov_len = indatalen;
3420 	} else {
3421 		rqst->rq_nvec = 1;
3422 		iov[0].iov_len = total_len;
3423 	}
3424 
3425 	req->OutputOffset = 0;
3426 	req->OutputCount = 0; /* MBZ */
3427 
3428	/*
3429	 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
3430	 * We could increase the default MaxOutputResponse, but that could
3431	 * require more credits. Windows typically sets this smaller, but for
3432	 * some ioctls it may be useful to allow the server to send more.
3433	 * There is no point limiting what the server can send as long as it
3434	 * fits in one credit. We cannot handle more than CIFS_MAX_BUF_SIZE
3435	 * yet, but may want to increase this limit in the future.
3436	 * Note that for snapshot queries, servers like Azure expect the
3437	 * first query to be minimal in size (and just used to get the
3438	 * number/size of previous versions), so the response size must be
3439	 * specified as EXACTLY sizeof(struct snapshot_array), which is 16
3440	 * when rounded up to a multiple of eight bytes. Currently that is
3441	 * the only case where we set the max response size smaller.
3442	 */
3443 	req->MaxOutputResponse = cpu_to_le32(max_response_size);
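	/*
	 * Credit charge is the number of 64KB units (SMB2_MAX_BUFFER_SIZE)
	 * needed to cover the larger of the input payload and the expected
	 * output payload, per the MS-SMB2 credit charge rules.
	 */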
3444 	req->hdr.CreditCharge =
3445 		cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3446 					 SMB2_MAX_BUFFER_SIZE));
3447 	/* always an FSCTL (for now) */
3448 	req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3449 
3450 	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3451 	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
3452 		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
3453 
3454 	return 0;
3455 }
3456 
3457 void
3458 SMB2_ioctl_free(struct smb_rqst *rqst)
3459 {
3460 	int i;
3461 
3462 	if (rqst && rqst->rq_iov) {
3463 		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3464 		for (i = 1; i < rqst->rq_nvec; i++)
3465 			if (rqst->rq_iov[i].iov_base != smb2_padding)
3466 				kfree(rqst->rq_iov[i].iov_base);
3467 	}
3468 }
3469 
3470 
3471 /*
3472  *	SMB2 IOCTL is used for both IOCTLs and FSCTLs
3473  */
3474 int
3475 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3476 	   u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
3477 	   u32 max_out_data_len, char **out_data,
3478 	   u32 *plen /* returned data len */)
3479 {
3480 	struct smb_rqst rqst;
3481 	struct smb2_ioctl_rsp *rsp = NULL;
3482 	struct cifs_ses *ses;
3483 	struct TCP_Server_Info *server;
3484 	struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3485 	struct kvec rsp_iov = {NULL, 0};
3486 	int resp_buftype = CIFS_NO_BUFFER;
3487 	int rc = 0;
3488 	int flags = 0;
3489 	int retries = 0, cur_sleep = 0;
3490 
3491 	if (!tcon)
3492 		return smb_EIO(smb_eio_trace_null_pointers);
3493 
3494 	ses = tcon->ses;
3495 	if (!ses)
3496 		return smb_EIO(smb_eio_trace_null_pointers);
3497 
3498 replay_again:
3499 	/* reinitialize for possible replay */
3500 	flags = 0;
3501 	server = cifs_pick_channel(ses);
3502 
3503 	if (!server)
3504 		return smb_EIO(smb_eio_trace_null_pointers);
3505 
3506 	cifs_dbg(FYI, "SMB2 IOCTL\n");
3507 
3508 	if (out_data != NULL)
3509 		*out_data = NULL;
3510 
3511 	/* zero out returned data len, in case of error */
3512 	if (plen)
3513 		*plen = 0;
3514 
3515 	if (smb3_encryption_required(tcon))
3516 		flags |= CIFS_TRANSFORM_REQ;
3517 
3518 	memset(&rqst, 0, sizeof(struct smb_rqst));
3519 	memset(&iov, 0, sizeof(iov));
3520 	rqst.rq_iov = iov;
3521 	rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
3522 
3523 	rc = SMB2_ioctl_init(tcon, server,
3524 			     &rqst, persistent_fid, volatile_fid, opcode,
3525 			     in_data, indatalen, max_out_data_len);
3526 	if (rc)
3527 		goto ioctl_exit;
3528 
3529 	if (retries) {
3530 		/* Back-off before retry */
3531 		if (cur_sleep)
3532 			msleep(cur_sleep);
3533 		smb2_set_replay(server, &rqst);
3534 	}
3535 
3536 	rc = cifs_send_recv(xid, ses, server,
3537 			    &rqst, &resp_buftype, flags,
3538 			    &rsp_iov);
3539 	rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
3540 
3541 	if (rc != 0)
3542 		trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3543 				ses->Suid, 0, opcode, rc);
3544 
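	/*
	 * Some ioctls return useful data even when rc is nonzero: the
	 * copychunk FSCTLs may fail with -EINVAL yet still return the
	 * server's copy limits, and FSCTL_QUERY_ALLOCATED_RANGES may fail
	 * with -E2BIG yet still return a partial range list, so those two
	 * cases fall through to the response parsing below.
	 */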
3545 	if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
3546 		cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3547 		goto ioctl_exit;
3548 	} else if (rc == -EINVAL) {
3549 		if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3550 		    (opcode != FSCTL_SRV_COPYCHUNK)) {
3551 			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3552 			goto ioctl_exit;
3553 		}
3554 	} else if (rc == -E2BIG) {
3555 		if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3556 			cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3557 			goto ioctl_exit;
3558 		}
3559 	}
3560 
3561 	/* check if caller wants to look at return data or just return rc */
3562 	if ((plen == NULL) || (out_data == NULL))
3563 		goto ioctl_exit;
3564 
3565	/*
3566	 * Although it is unlikely for rsp to be NULL when rc is not set,
3567	 * the check below is slightly safer long term (and quiets a
3568	 * Coverity warning).
3569	 */
3570 	if (rsp == NULL) {
3571 		rc = smb_EIO(smb_eio_trace_ioctl_no_rsp);
3572 		goto ioctl_exit;
3573 	}
3574 
3575 	*plen = le32_to_cpu(rsp->OutputCount);
3576 
3577 	/* We check for obvious errors in the output buffer length and offset */
3578 	if (*plen == 0)
3579 		goto ioctl_exit; /* server returned no data */
3580 	else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
3581 		cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
3582 		rc = smb_EIO2(smb_eio_trace_ioctl_data_len, *plen, rsp_iov.iov_len);
3583 		*plen = 0;
3584 		goto ioctl_exit;
3585 	}
3586 
3587 	u32 outoff = le32_to_cpu(rsp->OutputOffset);
3588 
3589 	if (rsp_iov.iov_len - *plen < outoff) {
3590 		cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n",
3591 			      *plen, outoff);
3592 		rc = smb_EIO2(smb_eio_trace_ioctl_out_off, rsp_iov.iov_len - *plen, outoff);
3593 		*plen = 0;
3594 		goto ioctl_exit;
3595 	}
3596 
3597 	*out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3598 			    *plen, GFP_KERNEL);
3599 	if (*out_data == NULL) {
3600 		rc = -ENOMEM;
3601 		goto ioctl_exit;
3602 	}
3603 
3604 ioctl_exit:
3605 	SMB2_ioctl_free(&rqst);
3606 	free_rsp_buf(resp_buftype, rsp);
3607 
3608 	if (is_replayable_error(rc) &&
3609 	    smb2_should_replay(tcon, &retries, &cur_sleep))
3610 		goto replay_again;
3611 
3612 	return rc;
3613 }
3614 
3615 /*
3616  *   Individual callers to ioctl worker function follow
3617  */
3618 
3619 int
3620 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3621 		     u64 persistent_fid, u64 volatile_fid)
3622 {
3623 	int rc;
3624 	struct  compress_ioctl fsctl_input;
3625 	char *ret_data = NULL;
3626 
3627 	fsctl_input.CompressionState =
3628 			cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
3629 
3630 	rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3631 			FSCTL_SET_COMPRESSION,
3632 			(char *)&fsctl_input /* data input */,
3633 			2 /* in data len */, CIFSMaxBufSize /* max out data */,
3634 			&ret_data /* out data */, NULL);
3635 
3636 	cifs_dbg(FYI, "set compression rc %d\n", rc);
3637 
3638 	return rc;
3639 }
3640 
3641 int
3642 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3643 		struct smb_rqst *rqst,
3644 		u64 persistent_fid, u64 volatile_fid, bool query_attrs)
3645 {
3646 	struct smb2_close_req *req;
3647 	struct kvec *iov = rqst->rq_iov;
3648 	unsigned int total_len;
3649 	int rc;
3650 
3651 	rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3652 				 (void **) &req, &total_len);
3653 	if (rc)
3654 		return rc;
3655 
3656 	req->PersistentFileId = persistent_fid;
3657 	req->VolatileFileId = volatile_fid;
3658 	if (query_attrs)
3659 		req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3660 	else
3661 		req->Flags = 0;
3662 	iov[0].iov_base = (char *)req;
3663 	iov[0].iov_len = total_len;
3664 
3665 	return 0;
3666 }
3667 
3668 void
3669 SMB2_close_free(struct smb_rqst *rqst)
3670 {
3671 	if (rqst && rqst->rq_iov)
3672 		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3673 }
3674 
3675 int
3676 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3677 	     u64 persistent_fid, u64 volatile_fid,
3678 	     struct smb2_file_network_open_info *pbuf)
3679 {
3680 	struct smb_rqst rqst;
3681 	struct smb2_close_rsp *rsp = NULL;
3682 	struct cifs_ses *ses = tcon->ses;
3683 	struct TCP_Server_Info *server;
3684 	struct kvec iov[1];
3685 	struct kvec rsp_iov;
3686 	int resp_buftype = CIFS_NO_BUFFER;
3687 	int rc = 0;
3688 	int flags = 0;
3689 	bool query_attrs = false;
3690 	int retries = 0, cur_sleep = 0;
3691 
3692 replay_again:
3693 	/* reinitialize for possible replay */
3694 	flags = 0;
3695 	query_attrs = false;
3696 	server = cifs_pick_channel(ses);
3697 
3698 	cifs_dbg(FYI, "Close\n");
3699 
3700 	if (!ses || !server)
3701 		return smb_EIO(smb_eio_trace_null_pointers);
3702 
3703 	if (smb3_encryption_required(tcon))
3704 		flags |= CIFS_TRANSFORM_REQ;
3705 
3706 	memset(&rqst, 0, sizeof(struct smb_rqst));
3707 	memset(&iov, 0, sizeof(iov));
3708 	rqst.rq_iov = iov;
3709 	rqst.rq_nvec = 1;
3710 
3711	/* check if we need the server to return timestamps in the close response */
3712 	if (pbuf)
3713 		query_attrs = true;
3714 
3715 	trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3716 	rc = SMB2_close_init(tcon, server,
3717 			     &rqst, persistent_fid, volatile_fid,
3718 			     query_attrs);
3719 	if (rc)
3720 		goto close_exit;
3721 
3722 	if (retries) {
3723 		/* Back-off before retry */
3724 		if (cur_sleep)
3725 			msleep(cur_sleep);
3726 		smb2_set_replay(server, &rqst);
3727 	}
3728 
3729 	rc = cifs_send_recv(xid, ses, server,
3730 			    &rqst, &resp_buftype, flags, &rsp_iov);
3731 	rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
3732 
3733 	if (rc != 0) {
3734 		cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
3735 		trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3736 				     rc);
3737 		goto close_exit;
3738 	} else {
3739 		trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3740 				      ses->Suid);
3741 		if (pbuf)
3742 			memcpy(&pbuf->network_open_info,
3743 			       &rsp->network_open_info,
3744 			       sizeof(pbuf->network_open_info));
3745 		atomic_dec(&tcon->num_remote_opens);
3746 	}
3747 
3748 close_exit:
3749 	SMB2_close_free(&rqst);
3750 	free_rsp_buf(resp_buftype, rsp);
3751 
3752 	/* retry close in a worker thread if this one is interrupted */
3753 	if (is_interrupt_error(rc)) {
3754 		int tmp_rc;
3755 
3756 		tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3757 						     volatile_fid);
3758 		if (tmp_rc)
3759 			cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3760 				 persistent_fid, tmp_rc);
3761 	}
3762 
3763 	if (is_replayable_error(rc) &&
3764 	    smb2_should_replay(tcon, &retries, &cur_sleep))
3765 		goto replay_again;
3766 
3767 	return rc;
3768 }
3769 
3770 int
3771 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3772 		u64 persistent_fid, u64 volatile_fid)
3773 {
3774 	return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3775 }
3776 
3777 int
3778 smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3779 		  struct kvec *iov, unsigned int min_buf_size)
3780 {
3781 	unsigned int smb_len = iov->iov_len;
3782 	char *end_of_smb = smb_len + (char *)iov->iov_base;
3783 	char *begin_of_buf = offset + (char *)iov->iov_base;
3784 	char *end_of_buf = begin_of_buf + buffer_length;
3785 
3786 
3787 	if (buffer_length < min_buf_size) {
3788 		cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3789 			 buffer_length, min_buf_size);
3790 		return -EINVAL;
3791 	}
3792 
3793 	/* check if beyond RFC1001 maximum length */
3794 	if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
3795 		cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3796 			 buffer_length, smb_len);
3797 		return -EINVAL;
3798 	}
3799 
3800 	if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
3801 		cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
3802 		return -EINVAL;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
3808 /*
3809  * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3810  * Caller must free buffer.
3811  */
3812 int
3813 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3814 			   struct kvec *iov, unsigned int minbufsize,
3815 			   char *data)
3816 {
3817 	char *begin_of_buf = offset + (char *)iov->iov_base;
3818 	int rc;
3819 
3820 	if (!data)
3821 		return -EINVAL;
3822 
3823 	rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3824 	if (rc)
3825 		return rc;
3826 
3827 	memcpy(data, begin_of_buf, minbufsize);
3828 
3829 	return 0;
3830 }
3831 
3832 int
3833 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3834 		     struct smb_rqst *rqst,
3835 		     u64 persistent_fid, u64 volatile_fid,
3836 		     u8 info_class, u8 info_type, u32 additional_info,
3837 		     size_t output_len, size_t input_len, void *input)
3838 {
3839 	struct smb2_query_info_req *req;
3840 	struct kvec *iov = rqst->rq_iov;
3841 	unsigned int total_len;
3842 	size_t len;
3843 	int rc;
3844 
3845 	if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
3846 		     len > CIFSMaxBufSize))
3847 		return -EINVAL;
3848 
3849 	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3850 				 (void **) &req, &total_len);
3851 	if (rc)
3852 		return rc;
3853 
3854 	req->InfoType = info_type;
3855 	req->FileInfoClass = info_class;
3856 	req->PersistentFileId = persistent_fid;
3857 	req->VolatileFileId = volatile_fid;
3858 	req->AdditionalInformation = cpu_to_le32(additional_info);
3859 
3860 	req->OutputBufferLength = cpu_to_le32(output_len);
3861 	if (input_len) {
3862 		req->InputBufferLength = cpu_to_le32(input_len);
3863		/* total_len for an smb query request is never close to the le16 max */
3864 		req->InputBufferOffset = cpu_to_le16(total_len - 1);
3865 		memcpy(req->Buffer, input, input_len);
3866 	}
3867 
3868 	iov[0].iov_base = (char *)req;
3869 	/* 1 for Buffer */
3870 	iov[0].iov_len = len;
3871 	return 0;
3872 }
3873 
3874 void
3875 SMB2_query_info_free(struct smb_rqst *rqst)
3876 {
3877 	if (rqst && rqst->rq_iov)
3878 		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
3879 }
3880 
3881 static int
3882 query_info(const unsigned int xid, struct cifs_tcon *tcon,
3883 	   u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3884 	   u32 additional_info, size_t output_len, size_t min_len, void **data,
3885 		u32 *dlen)
3886 {
3887 	struct smb_rqst rqst;
3888 	struct smb2_query_info_rsp *rsp = NULL;
3889 	struct kvec iov[1];
3890 	struct kvec rsp_iov;
3891 	int rc = 0;
3892 	int resp_buftype = CIFS_NO_BUFFER;
3893 	struct cifs_ses *ses = tcon->ses;
3894 	struct TCP_Server_Info *server;
3895 	int flags = 0;
3896 	bool allocated = false;
3897 	int retries = 0, cur_sleep = 0;
3898 
3899 	cifs_dbg(FYI, "Query Info\n");
3900 
3901 	if (!ses)
3902 		return smb_EIO(smb_eio_trace_null_pointers);
3903 
3904 replay_again:
3905 	/* reinitialize for possible replay */
3906 	flags = 0;
3907 	allocated = false;
3908 	server = cifs_pick_channel(ses);
3909 
3910 	if (!server)
3911 		return smb_EIO(smb_eio_trace_null_pointers);
3912 
3913 	if (smb3_encryption_required(tcon))
3914 		flags |= CIFS_TRANSFORM_REQ;
3915 
3916 	memset(&rqst, 0, sizeof(struct smb_rqst));
3917 	memset(&iov, 0, sizeof(iov));
3918 	rqst.rq_iov = iov;
3919 	rqst.rq_nvec = 1;
3920 
3921 	rc = SMB2_query_info_init(tcon, server,
3922 				  &rqst, persistent_fid, volatile_fid,
3923 				  info_class, info_type, additional_info,
3924 				  output_len, 0, NULL);
3925 	if (rc)
3926 		goto qinf_exit;
3927 
3928 	trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3929 				    ses->Suid, info_class, (__u32)info_type);
3930 
3931 	if (retries) {
3932 		/* Back-off before retry */
3933 		if (cur_sleep)
3934 			msleep(cur_sleep);
3935 		smb2_set_replay(server, &rqst);
3936 	}
3937 
3938 	rc = cifs_send_recv(xid, ses, server,
3939 			    &rqst, &resp_buftype, flags, &rsp_iov);
3940 	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3941 
3942 	if (rc) {
3943 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
3944 		trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3945 				ses->Suid, info_class, (__u32)info_type, rc);
3946 		goto qinf_exit;
3947 	}
3948 
3949 	trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3950 				ses->Suid, info_class, (__u32)info_type);
3951 
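	/*
	 * If the caller wants the variable-length data back (dlen != NULL)
	 * but did not supply a buffer, allocate one sized to the server's
	 * reply; the caller must free it.
	 */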
3952 	if (dlen) {
3953 		*dlen = le32_to_cpu(rsp->OutputBufferLength);
3954 		if (!*data) {
3955 			*data = kmalloc(*dlen, GFP_KERNEL);
3956 			if (!*data) {
3957				rc = -ENOMEM;
3958				cifs_tcon_dbg(VFS,
3959					"Error %d allocating memory for acl\n",
3960					rc);
3961				*dlen = 0;
3962 				goto qinf_exit;
3963 			}
3964 			allocated = true;
3965 		}
3966 	}
3967 
3968 	rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3969 					le32_to_cpu(rsp->OutputBufferLength),
3970 					&rsp_iov, dlen ? *dlen : min_len, *data);
3971 	if (rc && allocated) {
3972 		kfree(*data);
3973 		*data = NULL;
3974 		*dlen = 0;
3975 	}
3976 
3977 qinf_exit:
3978 	SMB2_query_info_free(&rqst);
3979 	free_rsp_buf(resp_buftype, rsp);
3980 
3981 	if (is_replayable_error(rc) &&
3982 	    smb2_should_replay(tcon, &retries, &cur_sleep))
3983 		goto replay_again;
3984 
3985 	return rc;
3986 }
3987 
3988 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3989 	u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
3990 {
3991 	return query_info(xid, tcon, persistent_fid, volatile_fid,
3992 			  FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
3993 			  sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3994 			  sizeof(struct smb2_file_all_info), (void **)&data,
3995 			  NULL);
3996 }
3997 
3998 #if 0
3999 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
4000 int
4001 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
4002 			u64 persistent_fid, u64 volatile_fid,
4003 			struct smb311_posix_qinfo *data, u32 *plen)
4004 {
4005 	size_t output_len = sizeof(struct smb311_posix_qinfo *) +
4006 			(sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
4007 	*plen = 0;
4008 
4009 	return query_info(xid, tcon, persistent_fid, volatile_fid,
4010 			  SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
4011 			  output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
4012 	/* Note caller must free "data" (passed in above). It may be allocated in query_info call */
4013 }
4014 #endif
4015 
4016 int
4017 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
4018 	       u64 persistent_fid, u64 volatile_fid,
4019 	       void **data, u32 *plen, u32 extra_info)
4020 {
4021 	*plen = 0;
4022 
4023 	return query_info(xid, tcon, persistent_fid, volatile_fid,
4024 			  0, SMB2_O_INFO_SECURITY, extra_info,
4025 			  SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
4026 }
4027 
4028 int
4029 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
4030 		 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
4031 {
4032 	return query_info(xid, tcon, persistent_fid, volatile_fid,
4033 			  FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
4034 			  sizeof(struct smb2_file_internal_info),
4035 			  sizeof(struct smb2_file_internal_info),
4036 			  (void **)&uniqueid, NULL);
4037 }
4038 
4039 /*
4040  * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
4041  * See MS-SMB2 2.2.35 and 2.2.36
4042  */
4043 
4044 static int
4045 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
4046 		 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4047 		 u64 persistent_fid, u64 volatile_fid,
4048 		 u32 completion_filter, bool watch_tree)
4049 {
4050 	struct smb2_change_notify_req *req;
4051 	struct kvec *iov = rqst->rq_iov;
4052 	unsigned int total_len;
4053 	int rc;
4054 
4055 	rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
4056 				 (void **) &req, &total_len);
4057 	if (rc)
4058 		return rc;
4059 
4060 	req->PersistentFileId = persistent_fid;
4061 	req->VolatileFileId = volatile_fid;
4062 	/* See note 354 of MS-SMB2, 64K max */
4063 	req->OutputBufferLength =
4064 		cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
4065 	req->CompletionFilter = cpu_to_le32(completion_filter);
4066 	if (watch_tree)
4067 		req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
4068 	else
4069 		req->Flags = 0;
4070 
4071 	iov[0].iov_base = (char *)req;
4072 	iov[0].iov_len = total_len;
4073 
4074 	return 0;
4075 }
4076 
4077 int
4078 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
4079 		u64 persistent_fid, u64 volatile_fid, bool watch_tree,
4080 		u32 completion_filter, u32 max_out_data_len, char **out_data,
4081 		u32 *plen /* returned data len */)
4082 {
4083 	struct cifs_ses *ses = tcon->ses;
4084 	struct TCP_Server_Info *server;
4085 	struct smb_rqst rqst;
4086 	struct smb2_change_notify_rsp *smb_rsp;
4087 	struct kvec iov[1];
4088 	struct kvec rsp_iov = {NULL, 0};
4089 	int resp_buftype = CIFS_NO_BUFFER;
4090 	int flags = 0;
4091 	int rc = 0;
4092 	int retries = 0, cur_sleep = 0;
4093 
4094 replay_again:
4095 	/* reinitialize for possible replay */
4096 	flags = 0;
4097 	server = cifs_pick_channel(ses);
4098 
4099 	cifs_dbg(FYI, "change notify\n");
4100 	if (!ses || !server)
4101 		return smb_EIO(smb_eio_trace_null_pointers);
4102 
4103 	if (smb3_encryption_required(tcon))
4104 		flags |= CIFS_TRANSFORM_REQ;
4105 
4106 	memset(&rqst, 0, sizeof(struct smb_rqst));
4107 	memset(&iov, 0, sizeof(iov));
4108 	if (plen)
4109 		*plen = 0;
4110 
4111 	rqst.rq_iov = iov;
4112 	rqst.rq_nvec = 1;
4113 
4114 	rc = SMB2_notify_init(xid, &rqst, tcon, server,
4115 			      persistent_fid, volatile_fid,
4116 			      completion_filter, watch_tree);
4117 	if (rc)
4118 		goto cnotify_exit;
4119 
4120 	trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
4121 				(u8)watch_tree, completion_filter);
4122 
4123 	if (retries) {
4124 		/* Back-off before retry */
4125 		if (cur_sleep)
4126 			msleep(cur_sleep);
4127 		smb2_set_replay(server, &rqst);
4128 	}
4129 
4130 	rc = cifs_send_recv(xid, ses, server,
4131 			    &rqst, &resp_buftype, flags, &rsp_iov);
4132 
4133 	if (rc != 0) {
4134 		cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
4135 		trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
4136 				(u8)watch_tree, completion_filter, rc);
4137 	} else {
4138 		trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
4139 			ses->Suid, (u8)watch_tree, completion_filter);
4140 		/* validate that notify information is plausible */
4141 		if ((rsp_iov.iov_base == NULL) ||
4142 		    (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1))
4143 			goto cnotify_exit;
4144 
4145 		smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
4146 
4147 		rc = smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
4148 				le32_to_cpu(smb_rsp->OutputBufferLength),
4149 				&rsp_iov,
4150 				sizeof(struct file_notify_information));
4151 		if (rc)
4152 			goto cnotify_exit;
4153 
4154 		*out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
4155 				le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
4156 		if (*out_data == NULL) {
4157 			rc = -ENOMEM;
4158 			goto cnotify_exit;
4159 		} else if (plen)
4160 			*plen = le32_to_cpu(smb_rsp->OutputBufferLength);
4161 	}
4162 
4163  cnotify_exit:
4164 	if (rqst.rq_iov)
4165 		cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
4166 	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4167 
4168 	if (is_replayable_error(rc) &&
4169 	    smb2_should_replay(tcon, &retries, &cur_sleep))
4170 		goto replay_again;
4171 
4172 	return rc;
4173 }
4174 
4175 
4176 
4177 /*
4178  * This is a no-op for now. We're not really interested in the reply, but
4179  * rather in the fact that the server sent one and that server->lstrp
4180  * gets updated.
4181  *
4182 * FIXME: maybe we should consider checking that the reply matches the request?
4183  */
4184 static void
4185 smb2_echo_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4186 {
4187 	struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
4188 	struct cifs_credits credits = { .value = 0, .instance = 0 };
4189 
4190 	if (mid->mid_state == MID_RESPONSE_RECEIVED
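	/*
	 * Even a malformed echo response carries credit grant information in
	 * its header; pick it up here so the credits can be returned to the
	 * pool via add_credits() below.
	 */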
4191 	    || mid->mid_state == MID_RESPONSE_MALFORMED) {
4192 		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4193 		credits.instance = server->reconnect_instance;
4194 	}
4195 
4196 	release_mid(server, mid);
4197 	add_credits(server, &credits, CIFS_ECHO_OP);
4198 }
4199 
4200 static void cifs_renegotiate_iosize(struct TCP_Server_Info *server,
4201 				    struct cifs_tcon *tcon)
4202 {
4203 	struct cifs_sb_info *cifs_sb;
4204 
4205 	if (server == NULL || tcon == NULL)
4206 		return;
4207 
4208 	spin_lock(&tcon->sb_list_lock);
4209 	list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link)
4210 		cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
4211 	spin_unlock(&tcon->sb_list_lock);
4212 }
4213 
4214 void smb2_reconnect_server(struct work_struct *work)
4215 {
4216 	struct TCP_Server_Info *server = container_of(work,
4217 					struct TCP_Server_Info, reconnect.work);
4218 	struct TCP_Server_Info *pserver;
4219 	struct cifs_ses *ses, *ses2;
4220 	struct cifs_tcon *tcon, *tcon2;
4221 	struct list_head tmp_list, tmp_ses_list;
4222 	bool ses_exist = false;
4223 	bool tcon_selected = false;
4224 	int rc;
4225 	bool resched = false;
4226 
4227	/* first check if the ref count has reached 0; if not, take another reference */
4228 	spin_lock(&cifs_tcp_ses_lock);
4229 	if (!server->srv_count) {
4230 		spin_unlock(&cifs_tcp_ses_lock);
4231 		return;
4232 	}
4233 	server->srv_count++;
4234 	spin_unlock(&cifs_tcp_ses_lock);
4235 
4236 	/* If server is a channel, select the primary channel */
4237 	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4238 
4239 	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
4240 	mutex_lock(&pserver->reconnect_mutex);
4241 
4242 	/* if the server is marked for termination, drop the ref count here */
4243 	if (server->terminate) {
4244 		cifs_put_tcp_session(server, true);
4245 		mutex_unlock(&pserver->reconnect_mutex);
4246 		return;
4247 	}
4248 
4249 	INIT_LIST_HEAD(&tmp_list);
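	/*
	 * Collect the tcons and sessions that need attention on temporary
	 * lists while holding cifs_tcp_ses_lock, then do the actual
	 * (potentially blocking) reconnect work after dropping the lock.
	 */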
4250 	INIT_LIST_HEAD(&tmp_ses_list);
4251 	cifs_dbg(FYI, "Reconnecting tcons and channels\n");
4252 
4253 	spin_lock(&cifs_tcp_ses_lock);
4254 	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4255 		spin_lock(&ses->ses_lock);
4256 		if (ses->ses_status == SES_EXITING) {
4257 			spin_unlock(&ses->ses_lock);
4258 			continue;
4259 		}
4260 		spin_unlock(&ses->ses_lock);
4261 
4262 		tcon_selected = false;
4263 
4264 		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
4265 			if (tcon->need_reconnect || tcon->need_reopen_files) {
4266 				spin_lock(&tcon->tc_lock);
4267 				tcon->tc_count++;
4268 				spin_unlock(&tcon->tc_lock);
4269 				trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
4270 						    netfs_trace_tcon_ref_get_reconnect_server);
4271 				list_add_tail(&tcon->rlist, &tmp_list);
4272 				tcon_selected = true;
4273 			}
4274 		}
4275 		/*
4276 		 * IPC has the same lifetime as its session and uses its
4277 		 * refcount.
4278 		 */
4279 		if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
4280 			list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
4281 			tcon_selected = true;
4282 			cifs_smb_ses_inc_refcount(ses);
4283 		}
4284		/*
4285		 * Handle the case where a channel needs to reconnect its
4286		 * binding session, but the tcon is healthy (some other
4287		 * channel is active).
4288		 */
4289 		spin_lock(&ses->chan_lock);
4290 		if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
4291 			list_add_tail(&ses->rlist, &tmp_ses_list);
4292 			ses_exist = true;
4293 			cifs_smb_ses_inc_refcount(ses);
4294 		}
4295 		spin_unlock(&ses->chan_lock);
4296 	}
4297 	spin_unlock(&cifs_tcp_ses_lock);
4298 
4299 	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
4300 		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4301 		if (!rc) {
4302 			cifs_renegotiate_iosize(server, tcon);
4303 			cifs_reopen_persistent_handles(tcon);
4304 		} else
4305 			resched = true;
4306 		list_del_init(&tcon->rlist);
4307 		if (tcon->ipc)
4308 			cifs_put_smb_ses(tcon->ses);
4309 		else
4310 			cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
4311 	}
4312 
4313 	if (!ses_exist)
4314 		goto done;
4315 
4316 	/* allocate a dummy tcon struct used for reconnect */
4317 	tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
4318 	if (!tcon) {
4319 		resched = true;
4320 		list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4321 			list_del_init(&ses->rlist);
4322 			cifs_put_smb_ses(ses);
4323 		}
4324 		goto done;
4325 	}
4326 	tcon->status = TID_GOOD;
4327 	tcon->dummy = true;
4328 
4329 	/* now reconnect sessions for necessary channels */
4330 	list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4331 		tcon->ses = ses;
4332 		rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4333 		if (rc)
4334 			resched = true;
4335 		list_del_init(&ses->rlist);
4336 		cifs_put_smb_ses(ses);
4337 	}
4338 	tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
4339 
4340 done:
4341 	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
4342 	if (resched)
4343 		cifs_requeue_server_reconn(server);
4344 	mutex_unlock(&pserver->reconnect_mutex);
4345 
4346 	/* now we can safely release srv struct */
4347 	cifs_put_tcp_session(server, true);
4348 }
4349 
4350 int
4351 SMB2_echo(struct TCP_Server_Info *server)
4352 {
4353 	struct smb2_echo_req *req;
4354 	int rc = 0;
4355 	struct kvec iov[1];
4356 	struct smb_rqst rqst = { .rq_iov = iov,
4357 				 .rq_nvec = 1 };
4358 	unsigned int total_len;
4359 
4360 	cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
4361 
4362 	spin_lock(&server->srv_lock);
4363 	if (server->ops->need_neg &&
4364 	    server->ops->need_neg(server)) {
4365 		spin_unlock(&server->srv_lock);
4366 		/* No need to send echo on newly established connections */
4367 		cifs_queue_server_reconn(server);
4368 		return rc;
4369 	}
4370 	spin_unlock(&server->srv_lock);
4371 
4372 	rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
4373 				 (void **)&req, &total_len);
4374 	if (rc)
4375 		return rc;
4376 
4377 	req->hdr.CreditRequest = cpu_to_le16(1);
4378 
4379 	iov[0].iov_len = total_len;
4380 	iov[0].iov_base = (char *)req;
4381 
4382 	rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
4383 			     server, CIFS_ECHO_OP, NULL);
4384 	if (rc)
4385 		cifs_dbg(FYI, "Echo request failed: %d\n", rc);
4386 
4387 	cifs_small_buf_release(req);
4388 	return rc;
4389 }
4390 
4391 void
4392 SMB2_flush_free(struct smb_rqst *rqst)
4393 {
4394 	if (rqst && rqst->rq_iov)
4395 		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4396 }
4397 
4398 int
4399 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
4400 		struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4401 		u64 persistent_fid, u64 volatile_fid)
4402 {
4403 	struct smb2_flush_req *req;
4404 	struct kvec *iov = rqst->rq_iov;
4405 	unsigned int total_len;
4406 	int rc;
4407 
4408 	rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
4409 				 (void **) &req, &total_len);
4410 	if (rc)
4411 		return rc;
4412 
4413 	req->PersistentFileId = persistent_fid;
4414 	req->VolatileFileId = volatile_fid;
4415 
4416 	iov[0].iov_base = (char *)req;
4417 	iov[0].iov_len = total_len;
4418 
4419 	return 0;
4420 }
4421 
4422 int
4423 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
4424 	   u64 volatile_fid)
4425 {
4426 	struct cifs_ses *ses = tcon->ses;
4427 	struct smb_rqst rqst;
4428 	struct kvec iov[1];
4429 	struct kvec rsp_iov = {NULL, 0};
4430 	struct TCP_Server_Info *server;
4431 	int resp_buftype = CIFS_NO_BUFFER;
4432 	int flags = 0;
4433 	int rc = 0;
4434 	int retries = 0, cur_sleep = 0;
4435 
4436 replay_again:
4437 	/* reinitialize for possible replay */
4438 	flags = 0;
4439 	server = cifs_pick_channel(ses);
4440 
4441 	cifs_dbg(FYI, "flush\n");
4442 	if (!ses || !(ses->server))
4443 		return smb_EIO(smb_eio_trace_null_pointers);
4444 
4445 	if (smb3_encryption_required(tcon))
4446 		flags |= CIFS_TRANSFORM_REQ;
4447 
4448 	memset(&rqst, 0, sizeof(struct smb_rqst));
4449 	memset(&iov, 0, sizeof(iov));
4450 	rqst.rq_iov = iov;
4451 	rqst.rq_nvec = 1;
4452 
4453 	rc = SMB2_flush_init(xid, &rqst, tcon, server,
4454 			     persistent_fid, volatile_fid);
4455 	if (rc)
4456 		goto flush_exit;
4457 
4458 	trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
4459 
4460 	if (retries) {
4461 		/* Back-off before retry */
4462 		if (cur_sleep)
4463 			msleep(cur_sleep);
4464 		smb2_set_replay(server, &rqst);
4465 	}
4466 
4467 	rc = cifs_send_recv(xid, ses, server,
4468 			    &rqst, &resp_buftype, flags, &rsp_iov);
4469 
4470 	if (rc != 0) {
4471 		cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
4472 		trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
4473 				     rc);
4474 	} else
4475 		trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
4476 				      ses->Suid);
4477 
4478  flush_exit:
4479 	SMB2_flush_free(&rqst);
4480 	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4481 
4482 	if (is_replayable_error(rc) &&
4483 	    smb2_should_replay(tcon, &retries, &cur_sleep))
4484 		goto replay_again;
4485 
4486 	return rc;
4487 }
4488 
4489 #ifdef CONFIG_CIFS_SMB_DIRECT
4490 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
4491 {
4492 	struct TCP_Server_Info *server = io_parms->server;
4493 	struct cifs_tcon *tcon = io_parms->tcon;
4494 
4495 	/* we can only offload if we're connected */
4496 	if (!server || !tcon)
4497 		return false;
4498 
4499 	/* we can only offload on an rdma connection */
4500 	if (!server->rdma || !server->smbd_conn)
4501 		return false;
4502 
4503 	/* we don't support signed offload yet */
4504 	if (server->sign)
4505 		return false;
4506 
4507 	/* we don't support encrypted offload yet */
4508 	if (smb3_encryption_required(tcon))
4509 		return false;
4510 
4511 	/* offload also has its overhead, so only do it if desired */
4512 	if (io_parms->length < server->rdma_readwrite_threshold)
4513 		return false;
4514 
4515 	return true;
4516 }
4517 #endif /* CONFIG_CIFS_SMB_DIRECT */
4518 
4519 /*
4520  * To form a chain of read requests, any read requests after the first should
4521  * have the end_of_chain boolean set to true.
4522  */
4523 static int
4524 smb2_new_read_req(void **buf, unsigned int *total_len,
4525 	struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata,
4526 	unsigned int remaining_bytes, int request_type)
4527 {
4528 	int rc = -EACCES;
4529 	struct smb2_read_req *req = NULL;
4530 	struct smb2_hdr *shdr;
4531 	struct TCP_Server_Info *server = io_parms->server;
4532 
4533 	rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
4534 				 (void **) &req, total_len);
4535 	if (rc)
4536 		return rc;
4537 
4538 	if (server == NULL)
4539 		return -ECONNABORTED;
4540 
4541 	shdr = &req->hdr;
4542 	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4543 
4544 	req->PersistentFileId = io_parms->persistent_fid;
4545 	req->VolatileFileId = io_parms->volatile_fid;
4546 	req->ReadChannelInfoOffset = 0; /* reserved */
4547 	req->ReadChannelInfoLength = 0; /* reserved */
4548 	req->Channel = 0; /* reserved */
4549 	req->MinimumCount = 0;
4550 	req->Length = cpu_to_le32(io_parms->length);
4551 	req->Offset = cpu_to_le64(io_parms->offset);
4552 
4553 	trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0,
4554 			      rdata ? rdata->subreq.debug_index : 0,
4555 			      rdata ? rdata->xid : 0,
4556 			      io_parms->persistent_fid,
4557 			      io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4558 			      io_parms->offset, io_parms->length);
4559 #ifdef CONFIG_CIFS_SMB_DIRECT
4560 	/*
4561	 * If we want to do an RDMA write, fill in and append a
4562	 * smbdirect_buffer_descriptor_v1 to the end of the read request.
4563 	 */
4564 	if (rdata && smb3_use_rdma_offload(io_parms)) {
4565 		struct smbdirect_buffer_descriptor_v1 *v1;
4566 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
4567 
4568 		rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
4569 					     true, need_invalidate);
4570 		if (!rdata->mr)
4571 			return -EAGAIN;
4572 
4573 		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4574 		if (need_invalidate)
4575 			req->Channel = SMB2_CHANNEL_RDMA_V1;
4576 		req->ReadChannelInfoOffset =
4577 			cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
4578 		req->ReadChannelInfoLength =
4579 			cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
4580 		v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
4581 		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
4582 		v1->token = cpu_to_le32(rdata->mr->mr->rkey);
4583 		v1->length = cpu_to_le32(rdata->mr->mr->length);
4584 
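		/*
		 * The request's 1-byte Buffer placeholder is already counted
		 * in total_len, so appending the descriptor only adds
		 * sizeof(*v1) - 1 bytes.
		 */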
4585 		*total_len += sizeof(*v1) - 1;
4586 	}
4587 #endif
4588 	if (request_type & CHAINED_REQUEST) {
4589 		if (!(request_type & END_OF_CHAIN)) {
4590 			/* next 8-byte aligned request */
4591 			*total_len = ALIGN(*total_len, 8);
4592 			shdr->NextCommand = cpu_to_le32(*total_len);
4593 		} else /* END_OF_CHAIN */
4594 			shdr->NextCommand = 0;
4595 		if (request_type & RELATED_REQUEST) {
4596 			shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
4597 			/*
4598 			 * Related requests use info from previous read request
4599 			 * in chain.
4600 			 */
4601 			shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
4602 			shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
4603 			req->PersistentFileId = (u64)-1;
4604 			req->VolatileFileId = (u64)-1;
4605 		}
4606 	}
4607 	if (remaining_bytes > io_parms->length)
4608 		req->RemainingBytes = cpu_to_le32(remaining_bytes);
4609 	else
4610 		req->RemainingBytes = 0;
4611 
4612 	*buf = req;
4613 	return rc;
4614 }
4615 
4616 static void
4617 smb2_readv_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4618 {
4619 	struct cifs_io_subrequest *rdata = mid->callback_data;
4620 	struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
4621 	struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4622 	struct smb2_hdr *shdr = (struct smb2_hdr *)rdata->iov[0].iov_base;
4623 	struct cifs_credits credits = {
4624 		.value = 0,
4625 		.instance = 0,
4626 		.rreq_debug_id = rdata->rreq->debug_id,
4627 		.rreq_debug_index = rdata->subreq.debug_index,
4628 	};
4629 	struct smb_rqst rqst = { .rq_iov = &rdata->iov[0], .rq_nvec = 1 };
4630 	unsigned int rreq_debug_id = rdata->rreq->debug_id;
4631 	unsigned int subreq_debug_index = rdata->subreq.debug_index;
4632 
4633 	if (rdata->got_bytes) {
4634 		rqst.rq_iter	  = rdata->subreq.io_iter;
4635 	}
4636 
4637 	WARN_ONCE(rdata->server != server,
4638 		  "rdata server %p != mid server %p",
4639 		  rdata->server, server);
4640 
4641 	cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
4642 		 __func__, mid->mid, mid->mid_state, rdata->result,
4643 		 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
4644 
4645 	switch (mid->mid_state) {
4646 	case MID_RESPONSE_RECEIVED:
4647 		credits.value = le16_to_cpu(shdr->CreditRequest);
4648 		credits.instance = server->reconnect_instance;
4649 		/* result already set, check signature */
4650 		if (server->sign && !mid->decrypted) {
4651 			int rc;
4652 
4653 			iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
4654 			rc = smb2_verify_signature(&rqst, server);
4655 			if (rc) {
4656 				cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
4657 					      rc);
4658 				rdata->subreq.error = rc;
4659 				rdata->result = rc;
4660 
4661 				if (is_replayable_error(rc)) {
4662 					trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
4663 					__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4664 				} else
4665 					trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_bad);
4666 			} else
4667 				trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
4668 		}
4669 		/* FIXME: should this be counted toward the initiating task? */
4670 		task_io_account_read(rdata->got_bytes);
4671 		cifs_stats_bytes_read(tcon, rdata->got_bytes);
4672 		break;
4673 	case MID_REQUEST_SUBMITTED:
4674 		trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted);
4675 		goto do_retry;
4676 	case MID_RETRY_NEEDED:
4677 		trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
4678 do_retry:
4679 		__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4680 		rdata->result = -EAGAIN;
4681 		if (server->sign && rdata->got_bytes)
4682 			/* reset bytes number since we can not check a sign */
4683			/* reset the byte count since we cannot verify the signature */
4684 		/* FIXME: should this be counted toward the initiating task? */
4685 		task_io_account_read(rdata->got_bytes);
4686 		cifs_stats_bytes_read(tcon, rdata->got_bytes);
4687 		break;
4688 	case MID_RESPONSE_MALFORMED:
4689 		trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed);
4690 		credits.value = le16_to_cpu(shdr->CreditRequest);
4691 		credits.instance = server->reconnect_instance;
4692 		rdata->result = smb_EIO(smb_eio_trace_read_rsp_malformed);
4693 		break;
4694 	default:
4695 		trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown);
4696 		rdata->result = smb_EIO1(smb_eio_trace_read_mid_state_unknown,
4697 					 mid->mid_state);
4698 		break;
4699 	}
4700 #ifdef CONFIG_CIFS_SMB_DIRECT
4701	/*
4702	 * If this rdata has a registered memory region, free the MR now.
4703	 * MRs are a limited resource needed for future I/Os, so they must
4704	 * be released as soon as the I/O finishes to prevent deadlock.
4705	 */
4706 	if (rdata->mr) {
4707 		smbd_deregister_mr(rdata->mr);
4708 		rdata->mr = NULL;
4709 	}
4710 #endif
4711 	if (rdata->result && rdata->result != -ENODATA) {
4712 		cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4713 		trace_smb3_read_err(rdata->rreq->debug_id,
4714 				    rdata->subreq.debug_index,
4715 				    rdata->xid,
4716 				    rdata->req->cfile->fid.persistent_fid,
4717 				    tcon->tid, tcon->ses->Suid,
4718 				    rdata->subreq.start + rdata->subreq.transferred,
4719 				    rdata->subreq.len   - rdata->subreq.transferred,
4720 				    rdata->result);
4721 	} else
4722 		trace_smb3_read_done(rdata->rreq->debug_id,
4723 				     rdata->subreq.debug_index,
4724 				     rdata->xid,
4725 				     rdata->req->cfile->fid.persistent_fid,
4726 				     tcon->tid, tcon->ses->Suid,
4727 				     rdata->subreq.start + rdata->subreq.transferred,
4728 				     rdata->got_bytes);
4729 
4730 	if (rdata->result == -ENODATA) {
4731 		__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4732 		rdata->result = 0;
4733 	} else {
4734 		size_t trans = rdata->subreq.transferred + rdata->got_bytes;
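		/*
		 * A short read that ends at or beyond the cached remote file
		 * size means we hit EOF rather than an error, so clear the
		 * error and flag EOF for netfs.
		 */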
4735 		if (trans < rdata->subreq.len &&
4736 		    rdata->subreq.start + trans >= ictx->remote_i_size) {
4737 			__set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4738 			rdata->result = 0;
4739 		}
4740 		if (rdata->got_bytes)
4741 			__set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags);
4742 	}
4743 
4744 	/* see if we need to retry */
4745 	if (is_replayable_error(rdata->result) &&
4746 	    smb2_should_replay(tcon,
4747 			       &rdata->retries,
4748 			       &rdata->cur_sleep))
4749 		rdata->replay = true;
4750 
4751 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
4752 			      server->credits, server->in_flight,
4753 			      0, cifs_trace_rw_credits_read_response_clear);
4754 	rdata->credits.value = 0;
4755 	rdata->subreq.error = rdata->result;
4756 	rdata->subreq.transferred += rdata->got_bytes;
4757 	trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
4758 	netfs_read_subreq_terminated(&rdata->subreq);
4759 	release_mid(server, mid);
4760 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4761 			      server->credits, server->in_flight,
4762 			      credits.value, cifs_trace_rw_credits_read_response_add);
4763 	add_credits(server, &credits, 0);
4764 }
4765 
4766 /* smb2_async_readv - send an async read, and set up mid to handle result */
4767 int
4768 smb2_async_readv(struct cifs_io_subrequest *rdata)
4769 {
4770 	int rc, flags = 0;
4771 	char *buf;
4772 	struct netfs_io_subrequest *subreq = &rdata->subreq;
4773 	struct smb2_hdr *shdr;
4774 	struct cifs_io_parms io_parms;
4775 	struct smb_rqst rqst = { .rq_iov = rdata->iov,
4776 				 .rq_nvec = 1 };
4777 	struct TCP_Server_Info *server;
4778 	struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4779 	unsigned int total_len;
4780 	int credit_request;
4781 
4782 	cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
4783 		 __func__, subreq->start, subreq->len);
4784 
4785 	if (!rdata->server)
4786 		rdata->server = cifs_pick_channel(tcon->ses);
4787 
4788 	io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
4789 	io_parms.server = server = rdata->server;
4790 	io_parms.offset = subreq->start + subreq->transferred;
4791 	io_parms.length = subreq->len   - subreq->transferred;
4792 	io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
4793 	io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
4794 	io_parms.pid = rdata->req->pid;
4795 
4796 	rc = smb2_new_read_req(
4797 		(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4798 	if (rc)
4799 		goto out;
4800 
4801 	if (smb3_encryption_required(io_parms.tcon))
4802 		flags |= CIFS_TRANSFORM_REQ;
4803 
4804 	rdata->iov[0].iov_base = buf;
4805 	rdata->iov[0].iov_len = total_len;
4806 	rdata->got_bytes = 0;
4807 	rdata->result = 0;
4808 
4809 	shdr = (struct smb2_hdr *)buf;
4810 
4811 	if (rdata->replay) {
4812 		/* Back-off before retry */
4813 		if (rdata->cur_sleep)
4814 			msleep(rdata->cur_sleep);
4815 		smb2_set_replay(server, &rqst);
4816 	}
4817 
4818 	if (rdata->credits.value > 0) {
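	/*
	 * If credits were reserved for this subrequest, charge one credit per
	 * 64KB of the read and ask the server for a few extra credits
	 * (charge + 8, presumably to keep the pipeline fed), while never
	 * requesting more than would take us past max_credits.
	 */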
4819 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
4820 						SMB2_MAX_BUFFER_SIZE));
4821 		credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
4822 		if (server->credits >= server->max_credits)
4823 			shdr->CreditRequest = cpu_to_le16(0);
4824 		else
4825 			shdr->CreditRequest = cpu_to_le16(
4826 				min_t(int, server->max_credits -
4827 						server->credits, credit_request));
4828 
4829 		rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust);
4830 		if (rc)
4831 			goto async_readv_out;
4832 
4833 		flags |= CIFS_HAS_CREDITS;
4834 	}
4835 
4836 	rc = cifs_call_async(server, &rqst,
4837 			     cifs_readv_receive, smb2_readv_callback,
4838 			     smb3_handle_read_data, rdata, flags,
4839 			     &rdata->credits);
4840 	if (rc) {
4841 		cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4842 		trace_smb3_read_err(rdata->rreq->debug_id,
4843 				    subreq->debug_index,
4844 				    rdata->xid, io_parms.persistent_fid,
4845 				    io_parms.tcon->tid,
4846 				    io_parms.tcon->ses->Suid,
4847 				    io_parms.offset,
4848 				    subreq->len - subreq->transferred, rc);
4849 	}
4850 
4851 async_readv_out:
4852 	cifs_small_buf_release(buf);
4853 
4854 out:
4855 	/* if the send error is retryable, let netfs know about it */
4856 	if (is_replayable_error(rc) &&
4857 	    smb2_should_replay(tcon,
4858 			       &rdata->retries,
4859 			       &rdata->cur_sleep)) {
4860 		trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
4861 		__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4862 	}
4863 
4864 	return rc;
4865 }
4866 
4867 int
4868 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4869 	  unsigned int *nbytes, char **buf, int *buf_type)
4870 {
4871 	struct smb_rqst rqst;
4872 	int resp_buftype, rc;
4873 	struct smb2_read_req *req = NULL;
4874 	struct smb2_read_rsp *rsp = NULL;
4875 	struct kvec iov[1];
4876 	struct kvec rsp_iov;
4877 	unsigned int total_len;
4878 	int flags = CIFS_LOG_ERROR;
4879 	struct cifs_ses *ses = io_parms->tcon->ses;
4880 
4881 	if (!io_parms->server)
4882 		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4883 
4884 	*nbytes = 0;
4885 	rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
4886 	if (rc)
4887 		return rc;
4888 
4889 	if (smb3_encryption_required(io_parms->tcon))
4890 		flags |= CIFS_TRANSFORM_REQ;
4891 
4892 	iov[0].iov_base = (char *)req;
4893 	iov[0].iov_len = total_len;
4894 
4895 	memset(&rqst, 0, sizeof(struct smb_rqst));
4896 	rqst.rq_iov = iov;
4897 	rqst.rq_nvec = 1;
4898 
4899 	rc = cifs_send_recv(xid, ses, io_parms->server,
4900 			    &rqst, &resp_buftype, flags, &rsp_iov);
4901 	rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
4902 
4903 	if (rc) {
4904 		if (rc != -ENODATA) {
4905 			cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4906 			cifs_dbg(VFS, "Send error in read = %d\n", rc);
4907 			trace_smb3_read_err(0, 0, xid,
4908 					    req->PersistentFileId,
4909 					    io_parms->tcon->tid, ses->Suid,
4910 					    io_parms->offset, io_parms->length,
4911 					    rc);
4912 		} else
4913 			trace_smb3_read_done(0, 0, xid,
4914 					     req->PersistentFileId, io_parms->tcon->tid,
4915 					     ses->Suid, io_parms->offset, 0);
4916 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4917 		cifs_small_buf_release(req);
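		/* -ENODATA means the server reported EOF: a successful, zero-byte read */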
4918 		return rc == -ENODATA ? 0 : rc;
4919 	} else
4920 		trace_smb3_read_done(0, 0, xid,
4921 				     req->PersistentFileId,
4922 				     io_parms->tcon->tid, ses->Suid,
4923 				     io_parms->offset, io_parms->length);
4924 
4925 	cifs_small_buf_release(req);
4926 
4927 	*nbytes = le32_to_cpu(rsp->DataLength);
4928 	if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4929 	    (*nbytes > io_parms->length)) {
4930 		cifs_dbg(FYI, "bad length %d for count %d\n",
4931 			 *nbytes, io_parms->length);
4932 		rc = smb_EIO2(smb_eio_trace_read_overlarge,
4933 			      *nbytes, io_parms->length);
4934 		*nbytes = 0;
4935 	}
4936 
4937 	if (*buf) {
4938 		memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
4939 		free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4940 	} else if (resp_buftype != CIFS_NO_BUFFER) {
4941 		*buf = rsp_iov.iov_base;
4942 		if (resp_buftype == CIFS_SMALL_BUFFER)
4943 			*buf_type = CIFS_SMALL_BUFFER;
4944 		else if (resp_buftype == CIFS_LARGE_BUFFER)
4945 			*buf_type = CIFS_LARGE_BUFFER;
4946 	}
4947 	return rc;
4948 }
4949 
4950 /*
4951  * Check the mid_state and signature on received buffer (if any), and queue the
4952  * workqueue completion task.
4953  */
4954 static void
4955 smb2_writev_callback(struct TCP_Server_Info *server, struct mid_q_entry *mid)
4956 {
4957 	struct cifs_io_subrequest *wdata = mid->callback_data;
4958 	struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4959 	struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4960 	struct cifs_credits credits = {
4961 		.value = 0,
4962 		.instance = 0,
4963 		.rreq_debug_id = wdata->rreq->debug_id,
4964 		.rreq_debug_index = wdata->subreq.debug_index,
4965 	};
4966 	unsigned int rreq_debug_id = wdata->rreq->debug_id;
4967 	unsigned int subreq_debug_index = wdata->subreq.debug_index;
4968 	ssize_t result = 0;
4969 	size_t written;
4970 
4971 	WARN_ONCE(wdata->server != server,
4972 		  "wdata server %p != mid server %p",
4973 		  wdata->server, server);
4974 
4975 	switch (mid->mid_state) {
4976 	case MID_RESPONSE_RECEIVED:
4977 		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4978 		credits.instance = server->reconnect_instance;
4979 		result = smb2_check_receive(mid, server, 0);
4980 		if (result != 0) {
4981 			if (is_replayable_error(result)) {
4982 				trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
4983 				__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
4984 			} else {
4985 				wdata->subreq.error = result;
4986 				trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad);
4987 			}
4988 			break;
4989 		}
4990 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
4991 
4992 		written = le32_to_cpu(rsp->DataLength);
4993 		/*
4994		 * Mask off the high 16 bits when the number of bytes written,
4995		 * as returned by the server, is greater than the number of
4996		 * bytes requested by the client. OS/2 servers are known to
4997		 * set incorrect CountHigh values.
4998 		 */
4999 		if (written > wdata->subreq.len)
5000 			written &= 0xFFFF;
5001 
5002 		cifs_stats_bytes_written(tcon, written);
5003 
5004 		if (written < wdata->subreq.len) {
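		/*
		 * A short write is surfaced as -ENOSPC rather than treated as
		 * partial progress; otherwise record the server-confirmed
		 * length and mark the subrequest as having made progress.
		 */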
5005 			result = -ENOSPC;
5006 		} else if (written > 0) {
5007 			wdata->subreq.len = written;
5008 			__set_bit(NETFS_SREQ_MADE_PROGRESS, &wdata->subreq.flags);
5009 		}
5010 		break;
5011 	case MID_REQUEST_SUBMITTED:
5012 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted);
5013 		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
5014 		result = -EAGAIN;
5015 		break;
5016 	case MID_RETRY_NEEDED:
5017 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
5018 		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
5019 		result = -EAGAIN;
5020 		break;
5021 	case MID_RESPONSE_MALFORMED:
5022 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed);
5023 		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
5024 		credits.instance = server->reconnect_instance;
5025 		result = smb_EIO(smb_eio_trace_write_rsp_malformed);
5026 		break;
5027 	default:
5028 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown);
5029 		result = smb_EIO1(smb_eio_trace_write_mid_state_unknown,
5030 				  mid->mid_state);
5031 		break;
5032 	}
5033 #ifdef CONFIG_CIFS_SMB_DIRECT
5034	/*
5035	 * If this wdata has a registered memory region, free the MR now.
5036	 * The number of MRs available is limited, so it is important to
5037	 * recover a used MR as soon as the I/O is finished; holding the
5038	 * MR longer into later I/O processing can result in an I/O
5039	 * deadlock due to a lack of MRs with which to send requests on retry.
5040	 */
5041 	if (wdata->mr) {
5042 		smbd_deregister_mr(wdata->mr);
5043 		wdata->mr = NULL;
5044 	}
5045 #endif
5046 	if (result) {
5047 		wdata->result = result;
5048 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
5049 		trace_smb3_write_err(wdata->rreq->debug_id,
5050 				     wdata->subreq.debug_index,
5051 				     wdata->xid,
5052 				     wdata->req->cfile->fid.persistent_fid,
5053 				     tcon->tid, tcon->ses->Suid, wdata->subreq.start,
5054 				     wdata->subreq.len, wdata->result);
5055 		if (wdata->result == -ENOSPC)
5056 			pr_warn_once("Out of space writing to %s\n",
5057 				     tcon->tree_name);
5058 	} else
5059 		trace_smb3_write_done(wdata->rreq->debug_id,
5060 				      wdata->subreq.debug_index,
5061 				      wdata->xid,
5062 				      wdata->req->cfile->fid.persistent_fid,
5063 				      tcon->tid, tcon->ses->Suid,
5064 				      wdata->subreq.start, wdata->subreq.len);
5065 
5066 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value,
5067 			      server->credits, server->in_flight,
5068 			      0, cifs_trace_rw_credits_write_response_clear);
5069 	wdata->credits.value = 0;
5070 
5071 	/* see if we need to retry */
5072 	if (is_replayable_error(wdata->result) &&
5073 	    smb2_should_replay(tcon,
5074 			       &wdata->retries,
5075 			       &wdata->cur_sleep))
5076 		wdata->replay = true;
5077 
5078 	cifs_write_subrequest_terminated(wdata, result ?: written);
5079 	release_mid(server, mid);
5080 	trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
5081 			      server->credits, server->in_flight,
5082 			      credits.value, cifs_trace_rw_credits_write_response_add);
5083 	add_credits(server, &credits, 0);
5084 }
5085 
5086 /* smb2_async_writev - send an async write, and set up mid to handle result */
5087 void
5088 smb2_async_writev(struct cifs_io_subrequest *wdata)
5089 {
5090 	int rc = -EACCES, flags = 0;
5091 	struct smb2_write_req *req = NULL;
5092 	struct smb2_hdr *shdr;
5093 	struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
5094 	struct TCP_Server_Info *server = wdata->server;
5095 	struct kvec iov[1];
5096 	struct smb_rqst rqst = { };
5097 	unsigned int total_len, xid = wdata->xid;
5098 	struct cifs_io_parms _io_parms;
5099 	struct cifs_io_parms *io_parms = NULL;
5100 	int credit_request;
5101 
5102 	/*
5103 	 * In the future we may get cifs_io_parms passed in from the caller,
5104 	 * but for now we construct it here.
5105 	 */
5106 	_io_parms = (struct cifs_io_parms) {
5107 		.tcon = tcon,
5108 		.server = server,
5109 		.offset = wdata->subreq.start,
5110 		.length = wdata->subreq.len,
5111 		.persistent_fid = wdata->req->cfile->fid.persistent_fid,
5112 		.volatile_fid = wdata->req->cfile->fid.volatile_fid,
5113 		.pid = wdata->req->pid,
5114 	};
5115 	io_parms = &_io_parms;
5116 
5117 	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
5118 				 (void **) &req, &total_len);
5119 	if (rc)
5120 		goto out;
5121 
5122 	rqst.rq_iov = iov;
5123 	rqst.rq_iter = wdata->subreq.io_iter;
5124 
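	/* 1 for Buffer */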
5125 	rqst.rq_iov[0].iov_len = total_len - 1;
5126 	rqst.rq_iov[0].iov_base = (char *)req;
5127 	rqst.rq_nvec += 1;
5128 
5129 	if (smb3_encryption_required(tcon))
5130 		flags |= CIFS_TRANSFORM_REQ;
5131 
5132 	shdr = (struct smb2_hdr *)req;
5133 	shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5134 
5135 	req->PersistentFileId = io_parms->persistent_fid;
5136 	req->VolatileFileId = io_parms->volatile_fid;
5137 	req->WriteChannelInfoOffset = 0;
5138 	req->WriteChannelInfoLength = 0;
5139 	req->Channel = SMB2_CHANNEL_NONE;
5140 	req->Length = cpu_to_le32(io_parms->length);
5141 	req->Offset = cpu_to_le64(io_parms->offset);
5142 	req->DataOffset = cpu_to_le16(
5143 				offsetof(struct smb2_write_req, Buffer));
5144 	req->RemainingBytes = 0;
5145 
5146 	trace_smb3_write_enter(wdata->rreq->debug_id,
5147 			       wdata->subreq.debug_index,
5148 			       wdata->xid,
5149 			       io_parms->persistent_fid,
5150 			       io_parms->tcon->tid,
5151 			       io_parms->tcon->ses->Suid,
5152 			       io_parms->offset,
5153 			       io_parms->length);
5154 
5155 #ifdef CONFIG_CIFS_SMB_DIRECT
5156 	/*
5157 	 * If we want the server to do an RDMA read, fill in and append an
5158 	 * smbdirect_buffer_descriptor_v1 to the end of the write request.
5159 	 */
5160 	if (smb3_use_rdma_offload(io_parms)) {
5161 		struct smbdirect_buffer_descriptor_v1 *v1;
5162 		bool need_invalidate = server->dialect == SMB30_PROT_ID;
5163 
5164 		wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
5165 					     false, need_invalidate);
5166 		if (!wdata->mr) {
5167 			rc = -EAGAIN;
5168 			goto async_writev_out;
5169 		}
5170 		/* For RDMA read, I/O size is in RemainingBytes not in Length */
5171 		req->RemainingBytes = req->Length;
5172 		req->Length = 0;
5173 		req->DataOffset = 0;
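		/*
		 * SMB 3.0 does not support SMB2_CHANNEL_RDMA_V1_INVALIDATE,
		 * so when the MR was registered for local invalidation use
		 * the plain V1 channel instead.
		 */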
5174 		req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
5175 		if (need_invalidate)
5176 			req->Channel = SMB2_CHANNEL_RDMA_V1;
5177 		req->WriteChannelInfoOffset =
5178 			cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
5179 		req->WriteChannelInfoLength =
5180 			cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
5181 		v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
5182 		v1->offset = cpu_to_le64(wdata->mr->mr->iova);
5183 		v1->token = cpu_to_le32(wdata->mr->mr->rkey);
5184 		v1->length = cpu_to_le32(wdata->mr->mr->length);
5185 
5186 		rqst.rq_iov[0].iov_len += sizeof(*v1);
5187 
5188 		/*
5189 		 * We keep wdata->subreq.io_iter,
5190 		 * but we have to truncate rqst.rq_iter
5191 		 */
5192 		iov_iter_truncate(&rqst.rq_iter, 0);
5193 	}
5194 #endif
5195 
5196 	if (wdata->replay) {
5197 		/* Back-off before retry */
5198 		if (wdata->cur_sleep)
5199 			msleep(wdata->cur_sleep);
5200 		smb2_set_replay(server, &rqst);
5201 	}
5202 
5203 	cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
5204 		 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
5205 
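	/*
	 * Charge one credit per SMB2_MAX_BUFFER_SIZE chunk of the write and
	 * ask for a few extra credits on top, capped so that the total
	 * never exceeds the server's credit limit.
	 */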
5206 	if (wdata->credits.value > 0) {
5207 		shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
5208 						    SMB2_MAX_BUFFER_SIZE));
5209 		credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
5210 		if (server->credits >= server->max_credits)
5211 			shdr->CreditRequest = cpu_to_le16(0);
5212 		else
5213 			shdr->CreditRequest = cpu_to_le16(
5214 				min_t(int, server->max_credits -
5215 						server->credits, credit_request));
5216 
5217 		rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust);
5218 		if (rc)
5219 			goto async_writev_out;
5220 
5221 		flags |= CIFS_HAS_CREDITS;
5222 	}
5223 
5224 	/* XXX: compression + encryption is unsupported for now */
5225 	if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst))
5226 		flags |= CIFS_COMPRESS_REQ;
5227 
5228 	rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
5229 			     wdata, flags, &wdata->credits);
5230 	/* Can't touch wdata if rc == 0 */
5231 	if (rc) {
5232 		trace_smb3_write_err(wdata->rreq->debug_id,
5233 				     wdata->subreq.debug_index,
5234 				     xid,
5235 				     io_parms->persistent_fid,
5236 				     io_parms->tcon->tid,
5237 				     io_parms->tcon->ses->Suid,
5238 				     io_parms->offset,
5239 				     io_parms->length,
5240 				     rc);
5241 		cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
5242 	}
5243 
5244 async_writev_out:
5245 	cifs_small_buf_release(req);
5246 out:
5247 	/* if the send error is retryable, let netfs know about it */
5248 	if (is_replayable_error(rc) &&
5249 	    smb2_should_replay(tcon,
5250 			       &wdata->retries,
5251 			       &wdata->cur_sleep)) {
5252 		wdata->replay = true;
5253 		trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
5254 		__set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
5255 	}
5256 
5257 	if (rc) {
5258 		trace_smb3_rw_credits(wdata->rreq->debug_id,
5259 				      wdata->subreq.debug_index,
5260 				      wdata->credits.value,
5261 				      server->credits, server->in_flight,
5262 				      -(int)wdata->credits.value,
5263 				      cifs_trace_rw_credits_write_response_clear);
5264 		add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
5265 		cifs_write_subrequest_terminated(wdata, rc);
5266 	}
5267 }
5268 
5269 /*
5270  * SMB2_write is given a pointer to a kvec array of n_vec elements.  n_vec
5271  * must be at least 1; the data to write begins at position 1 of the iov
5272  * array (position 0 is reserved for the request header, filled in here).
5273  * The total data length is given by io_parms->length.
5274  */
5275 int
5276 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
5277 	   unsigned int *nbytes, struct kvec *iov, int n_vec)
5278 {
5279 	struct smb_rqst rqst;
5280 	int rc = 0;
5281 	struct smb2_write_req *req = NULL;
5282 	struct smb2_write_rsp *rsp = NULL;
5283 	int resp_buftype;
5284 	struct kvec rsp_iov;
5285 	int flags = 0;
5286 	unsigned int total_len;
5287 	struct TCP_Server_Info *server;
5288 	int retries = 0, cur_sleep = 0;
5289 
5290 replay_again:
5291 	/* reinitialize for possible replay */
5292 	flags = 0;
5293 	*nbytes = 0;
5294 	if (!io_parms->server)
5295 		io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
5296 	server = io_parms->server;
5297 	if (server == NULL)
5298 		return -ECONNABORTED;
5299 
5300 	if (n_vec < 1)
5301 		return rc;
5302 
5303 	rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
5304 				 (void **) &req, &total_len);
5305 	if (rc)
5306 		return rc;
5307 
5308 	if (smb3_encryption_required(io_parms->tcon))
5309 		flags |= CIFS_TRANSFORM_REQ;
5310 
5311 	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5312 
5313 	req->PersistentFileId = io_parms->persistent_fid;
5314 	req->VolatileFileId = io_parms->volatile_fid;
5315 	req->WriteChannelInfoOffset = 0;
5316 	req->WriteChannelInfoLength = 0;
5317 	req->Channel = 0;
5318 	req->Length = cpu_to_le32(io_parms->length);
5319 	req->Offset = cpu_to_le64(io_parms->offset);
5320 	req->DataOffset = cpu_to_le16(
5321 				offsetof(struct smb2_write_req, Buffer));
5322 	req->RemainingBytes = 0;
5323 
5324 	trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
5325 		io_parms->tcon->tid, io_parms->tcon->ses->Suid,
5326 		io_parms->offset, io_parms->length);
5327 
5328 	iov[0].iov_base = (char *)req;
5329 	/* 1 for Buffer */
5330 	iov[0].iov_len = total_len - 1;
5331 
5332 	memset(&rqst, 0, sizeof(struct smb_rqst));
5333 	rqst.rq_iov = iov;
5334 	rqst.rq_nvec = n_vec + 1;
5335 
5336 	if (retries) {
5337 		/* Back-off before retry */
5338 		if (cur_sleep)
5339 			msleep(cur_sleep);
5340 		smb2_set_replay(server, &rqst);
5341 	}
5342 
5343 	rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
5344 			    &rqst,
5345 			    &resp_buftype, flags, &rsp_iov);
5346 	rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
5347 
5348 	if (rc) {
5349 		trace_smb3_write_err(0, 0, xid,
5350 				     req->PersistentFileId,
5351 				     io_parms->tcon->tid,
5352 				     io_parms->tcon->ses->Suid,
5353 				     io_parms->offset, io_parms->length, rc);
5354 		cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
5355 		cifs_dbg(VFS, "Send error in write = %d\n", rc);
5356 	} else {
5357 		*nbytes = le32_to_cpu(rsp->DataLength);
5358 		cifs_stats_bytes_written(io_parms->tcon, *nbytes);
5359 		trace_smb3_write_done(0, 0, xid,
5360 				      req->PersistentFileId,
5361 				      io_parms->tcon->tid,
5362 				      io_parms->tcon->ses->Suid,
5363 				      io_parms->offset, *nbytes);
5364 	}
5365 
5366 	cifs_small_buf_release(req);
5367 	free_rsp_buf(resp_buftype, rsp);
5368 
5369 	if (is_replayable_error(rc) &&
5370 	    smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
5371 		goto replay_again;
5372 
5373 	return rc;
5374 }
5375 
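/*
 * A SID on the wire is 1 byte of revision, 1 byte of sub-authority count,
 * a 6 byte authority and 4 bytes per sub-authority.  Return its total size,
 * or -1 if it does not fit between @beg and @end.
 */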
5376 int posix_info_sid_size(const void *beg, const void *end)
5377 {
5378 	size_t subauth;
5379 	int total;
5380 
5381 	if (beg + 2 > end)
5382 		return -1;
5383 
5384 	subauth = *(u8 *)(beg+1);
5385 	if (subauth < 1 || subauth > 15)
5386 		return -1;
5387 
5388 	total = 1 + 1 + 6 + 4*subauth;
5389 	if (beg + total > end)
5390 		return -1;
5391 
5392 	return total;
5393 }
5394 
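/*
 * Parse a single variable-size POSIX directory entry: a fixed
 * struct smb2_posix_info followed by the owner SID, the group SID, a 4-byte
 * name length and the name itself.  Each piece is validated against @end.
 * Returns the total entry size, or -1 if the entry is malformed.
 */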
5395 int posix_info_parse(const void *beg, const void *end,
5396 		     struct smb2_posix_info_parsed *out)
5398 {
5399 	int total_len = 0;
5400 	int owner_len, group_len;
5401 	int name_len;
5402 	const void *owner_sid;
5403 	const void *group_sid;
5404 	const void *name;
5405 
5406 	/* if no end bound is given, assume the payload to be correct */
5407 	if (!end) {
5408 		const struct smb2_posix_info *p = beg;
5409 
5410 		end = beg + le32_to_cpu(p->NextEntryOffset);
5411 		/* last element will have a 0 offset, pick a sensible bound */
5412 		if (end == beg)
5413 			end += 0xFFFF;
5414 	}
5415 
5416 	/* check base buf */
5417 	if (beg + sizeof(struct smb2_posix_info) > end)
5418 		return -1;
5419 	total_len = sizeof(struct smb2_posix_info);
5420 
5421 	/* check owner sid */
5422 	owner_sid = beg + total_len;
5423 	owner_len = posix_info_sid_size(owner_sid, end);
5424 	if (owner_len < 0)
5425 		return -1;
5426 	total_len += owner_len;
5427 
5428 	/* check group sid */
5429 	group_sid = beg + total_len;
5430 	group_len = posix_info_sid_size(group_sid, end);
5431 	if (group_len < 0)
5432 		return -1;
5433 	total_len += group_len;
5434 
5435 	/* check name len */
5436 	if (beg + total_len + 4 > end)
5437 		return -1;
5438 	name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
5439 	if (name_len < 1 || name_len > 0xFFFF)
5440 		return -1;
5441 	total_len += 4;
5442 
5443 	/* check name */
5444 	name = beg + total_len;
5445 	if (name + name_len > end)
5446 		return -1;
5447 	total_len += name_len;
5448 
5449 	if (out) {
5450 		out->base = beg;
5451 		out->size = total_len;
5452 		out->name_len = name_len;
5453 		out->name = name;
5454 		memcpy(&out->owner, owner_sid, owner_len);
5455 		memcpy(&out->group, group_sid, group_len);
5456 	}
5457 	return total_len;
5458 }
5459 
5460 static int posix_info_extra_size(const void *beg, const void *end)
5461 {
5462 	int len = posix_info_parse(beg, end, NULL);
5463 
5464 	if (len < 0)
5465 		return -1;
5466 	return len - sizeof(struct smb2_posix_info);
5467 }
5468 
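/*
 * Count the directory entries in a query directory response buffer,
 * validating each NextEntryOffset and name length against the end of the
 * buffer, and remember the last valid entry in *lastentry.
 */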
5469 static unsigned int
5470 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
5471 	    size_t size)
5472 {
5473 	int len;
5474 	unsigned int entrycount = 0;
5475 	unsigned int next_offset = 0;
5476 	char *entryptr;
5477 	FILE_DIRECTORY_INFO *dir_info;
5478 
5479 	if (bufstart == NULL)
5480 		return 0;
5481 
5482 	entryptr = bufstart;
5483 
5484 	while (1) {
5485 		if (entryptr + next_offset < entryptr ||
5486 		    entryptr + next_offset > end_of_buf ||
5487 		    entryptr + next_offset + size > end_of_buf) {
5488 			cifs_dbg(VFS, "malformed search entry would overflow\n");
5489 			break;
5490 		}
5491 
5492 		entryptr = entryptr + next_offset;
5493 		dir_info = (FILE_DIRECTORY_INFO *)entryptr;
5494 
5495 		if (infotype == SMB_FIND_FILE_POSIX_INFO)
5496 			len = posix_info_extra_size(entryptr, end_of_buf);
5497 		else
5498 			len = le32_to_cpu(dir_info->FileNameLength);
5499 
5500 		if (len < 0 ||
5501 		    entryptr + len < entryptr ||
5502 		    entryptr + len > end_of_buf ||
5503 		    entryptr + len + size > end_of_buf) {
5504 			cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
5505 				 end_of_buf);
5506 			break;
5507 		}
5508 
5509 		*lastentry = entryptr;
5510 		entrycount++;
5511 
5512 		next_offset = le32_to_cpu(dir_info->NextEntryOffset);
5513 		if (!next_offset)
5514 			break;
5515 	}
5516 
5517 	return entrycount;
5518 }
5519 
5520 /*
5521  * Readdir/FindFirst
5522  */
5523 int SMB2_query_directory_init(const unsigned int xid,
5524 			      struct cifs_tcon *tcon,
5525 			      struct TCP_Server_Info *server,
5526 			      struct smb_rqst *rqst,
5527 			      u64 persistent_fid, u64 volatile_fid,
5528 			      int index, int info_level)
5529 {
5530 	struct smb2_query_directory_req *req;
5531 	unsigned char *bufptr;
5532 	__le16 asterisk = cpu_to_le16('*');
5533 	unsigned int output_size = CIFSMaxBufSize -
5534 		MAX_SMB2_CREATE_RESPONSE_SIZE -
5535 		MAX_SMB2_CLOSE_RESPONSE_SIZE;
5536 	unsigned int total_len;
5537 	struct kvec *iov = rqst->rq_iov;
5538 	int len, rc;
5539 
5540 	rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
5541 				 (void **) &req, &total_len);
5542 	if (rc)
5543 		return rc;
5544 
5545 	switch (info_level) {
5546 	case SMB_FIND_FILE_DIRECTORY_INFO:
5547 		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
5548 		break;
5549 	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5550 		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
5551 		break;
5552 	case SMB_FIND_FILE_POSIX_INFO:
5553 		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
5554 		break;
5555 	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5556 		req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
5557 		break;
5558 	default:
5559 		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5560 			info_level);
5561 		return -EINVAL;
5562 	}
5563 
5564 	req->FileIndex = cpu_to_le32(index);
5565 	req->PersistentFileId = persistent_fid;
5566 	req->VolatileFileId = volatile_fid;
5567 
5568 	len = 0x2;
5569 	bufptr = req->Buffer;
5570 	memcpy(bufptr, &asterisk, len);
5571 
5572 	req->FileNameOffset =
5573 		cpu_to_le16(sizeof(struct smb2_query_directory_req));
5574 	req->FileNameLength = cpu_to_le16(len);
5575 	/*
5576 	 * BB could be 30 bytes or so longer if we used SMB2 specific
5577 	 * buffer lengths, but this is safe and close enough.
5578 	 */
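	/* clamp the response to the negotiated buffer size and to 64KB */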
5579 	output_size = min_t(unsigned int, output_size, server->maxBuf);
5580 	output_size = min_t(unsigned int, output_size, 2 << 15);
5581 	req->OutputBufferLength = cpu_to_le32(output_size);
5582 
5583 	iov[0].iov_base = (char *)req;
5584 	/* 1 for Buffer */
5585 	iov[0].iov_len = total_len - 1;
5586 
5587 	iov[1].iov_base = (char *)(req->Buffer);
5588 	iov[1].iov_len = len;
5589 
5590 	trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
5591 			tcon->ses->Suid, index, output_size);
5592 
5593 	return 0;
5594 }
5595 
5596 void SMB2_query_directory_free(struct smb_rqst *rqst)
5597 {
5598 	if (rqst && rqst->rq_iov) {
5599 		cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
5600 	}
5601 }
5602 
5603 int
5604 smb2_parse_query_directory(struct cifs_tcon *tcon,
5605 			   struct kvec *rsp_iov,
5606 			   int resp_buftype,
5607 			   struct cifs_search_info *srch_inf)
5608 {
5609 	struct smb2_query_directory_rsp *rsp;
5610 	size_t info_buf_size;
5611 	char *end_of_smb;
5612 	int rc;
5613 
5614 	rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
5615 
5616 	switch (srch_inf->info_level) {
5617 	case SMB_FIND_FILE_DIRECTORY_INFO:
5618 		info_buf_size = sizeof(FILE_DIRECTORY_INFO);
5619 		break;
5620 	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5621 		info_buf_size = sizeof(FILE_ID_FULL_DIR_INFO);
5622 		break;
5623 	case SMB_FIND_FILE_POSIX_INFO:
5624 		/* note that posix payloads are variable size */
5625 		info_buf_size = sizeof(struct smb2_posix_info);
5626 		break;
5627 	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5628 		info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
5629 		break;
5630 	default:
5631 		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5632 			 srch_inf->info_level);
5633 		return -EINVAL;
5634 	}
5635 
5636 	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5637 			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
5638 			       info_buf_size);
5639 	if (rc) {
5640 		cifs_tcon_dbg(VFS, "bad info payload\n");
5641 		return rc;
5642 	}
5643 
5644 	srch_inf->unicode = true;
5645 
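	/* free any response buffer still held from a previous search call */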
5646 	if (srch_inf->ntwrk_buf_start) {
5647 		if (srch_inf->smallBuf)
5648 			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
5649 		else
5650 			cifs_buf_release(srch_inf->ntwrk_buf_start);
5651 	}
5652 	srch_inf->ntwrk_buf_start = (char *)rsp;
5653 	srch_inf->srch_entries_start = srch_inf->last_entry =
5654 		(char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
5655 	end_of_smb = rsp_iov->iov_len + (char *)rsp;
5656 
5657 	srch_inf->entries_in_buffer = num_entries(
5658 		srch_inf->info_level,
5659 		srch_inf->srch_entries_start,
5660 		end_of_smb,
5661 		&srch_inf->last_entry,
5662 		info_buf_size);
5663 
5664 	srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
5665 	cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
5666 		 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
5667 		 srch_inf->srch_entries_start, srch_inf->last_entry);
5668 	if (resp_buftype == CIFS_LARGE_BUFFER)
5669 		srch_inf->smallBuf = false;
5670 	else if (resp_buftype == CIFS_SMALL_BUFFER)
5671 		srch_inf->smallBuf = true;
5672 	else
5673 		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
5674 
5675 	return 0;
5676 }
5677 
5678 int
5679 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
5680 		     u64 persistent_fid, u64 volatile_fid, int index,
5681 		     struct cifs_search_info *srch_inf)
5682 {
5683 	struct smb_rqst rqst;
5684 	struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
5685 	struct smb2_query_directory_rsp *rsp = NULL;
5686 	int resp_buftype = CIFS_NO_BUFFER;
5687 	struct kvec rsp_iov;
5688 	int rc = 0;
5689 	struct cifs_ses *ses = tcon->ses;
5690 	struct TCP_Server_Info *server;
5691 	int flags = 0;
5692 	int retries = 0, cur_sleep = 0;
5693 
5694 replay_again:
5695 	/* reinitialize for possible replay */
5696 	flags = 0;
5697 	if (!ses || !(ses->server))
5698 		return smb_EIO(smb_eio_trace_null_pointers);
5699 
5700 	server = cifs_pick_channel(ses);
5701 
5702 	if (smb3_encryption_required(tcon))
5703 		flags |= CIFS_TRANSFORM_REQ;
5704 
5705 	memset(&rqst, 0, sizeof(struct smb_rqst));
5706 	memset(&iov, 0, sizeof(iov));
5707 	rqst.rq_iov = iov;
5708 	rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
5709 
5710 	rc = SMB2_query_directory_init(xid, tcon, server,
5711 				       &rqst, persistent_fid,
5712 				       volatile_fid, index,
5713 				       srch_inf->info_level);
5714 	if (rc)
5715 		goto qdir_exit;
5716 
5717 	if (retries) {
5718 		/* Back-off before retry */
5719 		if (cur_sleep)
5720 			msleep(cur_sleep);
5721 		smb2_set_replay(server, &rqst);
5722 	}
5723 
5724 	rc = cifs_send_recv(xid, ses, server,
5725 			    &rqst, &resp_buftype, flags, &rsp_iov);
5726 	rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
5727 
5728 	if (rc) {
5729 		if (rc == -ENODATA &&
5730 		    rsp->hdr.Status == STATUS_NO_MORE_FILES) {
5731 			trace_smb3_query_dir_done(xid, persistent_fid,
5732 				tcon->tid, tcon->ses->Suid, index, 0);
5733 			srch_inf->endOfSearch = true;
5734 			rc = 0;
5735 		} else {
5736 			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5737 				tcon->ses->Suid, index, 0, rc);
5738 			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
5739 		}
5740 		goto qdir_exit;
5741 	}
5742 
5743 	rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
5744 					srch_inf);
5745 	if (rc) {
5746 		trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5747 			tcon->ses->Suid, index, 0, rc);
5748 		goto qdir_exit;
5749 	}
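	/* the response buffer now belongs to srch_inf; don't free it below */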
5750 	resp_buftype = CIFS_NO_BUFFER;
5751 
5752 	trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
5753 			tcon->ses->Suid, index, srch_inf->entries_in_buffer);
5754 
5755 qdir_exit:
5756 	SMB2_query_directory_free(&rqst);
5757 	free_rsp_buf(resp_buftype, rsp);
5758 
5759 	if (is_replayable_error(rc) &&
5760 	    smb2_should_replay(tcon, &retries, &cur_sleep))
5761 		goto replay_again;
5762 
5763 	return rc;
5764 }
5765 
5766 int
5767 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
5768 		   struct smb_rqst *rqst,
5769 		   u64 persistent_fid, u64 volatile_fid, u32 pid,
5770 		   u8 info_class, u8 info_type, u32 additional_info,
5771 		   void **data, unsigned int *size)
5772 {
5773 	struct smb2_set_info_req *req;
5774 	struct kvec *iov = rqst->rq_iov;
5775 	unsigned int i, total_len;
5776 	int rc;
5777 
5778 	rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
5779 				 (void **) &req, &total_len);
5780 	if (rc)
5781 		return rc;
5782 
5783 	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
5784 	req->InfoType = info_type;
5785 	req->FileInfoClass = info_class;
5786 	req->PersistentFileId = persistent_fid;
5787 	req->VolatileFileId = volatile_fid;
5788 	req->AdditionalInformation = cpu_to_le32(additional_info);
5789 
5790 	req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
5791 	req->BufferLength = cpu_to_le32(*size);
5792 
5793 	memcpy(req->Buffer, *data, *size);
5794 	total_len += *size;
5795 
5796 	iov[0].iov_base = (char *)req;
5797 	/* 1 for Buffer */
5798 	iov[0].iov_len = total_len - 1;
5799 
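	/* any additional buffers are sent as extra iovecs; account for them */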
5800 	for (i = 1; i < rqst->rq_nvec; i++) {
5801 		le32_add_cpu(&req->BufferLength, size[i]);
5802 		iov[i].iov_base = (char *)data[i];
5803 		iov[i].iov_len = size[i];
5804 	}
5805 
5806 	return 0;
5807 }
5808 
5809 void
5810 SMB2_set_info_free(struct smb_rqst *rqst)
5811 {
5812 	if (rqst && rqst->rq_iov)
5813 		cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
5814 }
5815 
5816 static int
5817 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
5818 	       u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
5819 	       u8 info_type, u32 additional_info, unsigned int num,
5820 		void **data, unsigned int *size)
5821 {
5822 	struct smb_rqst rqst;
5823 	struct smb2_set_info_rsp *rsp = NULL;
5824 	struct kvec *iov;
5825 	struct kvec rsp_iov;
5826 	int rc = 0;
5827 	int resp_buftype;
5828 	struct cifs_ses *ses = tcon->ses;
5829 	struct TCP_Server_Info *server;
5830 	int flags = 0;
5831 	int retries = 0, cur_sleep = 0;
5832 
5833 replay_again:
5834 	/* reinitialize for possible replay */
5835 	flags = 0;
5836 	server = cifs_pick_channel(ses);
5837 
5838 	if (!ses || !server)
5839 		return smb_EIO(smb_eio_trace_null_pointers);
5840 
5841 	if (!num)
5842 		return -EINVAL;
5843 
5844 	if (smb3_encryption_required(tcon))
5845 		flags |= CIFS_TRANSFORM_REQ;
5846 
5847 	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
5848 	if (!iov)
5849 		return -ENOMEM;
5850 
5851 	memset(&rqst, 0, sizeof(struct smb_rqst));
5852 	rqst.rq_iov = iov;
5853 	rqst.rq_nvec = num;
5854 
5855 	rc = SMB2_set_info_init(tcon, server,
5856 				&rqst, persistent_fid, volatile_fid, pid,
5857 				info_class, info_type, additional_info,
5858 				data, size);
5859 	if (rc) {
5860 		kfree(iov);
5861 		return rc;
5862 	}
5863 
5864 	if (retries) {
5865 		/* Back-off before retry */
5866 		if (cur_sleep)
5867 			msleep(cur_sleep);
5868 		smb2_set_replay(server, &rqst);
5869 	}
5870 
5871 	rc = cifs_send_recv(xid, ses, server,
5872 			    &rqst, &resp_buftype, flags,
5873 			    &rsp_iov);
5874 	SMB2_set_info_free(&rqst);
5875 	rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
5876 
5877 	if (rc != 0) {
5878 		cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
5879 		trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
5880 				ses->Suid, info_class, (__u32)info_type, rc);
5881 	}
5882 
5883 	free_rsp_buf(resp_buftype, rsp);
5884 	kfree(iov);
5885 
5886 	if (is_replayable_error(rc) &&
5887 	    smb2_should_replay(tcon, &retries, &cur_sleep))
5888 		goto replay_again;
5889 
5890 	return rc;
5891 }
5892 
5893 int
5894 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
5895 	     u64 volatile_fid, u32 pid, loff_t new_eof)
5896 {
5897 	struct smb2_file_eof_info info;
5898 	void *data;
5899 	unsigned int size;
5900 
5901 	info.EndOfFile = cpu_to_le64(new_eof);
5902 
5903 	data = &info;
5904 	size = sizeof(struct smb2_file_eof_info);
5905 
5906 	trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);
5907 
5908 	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5909 			pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
5910 			0, 1, &data, &size);
5911 }
5912 
5913 int
5914 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
5915 		u64 persistent_fid, u64 volatile_fid,
5916 		struct smb_ntsd *pnntsd, int pacllen, int aclflag)
5917 {
5918 	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5919 			current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
5920 			1, (void **)&pnntsd, &pacllen);
5921 }
5922 
5923 int
5924 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
5925 	    u64 persistent_fid, u64 volatile_fid,
5926 	    struct smb2_file_full_ea_info *buf, int len)
5927 {
5928 	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5929 		current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
5930 		0, 1, (void **)&buf, &len);
5931 }
5932 
5933 int
5934 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
5935 		  const u64 persistent_fid, const u64 volatile_fid,
5936 		  __u8 oplock_level)
5937 {
5938 	struct smb_rqst rqst;
5939 	int rc;
5940 	struct smb2_oplock_break *req = NULL;
5941 	struct cifs_ses *ses = tcon->ses;
5942 	struct TCP_Server_Info *server;
5943 	int flags = CIFS_OBREAK_OP;
5944 	unsigned int total_len;
5945 	struct kvec iov[1];
5946 	struct kvec rsp_iov;
5947 	int resp_buf_type;
5948 	int retries = 0, cur_sleep = 0;
5949 
5950 replay_again:
5951 	/* reinitialize for possible replay */
5952 	flags = CIFS_OBREAK_OP;
5953 	server = cifs_pick_channel(ses);
5954 
5955 	cifs_dbg(FYI, "SMB2_oplock_break\n");
5956 	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5957 				 (void **) &req, &total_len);
5958 	if (rc)
5959 		return rc;
5960 
5961 	if (smb3_encryption_required(tcon))
5962 		flags |= CIFS_TRANSFORM_REQ;
5963 
5964 	req->VolatileFid = volatile_fid;
5965 	req->PersistentFid = persistent_fid;
5966 	req->OplockLevel = oplock_level;
5967 	req->hdr.CreditRequest = cpu_to_le16(1);
5968 
5969 	flags |= CIFS_NO_RSP_BUF;
5970 
5971 	iov[0].iov_base = (char *)req;
5972 	iov[0].iov_len = total_len;
5973 
5974 	memset(&rqst, 0, sizeof(struct smb_rqst));
5975 	rqst.rq_iov = iov;
5976 	rqst.rq_nvec = 1;
5977 
5978 	if (retries) {
5979 		/* Back-off before retry */
5980 		if (cur_sleep)
5981 			msleep(cur_sleep);
5982 		smb2_set_replay(server, &rqst);
5983 	}
5984 
5985 	rc = cifs_send_recv(xid, ses, server,
5986 			    &rqst, &resp_buf_type, flags, &rsp_iov);
5987 	cifs_small_buf_release(req);
5988 	if (rc) {
5989 		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
5990 		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
5991 	}
5992 
5993 	if (is_replayable_error(rc) &&
5994 	    smb2_should_replay(tcon, &retries, &cur_sleep))
5995 		goto replay_again;
5996 
5997 	return rc;
5998 }
5999 
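/*
 * The full size info reports space in allocation units, so the effective
 * block size is the sector size times the sectors per allocation unit.
 */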
6000 void
6001 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
6002 			     struct kstatfs *kst)
6003 {
6004 	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
6005 			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
6006 	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
6007 	kst->f_bfree  = kst->f_bavail =
6008 			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
6009 	return;
6010 }
6011 
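/* fields reported as all ones (-1) are treated as not provided */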
6012 static void
6013 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
6014 			struct kstatfs *kst)
6015 {
6016 	kst->f_bsize = le32_to_cpu(response_data->BlockSize);
6017 	kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
6018 	kst->f_bfree =  le64_to_cpu(response_data->BlocksAvail);
6019 	if (response_data->UserBlocksAvail == cpu_to_le64(-1))
6020 		kst->f_bavail = kst->f_bfree;
6021 	else
6022 		kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
6023 	if (response_data->TotalFileNodes != cpu_to_le64(-1))
6024 		kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
6025 	if (response_data->FreeFileNodes != cpu_to_le64(-1))
6026 		kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
6027 
6028 	return;
6029 }
6030 
6031 static int
6032 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
6033 		   struct TCP_Server_Info *server,
6034 		   int level, int outbuf_len, u64 persistent_fid,
6035 		   u64 volatile_fid)
6036 {
6037 	int rc;
6038 	struct smb2_query_info_req *req;
6039 	unsigned int total_len;
6040 
6041 	cifs_dbg(FYI, "Query FSInfo level %d\n", level);
6042 
6043 	if ((tcon->ses == NULL) || server == NULL)
6044 		return smb_EIO(smb_eio_trace_null_pointers);
6045 
6046 	rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
6047 				 (void **) &req, &total_len);
6048 	if (rc)
6049 		return rc;
6050 
6051 	req->InfoType = SMB2_O_INFO_FILESYSTEM;
6052 	req->FileInfoClass = level;
6053 	req->PersistentFileId = persistent_fid;
6054 	req->VolatileFileId = volatile_fid;
6055 	/* 1 for pad */
6056 	req->InputBufferOffset =
6057 			cpu_to_le16(sizeof(struct smb2_query_info_req));
6058 	req->OutputBufferLength = cpu_to_le32(
6059 		outbuf_len + sizeof(struct smb2_query_info_rsp));
6060 
6061 	iov->iov_base = (char *)req;
6062 	iov->iov_len = total_len;
6063 	return 0;
6064 }
6065 
6066 static inline void free_qfs_info_req(struct kvec *iov)
6067 {
6068 	cifs_buf_release(iov->iov_base);
6069 }
6070 
6071 int
6072 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
6073 	      u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
6074 {
6075 	struct smb_rqst rqst;
6076 	struct smb2_query_info_rsp *rsp = NULL;
6077 	struct kvec iov;
6078 	struct kvec rsp_iov;
6079 	int rc = 0;
6080 	int resp_buftype;
6081 	struct cifs_ses *ses = tcon->ses;
6082 	struct TCP_Server_Info *server;
6083 	FILE_SYSTEM_POSIX_INFO *info = NULL;
6084 	int flags = 0;
6085 	int retries = 0, cur_sleep = 0;
6086 
6087 replay_again:
6088 	/* reinitialize for possible replay */
6089 	flags = 0;
6090 	server = cifs_pick_channel(ses);
6091 
6092 	rc = build_qfs_info_req(&iov, tcon, server,
6093 				FS_POSIX_INFORMATION,
6094 				sizeof(FILE_SYSTEM_POSIX_INFO),
6095 				persistent_fid, volatile_fid);
6096 	if (rc)
6097 		return rc;
6098 
6099 	if (smb3_encryption_required(tcon))
6100 		flags |= CIFS_TRANSFORM_REQ;
6101 
6102 	memset(&rqst, 0, sizeof(struct smb_rqst));
6103 	rqst.rq_iov = &iov;
6104 	rqst.rq_nvec = 1;
6105 
6106 	if (retries) {
6107 		/* Back-off before retry */
6108 		if (cur_sleep)
6109 			msleep(cur_sleep);
6110 		smb2_set_replay(server, &rqst);
6111 	}
6112 
6113 	rc = cifs_send_recv(xid, ses, server,
6114 			    &rqst, &resp_buftype, flags, &rsp_iov);
6115 	free_qfs_info_req(&iov);
6116 	if (rc) {
6117 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6118 		goto posix_qfsinf_exit;
6119 	}
6120 	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6121 
6122 	info = (FILE_SYSTEM_POSIX_INFO *)(
6123 		le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
6124 	rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
6125 			       le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
6126 			       sizeof(FILE_SYSTEM_POSIX_INFO));
6127 	if (!rc)
6128 		copy_posix_fs_info_to_kstatfs(info, fsdata);
6129 
6130 posix_qfsinf_exit:
6131 	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6132 
6133 	if (is_replayable_error(rc) &&
6134 	    smb2_should_replay(tcon, &retries, &cur_sleep))
6135 		goto replay_again;
6136 
6137 	return rc;
6138 }
6139 
6140 int
6141 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
6142 	      u64 persistent_fid, u64 volatile_fid, int level)
6143 {
6144 	struct smb_rqst rqst;
6145 	struct smb2_query_info_rsp *rsp = NULL;
6146 	struct kvec iov;
6147 	struct kvec rsp_iov;
6148 	int rc = 0;
6149 	int resp_buftype, max_len, min_len;
6150 	struct cifs_ses *ses = tcon->ses;
6151 	struct TCP_Server_Info *server;
6152 	unsigned int rsp_len, offset;
6153 	int flags = 0;
6154 	int retries = 0, cur_sleep = 0;
6155 
6156 replay_again:
6157 	/* reinitialize for possible replay */
6158 	flags = 0;
6159 	server = cifs_pick_channel(ses);
6160 
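	/*
	 * max_len is the most we are prepared to receive for this level
	 * (leaving room for a variable-length fs name or volume label);
	 * min_len is the fixed part the response is validated against.
	 */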
6161 	if (level == FS_DEVICE_INFORMATION) {
6162 		max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6163 		min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6164 	} else if (level == FS_ATTRIBUTE_INFORMATION) {
6165 		max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO) + MAX_FS_NAME_LEN;
6166 		min_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
6167 	} else if (level == FS_SECTOR_SIZE_INFORMATION) {
6168 		max_len = sizeof(struct smb3_fs_ss_info);
6169 		min_len = sizeof(struct smb3_fs_ss_info);
6170 	} else if (level == FS_VOLUME_INFORMATION) {
6171 		max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
6172 		min_len = sizeof(struct smb3_fs_vol_info);
6173 	} else {
6174 		cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
6175 		return -EINVAL;
6176 	}
6177 
6178 	rc = build_qfs_info_req(&iov, tcon, server,
6179 				level, max_len,
6180 				persistent_fid, volatile_fid);
6181 	if (rc)
6182 		return rc;
6183 
6184 	if (smb3_encryption_required(tcon))
6185 		flags |= CIFS_TRANSFORM_REQ;
6186 
6187 	memset(&rqst, 0, sizeof(struct smb_rqst));
6188 	rqst.rq_iov = &iov;
6189 	rqst.rq_nvec = 1;
6190 
6191 	if (retries) {
6192 		/* Back-off before retry */
6193 		if (cur_sleep)
6194 			msleep(cur_sleep);
6195 		smb2_set_replay(server, &rqst);
6196 	}
6197 
6198 	rc = cifs_send_recv(xid, ses, server,
6199 			    &rqst, &resp_buftype, flags, &rsp_iov);
6200 	free_qfs_info_req(&iov);
6201 	if (rc) {
6202 		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6203 		goto qfsattr_exit;
6204 	}
6205 	rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6206 
6207 	rsp_len = le32_to_cpu(rsp->OutputBufferLength);
6208 	offset = le16_to_cpu(rsp->OutputBufferOffset);
6209 	rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
6210 	if (rc)
6211 		goto qfsattr_exit;
6212 
6213 	if (level == FS_ATTRIBUTE_INFORMATION)
6214 		memcpy(&tcon->fsAttrInfo, offset
6215 			+ (char *)rsp, min_t(unsigned int,
6216 			rsp_len, min_len));
6217 	else if (level == FS_DEVICE_INFORMATION)
6218 		memcpy(&tcon->fsDevInfo, offset
6219 			+ (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
6220 	else if (level == FS_SECTOR_SIZE_INFORMATION) {
6221 		struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
6222 			(offset + (char *)rsp);
6223 		tcon->ss_flags = le32_to_cpu(ss_info->Flags);
6224 		tcon->perf_sector_size =
6225 			le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
6226 	} else if (level == FS_VOLUME_INFORMATION) {
6227 		struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
6228 			(offset + (char *)rsp);
6229 		tcon->vol_serial_number = vol_info->VolumeSerialNumber;
6230 		tcon->vol_create_time = vol_info->VolumeCreationTime;
6231 	}
6232 
6233 qfsattr_exit:
6234 	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6235 
6236 	if (is_replayable_error(rc) &&
6237 	    smb2_should_replay(tcon, &retries, &cur_sleep))
6238 		goto replay_again;
6239 
6240 	return rc;
6241 }
6242 
6243 int
6244 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
6245 	   const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6246 	   const __u32 num_lock, struct smb2_lock_element *buf)
6247 {
6248 	struct smb_rqst rqst;
6249 	int rc = 0;
6250 	struct smb2_lock_req *req = NULL;
6251 	struct kvec iov[2];
6252 	struct kvec rsp_iov;
6253 	int resp_buf_type;
6254 	unsigned int count;
6255 	int flags = CIFS_NO_RSP_BUF;
6256 	unsigned int total_len;
6257 	struct TCP_Server_Info *server;
6258 	int retries = 0, cur_sleep = 0;
6259 
6260 replay_again:
6261 	/* reinitialize for possible replay */
6262 	flags = CIFS_NO_RSP_BUF;
6263 	server = cifs_pick_channel(tcon->ses);
6264 
6265 	cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
6266 
6267 	rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
6268 				 (void **) &req, &total_len);
6269 	if (rc)
6270 		return rc;
6271 
6272 	if (smb3_encryption_required(tcon))
6273 		flags |= CIFS_TRANSFORM_REQ;
6274 
6275 	req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
6276 	req->LockCount = cpu_to_le16(num_lock);
6277 
6278 	req->PersistentFileId = persist_fid;
6279 	req->VolatileFileId = volatile_fid;
6280 
6281 	count = num_lock * sizeof(struct smb2_lock_element);
6282 
6283 	iov[0].iov_base = (char *)req;
6284 	iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
6285 	iov[1].iov_base = (char *)buf;
6286 	iov[1].iov_len = count;
6287 
6288 	cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
6289 
6290 	memset(&rqst, 0, sizeof(struct smb_rqst));
6291 	rqst.rq_iov = iov;
6292 	rqst.rq_nvec = 2;
6293 
6294 	if (retries) {
6295 		/* Back-off before retry */
6296 		if (cur_sleep)
6297 			msleep(cur_sleep);
6298 		smb2_set_replay(server, &rqst);
6299 	}
6300 
6301 	rc = cifs_send_recv(xid, tcon->ses, server,
6302 			    &rqst, &resp_buf_type, flags,
6303 			    &rsp_iov);
6304 	cifs_small_buf_release(req);
6305 	if (rc) {
6306 		cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
6307 		cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
6308 		trace_smb3_lock_err(xid, persist_fid, tcon->tid,
6309 				    tcon->ses->Suid, rc);
6310 	}
6311 
6312 	if (is_replayable_error(rc) &&
6313 	    smb2_should_replay(tcon, &retries, &cur_sleep))
6314 		goto replay_again;
6315 
6316 	return rc;
6317 }
6318 
6319 int
6320 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
6321 	  const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6322 	  const __u64 length, const __u64 offset, const __u32 lock_flags,
6323 	  const bool wait)
6324 {
6325 	struct smb2_lock_element lock;
6326 
6327 	lock.Offset = cpu_to_le64(offset);
6328 	lock.Length = cpu_to_le64(length);
6329 	lock.Flags = cpu_to_le32(lock_flags);
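	/* if the caller cannot block, fail a contended lock immediately */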
6330 	if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
6331 		lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
6332 
6333 	return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
6334 }
6335 
6336 int
6337 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
6338 		 __u8 *lease_key, const __le32 lease_state)
6339 {
6340 	struct smb_rqst rqst;
6341 	int rc;
6342 	struct smb2_lease_ack *req = NULL;
6343 	struct cifs_ses *ses = tcon->ses;
6344 	int flags = CIFS_OBREAK_OP;
6345 	unsigned int total_len;
6346 	struct kvec iov[1];
6347 	struct kvec rsp_iov;
6348 	int resp_buf_type;
6349 	__u64 *please_key_high;
6350 	__u64 *please_key_low;
6351 	struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
6352 
6353 	cifs_dbg(FYI, "SMB2_lease_break\n");
6354 	rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
6355 				 (void **) &req, &total_len);
6356 	if (rc)
6357 		return rc;
6358 
6359 	if (smb3_encryption_required(tcon))
6360 		flags |= CIFS_TRANSFORM_REQ;
6361 
6362 	req->hdr.CreditRequest = cpu_to_le16(1);
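	/*
	 * The request was sized for an oplock break ack; the lease break
	 * ack body is 36 bytes, 12 more, so fix up the size and length.
	 */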
6363 	req->StructureSize = cpu_to_le16(36);
6364 	total_len += 12;
6365 
6366 	memcpy(req->LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
6367 	req->LeaseState = lease_state;
6368 
6369 	flags |= CIFS_NO_RSP_BUF;
6370 
6371 	iov[0].iov_base = (char *)req;
6372 	iov[0].iov_len = total_len;
6373 
6374 	memset(&rqst, 0, sizeof(struct smb_rqst));
6375 	rqst.rq_iov = iov;
6376 	rqst.rq_nvec = 1;
6377 
6378 	rc = cifs_send_recv(xid, ses, server,
6379 			    &rqst, &resp_buf_type, flags, &rsp_iov);
6380 	cifs_small_buf_release(req);
6381 
6382 	please_key_low = (__u64 *)lease_key;
6383 	please_key_high = (__u64 *)(lease_key+8);
6384 	if (rc) {
6385 		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
6386 		trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid,
6387 			ses->Suid, *please_key_low, *please_key_high, rc);
6388 		cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
6389 	} else
6390 		trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid,
6391 			ses->Suid, *please_key_low, *please_key_high);
6392 
6393 	return rc;
6394 }
6395