xref: /linux/fs/smb/client/misc.c (revision 9f867ba24d3665d9ac9d9ef1f51844eb4479b291)
// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#include "dfs.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
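
/*
 * Typical caller pattern (a sketch, not lifted from any one caller): entry
 * points normally use the get_xid()/free_xid() wrappers declared elsewhere
 * in the client code, which bracket a VFS operation so the active-request
 * counters above stay balanced:
 *
 *	unsigned int xid = get_xid();
 *	rc = some_smb_operation(xid, tcon, ...);	// placeholder op
 *	free_xid(xid);
 */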

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	unload_nls(buf_to_free->local_nls);
	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree_sensitive(buf_to_free->password2);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree(buf_to_free->dns_dom);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
}

struct cifs_tcon *
tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
{
	struct cifs_tcon *ret_buf;
	static atomic_t tcon_debug_id;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;

	if (dir_leases_enabled == true) {
		ret_buf->cfids = init_cached_dirs();
		if (!ret_buf->cfids) {
			kfree(ret_buf);
			return NULL;
		}
	}
	/* else ret_buf->cfids is already set to NULL above */

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
	ret_buf->tc_count = 1;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	INIT_LIST_HEAD(&ret_buf->cifs_sb_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	spin_lock_init(&ret_buf->sb_list_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
	ret_buf->stats_from_time = ktime_get_real_seconds();
#ifdef CONFIG_CIFS_FSCACHE
	mutex_init(&ret_buf->fscache_lock);
#endif
	trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	kfree_sensitive(tcon->password);
	kfree(tcon->origin_fullpath);
	kfree(tcon);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so it is no problem
	 * to clear a few extra bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&buf_alloc_count);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&small_buf_alloc_count);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: the MID cannot be set if treeCon is not passed in; in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */);

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags  |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	} else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
		cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
			 __func__, smb->WordCount);
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle on oplock
		   break - a harmless race between a close request and an
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		if (cifs_ses_exiting(ses))
			continue;
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");

	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
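
/*
 * Typical pairing in write paths (a sketch; the exact callers live in other
 * files and are not reproduced here):
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... issue the write ...
 *	cifs_put_writer(cinode);
 *
 * cifs_get_writer() blocks while an oplock break is pending, so a write
 * never races with the oplock break handler.
 */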

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}
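
/*
 * Expected locking around the deferred-close helpers (a sketch, assuming a
 * caller that already holds a reference on @cfile):
 *
 *	spin_lock(&CIFS_I(inode)->deferred_lock);
 *	if (cifs_is_deferred_close(cfile, &dclose))
 *		... use dclose only while the lock is held ...
 *	spin_unlock(&CIFS_I(inode)->deferred_lock);
 */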

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}

void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	LIST_HEAD(file_head);

	if (cifs_inode == NULL)
		return;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&cifs_inode->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&cifs_inode->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, false, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	LIST_HEAD(file_head);

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	void *page;
	const char *full_path;
	LIST_HEAD(file_head);

	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
					cifs_del_deferred_close(cfile);
					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);
}

/*
 * If a dentry has been deleted, all corresponding open handles should know
 * that, so that we do not defer close on them.
 */
void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
					     const char *path)
{
	struct cifsFileInfo *cfile;
	void *page;
	const char *full_path;
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	page = alloc_dentry_path();
	spin_lock(&cinode->open_file_lock);

	/*
	 * note: we need to construct the path from the dentry and compare
	 * only if the inode has any hardlinks. When the number of hardlinks
	 * is 1, we can just mark all open handles since they are going to be
	 * from the same file.
	 */
	if (inode->i_nlink > 1) {
		list_for_each_entry(cfile, &cinode->openFileList, flist) {
			full_path = build_path_from_dentry(cfile->dentry, page);
			if (!IS_ERR(full_path) && strcmp(full_path, path) == 0)
				cfile->status_file_deleted = true;
		}
	} else {
		list_for_each_entry(cfile, &cinode->openFileList, flist)
			cfile->status_file_deleted = true;
	}
	spin_unlock(&cinode->open_file_lock);
	free_dentry_path(page);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS | ONCE, "%s: [path=%s] num_referrals must be greater than 0, but we got %d\n",
			 __func__, searchName, *num_of_nodes);
		rc = -ENOENT;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
{
	int rc = 0;
	struct crypto_shash *alg = NULL;

	if (*sdesc)
		return 0;

	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
		*sdesc = NULL;
		return rc;
	}

	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
		return -ENOMEM;
	}

	(*sdesc)->tfm = alg;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
void
cifs_free_hash(struct shash_desc **sdesc)
{
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

	kfree_sensitive(*sdesc);
	*sdesc = NULL;
}
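
/*
 * Example usage (a sketch, not lifted from a specific caller; the hash name
 * and buffers are placeholders):
 *
 *	struct shash_desc *desc = NULL;
 *	int rc = cifs_alloc_hash("sha256", &desc);
 *	if (!rc) {
 *		rc = crypto_shash_digest(desc, data, len, out);
 *		cifs_free_hash(&desc);
 *	}
 */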

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
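
/*
 * For example, given unc = \\server\share, *h ends up pointing at the 's' of
 * "server" and *len is 6, i.e. just the hostname component with no slashes.
 */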

/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
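
/*
 * In other words: for a source string shorter than PATH_MAX the return value
 * is simply strlen(src) + 1; when strlen(src) >= PATH_MAX, strscpy()
 * truncates and the function returns PATH_MAX (a sketch of the behaviour,
 * derived from the code above rather than from separate documentation).
 */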

struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcon_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *t1 = sd->data, *t2;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	t2 = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&t2->tc_lock);
	if ((t1->ses == t2->ses ||
	     t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
	    t1->ses->server == t2->ses->server &&
	    t2->origin_fullpath &&
	    dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
		sd->sb = sb;
	spin_unlock(&t2->tc_lock);
}

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent automounts (DFS links)
			 * from expiring and then freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	pr_warn_once("%s: could not find dfs superblock\n", __func__);
	return ERR_PTR(-EINVAL);
}

static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
{
	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return ERR_PTR(-ENOENT);
	}
	spin_unlock(&tcon->tc_lock);
	return __cifs_get_super(tcon_super_cb, tcon);
}

void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}

#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
		    const char *host, size_t hostlen,
		    bool *result)
{
	struct sockaddr_storage ss;
	int rc;

	cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)hostlen, host);

	*result = false;

	rc = dns_resolve_name(server->dns_dom, host, hostlen,
			      (struct sockaddr *)&ss);
	if (rc < 0)
		return rc;

	spin_lock(&server->srv_lock);
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	spin_unlock(&server->srv_lock);
	cifs_dbg(FYI, "%s: ip addresses matched: %s\n", __func__, str_yes_no(*result));
	return 0;
}

int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
	int rc;

	kfree(cifs_sb->prepath);
	cifs_sb->prepath = NULL;

	if (prefix && *prefix) {
		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
		if (IS_ERR(cifs_sb->prepath)) {
			rc = PTR_ERR(cifs_sb->prepath);
			cifs_sb->prepath = NULL;
			return rc;
		}
		if (cifs_sb->prepath)
			convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	}

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	return 0;
}

/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon))
		return 0;

	spin_lock(&server->srv_lock);
	if (!server->leaf_fullpath) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
	 * to get a referral to figure out whether it is a DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end up filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		ses = CIFS_DFS_ROOT_SES(ses);
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;
}
#endif

int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int timeout = 10;
	int rc;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- this should be greater than the cifs
	 * socket timeout, which is 7 seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until the
	 * process is killed or the server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}