1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2011
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 */
8 #include <linux/fs.h>
9 #include <linux/net.h>
10 #include <linux/string.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/signal.h>
13 #include <linux/list.h>
14 #include <linux/wait.h>
15 #include <linux/slab.h>
16 #include <linux/pagemap.h>
17 #include <linux/ctype.h>
18 #include <linux/utsname.h>
19 #include <linux/mempool.h>
20 #include <linux/delay.h>
21 #include <linux/completion.h>
22 #include <linux/kthread.h>
23 #include <linux/pagevec.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/uuid.h>
27 #include <linux/uaccess.h>
28 #include <asm/processor.h>
29 #include <linux/inet.h>
30 #include <linux/module.h>
31 #include <keys/user-type.h>
32 #include <net/ipv6.h>
33 #include <linux/parser.h>
34 #include <linux/bvec.h>
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_unicode.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include "ntlmssp.h"
41 #include "nterr.h"
42 #include "rfc1002pdu.h"
43 #include "fscache.h"
44 #include "smb2proto.h"
45 #include "smbdirect.h"
46 #include "dns_resolve.h"
47 #ifdef CONFIG_CIFS_DFS_UPCALL
48 #include "dfs.h"
49 #include "dfs_cache.h"
50 #endif
51 #include "fs_context.h"
52 #include "cifs_swn.h"
53
/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE	(1 * HZ)
#define TLINK_IDLE_EXPIRE	(600 * HZ)

/* Drop the connection to not overload the server */
#define MAX_STATUS_IO_TIMEOUT   5

/* forward declarations for helpers defined later in this file */
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
static void cifs_prune_tlinks(struct work_struct *work);

static struct mchan_mount *mchan_mount_alloc(struct cifs_ses *ses);
static void mchan_mount_free(struct mchan_mount *mchan_mount);
static void mchan_mount_work_fn(struct work_struct *work);
69
70 /*
71 * Resolve hostname and set ip addr in tcp ses. Useful for hostnames that may
72 * get their ip addresses changed at some point.
73 *
74 * This should be called with server->srv_mutex held.
75 */
reconn_set_ipaddr_from_hostname(struct TCP_Server_Info * server)76 static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
77 {
78 struct sockaddr_storage ss;
79 int rc;
80
81 if (!server->hostname)
82 return -EINVAL;
83
84 /* if server hostname isn't populated, there's nothing to do here */
85 if (server->hostname[0] == '\0')
86 return 0;
87
88 spin_lock(&server->srv_lock);
89 ss = server->dstaddr;
90 spin_unlock(&server->srv_lock);
91
92 rc = dns_resolve_name(server->dns_dom, server->hostname,
93 strlen(server->hostname),
94 (struct sockaddr *)&ss);
95 if (!rc) {
96 spin_lock(&server->srv_lock);
97 memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr));
98 spin_unlock(&server->srv_lock);
99 }
100 return rc;
101 }
102
smb2_query_server_interfaces(struct work_struct * work)103 void smb2_query_server_interfaces(struct work_struct *work)
104 {
105 int rc;
106 int xid;
107 struct cifs_tcon *tcon = container_of(work,
108 struct cifs_tcon,
109 query_interfaces.work);
110 struct TCP_Server_Info *server = tcon->ses->server;
111
112 /*
113 * query server network interfaces, in case they change
114 */
115 if (!server->ops->query_server_interfaces)
116 return;
117
118 xid = get_xid();
119 rc = server->ops->query_server_interfaces(xid, tcon, false);
120 free_xid(xid);
121
122 if (rc)
123 cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
124 __func__, rc);
125
126 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
127 (SMB_INTERFACE_POLL_INTERVAL * HZ));
128 }
129
/*
 * Atomically flag @server for reconnect, unless the connection is
 * already being torn down (CifsExiting must never be overwritten).
 */
#define set_need_reco(server) \
do { \
	spin_lock(&server->srv_lock); \
	if (server->tcpStatus != CifsExiting) \
		server->tcpStatus = CifsNeedReconnect; \
	spin_unlock(&server->srv_lock); \
} while (0)
137
138 /*
139 * Update the tcpStatus for the server.
140 * This is used to signal the cifsd thread to call cifs_reconnect
141 * ONLY cifsd thread should call cifs_reconnect. For any other
142 * thread, use this function
143 *
144 * @server: the tcp ses for which reconnect is needed
145 * @all_channels: if this needs to be done for all channels
146 */
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
				bool all_channels)
{
	struct TCP_Server_Info *nserver;
	struct cifs_ses *ses;
	LIST_HEAD(reco);
	int i;

	/* if we need to signal just this channel */
	if (!all_channels) {
		set_need_reco(server);
		return;
	}

	/* secondary channels hang off their primary server's session list */
	if (SERVER_IS_CHAN(server))
		server = server->primary_server;
	scoped_guard(spinlock, &cifs_tcp_ses_lock) {
		set_need_reco(server);
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			/* skip sessions that are already being torn down */
			spin_lock(&ses->ses_lock);
			if (ses->ses_status == SES_EXITING) {
				spin_unlock(&ses->ses_lock);
				continue;
			}
			/*
			 * Collect the secondary channels (index >= 1) on a
			 * private list, taking a reference on each so they
			 * cannot go away before we mark them below.
			 */
			spin_lock(&ses->chan_lock);
			for (i = 1; i < ses->chan_count; i++) {
				nserver = ses->chans[i].server;
				if (!nserver)
					continue;
				nserver->srv_count++;
				list_add(&nserver->rlist, &reco);
			}
			spin_unlock(&ses->chan_lock);
			spin_unlock(&ses->ses_lock);
		}
	}

	/* mark the collected channels outside cifs_tcp_ses_lock, drop refs */
	list_for_each_entry_safe(server, nserver, &reco, rlist) {
		list_del_init(&server->rlist);
		set_need_reco(server);
		cifs_put_tcp_session(server, 0);
	}
}
191
192 /*
193 * Mark all sessions and tcons for reconnect.
194 * IMPORTANT: make sure that this gets called only from
195 * cifsd thread. For any other thread, use
196 * cifs_signal_cifsd_for_reconnect
197 *
198 * @server: the tcp ses for which reconnect is needed
199 * @server needs to be previously set to CifsNeedReconnect.
200 * @mark_smb_session: whether even sessions need to be marked
201 */
void
cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
				      bool mark_smb_session)
{
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses, *nses;
	struct cifs_tcon *tcon;

	/*
	 * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
	 * are not used until reconnected.
	 */
	cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);

	/* If server is a channel, select the primary channel */
	pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;

	/*
	 * if the server has been marked for termination, there is a
	 * chance that the remaining channels all need reconnect. To be
	 * on the safer side, mark the session and trees for reconnect
	 * for this scenario. This might cause a few redundant session
	 * setup and tree connect requests, but it is better than not doing
	 * a tree connect when needed, and all following requests failing
	 */
	if (server->terminate) {
		mark_smb_session = true;
		server = pserver;
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) {
		/* do not touch sessions that are already being torn down */
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_EXITING) {
			spin_unlock(&ses->ses_lock);
			continue;
		}
		spin_unlock(&ses->ses_lock);

		/* skip sessions that do not use @server as one of their channels */
		spin_lock(&ses->chan_lock);
		if (cifs_ses_get_chan_index(ses, server) ==
		    CIFS_INVAL_CHAN_INDEX) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		/*
		 * channel's bound interface is no longer active: rebind it;
		 * chan_lock is dropped across the call (cifs_chan_update_iface
		 * presumably takes other locks — NOTE(review): confirm)
		 */
		if (!cifs_chan_is_iface_active(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_chan_update_iface(ses, server);
			spin_lock(&ses->chan_lock);
		}

		/* channel already flagged and session need not be marked: done */
		if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}

		if (mark_smb_session)
			CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
		else
			cifs_chan_set_need_reconnect(ses, server);

		cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
			 __func__, ses->chans_need_reconnect);

		/* If all channels need reconnect, then tcon needs reconnect */
		if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
			spin_unlock(&ses->chan_lock);
			continue;
		}
		spin_unlock(&ses->chan_lock);

		spin_lock(&ses->ses_lock);
		ses->ses_status = SES_NEED_RECON;
		spin_unlock(&ses->ses_lock);

		/* every tree connection of this session must be re-established */
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			tcon->need_reconnect = true;
			spin_lock(&tcon->tc_lock);
			tcon->status = TID_NEED_RECON;
			spin_unlock(&tcon->tc_lock);

			cancel_delayed_work(&tcon->query_interfaces);
		}
		/* the hidden IPC tcon is not on tcon_list; mark it separately */
		if (ses->tcon_ipc) {
			ses->tcon_ipc->need_reconnect = true;
			spin_lock(&ses->tcon_ipc->tc_lock);
			ses->tcon_ipc->status = TID_NEED_RECON;
			spin_unlock(&ses->tcon_ipc->tc_lock);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
295
/*
 * Tear down the transport for @server: shut down and release the socket
 * (or the smbdirect connection), wipe per-connection crypto/session
 * state, and fail every in-flight mid with MID_RETRY_NEEDED so its
 * owner can resubmit after reconnect.
 */
static void
cifs_abort_connection(struct TCP_Server_Info *server)
{
	struct mid_q_entry *mid, *nmid;
	struct list_head retry_list;

	/* the dead connection's negotiated sizes no longer apply */
	server->maxBuf = 0;
	server->max_read = 0;

	/* do not want to be sending data on a socket we are freeing */
	cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
	cifs_server_lock(server);
	if (server->ssocket) {
		cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		kernel_sock_shutdown(server->ssocket, SHUT_WR);
		cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
			 server->ssocket->flags);
		sock_release(server->ssocket);
		server->ssocket = NULL;
	} else if (cifs_rdma_enabled(server)) {
		/* RDMA transport has its own teardown path */
		smbd_destroy(server);
	}
	/* reset per-connection state; a new session setup will rebuild it */
	server->sequence_number = 0;
	server->session_estab = false;
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;
	server->lstrp = jiffies;

	/* mark submitted MIDs for retry and issue callback */
	INIT_LIST_HEAD(&retry_list);
	cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
	spin_lock(&server->mid_queue_lock);
	list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
		smb_get_mid(mid);
		if (mid->mid_state == MID_REQUEST_SUBMITTED)
			mid->mid_state = MID_RETRY_NEEDED;
		list_move(&mid->qhead, &retry_list);
		mid->deleted_from_q = true;
	}
	spin_unlock(&server->mid_queue_lock);
	cifs_server_unlock(server);

	/* run the callbacks without holding any server locks */
	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
	list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
		list_del_init(&mid->qhead);
		mid_execute_callback(server, mid);
		release_mid(server, mid);
	}
}
347
cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info * server,int num_targets)348 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
349 {
350 spin_lock(&server->srv_lock);
351 server->nr_targets = num_targets;
352 if (server->tcpStatus == CifsExiting) {
353 /* the demux thread will exit normally next time through the loop */
354 spin_unlock(&server->srv_lock);
355 wake_up(&server->response_q);
356 return false;
357 }
358
359 cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
360 trace_smb3_reconnect(server->current_mid, server->conn_id,
361 server->hostname);
362 server->tcpStatus = CifsNeedReconnect;
363
364 spin_unlock(&server->srv_lock);
365 return true;
366 }
367
368 /*
369 * cifs tcp session reconnection
370 *
371 * mark tcp session as reconnecting so temporarily locked
372 * mark all smb sessions as reconnecting for tcp session
373 * reconnect tcp session
374 * wake up waiters on reconnection? - (not needed currently)
375 *
376 * if mark_smb_session is passed as true, unconditionally mark
377 * the smb session (and tcon) for reconnect as well. This value
378 * doesn't really matter for non-multichannel scenario.
379 *
380 */
static int __cifs_reconnect(struct TCP_Server_Info *server,
			    bool mark_smb_session, bool once)
{
	int rc = 0;

	/* bail out if the connection is already being torn down */
	if (!cifs_tcp_ses_needs_reconnect(server, 1))
		return 0;

	/*
	 * if smb session has been marked for reconnect, also reconnect all
	 * connections. This way, the other connections do not end up bad.
	 */
	if (mark_smb_session)
		cifs_signal_cifsd_for_reconnect(server, mark_smb_session);

	cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);

	cifs_abort_connection(server);

	/* keep retrying until the socket is back up or we are told to stop */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		if (!cifs_swn_set_server_dstaddr(server) &&
		    !SERVER_IS_CHAN(server)) {
			/* resolve the hostname again to make sure that IP address is up-to-date */
			rc = reconn_set_ipaddr_from_hostname(server);
			cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
		}

		if (cifs_rdma_enabled(server))
			rc = smbd_reconnect(server);
		else
			rc = generic_ip_connect(server);
		if (rc) {
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			/* If was asked to reconnect only once, do not try it more times */
			if (once)
				break;
			msleep(3000);
		} else {
			/* socket is up again: request a fresh negotiate */
			atomic_inc(&tcpSesReconnectCount);
			set_credits(server, 1);
			spin_lock(&server->srv_lock);
			if (server->tcpStatus != CifsExiting)
				server->tcpStatus = CifsNeedNegotiate;
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			cifs_queue_server_reconn(server);
		}
	} while (server->tcpStatus == CifsNeedReconnect);

	/* restart the echo worker once we are ready to negotiate */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}
443
444 #ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Reconnect the socket to @target (a DFS target string or the current
 * hostname).  Caller must hold the server mutex.  Returns 0 on success
 * or a negative errno from the connect attempt.
 */
static int __reconnect_target_locked(struct TCP_Server_Info *server,
				     const char *target)
{
	int rc;
	char *hostname;

	if (!cifs_swn_set_server_dstaddr(server)) {
		/* swap the server hostname over to the new target, if it differs */
		if (server->hostname != target) {
			hostname = extract_hostname(target);
			if (!IS_ERR(hostname)) {
				spin_lock(&server->srv_lock);
				kfree(server->hostname);
				server->hostname = hostname;
				spin_unlock(&server->srv_lock);
			} else {
				/* keep the previous hostname on extraction failure */
				cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
					 __func__, PTR_ERR(hostname));
				cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
					 server->hostname);
			}
		}
		/* resolve the hostname again to make sure that IP address is up-to-date. */
		rc = reconn_set_ipaddr_from_hostname(server);
		cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
	}
	/* Reconnect the socket */
	if (cifs_rdma_enabled(server))
		rc = smbd_reconnect(server);
	else
		rc = generic_ip_connect(server);

	return rc;
}
478
/*
 * Try to reconnect to each DFS target in @tl in turn; fall back to the
 * current hostname when the target list is empty.  On success,
 * *target_hint is set to the iterator of the target that worked so the
 * caller can update the DFS cache hint.
 */
static int reconnect_target_locked(struct TCP_Server_Info *server,
				   struct dfs_cache_tgt_list *tl,
				   struct dfs_cache_tgt_iterator **target_hint)
{
	struct dfs_cache_tgt_iterator *tit;
	int rc;

	*target_hint = NULL;

	/* If dfs target list is empty, then reconnect to last server */
	tit = dfs_cache_get_tgt_iterator(tl);
	if (!tit)
		return __reconnect_target_locked(server, server->hostname);

	/* Otherwise, try every dfs target in @tl */
	do {
		const char *target = dfs_cache_get_tgt_name(tit);

		/* stop if something else already moved us out of reconnect state */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return -ECONNRESET;
		}
		spin_unlock(&server->srv_lock);
		rc = __reconnect_target_locked(server, target);
		if (!rc) {
			*target_hint = tit;
			break;
		}
	} while ((tit = dfs_cache_get_next_tgt(tl, tit)));
	return rc;
}
511
/*
 * Reconnect a server that was mounted through a DFS referral: cycle
 * through the cached referral targets until one accepts the connection,
 * then record the winning target as the cache hint.
 */
static int reconnect_dfs_server(struct TCP_Server_Info *server)
{
	struct dfs_cache_tgt_iterator *target_hint = NULL;
	/* +1 skips the leading separator of the leaf full path */
	const char *ref_path = server->leaf_fullpath + 1;
	DFS_CACHE_TGT_LIST(tl);
	int num_targets = 0;
	int rc = 0;

	/*
	 * Determine the number of dfs targets the referral path in @cifs_sb resolves to.
	 *
	 * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
	 * targets (server->nr_targets). It's also possible that the cached referral was cleared
	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
	 * refreshing the referral, so, in this case, default it to 1.
	 */
	if (!dfs_cache_noreq_find(ref_path, NULL, &tl))
		num_targets = dfs_cache_get_nr_tgts(&tl);
	if (!num_targets)
		num_targets = 1;

	if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
		return 0;

	/*
	 * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
	 * different server or share during failover. It could be improved by adding some logic to
	 * only do that in case it connects to a different server or share, though.
	 */
	cifs_mark_tcp_ses_conns_for_reconnect(server, true);

	cifs_abort_connection(server);

	/* retry the whole target list until a connection is established */
	do {
		try_to_freeze();
		cifs_server_lock(server);

		rc = reconnect_target_locked(server, &tl, &target_hint);
		if (rc) {
			/* Failed to reconnect socket */
			cifs_server_unlock(server);
			cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
			msleep(3000);
			continue;
		}
		/*
		 * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
		 * process waiting for reconnect will know it needs to re-establish session and tcon
		 * through the reconnected target server.
		 */
		atomic_inc(&tcpSesReconnectCount);
		set_credits(server, 1);
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsExiting)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		cifs_queue_server_reconn(server);
	} while (server->tcpStatus == CifsNeedReconnect);

	/* remember which target worked so future lookups try it first */
	dfs_cache_noreq_update_tgthint(ref_path, target_hint);
	dfs_cache_free_tgts(&tl);

	/* Need to set up echo worker again once connection has been established */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsNeedNegotiate)
		mod_delayed_work(cifsiod_wq, &server->echo, 0);
	spin_unlock(&server->srv_lock);

	wake_up(&server->response_q);
	return rc;
}
585
586 static int
_cifs_reconnect(struct TCP_Server_Info * server,bool mark_smb_session,bool once)587 _cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session, bool once)
588 {
589 if (!server->leaf_fullpath)
590 return __cifs_reconnect(server, mark_smb_session, once);
591 return reconnect_dfs_server(server);
592 }
593 #else
594 static int
_cifs_reconnect(struct TCP_Server_Info * server,bool mark_smb_session,bool once)595 _cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session, bool once)
596 {
597 return __cifs_reconnect(server, mark_smb_session, once);
598 }
599 #endif
600
601 int
cifs_reconnect(struct TCP_Server_Info * server,bool mark_smb_session)602 cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
603 {
604 return _cifs_reconnect(server, mark_smb_session, false);
605 }
606
607 static int
cifs_reconnect_once(struct TCP_Server_Info * server)608 cifs_reconnect_once(struct TCP_Server_Info *server)
609 {
610 return _cifs_reconnect(server, true, true);
611 }
612
613 static void
cifs_echo_request(struct work_struct * work)614 cifs_echo_request(struct work_struct *work)
615 {
616 int rc;
617 struct TCP_Server_Info *server = container_of(work,
618 struct TCP_Server_Info, echo.work);
619
620 /*
621 * We cannot send an echo if it is disabled.
622 * Also, no need to ping if we got a response recently.
623 */
624
625 if (server->tcpStatus == CifsNeedReconnect ||
626 server->tcpStatus == CifsExiting ||
627 server->tcpStatus == CifsNew ||
628 (server->ops->can_echo && !server->ops->can_echo(server)) ||
629 time_before(jiffies, server->lstrp + server->echo_interval - HZ))
630 goto requeue_echo;
631
632 rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
633 cifs_server_dbg(FYI, "send echo request: rc = %d\n", rc);
634
635 /* Check witness registrations */
636 cifs_swn_check();
637
638 requeue_echo:
639 queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval);
640 }
641
642 static bool
allocate_buffers(struct TCP_Server_Info * server)643 allocate_buffers(struct TCP_Server_Info *server)
644 {
645 if (!server->bigbuf) {
646 server->bigbuf = (char *)cifs_buf_get();
647 if (!server->bigbuf) {
648 cifs_server_dbg(VFS, "No memory for large SMB response\n");
649 msleep(3000);
650 /* retry will check if exiting */
651 return false;
652 }
653 } else if (server->large_buf) {
654 /* we are reusing a dirty large buf, clear its start */
655 memset(server->bigbuf, 0, HEADER_SIZE(server));
656 }
657
658 if (!server->smallbuf) {
659 server->smallbuf = (char *)cifs_small_buf_get();
660 if (!server->smallbuf) {
661 cifs_server_dbg(VFS, "No memory for SMB response\n");
662 msleep(1000);
663 /* retry will check if exiting */
664 return false;
665 }
666 /* beginning of smb buffer is cleared in our buf_get */
667 } else {
668 /* if existing small buf clear beginning */
669 memset(server->smallbuf, 0, HEADER_SIZE(server));
670 }
671
672 return true;
673 }
674
/*
 * Decide whether the server has stopped responding and, if so, trigger
 * a reconnect.  Returns true when a reconnect was initiated, telling
 * the caller to abort its current socket read.
 */
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
	/*
	 * If we're in the process of mounting a share or reconnecting a session
	 * and the server abruptly shut down (e.g. socket wasn't closed, packet
	 * had been ACK'ed but no SMB response), don't wait longer than 20s from
	 * when negotiate actually started.
	 */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus == CifsInNegotiate &&
	    time_after(jiffies, server->neg_start + 20 * HZ)) {
		spin_unlock(&server->srv_lock);
		cifs_reconnect(server, false);
		return true;
	}
	/*
	 * We need to wait 3 echo intervals to make sure we handle such
	 * situations right:
	 * 1s client sends a normal SMB request
	 * 2s client gets a response
	 * 30s echo workqueue job pops, and decides we got a response recently
	 * and don't need to send another
	 * ...
	 * 65s kernel_recvmsg times out, and we see that we haven't gotten
	 * a response in >60s.
	 */
	if ((server->tcpStatus == CifsGood ||
	     server->tcpStatus == CifsNeedNegotiate) &&
	    (!server->ops->can_echo || server->ops->can_echo(server)) &&
	    time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
		spin_unlock(&server->srv_lock);
		cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
			 (3 * server->echo_interval) / HZ);
		cifs_reconnect(server, false);
		return true;
	}
	spin_unlock(&server->srv_lock);

	return false;
}
716
717 static inline bool
zero_credits(struct TCP_Server_Info * server)718 zero_credits(struct TCP_Server_Info *server)
719 {
720 int val;
721
722 spin_lock(&server->req_lock);
723 val = server->credits + server->echo_credits + server->oplock_credits;
724 if (server->in_flight == 0 && val == 0) {
725 spin_unlock(&server->req_lock);
726 return true;
727 }
728 spin_unlock(&server->req_lock);
729 return false;
730 }
731
/*
 * Pull data from the transport (TCP socket or smbdirect) until
 * @smb_msg's iterator is exhausted.  Transient errors are retried after
 * a brief sleep; fatal conditions trigger a reconnect.  Returns the
 * number of bytes read or a negative errno.
 */
static int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
{
	int length = 0;
	int total_read;

	for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
		try_to_freeze();

		/* reconnect if no credits and no requests in flight */
		if (zero_credits(server)) {
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}

		if (server_unresponsive(server))
			return -ECONNABORTED;
		if (cifs_rdma_enabled(server) && server->smbd_conn)
			length = smbd_recv(server->smbd_conn, smb_msg);
		else
			length = sock_recvmsg(server->ssocket, smb_msg, 0);

		/* re-check connection state after each (possibly blocking) read */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsExiting) {
			spin_unlock(&server->srv_lock);
			return -ESHUTDOWN;
		}

		if (server->tcpStatus == CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
		spin_unlock(&server->srv_lock);

		if (length == -ERESTARTSYS ||
		    length == -EAGAIN ||
		    length == -EINTR) {
			/*
			 * Minimum sleep to prevent looping, allowing socket
			 * to clear and app threads to set tcpStatus
			 * CifsNeedReconnect if server hung.
			 */
			usleep_range(1000, 2000);
			length = 0;
			continue;
		}

		/* zero-length read means the peer closed the connection */
		if (length <= 0) {
			cifs_dbg(FYI, "Received no data or error: %d\n", length);
			cifs_reconnect(server, false);
			return -ECONNABORTED;
		}
	}
	return total_read;
}
788
789 int
cifs_read_from_socket(struct TCP_Server_Info * server,char * buf,unsigned int to_read)790 cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
791 unsigned int to_read)
792 {
793 struct msghdr smb_msg = {};
794 struct kvec iov = {.iov_base = buf, .iov_len = to_read};
795
796 iov_iter_kvec(&smb_msg.msg_iter, ITER_DEST, &iov, 1, to_read);
797
798 return cifs_readv_from_socket(server, &smb_msg);
799 }
800
801 ssize_t
cifs_discard_from_socket(struct TCP_Server_Info * server,size_t to_read)802 cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
803 {
804 struct msghdr smb_msg = {};
805
806 /*
807 * iov_iter_discard already sets smb_msg.type and count and iov_offset
808 * and cifs_readv_from_socket sets msg_control and msg_controllen
809 * so little to initialize in struct msghdr
810 */
811 iov_iter_discard(&smb_msg.msg_iter, ITER_DEST, to_read);
812
813 return cifs_readv_from_socket(server, &smb_msg);
814 }
815
816 int
cifs_read_iter_from_socket(struct TCP_Server_Info * server,struct iov_iter * iter,unsigned int to_read)817 cifs_read_iter_from_socket(struct TCP_Server_Info *server, struct iov_iter *iter,
818 unsigned int to_read)
819 {
820 struct msghdr smb_msg = { .msg_iter = *iter };
821
822 iov_iter_truncate(&smb_msg.msg_iter, to_read);
823 return cifs_readv_from_socket(server, &smb_msg);
824 }
825
/*
 * Inspect the RFC 1002 type byte that prefixes every received frame.
 * Returns true only for a regular session message (an actual SMB
 * response); all other RFC 1002 packet types are consumed here, some of
 * them by triggering a reconnect.
 */
static bool
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{
	/*
	 * The first byte big endian of the length field,
	 * is actually not part of the length but the type
	 * with the most common, zero, as regular data.
	 */
	switch (type) {
	case RFC1002_SESSION_MESSAGE:
		/* Regular SMB response */
		return true;
	case RFC1002_SESSION_KEEP_ALIVE:
		/*
		 * RFC 1002 session keep alive can sent by the server only when
		 * we established a RFC 1002 session. But Samba servers send
		 * RFC 1002 session keep alive also over port 445 on which
		 * RFC 1002 session is not established.
		 */
		cifs_dbg(FYI, "RFC 1002 session keep alive\n");
		break;
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		/*
		 * RFC 1002 positive session response cannot be returned
		 * for SMB request. RFC 1002 session response is handled
		 * exclusively in ip_rfc1001_connect() function.
		 */
		cifs_server_dbg(VFS, "RFC 1002 positive session response (unexpected)\n");
		cifs_reconnect(server, true);
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/*
		 * We get this from Windows 98 instead of an error on
		 * SMB negprot response, when we have not established
		 * RFC 1002 session (which means ip_rfc1001_connect()
		 * was skipped). Note that same still happens with
		 * Windows Server 2022 when connecting via port 139.
		 * So for this case when mount option -o nonbsessinit
		 * was not specified, try to reconnect with establishing
		 * RFC 1002 session. If new socket establishment with
		 * RFC 1002 session was successful then return to the
		 * mid's caller -EAGAIN, so it can retry the request.
		 */
		if (!cifs_rdma_enabled(server) &&
		    server->tcpStatus == CifsInNegotiate &&
		    !server->with_rfc1001 &&
		    server->rfc1001_sessinit != 0) {
			int rc, mid_rc;
			struct mid_q_entry *mid, *nmid;
			LIST_HEAD(dispose_list);

			cifs_dbg(FYI, "RFC 1002 negative session response during SMB Negotiate, retrying with NetBIOS session\n");

			/*
			 * Before reconnect, delete all pending mids for this
			 * server, so reconnect would not signal connection
			 * aborted error to mid's callbacks. Note that for this
			 * server there should be exactly one pending mid
			 * corresponding to SMB1/SMB2 Negotiate packet.
			 */
			spin_lock(&server->mid_queue_lock);
			list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
				smb_get_mid(mid);
				list_move(&mid->qhead, &dispose_list);
				mid->deleted_from_q = true;
			}
			spin_unlock(&server->mid_queue_lock);

			/* Now try to reconnect once with NetBIOS session. */
			server->with_rfc1001 = true;
			rc = cifs_reconnect_once(server);

			/*
			 * If reconnect was successful then indicate -EAGAIN
			 * to mid's caller. If reconnect failed with -EAGAIN
			 * then mask it as -EHOSTDOWN, so mid's caller would
			 * know that it failed.
			 */
			if (rc == 0)
				mid_rc = -EAGAIN;
			else if (rc == -EAGAIN)
				mid_rc = -EHOSTDOWN;
			else
				mid_rc = rc;

			/*
			 * After reconnect (either successful or unsuccessful)
			 * deliver reconnect status to mid's caller via mid's
			 * callback. Use MID_RC state which indicates that the
			 * return code should be read from mid_rc member.
			 */
			list_for_each_entry_safe(mid, nmid, &dispose_list, qhead) {
				list_del_init(&mid->qhead);
				mid->mid_rc = mid_rc;
				mid->mid_state = MID_RC;
				mid_execute_callback(server, mid);
				release_mid(server, mid);
			}

			/*
			 * If reconnect failed then wait two seconds. In most
			 * cases we were been called from the mount context and
			 * delivered failure to mid's callback will stop this
			 * receiver task thread and fails the mount process.
			 * So wait two seconds to prevent another reconnect
			 * in this task thread, which would be useless as the
			 * mount context will fail at all.
			 */
			if (rc != 0)
				msleep(2000);
		} else {
			cifs_server_dbg(VFS, "RFC 1002 negative session response (unexpected)\n");
			cifs_reconnect(server, true);
		}
		break;
	case RFC1002_RETARGET_SESSION_RESPONSE:
		cifs_server_dbg(VFS, "RFC 1002 retarget session response (unexpected)\n");
		cifs_reconnect(server, true);
		break;
	default:
		cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
		cifs_reconnect(server, true);
	}

	return false;
}
952
/*
 * Mark @mid's response as received (or malformed) and remove it from
 * the server's pending queue so no one else will process it.
 */
void
dequeue_mid(struct TCP_Server_Info *server, struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
	mid->when_received = jiffies;
#endif
	spin_lock(&server->mid_queue_lock);
	if (!malformed)
		mid->mid_state = MID_RESPONSE_RECEIVED;
	else
		mid->mid_state = MID_RESPONSE_MALFORMED;
	/*
	 * Trying to handle/dequeue a mid after the send_recv()
	 * function has finished processing it is a bug.
	 */
	if (mid->deleted_from_q == true) {
		spin_unlock(&server->mid_queue_lock);
		pr_warn_once("trying to dequeue a deleted mid\n");
	} else {
		list_del_init(&mid->qhead);
		mid->deleted_from_q = true;
		spin_unlock(&server->mid_queue_lock);
	}
}
977
978 static unsigned int
smb2_get_credits_from_hdr(char * buffer,struct TCP_Server_Info * server)979 smb2_get_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
980 {
981 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
982
983 /*
984 * SMB1 does not use credits.
985 */
986 if (is_smb1(server))
987 return 0;
988
989 return le16_to_cpu(shdr->CreditRequest);
990 }
991
992 static void
handle_mid(struct mid_q_entry * mid,struct TCP_Server_Info * server,char * buf,int malformed)993 handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
994 char *buf, int malformed)
995 {
996 if (server->ops->check_trans2 &&
997 server->ops->check_trans2(mid, server, buf, malformed))
998 return;
999 mid->credits_received = smb2_get_credits_from_hdr(buf, server);
1000 mid->resp_buf = buf;
1001 mid->large_buf = server->large_buf;
1002 /* Was previous buf put in mpx struct for multi-rsp? */
1003 if (!mid->multiRsp) {
1004 /* smb buffer will be freed by user thread */
1005 if (server->large_buf)
1006 server->bigbuf = NULL;
1007 else
1008 server->smallbuf = NULL;
1009 }
1010 dequeue_mid(server, mid, malformed);
1011 }
1012
1013 int
cifs_enable_signing(struct TCP_Server_Info * server,bool mnt_sign_required)1014 cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
1015 {
1016 bool srv_sign_required = server->sec_mode & server->vals->signing_required;
1017 bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled;
1018 bool mnt_sign_enabled;
1019
1020 /*
1021 * Is signing required by mnt options? If not then check
1022 * global_secflags to see if it is there.
1023 */
1024 if (!mnt_sign_required)
1025 mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) ==
1026 CIFSSEC_MUST_SIGN);
1027
1028 /*
1029 * If signing is required then it's automatically enabled too,
1030 * otherwise, check to see if the secflags allow it.
1031 */
1032 mnt_sign_enabled = mnt_sign_required ? mnt_sign_required :
1033 (global_secflags & CIFSSEC_MAY_SIGN);
1034
1035 /* If server requires signing, does client allow it? */
1036 if (srv_sign_required) {
1037 if (!mnt_sign_enabled) {
1038 cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
1039 return -EOPNOTSUPP;
1040 }
1041 server->sign = true;
1042 }
1043
1044 /* If client requires signing, does server allow it? */
1045 if (mnt_sign_required) {
1046 if (!srv_sign_enabled) {
1047 cifs_dbg(VFS, "Server does not support signing!\n");
1048 return -EOPNOTSUPP;
1049 }
1050 server->sign = true;
1051 }
1052
1053 if (cifs_rdma_enabled(server) && server->sign)
1054 cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
1055
1056 return 0;
1057 }
1058
/*
 * Final teardown of a TCP_Server_Info, run by the demultiplex thread on
 * exit: unlink the server, cancel echo work, wake all waiters so blocked
 * requests see CifsExiting, fail any still-pending mids via their
 * callbacks, release the socket, then free the structure.
 */
static noinline_for_stack void
clean_demultiplex_info(struct TCP_Server_Info *server)
{
	int length;

	/* take it off the list, if it's not already */
	spin_lock(&server->srv_lock);
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&server->srv_lock);

	cancel_delayed_work_sync(&server->echo);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);
	wake_up_all(&server->response_q);

	/* check if we have blocked requests that need to free */
	spin_lock(&server->req_lock);
	if (server->credits <= 0)
		server->credits = 1;
	spin_unlock(&server->req_lock);
	/*
	 * Although there should not be any requests blocked on this queue it
	 * can not hurt to be paranoid and try to wake up requests that may
	 * haven been blocked when more than 50 at time were on the wire to the
	 * same server - they now will see the session is in exit state and get
	 * out of SendReceive.
	 */
	wake_up_all(&server->request_q);
	/* give those requests time to exit */
	msleep(125);
	if (cifs_rdma_enabled(server))
		smbd_destroy(server);
	if (server->ssocket) {
		sock_release(server->ssocket);
		server->ssocket = NULL;
	}

	if (!list_empty(&server->pending_mid_q)) {
		struct mid_q_entry *mid_entry;
		struct list_head *tmp, *tmp2;
		LIST_HEAD(dispose_list);

		/*
		 * Move pending mids to a private list under the lock, then
		 * issue callbacks without the lock held (callbacks may sleep
		 * or take other locks).
		 */
		spin_lock(&server->mid_queue_lock);
		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
			smb_get_mid(mid_entry);
			mid_entry->mid_state = MID_SHUTDOWN;
			list_move(&mid_entry->qhead, &dispose_list);
			mid_entry->deleted_from_q = true;
		}
		spin_unlock(&server->mid_queue_lock);

		/* now walk dispose list and issue callbacks */
		list_for_each_safe(tmp, tmp2, &dispose_list) {
			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
			cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid);
			list_del_init(&mid_entry->qhead);
			mid_execute_callback(server, mid_entry);
			release_mid(server, mid_entry);
		}
		/* 1/8th of sec is more than enough time for them to exit */
		msleep(125);
	}

	if (!list_empty(&server->pending_mid_q)) {
		/*
		 * mpx threads have not exited yet give them at least the smb
		 * send timeout time for long ops.
		 *
		 * Due to delays on oplock break requests, we need to wait at
		 * least 45 seconds before giving up on a request getting a
		 * response and going ahead and killing cifsd.
		 */
		cifs_dbg(FYI, "Wait for exit from demultiplex thread\n");
		msleep(46000);
		/*
		 * If threads still have not exited they are probably never
		 * coming home not much else we can do but free the memory.
		 */
	}

	put_net(cifs_net_ns(server));
	kfree(server->leaf_fullpath);
	kfree(server->hostname);
	kfree(server);

	/* shrink the request mempool now that one connection is gone */
	length = atomic_dec_return(&tcpSesAllocCount);
	if (length > 0)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
}
1152
/*
 * Default receive path: the first MID_HEADER_SIZE bytes are already in
 * server->smallbuf; validate the advertised PDU length, switch to the
 * large buffer if needed, read the remainder of the PDU from the socket
 * and hand the complete message to cifs_handle_standard().
 */
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	int length;
	char *buf = server->smallbuf;
	unsigned int pdu_length = server->pdu_size;

	/* make sure this will fit in a large buffer */
	if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
		cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
		cifs_reconnect(server, true);
		return -ECONNABORTED;
	}

	/* switch to large buffer if too big for a small one */
	if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE) {
		server->large_buf = true;
		/* carry over the bytes already read into the small buffer */
		memcpy(server->bigbuf, buf, server->total_read);
		buf = server->bigbuf;
	}

	/*
	 * now read the rest; note the destination offset (HEADER_SIZE - 1)
	 * and the remaining byte count (pdu_length - MID_HEADER_SIZE) are
	 * intentionally computed from different header macros
	 */
	length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
				       pdu_length - MID_HEADER_SIZE(server));

	if (length < 0)
		return length;
	server->total_read += length;

	dump_smb(buf, server->total_read);

	return cifs_handle_standard(server, mid);
}
1186
1187 int
cifs_handle_standard(struct TCP_Server_Info * server,struct mid_q_entry * mid)1188 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1189 {
1190 char *buf = server->large_buf ? server->bigbuf : server->smallbuf;
1191 int rc;
1192
1193 /*
1194 * We know that we received enough to get to the MID as we
1195 * checked the pdu_length earlier. Now check to see
1196 * if the rest of the header is OK.
1197 *
1198 * 48 bytes is enough to display the header and a little bit
1199 * into the payload for debugging purposes.
1200 */
1201 rc = server->ops->check_message(buf, server->pdu_size,
1202 server->total_read, server);
1203 if (rc)
1204 cifs_dump_mem("Bad SMB: ", buf,
1205 min_t(unsigned int, server->total_read, 48));
1206
1207 if (server->ops->is_session_expired &&
1208 server->ops->is_session_expired(buf)) {
1209 cifs_reconnect(server, true);
1210 return -1;
1211 }
1212
1213 if (server->ops->is_status_pending &&
1214 server->ops->is_status_pending(buf, server))
1215 return -1;
1216
1217 if (!mid)
1218 return rc;
1219
1220 handle_mid(mid, server, buf, rc);
1221 return 0;
1222 }
1223
1224 static void
smb2_add_credits_from_hdr(char * buffer,struct TCP_Server_Info * server)1225 smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
1226 {
1227 struct smb2_hdr *shdr = (struct smb2_hdr *)buffer;
1228 int scredits, in_flight;
1229
1230 /*
1231 * SMB1 does not use credits.
1232 */
1233 if (is_smb1(server))
1234 return;
1235
1236 if (shdr->CreditRequest) {
1237 spin_lock(&server->req_lock);
1238 server->credits += le16_to_cpu(shdr->CreditRequest);
1239 scredits = server->credits;
1240 in_flight = server->in_flight;
1241 spin_unlock(&server->req_lock);
1242 wake_up(&server->request_q);
1243
1244 trace_smb3_hdr_credits(server->current_mid,
1245 server->conn_id, server->hostname, scredits,
1246 le16_to_cpu(shdr->CreditRequest), in_flight);
1247 cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
1248 __func__, le16_to_cpu(shdr->CreditRequest),
1249 scredits);
1250 }
1251 }
1252
1253
/*
 * Per-connection receiver thread ("cifsd"). Reads PDUs from the server
 * socket, matches them to pending mids, and dispatches callbacks. Runs
 * until tcpStatus becomes CifsExiting, then tears the connection down
 * via clean_demultiplex_info().
 */
static int
cifs_demultiplex_thread(void *p)
{
	int i, num_mids, length;
	struct TCP_Server_Info *server = p;
	unsigned int pdu_length;
	unsigned int next_offset;
	char *buf = NULL;
	struct task_struct *task_to_wake = NULL;
	struct mid_q_entry *mids[MAX_COMPOUND];
	char *bufs[MAX_COMPOUND];
	unsigned int noreclaim_flag, num_io_timeout = 0;
	bool pending_reconnect = false;

	/* avoid reclaim recursing back into this filesystem */
	noreclaim_flag = memalloc_noreclaim_save();
	cifs_dbg(FYI, "Demultiplex PID: %d\n", task_pid_nr(current));

	length = atomic_inc_return(&tcpSesAllocCount);
	if (length > 1)
		mempool_resize(cifs_req_poolp, length + cifs_min_rcv);

	set_freezable();
	allow_kernel_signal(SIGKILL);
	while (server->tcpStatus != CifsExiting) {
		if (try_to_freeze())
			continue;

		if (!allocate_buffers(server))
			continue;

		server->large_buf = false;
		buf = server->smallbuf;
		pdu_length = 4; /* enough to get RFC1001 header */

		length = cifs_read_from_socket(server, buf, pdu_length);
		if (length < 0)
			continue;

		server->total_read = 0;

		/*
		 * The right amount was read from socket - 4 bytes,
		 * so we can now interpret the length field.
		 */
		pdu_length = be32_to_cpup(((__be32 *)buf)) & 0xffffff;

		cifs_dbg(FYI, "RFC1002 header 0x%x\n", pdu_length);
		if (!is_smb_response(server, buf[0]))
			continue;
next_pdu:
		server->pdu_size = pdu_length;

		/* make sure we have enough to get to the MID */
		if (server->pdu_size < MID_HEADER_SIZE(server)) {
			cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
					server->pdu_size);
			cifs_reconnect(server, true);
			continue;
		}

		/* read down to the MID */
		length = cifs_read_from_socket(server, buf,
					       MID_HEADER_SIZE(server));
		if (length < 0)
			continue;
		server->total_read += length;

		/*
		 * SMB3.1.1 compound PDUs may be chained: next_header tells us
		 * where the next one starts so each can be handled in turn.
		 */
		if (server->ops->next_header) {
			if (server->ops->next_header(server, buf, &next_offset)) {
				cifs_dbg(VFS, "%s: malformed response (next_offset=%u)\n",
					 __func__, next_offset);
				cifs_reconnect(server, true);
				continue;
			}
			if (next_offset)
				server->pdu_size = next_offset;
		}

		memset(mids, 0, sizeof(mids));
		memset(bufs, 0, sizeof(bufs));
		num_mids = 0;

		/* encrypted responses take the transform-aware receive path */
		if (server->ops->is_transform_hdr &&
		    server->ops->receive_transform &&
		    server->ops->is_transform_hdr(buf)) {
			length = server->ops->receive_transform(server,
								mids,
								bufs,
								&num_mids);
		} else {
			mids[0] = server->ops->find_mid(server, buf);
			bufs[0] = buf;
			num_mids = 1;

			if (mids[0])
				mids[0]->response_pdu_len = pdu_length;
			if (!mids[0] || !mids[0]->receive)
				length = standard_receive3(server, mids[0]);
			else
				length = mids[0]->receive(server, mids[0]);
		}

		if (length < 0) {
			for (i = 0; i < num_mids; i++)
				if (mids[i])
					release_mid(server, mids[i]);
			continue;
		}

		/*
		 * Too many STATUS_IO_TIMEOUT replies in a row suggests a
		 * wedged server; schedule a reconnect (done after the mids
		 * below have been processed).
		 */
		if (server->ops->is_status_io_timeout &&
		    server->ops->is_status_io_timeout(buf)) {
			num_io_timeout++;
			if (num_io_timeout > MAX_STATUS_IO_TIMEOUT) {
				cifs_server_dbg(VFS,
						"Number of request timeouts exceeded %d. Reconnecting",
						MAX_STATUS_IO_TIMEOUT);

				pending_reconnect = true;
				num_io_timeout = 0;
			}
		}

		/* note the time of the last response from the server */
		server->lstrp = jiffies;

		/* deliver each decoded response to its waiter or handler */
		for (i = 0; i < num_mids; i++) {
			if (mids[i] != NULL) {
				mids[i]->resp_buf_size = server->pdu_size;

				if (bufs[i] != NULL) {
					if (server->ops->is_network_name_deleted &&
					    server->ops->is_network_name_deleted(bufs[i],
										 server)) {
						cifs_server_dbg(FYI,
								"Share deleted. Reconnect needed");
					}
				}

				if (!mids[i]->multiRsp || mids[i]->multiEnd)
					mid_execute_callback(server, mids[i]);

				release_mid(server, mids[i]);
			} else if (server->ops->is_oplock_break &&
				   server->ops->is_oplock_break(bufs[i],
								server)) {
				smb2_add_credits_from_hdr(bufs[i], server);
				cifs_dbg(FYI, "Received oplock break\n");
			} else {
				/* unsolicited frame with no matching mid */
				cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
						atomic_read(&mid_count));
				cifs_dump_mem("Received Data is: ", bufs[i],
					      HEADER_SIZE(server));
				smb2_add_credits_from_hdr(bufs[i], server);
#ifdef CONFIG_CIFS_DEBUG2
				if (server->ops->dump_detail)
					server->ops->dump_detail(bufs[i], pdu_length,
								 server);
				cifs_dump_mids(server);
#endif /* CIFS_DEBUG2 */
			}
		}

		/* more compounded PDUs remain in this read: loop back */
		if (pdu_length > server->pdu_size) {
			if (!allocate_buffers(server))
				continue;
			pdu_length -= server->pdu_size;
			server->total_read = 0;
			server->large_buf = false;
			buf = server->smallbuf;
			goto next_pdu;
		}

		/* do this reconnect at the very end after processing all MIDs */
		if (pending_reconnect)
			cifs_reconnect(server, true);

	} /* end while !EXITING */

	/* buffer usually freed in free_mid - need to free it here on exit */
	cifs_buf_release(server->bigbuf);
	if (server->smallbuf) /* no sense logging a debug message if NULL */
		cifs_small_buf_release(server->smallbuf);

	task_to_wake = xchg(&server->tsk, NULL);
	clean_demultiplex_info(server);

	/* if server->tsk was NULL then wait for a signal before exiting */
	if (!task_to_wake) {
		set_current_state(TASK_INTERRUPTIBLE);
		while (!signal_pending(current)) {
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		}
		set_current_state(TASK_RUNNING);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	module_put_and_kthread_exit(0);
}
1454
/*
 * Three-way comparison of two socket addresses, ordering by address
 * family first (AF_UNSPEC < AF_INET < AF_INET6) and then by a raw
 * byte comparison of the family-specific structure.
 */
int
cifs_ipaddr_cmp(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
	switch (srcaddr->sa_family) {
	case AF_UNSPEC:
		if (rhs->sa_family == AF_UNSPEC)
			return 0;
		if (rhs->sa_family == AF_INET || rhs->sa_family == AF_INET6)
			return 1;
		return -1;
	case AF_INET:
		if (rhs->sa_family == AF_INET)
			return memcmp(srcaddr, rhs,
				      sizeof(struct sockaddr_in));
		if (rhs->sa_family == AF_INET6)
			return 1;
		return -1;	/* AF_UNSPEC and unknown families sort first */
	case AF_INET6:
		if (rhs->sa_family == AF_INET6)
			return memcmp(srcaddr, rhs,
				      sizeof(struct sockaddr_in6));
		return -1;	/* everything else sorts before AF_INET6 */
	default:
		return -1; /* don't expect to be here */
	}
}
1504
1505 /*
1506 * Returns true if srcaddr isn't specified and rhs isn't specified, or
1507 * if srcaddr is specified and matches the IP address of the rhs argument
1508 */
1509 bool
cifs_match_ipaddr(struct sockaddr * srcaddr,struct sockaddr * rhs)1510 cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
1511 {
1512 switch (srcaddr->sa_family) {
1513 case AF_UNSPEC:
1514 return (rhs->sa_family == AF_UNSPEC);
1515 case AF_INET: {
1516 struct sockaddr_in *saddr4 = (struct sockaddr_in *)srcaddr;
1517 struct sockaddr_in *vaddr4 = (struct sockaddr_in *)rhs;
1518
1519 return (saddr4->sin_addr.s_addr == vaddr4->sin_addr.s_addr);
1520 }
1521 case AF_INET6: {
1522 struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
1523 struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
1524
1525 return (ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr)
1526 && saddr6->sin6_scope_id == vaddr6->sin6_scope_id);
1527 }
1528 default:
1529 WARN_ON(1);
1530 return false; /* don't expect to be here */
1531 }
1532 }
1533
1534 /*
1535 * If no port is specified in addr structure, we try to match with 445 port
1536 * and if it fails - with 139 ports. It should be called only if address
1537 * families of server and addr are equal.
1538 */
1539 static bool
match_port(struct TCP_Server_Info * server,struct sockaddr * addr)1540 match_port(struct TCP_Server_Info *server, struct sockaddr *addr)
1541 {
1542 __be16 port, *sport;
1543
1544 /* SMBDirect manages its own ports, don't match it here */
1545 if (server->rdma)
1546 return true;
1547
1548 switch (addr->sa_family) {
1549 case AF_INET:
1550 sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port;
1551 port = ((struct sockaddr_in *) addr)->sin_port;
1552 break;
1553 case AF_INET6:
1554 sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port;
1555 port = ((struct sockaddr_in6 *) addr)->sin6_port;
1556 break;
1557 default:
1558 WARN_ON(1);
1559 return false;
1560 }
1561
1562 if (!port) {
1563 port = htons(CIFS_PORT);
1564 if (port == *sport)
1565 return true;
1566
1567 port = htons(RFC1001_PORT);
1568 }
1569
1570 return port == *sport;
1571 }
1572
match_server_address(struct TCP_Server_Info * server,struct sockaddr * addr)1573 static bool match_server_address(struct TCP_Server_Info *server, struct sockaddr *addr)
1574 {
1575 if (!cifs_match_ipaddr(addr, (struct sockaddr *)&server->dstaddr))
1576 return false;
1577
1578 return true;
1579 }
1580
1581 static bool
match_security(struct TCP_Server_Info * server,struct smb3_fs_context * ctx)1582 match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
1583 {
1584 /*
1585 * The select_sectype function should either return the ctx->sectype
1586 * that was specified, or "Unspecified" if that sectype was not
1587 * compatible with the given NEGOTIATE request.
1588 */
1589 if (server->ops->select_sectype(server, ctx->sectype)
1590 == Unspecified)
1591 return false;
1592
1593 /*
1594 * Now check if signing mode is acceptable. No need to check
1595 * global_secflags at this point since if MUST_SIGN is set then
1596 * the server->sign had better be too.
1597 */
1598 if (ctx->sign && !server->sign)
1599 return false;
1600
1601 return true;
1602 }
1603
1604 /* this function must be called with srv_lock held */
match_server(struct TCP_Server_Info * server,struct smb3_fs_context * ctx,bool match_super)1605 static int match_server(struct TCP_Server_Info *server,
1606 struct smb3_fs_context *ctx,
1607 bool match_super)
1608 {
1609 struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
1610
1611 lockdep_assert_held(&server->srv_lock);
1612
1613 if (ctx->nosharesock)
1614 return 0;
1615
1616 /* this server does not share socket */
1617 if (server->nosharesock)
1618 return 0;
1619
1620 if (!match_super && (ctx->dfs_conn || server->dfs_conn))
1621 return 0;
1622
1623 /* If multidialect negotiation see if existing sessions match one */
1624 if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
1625 if (server->vals->protocol_id < SMB30_PROT_ID)
1626 return 0;
1627 } else if (strcmp(ctx->vals->version_string,
1628 SMBDEFAULT_VERSION_STRING) == 0) {
1629 if (server->vals->protocol_id < SMB21_PROT_ID)
1630 return 0;
1631 } else if ((server->vals != ctx->vals) || (server->ops != ctx->ops))
1632 return 0;
1633
1634 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
1635 return 0;
1636
1637 if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr,
1638 (struct sockaddr *)&server->srcaddr))
1639 return 0;
1640
1641 if (strcasecmp(server->hostname, ctx->server_hostname) ||
1642 !match_server_address(server, addr) ||
1643 !match_port(server, addr))
1644 return 0;
1645
1646 if (!match_security(server, ctx))
1647 return 0;
1648
1649 if (server->echo_interval != ctx->echo_interval * HZ)
1650 return 0;
1651
1652 if (server->rdma != ctx->rdma)
1653 return 0;
1654
1655 if (server->ignore_signature != ctx->ignore_signature)
1656 return 0;
1657
1658 if (server->min_offload != ctx->min_offload)
1659 return 0;
1660
1661 if (server->retrans != ctx->retrans)
1662 return 0;
1663
1664 return 1;
1665 }
1666
/*
 * Look up an existing TCP connection that matches @ctx. On success the
 * server's refcount (srv_count, protected by cifs_tcp_ses_lock) is
 * bumped and the server is returned; the caller must drop it with
 * cifs_put_tcp_session(). Returns NULL when no shareable server exists.
 */
struct TCP_Server_Info *
cifs_find_tcp_session(struct smb3_fs_context *ctx)
{
	struct TCP_Server_Info *server;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		/* srv_lock protects the fields match_server() inspects */
		spin_lock(&server->srv_lock);
		/*
		 * Skip ses channels since they're only handled in lower layers
		 * (e.g. cifs_send_recv).
		 */
		if (SERVER_IS_CHAN(server) ||
		    !match_server(server, ctx, false)) {
			spin_unlock(&server->srv_lock);
			continue;
		}
		spin_unlock(&server->srv_lock);

		/* srv_count is guarded by cifs_tcp_ses_lock, still held */
		++server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		cifs_dbg(FYI, "Existing tcp session with server found\n");
		return server;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}
1694
/*
 * Drop a reference on @server. When the last reference goes away the
 * connection is torn down: delayed work is cancelled, secrets are
 * wiped, and the demultiplex thread is signalled to exit (that thread
 * performs the final free via clean_demultiplex_info()).
 *
 * @from_reconnect: true when called from the reconnect worker itself,
 * in which case the reconnect work must not be cancelled synchronously.
 */
void
cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
{
	struct task_struct *task;

	spin_lock(&cifs_tcp_ses_lock);
	if (--server->srv_count > 0) {
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* srv_count can never go negative */
	WARN_ON(server->srv_count < 0);

	/* last reference: unlink before anyone else can find it */
	list_del_init(&server->tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	cancel_delayed_work_sync(&server->echo);

	if (from_reconnect)
		/*
		 * Avoid deadlock here: reconnect work calls
		 * cifs_put_tcp_session() at its end. Need to be sure
		 * that reconnect work does nothing with server pointer after
		 * that step.
		 */
		cancel_delayed_work(&server->reconnect);
	else
		cancel_delayed_work_sync(&server->reconnect);

	/* For secondary channels, we pick up ref-count on the primary server */
	if (SERVER_IS_CHAN(server))
		cifs_put_tcp_session(server->primary_server, from_reconnect);

	spin_lock(&server->srv_lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&server->srv_lock);

	cifs_crypto_secmech_release(server);

	/* wipe the session key before the structure is freed */
	kfree_sensitive(server->session_key.response);
	server->session_key.response = NULL;
	server->session_key.len = 0;

	/* kick the demultiplex thread; it frees the server on exit */
	task = xchg(&server->tsk, NULL);
	if (task)
		send_sig(SIGKILL, task, 1);
}
1743
/*
 * Find or create a TCP connection for @ctx. An existing matching
 * connection is returned with its refcount bumped; otherwise a new
 * TCP_Server_Info is allocated, connected (TCP or SMBDirect) and its
 * demultiplex thread started. @primary_server is non-NULL when this
 * connection is a secondary channel; a reference on the primary is
 * taken in that case. Returns the server or an ERR_PTR on failure.
 */
struct TCP_Server_Info *
cifs_get_tcp_session(struct smb3_fs_context *ctx,
		     struct TCP_Server_Info *primary_server)
{
	struct TCP_Server_Info *tcp_ses = NULL;
	int rc;

	cifs_dbg(FYI, "UNC: %s\n", ctx->UNC);

	/* see if we already have a matching tcp_ses */
	tcp_ses = cifs_find_tcp_session(ctx);
	if (tcp_ses)
		return tcp_ses;

	tcp_ses = kzalloc_obj(struct TCP_Server_Info);
	if (!tcp_ses) {
		rc = -ENOMEM;
		goto out_err;
	}

	tcp_ses->hostname = kstrdup(ctx->server_hostname, GFP_KERNEL);
	if (!tcp_ses->hostname) {
		rc = -ENOMEM;
		goto out_err;
	}

	if (ctx->leaf_fullpath) {
		tcp_ses->leaf_fullpath = kstrdup(ctx->leaf_fullpath, GFP_KERNEL);
		if (!tcp_ses->leaf_fullpath) {
			rc = -ENOMEM;
			goto out_err;
		}
	}
	if (ctx->dns_dom)
		strscpy(tcp_ses->dns_dom, ctx->dns_dom);

	if (ctx->nosharesock)
		tcp_ses->nosharesock = true;
	tcp_ses->dfs_conn = ctx->dfs_conn;

	tcp_ses->ops = ctx->ops;
	tcp_ses->vals = ctx->vals;
	/* pin the mounting task's network namespace for this connection */
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	tcp_ses->sign = ctx->sign;
	tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
	tcp_ses->noblockcnt = ctx->rootfs;
	tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
	tcp_ses->noautotune = ctx->noautotune;
	tcp_ses->tcp_nodelay = ctx->sockopt_tcp_nodelay;
	tcp_ses->rdma = ctx->rdma;
	tcp_ses->in_flight = 0;
	tcp_ses->max_in_flight = 0;
	tcp_ses->credits = 1;
	if (primary_server) {
		/* secondary channel holds a reference on the primary */
		spin_lock(&cifs_tcp_ses_lock);
		++primary_server->srv_count;
		spin_unlock(&cifs_tcp_ses_lock);
		tcp_ses->primary_server = primary_server;
	}
	init_waitqueue_head(&tcp_ses->response_q);
	init_waitqueue_head(&tcp_ses->request_q);
	INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
	mutex_init(&tcp_ses->_srv_mutex);
	memcpy(tcp_ses->workstation_RFC1001_name,
		ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	memcpy(tcp_ses->server_RFC1001_name,
		ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
	tcp_ses->rfc1001_sessinit = ctx->rfc1001_sessinit;
	tcp_ses->with_rfc1001 = false;
	tcp_ses->session_estab = false;
	tcp_ses->sequence_number = 0;
	tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
	tcp_ses->reconnect_instance = 1;
	tcp_ses->lstrp = jiffies;
	tcp_ses->compression.requested = ctx->compress;
	spin_lock_init(&tcp_ses->req_lock);
	spin_lock_init(&tcp_ses->srv_lock);
	spin_lock_init(&tcp_ses->mid_queue_lock);
	spin_lock_init(&tcp_ses->mid_counter_lock);
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
	mutex_init(&tcp_ses->reconnect_mutex);
	memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
		sizeof(tcp_ses->dstaddr));
	if (ctx->use_client_guid)
		memcpy(tcp_ses->client_guid, ctx->client_guid,
		       SMB2_CLIENT_GUID_SIZE);
	else
		generate_random_uuid(tcp_ses->client_guid);
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this init of tcpStatus or srv_count
	 */
	tcp_ses->tcpStatus = CifsNew;
	++tcp_ses->srv_count;
	tcp_ses->echo_interval = ctx->echo_interval * HZ;

	if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
		cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
		rc = -ENOENT;
		goto out_err_crypto_release;
#endif
		tcp_ses->smbd_conn = smbd_get_connection(
			tcp_ses, (struct sockaddr *)&ctx->dstaddr);
		if (tcp_ses->smbd_conn) {
			cifs_dbg(VFS, "RDMA transport established\n");
			rc = 0;
			goto smbd_connected;
		} else {
			rc = -ENOENT;
			goto out_err_crypto_release;
		}
	}
	rc = ip_connect(tcp_ses);
	if (rc < 0) {
		cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
		goto out_err_crypto_release;
	}
smbd_connected:
	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	tcp_ses->tsk = kthread_run(cifs_demultiplex_thread,
				   tcp_ses, "cifsd");
	if (IS_ERR(tcp_ses->tsk)) {
		rc = PTR_ERR(tcp_ses->tsk);
		cifs_dbg(VFS, "error %d create cifsd thread\n", rc);
		module_put(THIS_MODULE);
		goto out_err_crypto_release;
	}
	tcp_ses->min_offload = ctx->min_offload;
	tcp_ses->retrans = ctx->retrans;
	/*
	 * at this point we are the only ones with the pointer
	 * to the struct since the kernel thread not created yet
	 * no need to spinlock this update of tcpStatus
	 */
	spin_lock(&tcp_ses->srv_lock);
	tcp_ses->tcpStatus = CifsNeedNegotiate;
	spin_unlock(&tcp_ses->srv_lock);

	/* clamp unreasonable credit limits to the protocol default */
	if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
		tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
	else
		tcp_ses->max_credits = ctx->max_credits;

	tcp_ses->nr_targets = 1;
	tcp_ses->ignore_signature = ctx->ignore_signature;
	/* thread spawned, put it on the list */
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* queue echo request delayed work */
	queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);

	return tcp_ses;

out_err_crypto_release:
	cifs_crypto_secmech_release(tcp_ses);

	put_net(cifs_net_ns(tcp_ses));

out_err:
	if (tcp_ses) {
		if (SERVER_IS_CHAN(tcp_ses))
			cifs_put_tcp_session(tcp_ses->primary_server, false);
		kfree(tcp_ses->hostname);
		kfree(tcp_ses->leaf_fullpath);
		if (tcp_ses->ssocket)
			sock_release(tcp_ses->ssocket);
		kfree(tcp_ses);
	}
	return ERR_PTR(rc);
}
1928
/*
 * this function must be called with ses_lock and chan_lock held
 *
 * Decide whether an existing SMB session can be shared by a new mount
 * request. Returns 1 on match, 0 otherwise. Matching covers DFS root
 * session, channel limits, security type, credentials and charset.
 */
static int match_session(struct cifs_ses *ses,
			 struct smb3_fs_context *ctx,
			 bool match_super)
{
	struct TCP_Server_Info *server = ses->server;
	enum securityEnum ctx_sec, ses_sec;

	if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
		return 0;

	/*
	 * If an existing session is limited to less channels than
	 * requested, it should not be reused
	 */
	if (ses->chan_max < ctx->max_channels)
		return 0;

	/* both must resolve to the same effective security type */
	ctx_sec = server->ops->select_sectype(server, ctx->sectype);
	ses_sec = server->ops->select_sectype(server, ses->sectype);

	if (ctx_sec != ses_sec)
		return 0;

	switch (ctx_sec) {
	case IAKerb:
	case Kerberos:
		/* Kerberos matches on the credential-cache owner uid */
		if (!uid_eq(ctx->cred_uid, ses->cred_uid))
			return 0;
		break;
	case NTLMv2:
	case RawNTLMSSP:
	default:
		/* NULL username means anonymous session */
		if (ses->user_name == NULL) {
			if (!ctx->nullauth)
				return 0;
			break;
		}

		/* anything else takes username/password */
		if (strncmp(ses->user_name,
			    ctx->username ? ctx->username : "",
			    CIFS_MAX_USERNAME_LEN))
			return 0;
		if ((ctx->username && strlen(ctx->username) != 0) &&
		    ses->password != NULL) {

			/* New mount can only share sessions with an existing mount if:
			 * 1. Both password and password2 match, or
			 * 2. password2 of the old mount matches password of the new mount
			 *    and password of the old mount matches password2 of the new
			 *    mount
			 */
			if (ses->password2 != NULL && ctx->password2 != NULL) {
				if (!((strncmp(ses->password, ctx->password ?
					ctx->password : "", CIFS_MAX_PASSWORD_LEN) == 0 &&
					strncmp(ses->password2, ctx->password2,
					CIFS_MAX_PASSWORD_LEN) == 0) ||
					(strncmp(ses->password, ctx->password2,
					CIFS_MAX_PASSWORD_LEN) == 0 &&
					strncmp(ses->password2, ctx->password ?
					ctx->password : "", CIFS_MAX_PASSWORD_LEN) == 0)))
					return 0;

			} else if ((ses->password2 == NULL && ctx->password2 != NULL) ||
				(ses->password2 != NULL && ctx->password2 == NULL)) {
				/* only one side has a rotated password: no match */
				return 0;

			} else {
				if (strncmp(ses->password, ctx->password ?
					ctx->password : "", CIFS_MAX_PASSWORD_LEN))
					return 0;
			}
		}
	}

	/* local charset must agree so path conversion stays consistent */
	if (strcmp(ctx->local_nls->charset, ses->local_nls->charset))
		return 0;

	return 1;
}
2011
/**
 * cifs_setup_ipc - helper to setup the IPC tcon for the session
 * @ses: smb session to issue the request on
 * @seal: if encryption is requested
 *
 * A new IPC connection is made and stored in the session
 * tcon_ipc. The IPC tcon has the same lifetime as the session.
 *
 * Return: the connected IPC tcon on success, an ERR_PTR() on failure.
 */
struct cifs_tcon *cifs_setup_ipc(struct cifs_ses *ses, bool seal)
{
	int rc = 0, xid;
	struct cifs_tcon *tcon;
	char unc[SERVER_NAME_LENGTH + sizeof("//x/IPC$")] = {0};
	struct TCP_Server_Info *server = ses->server;

	/*
	 * If the mount request that resulted in the creation of the
	 * session requires encryption, force IPC to be encrypted too.
	 */
	if (seal && !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) {
		cifs_server_dbg(VFS, "IPC: server doesn't support encryption\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* no need to setup directory caching on IPC share, so pass in false */
	tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
	if (tcon == NULL)
		return ERR_PTR(-ENOMEM);

	/* build \\server\IPC$; hostname is read under srv_lock */
	spin_lock(&server->srv_lock);
	scnprintf(unc, sizeof(unc), "\\\\%s\\IPC$", server->hostname);
	spin_unlock(&server->srv_lock);

	xid = get_xid();
	tcon->ses = ses;
	tcon->ipc = true;
	tcon->seal = seal;
	rc = server->ops->tree_connect(xid, ses, unc, tcon, ses->local_nls);
	free_xid(xid);

	if (rc) {
		cifs_server_dbg(VFS | ONCE, "failed to connect to IPC (rc=%d)\n", rc);
		tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
		return ERR_PTR(rc);
	}

	cifs_dbg(FYI, "IPC tcon rc=%d ipc tid=0x%x\n", rc, tcon->tid);

	spin_lock(&tcon->tc_lock);
	tcon->status = TID_GOOD;
	spin_unlock(&tcon->tc_lock);
	return tcon;
}
2065
/*
 * Find an existing, non-exiting session on @server that matches @ctx.
 * On success a reference is taken (cifs_smb_ses_inc_refcount) before the
 * session is returned, so the caller owns a ref; returns NULL otherwise.
 */
static struct cifs_ses *
cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	struct cifs_ses *ses, *ret = NULL;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		spin_lock(&ses->ses_lock);
		/* skip sessions that are being torn down */
		if (ses->ses_status == SES_EXITING) {
			spin_unlock(&ses->ses_lock);
			continue;
		}
		spin_lock(&ses->chan_lock);
		if (match_session(ses, ctx, false)) {
			spin_unlock(&ses->chan_lock);
			spin_unlock(&ses->ses_lock);
			ret = ses;
			break;
		}
		spin_unlock(&ses->chan_lock);
		spin_unlock(&ses->ses_lock);
	}
	/* take the ref while still holding cifs_tcp_ses_lock */
	if (ret)
		cifs_smb_ses_inc_refcount(ret);
	spin_unlock(&cifs_tcp_ses_lock);
	return ret;
}
2093
/*
 * Drop a reference to @ses; on the last put, mark it SES_EXITING, free
 * the IPC tcon, log off the server session, release all channels and
 * finally free the session and its primary TCP server reference.
 */
void __cifs_put_smb_ses(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon;
	unsigned int xid;
	size_t i;
	bool do_logoff;
	int rc;

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&ses->ses_lock);
	cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n",
		 __func__, ses->Suid, ses->ses_count, ses->ses_status,
		 ses->tcon_ipc ? ses->tcon_ipc->tree_name : "none");
	/* nothing to do while other users remain or teardown already began */
	if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) {
		spin_unlock(&ses->ses_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/* ses_count can never go negative */
	WARN_ON(ses->ses_count < 0);

	spin_lock(&ses->chan_lock);
	cifs_chan_clear_need_reconnect(ses, server);
	spin_unlock(&ses->chan_lock);

	/* only send LOGOFF for a healthy session; decide before flipping state */
	do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff;
	ses->ses_status = SES_EXITING;
	tcon = ses->tcon_ipc;
	ses->tcon_ipc = NULL;
	spin_unlock(&ses->ses_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/*
	 * On session close, the IPC is closed and the server must release all
	 * tcons of the session. No need to send a tree disconnect here.
	 *
	 * Besides, it will make the server to not close durable and resilient
	 * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
	 * SMB2 LOGOFF Request.
	 */
	tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc);
	if (do_logoff) {
		xid = get_xid();
		rc = server->ops->logoff(xid, ses);
		cifs_server_dbg(FYI, "%s: Session Logoff: rc=%d\n",
			__func__, rc);
		_free_xid(xid);
	}

	spin_lock(&cifs_tcp_ses_lock);
	list_del_init(&ses->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* close any extra channels */
	for (i = 1; i < ses->chan_count; i++) {
		if (ses->chans[i].iface) {
			kref_put(&ses->chans[i].iface->refcount, release_iface);
			ses->chans[i].iface = NULL;
		}
		cifs_put_tcp_session(ses->chans[i].server, 0);
		ses->chans[i].server = NULL;
	}

	/* we now account for primary channel in iface->refcount */
	if (ses->chans[0].iface) {
		kref_put(&ses->chans[0].iface->refcount, release_iface);
		ses->chans[0].server = NULL;
	}

	sesInfoFree(ses);
	/* primary channel's server ref is dropped last */
	cifs_put_tcp_session(server, 0);
}
2167
2168 #ifdef CONFIG_KEYS
2169
/*
 * Populate username and pw fields from keyring if possible.
 *
 * Looks up a "logon" key named "cifs:a:<addr>" (per-server), falling back
 * to "cifs:d:<domain>" (per-domain). The key payload is expected to be
 * "username:password"; the parsed parts are copied into @ctx. Returns 0
 * on success or a negative errno.
 */
static int
cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
{
	int rc = 0;
	int is_domain = 0;
	const char *delim, *payload;
	size_t desc_sz;
	char *desc;
	ssize_t len;
	struct key *key;
	struct TCP_Server_Info *server = ses->server;
	struct sockaddr_in *sa;
	struct sockaddr_in6 *sa6;
	const struct user_key_payload *upayload;

	/* "cifs:a:" and "cifs:d:" are the same length; +1 for NUL terminator */
	desc_sz = strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1;
	desc = kmalloc(desc_sz, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	/* try to find an address key first */
	switch (server->dstaddr.ss_family) {
	case AF_INET:
		sa = (struct sockaddr_in *)&server->dstaddr;
		snprintf(desc, desc_sz, "cifs:a:%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		sa6 = (struct sockaddr_in6 *)&server->dstaddr;
		snprintf(desc, desc_sz, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
		break;
	default:
		cifs_dbg(FYI, "Bad ss_family (%hu)\n",
			 server->dstaddr.ss_family);
		rc = -EINVAL;
		goto out_err;
	}

	cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
	key = request_key(&key_type_logon, desc, "");
	if (IS_ERR(key)) {
		if (!ses->domainName) {
			cifs_dbg(FYI, "domainName is NULL\n");
			rc = PTR_ERR(key);
			goto out_err;
		}

		/* didn't work, try to find a domain key */
		snprintf(desc, desc_sz, "cifs:d:%s", ses->domainName);
		cifs_dbg(FYI, "%s: desc=%s\n", __func__, desc);
		key = request_key(&key_type_logon, desc, "");
		if (IS_ERR(key)) {
			rc = PTR_ERR(key);
			goto out_err;
		}
		is_domain = 1;
	}

	down_read(&key->sem);
	upayload = user_key_payload_locked(key);
	if (IS_ERR_OR_NULL(upayload)) {
		rc = upayload ? PTR_ERR(upayload) : -EINVAL;
		goto out_key_put;
	}

	/* find first : in payload */
	payload = upayload->data;
	delim = strnchr(payload, upayload->datalen, ':');
	if (!delim) {
		cifs_dbg(FYI, "Unable to find ':' in payload (datalen=%d)\n",
			 upayload->datalen);
		rc = -EINVAL;
		goto out_key_put;
	}

	/* the part before the ':' is the username */
	len = delim - payload;
	if (len > CIFS_MAX_USERNAME_LEN || len <= 0) {
		cifs_dbg(FYI, "Bad value from username search (len=%zd)\n",
			 len);
		rc = -EINVAL;
		goto out_key_put;
	}

	ctx->username = kstrndup(payload, len, GFP_KERNEL);
	if (!ctx->username) {
		cifs_dbg(FYI, "Unable to allocate %zd bytes for username\n",
			 len);
		rc = -ENOMEM;
		goto out_key_put;
	}
	cifs_dbg(FYI, "%s: username=%s\n", __func__, ctx->username);

	/* everything after the ':' is the password */
	len = key->datalen - (len + 1);
	if (len > CIFS_MAX_PASSWORD_LEN || len <= 0) {
		cifs_dbg(FYI, "Bad len for password search (len=%zd)\n", len);
		rc = -EINVAL;
		kfree(ctx->username);
		ctx->username = NULL;
		goto out_key_put;
	}

	++delim;
	/* BB consider adding support for password2 (Key Rotation) for multiuser in future */
	ctx->password = kstrndup(delim, len, GFP_KERNEL);
	if (!ctx->password) {
		cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
			 len);
		rc = -ENOMEM;
		kfree(ctx->username);
		ctx->username = NULL;
		goto out_key_put;
	}

	/*
	 * If we have a domain key then we must set the domainName used
	 * for the request.
	 */
	if (is_domain && ses->domainName) {
		ctx->domainname = kstrdup(ses->domainName, GFP_KERNEL);
		if (!ctx->domainname) {
			cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
				 len);
			rc = -ENOMEM;
			kfree(ctx->username);
			ctx->username = NULL;
			kfree_sensitive(ctx->password);
			/* no need to free ctx->password2 since not allocated in this path */
			ctx->password = NULL;
			goto out_key_put;
		}
	}

	strscpy(ctx->workstation_name, ses->workstation_name, sizeof(ctx->workstation_name));

out_key_put:
	up_read(&key->sem);
	key_put(key);
out_err:
	kfree(desc);
	cifs_dbg(FYI, "%s: returning %d\n", __func__, rc);
	return rc;
}
2313 #else /* ! CONFIG_KEYS */
/* Keyring support compiled out: keyring credential lookup unavailable */
static inline int
cifs_set_cifscreds(struct smb3_fs_context *ctx __maybe_unused,
		   struct cifs_ses *ses __maybe_unused)
{
	return -ENOSYS;
}
2320 #endif /* CONFIG_KEYS */
2321
/**
 * cifs_get_smb_ses - get a session matching @ctx data from @server
 * @server: server to setup the session to
 * @ctx: superblock configuration context to use to setup the session
 *
 * This function assumes it is being called from cifs_mount() where we
 * already got a server reference (server refcount +1). See
 * cifs_get_tcon() for refcount explanations.
 *
 * Return: a referenced session (existing or newly created) on success,
 * an ERR_PTR() on failure.
 */
struct cifs_ses *
cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
{
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
	struct cifs_tcon *ipc;
	struct cifs_ses *ses;
	unsigned int xid;
	int retries = 0;
	size_t len;
	int rc = 0;

	xid = get_xid();

	/* reuse an existing session when one matches */
	ses = cifs_find_smb_ses(server, ctx);
	if (ses) {
		cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
			 ses->ses_status);

		spin_lock(&ses->chan_lock);
		if (cifs_chan_needs_reconnect(ses, server)) {
			spin_unlock(&ses->chan_lock);
			cifs_dbg(FYI, "Session needs reconnect\n");

			mutex_lock(&ses->session_mutex);

retry_old_session:
			rc = cifs_negotiate_protocol(xid, ses, server);
			if (rc) {
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our ses reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}

			rc = cifs_setup_session(xid, ses, server,
						ctx->local_nls);
			if (rc) {
				/*
				 * On auth failure, retry once with the
				 * alternate (rotated) password.
				 */
				if (((rc == -EACCES) || (rc == -EKEYEXPIRED) ||
				     (rc == -EKEYREVOKED)) && !retries && ses->password2) {
					retries++;
					cifs_dbg(FYI, "Session reconnect failed, retrying with alternate password\n");
					swap(ses->password, ses->password2);
					goto retry_old_session;
				}
				mutex_unlock(&ses->session_mutex);
				/* problem -- put our reference */
				cifs_put_smb_ses(ses);
				free_xid(xid);
				return ERR_PTR(rc);
			}
			mutex_unlock(&ses->session_mutex);

			spin_lock(&ses->chan_lock);
		}
		spin_unlock(&ses->chan_lock);

		/* existing SMB ses has a server reference already */
		cifs_put_tcp_session(server, 0);
		free_xid(xid);
		return ses;
	}

	rc = -ENOMEM;

	cifs_dbg(FYI, "Existing smb sess not found\n");
	ses = sesInfoAlloc();
	if (ses == NULL)
		goto get_ses_fail;

	/* new SMB session uses our server ref */
	ses->server = server;
	if (server->dstaddr.ss_family == AF_INET6)
		sprintf(ses->ip_addr, "%pI6", &addr6->sin6_addr);
	else
		sprintf(ses->ip_addr, "%pI4", &addr->sin_addr);

	if (ctx->username) {
		ses->user_name = kstrdup(ctx->username, GFP_KERNEL);
		if (!ses->user_name)
			goto get_ses_fail;
	}

	/* ctx->password freed at unmount */
	if (ctx->password) {
		ses->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!ses->password)
			goto get_ses_fail;
	}
	/* ctx->password2 freed at unmount */
	if (ctx->password2) {
		ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
		if (!ses->password2)
			goto get_ses_fail;
	}
	if (ctx->domainname) {
		ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
		if (!ses->domainName)
			goto get_ses_fail;

		/* a non-NetBIOS domain name doubles as the DNS domain */
		len = strnlen(ctx->domainname, CIFS_MAX_DOMAINNAME_LEN);
		if (!cifs_netbios_name(ctx->domainname, len)) {
			ses->dns_dom = kstrndup(ctx->domainname,
						len, GFP_KERNEL);
			if (!ses->dns_dom)
				goto get_ses_fail;
		}
	}

	strscpy(ses->workstation_name, ctx->workstation_name, sizeof(ses->workstation_name));

	if (ctx->domainauto)
		ses->domainAuto = ctx->domainauto;
	ses->cred_uid = ctx->cred_uid;
	ses->linux_uid = ctx->linux_uid;

	ses->unicode = ctx->unicode;
	ses->sectype = ctx->sectype;
	ses->sign = ctx->sign;

	/*
	 * Explicitly marking upcall_target mount option for easier handling
	 * by cifs_spnego.c and eventually cifs.upcall.c
	 */

	switch (ctx->upcall_target) {
	case UPTARGET_UNSPECIFIED: /* default to app */
	case UPTARGET_APP:
		ses->upcall_target = UPTARGET_APP;
		break;
	case UPTARGET_MOUNT:
		ses->upcall_target = UPTARGET_MOUNT;
		break;
	default:
		// should never happen
		ses->upcall_target = UPTARGET_APP;
		break;
	}

	ses->local_nls = load_nls(ctx->local_nls->charset);

	/* add server as first channel */
	spin_lock(&ses->chan_lock);
	ses->chans[0].server = server;
	ses->chan_count = 1;
	ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
	ses->chans_need_reconnect = 1;
	spin_unlock(&ses->chan_lock);

retry_new_session:
	mutex_lock(&ses->session_mutex);
	rc = cifs_negotiate_protocol(xid, ses, server);
	if (!rc)
		rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
	mutex_unlock(&ses->session_mutex);

	/* each channel uses a different signing key */
	spin_lock(&ses->chan_lock);
	memcpy(ses->chans[0].signkey, ses->smb3signingkey,
	       sizeof(ses->smb3signingkey));
	spin_unlock(&ses->chan_lock);

	if (rc) {
		/* auth failure: retry once with the alternate password */
		if (((rc == -EACCES) || (rc == -EKEYEXPIRED) ||
		     (rc == -EKEYREVOKED)) && !retries && ses->password2) {
			retries++;
			cifs_dbg(FYI, "Session setup failed, retrying with alternate password\n");
			swap(ses->password, ses->password2);
			goto retry_new_session;
		} else
			goto get_ses_fail;
	}

	/*
	 * success, put it on the list and add it as first channel
	 * note: the session becomes active soon after this. So you'll
	 * need to lock before changing something in the session.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	ses->dfs_root_ses = ctx->dfs_root_ses;
	list_add(&ses->smb_ses_list, &server->smb_ses_list);
	spin_unlock(&cifs_tcp_ses_lock);

	/* IPC failure is not fatal; tcon_ipc just stays NULL */
	ipc = cifs_setup_ipc(ses, ctx->seal);
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&ses->ses_lock);
	ses->tcon_ipc = !IS_ERR(ipc) ? ipc : NULL;
	spin_unlock(&ses->ses_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	free_xid(xid);

	return ses;

get_ses_fail:
	sesInfoFree(ses);
	free_xid(xid);
	return ERR_PTR(rc);
}
2531
2532 /* this function must be called with tc_lock held */
match_tcon(struct cifs_tcon * tcon,struct smb3_fs_context * ctx)2533 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
2534 {
2535 struct TCP_Server_Info *server = tcon->ses->server;
2536
2537 if (tcon->status == TID_EXITING)
2538 return 0;
2539
2540 if (tcon->origin_fullpath) {
2541 if (!ctx->source ||
2542 !dfs_src_pathname_equal(ctx->source,
2543 tcon->origin_fullpath))
2544 return 0;
2545 } else if (!server->leaf_fullpath &&
2546 strncmp(tcon->tree_name, ctx->UNC, MAX_TREE_SIZE)) {
2547 return 0;
2548 }
2549 if (tcon->seal != ctx->seal)
2550 return 0;
2551 if (tcon->snapshot_time != ctx->snapshot_time)
2552 return 0;
2553 if (tcon->handle_timeout != ctx->handle_timeout)
2554 return 0;
2555 if (tcon->no_lease != ctx->no_lease)
2556 return 0;
2557 if (tcon->nodelete != ctx->nodelete)
2558 return 0;
2559 if (tcon->posix_extensions != ctx->linux_ext)
2560 return 0;
2561 return 1;
2562 }
2563
/*
 * Look up a tcon on @ses matching @ctx. On success the tcon's refcount
 * is bumped (traced as netfs_trace_tcon_ref_get_find) before returning;
 * returns NULL when nothing matches.
 */
static struct cifs_tcon *
cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
		spin_lock(&tcon->tc_lock);
		if (!match_tcon(tcon, ctx)) {
			spin_unlock(&tcon->tc_lock);
			continue;
		}
		/* take the ref under tc_lock before dropping the list lock */
		++tcon->tc_count;
		trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
				    netfs_trace_tcon_ref_get_find);
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return tcon;
	}
	spin_unlock(&cifs_tcp_ses_lock);
	return NULL;
}
2586
/*
 * Drop a reference to @tcon; on the last put, unlink it, stop its
 * background work, tree-disconnect from the server and free it, then
 * drop the tcon's session reference.
 */
void
cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
	unsigned int xid;
	struct cifs_ses *ses;
	LIST_HEAD(ses_list);

	/*
	 * IPC tcon share the lifetime of their session and are
	 * destroyed in the session put function
	 */
	if (tcon == NULL || tcon->ipc)
		return;

	ses = tcon->ses;
	cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace);
	if (--tcon->tc_count > 0) {
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}

	/* tc_count can never go negative */
	WARN_ON(tcon->tc_count < 0);

	/* unlink and mark exiting before dropping the locks */
	list_del_init(&tcon->tcon_list);
	tcon->status = TID_EXITING;
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel polling of interfaces */
	cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
	cancel_delayed_work_sync(&tcon->dfs_cache_work);
	/* steal the DFS root session list; released after the tcon is freed */
	list_replace_init(&tcon->dfs_ses_list, &ses_list);
#endif

	if (tcon->use_witness) {
		int rc;

		rc = cifs_swn_unregister(tcon);
		if (rc < 0) {
			cifs_dbg(VFS, "%s: Failed to unregister for witness notifications: %d\n",
				 __func__, rc);
		}
	}

	xid = get_xid();
	if (ses->server->ops->tree_disconnect)
		ses->server->ops->tree_disconnect(xid, tcon);
	_free_xid(xid);

	cifs_fscache_release_super_cookie(tcon);
	tconInfoFree(tcon, netfs_trace_tcon_ref_free);
	cifs_put_smb_ses(ses);
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_put_root_smb_sessions(&ses_list);
#endif
}
2649
/**
 * cifs_get_tcon - get a tcon matching @ctx data from @ses
 * @ses: smb session to issue the request on
 * @ctx: the superblock configuration context to use for building the tcon
 *
 * - tcon refcount is the number of mount points using the tcon.
 * - ses refcount is the number of tcon using the session.
 *
 * 1. This function assumes it is being called from cifs_mount() where
 *    we already got a session reference (ses refcount +1).
 *
 * 2. Since we're in the context of adding a mount point, the end
 *    result should be either:
 *
 * a) a new tcon already allocated with refcount=1 (1 mount point) and
 *    its session refcount incremented (1 new tcon). This +1 was
 *    already done in (1).
 *
 * b) an existing tcon with refcount+1 (add a mount point to it) and
 *    identical ses refcount (no new tcon). Because of (1) we need to
 *    decrement the ses refcount.
 */
static struct cifs_tcon *
cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
{
	struct cifs_tcon *tcon;
	bool nohandlecache;
	int rc, xid;

	tcon = cifs_find_tcon(ses, ctx);
	if (tcon) {
		/*
		 * tcon has refcount already incremented but we need to
		 * decrement extra ses reference gotten by caller (case b)
		 */
		cifs_dbg(FYI, "Found match on UNC path\n");
		cifs_put_smb_ses(ses);
		return tcon;
	}

	if (!ses->server->ops->tree_connect) {
		rc = -ENOSYS;
		goto out_fail;
	}

	/* directory lease caching needs SMB2+ with directory-leasing cap */
	if (ses->server->dialect >= SMB20_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING))
		nohandlecache = ctx->nohandlecache || !dir_cache_timeout;
	else
		nohandlecache = true;
	tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
	if (tcon == NULL) {
		rc = -ENOMEM;
		goto out_fail;
	}
	tcon->nohandlecache = nohandlecache;

	if (ctx->snapshot_time) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "Use SMB2 or later for snapshot mount option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->snapshot_time = ctx->snapshot_time;
	}

	if (ctx->handle_timeout) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "Use SMB2.1 or later for handle timeout option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->handle_timeout = ctx->handle_timeout;
	}

	tcon->ses = ses;
	if (ctx->password) {
		tcon->password = kstrdup(ctx->password, GFP_KERNEL);
		if (!tcon->password) {
			rc = -ENOMEM;
			goto out_fail;
		}
	}

	if (ctx->seal) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				 "SMB3 or later required for encryption\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (tcon->ses->server->capabilities &
					SMB2_GLOBAL_CAP_ENCRYPTION)
			tcon->seal = true;
		else {
			cifs_dbg(VFS, "Encryption is not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	if (ctx->linux_ext) {
		if (ses->server->posix_ext_supported) {
			tcon->posix_extensions = true;
			pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
		} else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
		    (strcmp(ses->server->vals->version_string,
		     SMB3ANY_VERSION_STRING) == 0) ||
		    (strcmp(ses->server->vals->version_string,
		     SMBDEFAULT_VERSION_STRING) == 0)) {
			cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (ses->server->vals->protocol_id == SMB10_PROT_ID)
			if (cap_unix(ses))
				cifs_dbg(FYI, "Unix Extensions requested on SMB1 mount\n");
			else {
				cifs_dbg(VFS, "SMB1 Unix Extensions not supported by server\n");
				rc = -EOPNOTSUPP;
				goto out_fail;
			} else {
			/* NOTE: this else binds to the SMB10_PROT_ID if above */
			cifs_dbg(VFS,
				"Check vers= mount option. SMB3.11 disabled but required for POSIX extensions\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	xid = get_xid();
	rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
					    ctx->local_nls);
	free_xid(xid);
	cifs_dbg(FYI, "Tcon rc = %d\n", rc);
	if (rc)
		goto out_fail;

	tcon->use_persistent = false;
	/* check if SMB2 or later, CIFS does not support persistent handles */
	if (ctx->persistent) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB3 or later required for persistent handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else if (ses->server->capabilities &
			   SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
			tcon->use_persistent = true;
		else /* persistent handles requested but not supported */ {
			cifs_dbg(VFS,
				"Persistent handles not supported on share\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	} else if ((tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
	     && (ses->server->capabilities & SMB2_GLOBAL_CAP_PERSISTENT_HANDLES)
	     && (ctx->nopersistent == false)) {
		cifs_dbg(FYI, "enabling persistent handles\n");
		tcon->use_persistent = true;
	} else if (ctx->resilient) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
			     "SMB2.1 or later required for resilient handles\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
		tcon->use_resilient = true;
	}

	tcon->use_witness = false;
	if (IS_ENABLED(CONFIG_CIFS_SWN_UPCALL) && ctx->witness) {
		if (ses->server->vals->protocol_id >= SMB30_PROT_ID) {
			if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER) {
				/*
				 * Set witness in use flag in first place
				 * to retry registration in the echo task
				 */
				tcon->use_witness = true;
				/* And try to register immediately */
				rc = cifs_swn_register(tcon);
				if (rc < 0) {
					cifs_dbg(VFS, "Failed to register for witness notifications: %d\n", rc);
					goto out_fail;
				}
			} else {
				/* TODO: try to extend for non-cluster uses (eg multichannel) */
				cifs_dbg(VFS, "witness requested on mount but no CLUSTER capability on share\n");
				rc = -EOPNOTSUPP;
				goto out_fail;
			}
		} else {
			cifs_dbg(VFS, "SMB3 or later required for witness option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		}
	}

	/* If the user really knows what they are doing they can override */
	if (tcon->share_flags & SMB2_SHAREFLAG_NO_CACHING) {
		if (ctx->cache_ro)
			cifs_dbg(VFS, "cache=ro requested on mount but NO_CACHING flag set on share\n");
		else if (ctx->cache_rw)
			cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
	}

	if (ctx->no_lease) {
		if (ses->server->vals->protocol_id == 0) {
			cifs_dbg(VFS,
				"SMB2 or later required for nolease option\n");
			rc = -EOPNOTSUPP;
			goto out_fail;
		} else
			tcon->no_lease = ctx->no_lease;
	}

	/*
	 * We can have only one retry value for a connection to a share so for
	 * resources mounted more than once to the same server share the last
	 * value passed in for the retry flag is used.
	 */
	tcon->retry = ctx->retry;
	tcon->nocase = ctx->nocase;
	tcon->broken_sparse_sup = ctx->no_sparse;
	tcon->max_cached_dirs = ctx->max_cached_dirs;
	tcon->nodelete = ctx->nodelete;
	tcon->local_lease = ctx->local_lease;
	tcon->status = TID_GOOD;

	if (ses->server->dialect >= SMB30_PROT_ID &&
	    (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
		/* schedule query interfaces poll */
		queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
				   (SMB_INTERFACE_POLL_INTERVAL * HZ));
	}
	spin_lock(&cifs_tcp_ses_lock);
	list_add(&tcon->tcon_list, &ses->tcon_list);
	spin_unlock(&cifs_tcp_ses_lock);

	return tcon;

out_fail:
	tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail);
	return ERR_PTR(rc);
}
2894
2895 void
cifs_put_tlink(struct tcon_link * tlink)2896 cifs_put_tlink(struct tcon_link *tlink)
2897 {
2898 if (!tlink || IS_ERR(tlink))
2899 return;
2900
2901 if (!atomic_dec_and_test(&tlink->tl_count) ||
2902 test_bit(TCON_LINK_IN_TREE, &tlink->tl_flags)) {
2903 tlink->tl_time = jiffies;
2904 return;
2905 }
2906
2907 if (!IS_ERR(tlink_tcon(tlink)))
2908 cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink);
2909 kfree(tlink);
2910 }
2911
2912 static int
compare_mount_options(struct super_block * sb,struct cifs_mnt_data * mnt_data)2913 compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
2914 {
2915 struct cifs_sb_info *old = CIFS_SB(sb);
2916 struct cifs_sb_info *new = mnt_data->cifs_sb;
2917 unsigned int oldflags = cifs_sb_flags(old) & CIFS_MOUNT_MASK;
2918 unsigned int newflags = cifs_sb_flags(new) & CIFS_MOUNT_MASK;
2919
2920 if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
2921 return 0;
2922
2923 if (old->mnt_cifs_serverino_autodisabled)
2924 newflags &= ~CIFS_MOUNT_SERVER_INUM;
2925
2926 if (oldflags != newflags)
2927 return 0;
2928
2929 /*
2930 * We want to share sb only if we don't specify an r/wsize or
2931 * specified r/wsize is greater than or equal to existing one.
2932 */
2933 if (new->ctx->wsize && new->ctx->wsize < old->ctx->wsize)
2934 return 0;
2935
2936 if (new->ctx->rsize && new->ctx->rsize < old->ctx->rsize)
2937 return 0;
2938
2939 if (!uid_eq(old->ctx->linux_uid, new->ctx->linux_uid) ||
2940 !gid_eq(old->ctx->linux_gid, new->ctx->linux_gid))
2941 return 0;
2942
2943 if (old->ctx->file_mode != new->ctx->file_mode ||
2944 old->ctx->dir_mode != new->ctx->dir_mode)
2945 return 0;
2946
2947 if (strcmp(old->local_nls->charset, new->local_nls->charset))
2948 return 0;
2949
2950 if (old->ctx->acregmax != new->ctx->acregmax)
2951 return 0;
2952 if (old->ctx->acdirmax != new->ctx->acdirmax)
2953 return 0;
2954 if (old->ctx->closetimeo != new->ctx->closetimeo)
2955 return 0;
2956 if (old->ctx->reparse_type != new->ctx->reparse_type)
2957 return 0;
2958 if (old->ctx->nonativesocket != new->ctx->nonativesocket)
2959 return 0;
2960 if (old->ctx->symlink_type != new->ctx->symlink_type)
2961 return 0;
2962
2963 return 1;
2964 }
2965
match_prepath(struct super_block * sb,struct cifs_tcon * tcon,struct cifs_mnt_data * mnt_data)2966 static int match_prepath(struct super_block *sb,
2967 struct cifs_tcon *tcon,
2968 struct cifs_mnt_data *mnt_data)
2969 {
2970 struct smb3_fs_context *ctx = mnt_data->ctx;
2971 struct cifs_sb_info *old = CIFS_SB(sb);
2972 struct cifs_sb_info *new = mnt_data->cifs_sb;
2973 bool old_set = (cifs_sb_flags(old) & CIFS_MOUNT_USE_PREFIX_PATH) &&
2974 old->prepath;
2975 bool new_set = (cifs_sb_flags(new) & CIFS_MOUNT_USE_PREFIX_PATH) &&
2976 new->prepath;
2977
2978 if (tcon->origin_fullpath &&
2979 dfs_src_pathname_equal(tcon->origin_fullpath, ctx->source))
2980 return 1;
2981
2982 if (old_set && new_set && !strcmp(new->prepath, old->prepath))
2983 return 1;
2984 else if (!old_set && !new_set)
2985 return 1;
2986
2987 return 0;
2988 }
2989
/*
 * sget() callback: decide whether existing superblock @sb can be reused
 * for the mount described by @data (a struct cifs_mnt_data). Checks the
 * server, session, tcon, prefix path and mount options all match.
 * Returns 1 to reuse @sb, 0 otherwise.
 */
int
cifs_match_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	struct smb3_fs_context *ctx;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *tcp_srv;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	int rc = 0;

	spin_lock(&cifs_tcp_ses_lock);
	cifs_sb = CIFS_SB(sb);

	/* We do not want to use a superblock that has been shutdown */
	if (cifs_forced_shutdown(cifs_sb)) {
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}

	tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
	if (IS_ERR_OR_NULL(tlink)) {
		pr_warn_once("%s: skip super matching due to bad tlink(%p)\n",
			     __func__, tlink);
		spin_unlock(&cifs_tcp_ses_lock);
		return 0;
	}
	tcon = tlink_tcon(tlink);
	ses = tcon->ses;
	tcp_srv = ses->server;

	ctx = mnt_data->ctx;

	/* lock order: srv_lock -> ses_lock -> chan_lock -> tc_lock */
	spin_lock(&tcp_srv->srv_lock);
	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	spin_lock(&tcon->tc_lock);
	if (!match_server(tcp_srv, ctx, true) ||
	    !match_session(ses, ctx, true) ||
	    !match_tcon(tcon, ctx) ||
	    !match_prepath(sb, tcon, mnt_data)) {
		rc = 0;
		goto out;
	}

	rc = compare_mount_options(sb, mnt_data);
out:
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	spin_unlock(&tcp_srv->srv_lock);

	spin_unlock(&cifs_tcp_ses_lock);
	cifs_put_tlink(tlink);
	return rc;
}
3047
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Give CIFS-owned sockets their own lockdep classes so lockdep does not
 * conflate their lock usage with ordinary sockets of the same family.
 */
static struct lock_class_key cifs_key[2];
static struct lock_class_key cifs_slock_key[2];

static inline void
cifs_reclassify_socket4(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
		&cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]);
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BUG_ON(!sock_allow_reclassification(sk));
	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
		&cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]);
}
#else
/* lockdep disabled: reclassification is a no-op */
static inline void
cifs_reclassify_socket4(struct socket *sock)
{
}

static inline void
cifs_reclassify_socket6(struct socket *sock)
{
}
#endif
3082
/* See RFC1001 section 14 on representation of Netbios names */
static void rfc1002mangle(char *target, const char *source, unsigned int length)
{
	unsigned int i, j;

	/*
	 * RFC1001 first-level encoding: each source byte is split into two
	 * nibbles, each emitted as a letter in 'A'..'P'.  The output is
	 * therefore twice as long as the input; @target must have room for
	 * 2 * @length bytes.  @source is read-only (callers may pass
	 * string literals such as "LINUX_CIFS_CLNT").
	 */
	for (i = 0, j = 0; i < length; i++) {
		target[j] = 'A' + (0x0F & (source[i] >> 4));
		target[j + 1] = 'A' + (0x0F & source[i]);
		j += 2;
	}
}
3096
3097 static int
bind_socket(struct TCP_Server_Info * server)3098 bind_socket(struct TCP_Server_Info *server)
3099 {
3100 int rc = 0;
3101
3102 if (server->srcaddr.ss_family != AF_UNSPEC) {
3103 /* Bind to the specified local IP address */
3104 struct socket *socket = server->ssocket;
3105
3106 rc = kernel_bind(socket,
3107 (struct sockaddr_unsized *) &server->srcaddr,
3108 sizeof(server->srcaddr));
3109 if (rc < 0) {
3110 struct sockaddr_in *saddr4;
3111 struct sockaddr_in6 *saddr6;
3112
3113 saddr4 = (struct sockaddr_in *)&server->srcaddr;
3114 saddr6 = (struct sockaddr_in6 *)&server->srcaddr;
3115 if (saddr6->sin6_family == AF_INET6)
3116 cifs_server_dbg(VFS, "Failed to bind to: %pI6c, error: %d\n",
3117 &saddr6->sin6_addr, rc);
3118 else
3119 cifs_server_dbg(VFS, "Failed to bind to: %pI4, error: %d\n",
3120 &saddr4->sin_addr.s_addr, rc);
3121 }
3122 }
3123 return rc;
3124 }
3125
3126 static int
smb_recv_kvec(struct TCP_Server_Info * server,struct msghdr * msg,size_t * recv)3127 smb_recv_kvec(struct TCP_Server_Info *server, struct msghdr *msg, size_t *recv)
3128 {
3129 int rc = 0;
3130 int retries = 0;
3131 int msg_flags = server->noblocksnd ? MSG_DONTWAIT : 0;
3132
3133 *recv = 0;
3134
3135 while (msg_data_left(msg)) {
3136 rc = sock_recvmsg(server->ssocket, msg, msg_flags);
3137 if (rc == -EAGAIN) {
3138 retries++;
3139 if (retries >= 14 ||
3140 (!server->noblocksnd && (retries > 2))) {
3141 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
3142 server->ssocket);
3143 return -EAGAIN;
3144 }
3145 msleep(1 << retries);
3146 continue;
3147 }
3148
3149 if (rc < 0)
3150 return rc;
3151
3152 if (rc == 0) {
3153 cifs_dbg(FYI, "Received no data (TCP RST)\n");
3154 return -ECONNABORTED;
3155 }
3156
3157 /* recv was at least partially successful */
3158 *recv += rc;
3159 retries = 0; /* in case we get ENOSPC on the next send */
3160 }
3161 return 0;
3162 }
3163
/*
 * Perform the RFC1001 (NetBIOS session service) SESSION_REQUEST
 * handshake on a freshly connected socket and parse the response.
 * Needed by old SMB1 servers on port 139 before NEGOTIATE can be sent.
 * Returns 0 on a positive session response, a negative errno otherwise.
 */
static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
	int rc = 0;
	/*
	 * some servers require RFC1001 sessinit before sending
	 * negprot - BB check reconnection in case where second
	 * sessinit is sent but no second negprot
	 */
	struct rfc1002_session_packet req = {};
	struct rfc1002_session_packet resp = {};
	struct msghdr msg = {};
	struct kvec iov = {};
	unsigned int len;
	size_t sent;
	size_t recv;

	/* first-level encode the called (server) NetBIOS name */
	req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);

	if (server->server_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.called_name,
			      server->server_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.called_name,
			      DEFAULT_CIFS_CALLED_NAME,
			      RFC1001_NAME_LEN_WITH_NULL);

	req.trailer.session_req.calling_len = sizeof(req.trailer.session_req.calling_name);

	/* calling name ends in null (byte 16) from old smb convention */
	if (server->workstation_RFC1001_name[0] != 0)
		rfc1002mangle(req.trailer.session_req.calling_name,
			      server->workstation_RFC1001_name,
			      RFC1001_NAME_LEN_WITH_NULL);
	else
		rfc1002mangle(req.trailer.session_req.calling_name,
			      "LINUX_CIFS_CLNT",
			      RFC1001_NAME_LEN_WITH_NULL);

	/*
	 * As per rfc1002, @len must be the number of bytes that follows the
	 * length field of a rfc1002 session request payload.
	 */
	len = sizeof(req.trailer.session_req);
	req.type = RFC1002_SESSION_REQUEST;
	req.flags = 0;
	req.length = cpu_to_be16(len);
	/* grow @len to cover the fixed header for the actual send below */
	len += offsetof(typeof(req), trailer.session_req);
	iov.iov_base = &req;
	iov.iov_len = len;
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);
	rc = smb_send_kvec(server, &msg, &sent);
	if (rc < 0 || len != sent)
		return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;

	/*
	 * RFC1001 layer in at least one server requires very short break before
	 * negprot presumably because not expecting negprot to follow so fast.
	 * For example DOS SMB servers cannot process negprot if it was received
	 * before the server sent response for SESSION_REQUEST packet. So, wait
	 * for the response, read it and parse it as it can contain useful error
	 * information (e.g. specified server name was incorrect). For example
	 * even the latest Windows Server 2022 SMB1 server over port 139 send
	 * error if its server name was in SESSION_REQUEST packet incorrect.
	 * Nowadays usage of port 139 is not common, so waiting for reply here
	 * does not slowing down mounting of common case (over port 445).
	 */
	/* read only the fixed header first; trailer depends on the type */
	len = offsetof(typeof(resp), trailer);
	iov.iov_base = &resp;
	iov.iov_len = len;
	iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
	rc = smb_recv_kvec(server, &msg, &recv);
	if (rc < 0 || recv != len)
		return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;

	switch (resp.type) {
	case RFC1002_POSITIVE_SESSION_RESPONSE:
		/* positive response carries no payload per RFC1002 */
		if (be16_to_cpu(resp.length) != 0) {
			cifs_dbg(VFS, "RFC 1002 positive session response but with invalid non-zero length %u\n",
				 be16_to_cpu(resp.length));
			return smb_EIO(smb_eio_trace_rx_pos_sess_resp);
		}
		cifs_dbg(FYI, "RFC 1002 positive session response");
		break;
	case RFC1002_NEGATIVE_SESSION_RESPONSE:
		/* Read RFC1002 response error code and convert it to errno in rc */
		len = sizeof(resp.trailer.neg_ses_resp_error_code);
		iov.iov_base = &resp.trailer.neg_ses_resp_error_code;
		iov.iov_len = len;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
		if (be16_to_cpu(resp.length) == len &&
		    smb_recv_kvec(server, &msg, &recv) == 0 &&
		    recv == len) {
			cifs_dbg(VFS, "RFC 1002 negative session response with error 0x%x\n",
				 resp.trailer.neg_ses_resp_error_code);
			switch (resp.trailer.neg_ses_resp_error_code) {
			case RFC1002_NOT_LISTENING_CALLED:
				/* server does not listen for specified server name */
				fallthrough;
			case RFC1002_NOT_PRESENT:
				/* server name is incorrect */
				rc = -ENOENT;
				cifs_dbg(VFS, "Server rejected NetBIOS servername %.15s\n",
					 server->server_RFC1001_name[0] ?
					 server->server_RFC1001_name :
					 DEFAULT_CIFS_CALLED_NAME);
				cifs_dbg(VFS, "Specify correct NetBIOS servername in source path or with -o servern= option\n");
				break;
			case RFC1002_NOT_LISTENING_CALLING:
				/* client name was not accepted by server */
				rc = -EACCES;
				cifs_dbg(VFS, "Server rejected NetBIOS clientname %.15s\n",
					 server->workstation_RFC1001_name[0] ?
					 server->workstation_RFC1001_name :
					 "LINUX_CIFS_CLNT");
				cifs_dbg(VFS, "Specify correct NetBIOS clientname with -o netbiosname= option\n");
				break;
			case RFC1002_INSUFFICIENT_RESOURCE:
				/* remote server resource error */
				/*
				 * NOTE(review): smb_EIO()'s return value is
				 * discarded here and -EREMOTEIO is returned
				 * instead -- looks intentional (trace only),
				 * but confirm.
				 */
				smb_EIO(smb_eio_trace_rx_insuff_res);
				rc = -EREMOTEIO;
				break;
			case RFC1002_UNSPECIFIED_ERROR:
			default:
				/* other/unknown error */
				rc = smb_EIO(smb_eio_trace_rx_unspec_error);
				break;
			}
		} else {
			cifs_dbg(VFS, "RFC 1002 negative session response\n");
			rc = smb_EIO(smb_eio_trace_rx_neg_sess_resp);
		}
		return rc;
	case RFC1002_RETARGET_SESSION_RESPONSE:
		cifs_dbg(VFS, "RFC 1002 retarget session response\n");
		if (be16_to_cpu(resp.length) == sizeof(resp.trailer.retarget_resp)) {
			len = sizeof(resp.trailer.retarget_resp);
			iov.iov_base = &resp.trailer.retarget_resp;
			iov.iov_len = len;
			iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
			if (smb_recv_kvec(server, &msg, &recv) == 0 && recv == len) {
				cifs_dbg(VFS, "Server wants to redirect connection\n");
				cifs_dbg(VFS, "Remount with options -o ip=%pI4,port=%u\n",
					 &resp.trailer.retarget_resp.retarget_ip_addr,
					 be16_to_cpu(resp.trailer.retarget_resp.port));
			}
		}
		cifs_dbg(VFS, "Closing connection\n");
		/* FIXME: Should we automatically redirect to new retarget_resp server? */
		return -EMULTIHOP;
	default:
		cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", resp.type);
		return smb_EIO1(smb_eio_trace_rx_unknown_resp, resp.type);
	}

	/* remember the handshake succeeded so reconnects repeat it */
	server->with_rfc1001 = true;
	return 0;
}
3323
/*
 * Create (if needed), configure and connect the TCP socket for @server
 * to server->dstaddr, optionally followed by the RFC1001 session setup.
 * On connect failure the socket is released and server->ssocket cleared
 * so that a later reconnect starts from scratch.
 */
static int
generic_ip_connect(struct TCP_Server_Info *server)
{
	struct sockaddr *saddr;
	struct socket *socket;
	int slen, sfamily;
	__be16 sport;
	int rc = 0;

	saddr = (struct sockaddr *) &server->dstaddr;

	/* derive port, sockaddr length and family from the destination */
	if (server->dstaddr.ss_family == AF_INET6) {
		struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&server->dstaddr;

		sport = ipv6->sin6_port;
		slen = sizeof(struct sockaddr_in6);
		sfamily = AF_INET6;
		cifs_dbg(FYI, "%s: connecting to [%pI6]:%d\n", __func__, &ipv6->sin6_addr,
			 ntohs(sport));
	} else {
		struct sockaddr_in *ipv4 = (struct sockaddr_in *)&server->dstaddr;

		sport = ipv4->sin_port;
		slen = sizeof(struct sockaddr_in);
		sfamily = AF_INET;
		cifs_dbg(FYI, "%s: connecting to %pI4:%d\n", __func__, &ipv4->sin_addr,
			 ntohs(sport));
	}

	/* reuse an existing socket (reconnect case) or create a new one */
	if (server->ssocket) {
		socket = server->ssocket;
	} else {
		struct net *net = cifs_net_ns(server);
		struct sock *sk;

		rc = sock_create_kern(net, sfamily, SOCK_STREAM,
				      IPPROTO_TCP, &server->ssocket);
		if (rc < 0) {
			cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
			return rc;
		}

		sk = server->ssocket->sk;
		sk_net_refcnt_upgrade(sk);

		/* BB other socket options to set KEEPALIVE, NODELAY? */
		cifs_dbg(FYI, "Socket created\n");
		socket = server->ssocket;
		socket->sk->sk_allocation = GFP_NOFS;
		socket->sk->sk_use_task_frag = false;
		if (sfamily == AF_INET6)
			cifs_reclassify_socket6(socket);
		else
			cifs_reclassify_socket4(socket);
	}

	rc = bind_socket(server);
	if (rc < 0)
		return rc;

	/*
	 * Eventually check for other socket options to change from
	 * the default. sock_setsockopt not used because it expects
	 * user space buffer
	 */
	socket->sk->sk_rcvtimeo = 7 * HZ;
	socket->sk->sk_sndtimeo = 5 * HZ;

	/* make the bufsizes depend on wsize/rsize and max requests */
	if (server->noautotune) {
		if (socket->sk->sk_sndbuf < (200 * 1024))
			socket->sk->sk_sndbuf = 200 * 1024;
		if (socket->sk->sk_rcvbuf < (140 * 1024))
			socket->sk->sk_rcvbuf = 140 * 1024;
	}

	if (server->tcp_nodelay)
		tcp_sock_set_nodelay(socket->sk);

	cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
		 socket->sk->sk_sndbuf,
		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);

	rc = kernel_connect(socket, (struct sockaddr_unsized *)saddr, slen,
			    server->noblockcnt ? O_NONBLOCK : 0);
	/*
	 * When mounting SMB root file systems, we do not want to block in
	 * connect. Otherwise bail out and then let cifs_reconnect() perform
	 * reconnect failover - if possible.
	 */
	if (server->noblockcnt && rc == -EINPROGRESS)
		rc = 0;
	if (rc < 0) {
		cifs_dbg(FYI, "Error %d connecting to server\n", rc);
		trace_smb3_connect_err(server->hostname, server->conn_id, &server->dstaddr, rc);
		sock_release(socket);
		server->ssocket = NULL;
		return rc;
	}
	trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);

	/*
	 * Establish RFC1001 NetBIOS session when it was explicitly requested
	 * by mount option -o nbsessinit, or when connecting to default RFC1001
	 * server port (139) and it was not explicitly disabled by mount option
	 * -o nonbsessinit.
	 */
	if (server->with_rfc1001 ||
	    server->rfc1001_sessinit == 1 ||
	    (server->rfc1001_sessinit == -1 && sport == htons(RFC1001_PORT)))
		rc = ip_rfc1001_connect(server);

	return rc;
}
3438
3439 static int
ip_connect(struct TCP_Server_Info * server)3440 ip_connect(struct TCP_Server_Info *server)
3441 {
3442 __be16 *sport;
3443 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
3444 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
3445
3446 if (server->dstaddr.ss_family == AF_INET6)
3447 sport = &addr6->sin6_port;
3448 else
3449 sport = &addr->sin_port;
3450
3451 if (*sport == 0) {
3452 int rc;
3453
3454 /* try with 445 port at first */
3455 *sport = htons(CIFS_PORT);
3456
3457 rc = generic_ip_connect(server);
3458 if (rc >= 0)
3459 return rc;
3460
3461 /* if it failed, try with 139 port */
3462 *sport = htons(RFC1001_PORT);
3463 }
3464
3465 return generic_ip_connect(server);
3466 }
3467
/*
 * Initialize the per-superblock cifs_sb_info from the parsed mount
 * context: tlink tree, NLS table, caching mode and mount flag bits.
 * Returns 0 on success, -ELIBACC if the requested iocharset cannot be
 * loaded, or -ENOMEM if duplicating the prefix path fails.
 */
int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
{
	struct smb3_fs_context *ctx = cifs_sb->ctx;
	unsigned int sbflags;
	int rc = 0;

	INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
	INIT_LIST_HEAD(&cifs_sb->tcon_sb_link);

	spin_lock_init(&cifs_sb->tlink_tree_lock);
	cifs_sb->tlink_tree = RB_ROOT;

	cifs_dbg(FYI, "file mode: %04ho  dir mode: %04ho\n",
		 ctx->file_mode, ctx->dir_mode);

	/* this is needed for ASCII cp to Unicode converts */
	if (ctx->iocharset == NULL) {
		/* load_nls_default cannot return null */
		cifs_sb->local_nls = load_nls_default();
	} else {
		cifs_sb->local_nls = load_nls(ctx->iocharset);
		if (cifs_sb->local_nls == NULL) {
			cifs_dbg(VFS, "CIFS mount error: iocharset %s not found\n",
				 ctx->iocharset);
			return -ELIBACC;
		}
	}
	/* share the table with the fs context for later conversions */
	ctx->local_nls = cifs_sb->local_nls;

	sbflags = smb3_update_mnt_flags(cifs_sb);

	if (ctx->direct_io)
		cifs_dbg(FYI, "mounting share using direct i/o\n");
	if (ctx->cache_ro) {
		cifs_dbg(VFS, "mounting share with read only caching. Ensure that the share will not be modified while in use.\n");
		sbflags |= CIFS_MOUNT_RO_CACHE;
	} else if (ctx->cache_rw) {
		cifs_dbg(VFS, "mounting share in single client RW caching mode. Ensure that no other systems will be accessing the share.\n");
		sbflags |= CIFS_MOUNT_RO_CACHE | CIFS_MOUNT_RW_CACHE;
	}

	if ((ctx->cifs_acl) && (ctx->dynperm))
		cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n");

	if (ctx->prepath) {
		cifs_sb->prepath = kstrdup(ctx->prepath, GFP_KERNEL);
		if (cifs_sb->prepath == NULL)
			rc = -ENOMEM;
		else
			sbflags |= CIFS_MOUNT_USE_PREFIX_PATH;
	}

	/* publish all accumulated flag bits in one atomic store */
	atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
	return rc;
}
3523
3524 /* Release all succeed connections */
cifs_mount_put_conns(struct cifs_mount_ctx * mnt_ctx)3525 void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
3526 {
3527 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3528 int rc = 0;
3529
3530 if (mnt_ctx->tcon)
3531 cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx);
3532 else if (mnt_ctx->ses)
3533 cifs_put_smb_ses(mnt_ctx->ses);
3534 else if (mnt_ctx->server)
3535 cifs_put_tcp_session(mnt_ctx->server, 0);
3536 mnt_ctx->ses = NULL;
3537 mnt_ctx->tcon = NULL;
3538 mnt_ctx->server = NULL;
3539 atomic_andnot(CIFS_MOUNT_POSIX_PATHS, &cifs_sb->mnt_cifs_flags);
3540 free_xid(mnt_ctx->xid);
3541 }
3542
/*
 * Establish (or reuse) the TCP connection and SMB session for a mount.
 * On success mnt_ctx->server and mnt_ctx->ses hold references; the xid
 * is always stored in mnt_ctx->xid and freed later via
 * cifs_mount_put_conns().  Returns 0 or a negative errno.
 */
int cifs_mount_get_session(struct cifs_mount_ctx *mnt_ctx)
{
	struct TCP_Server_Info *server = NULL;
	struct smb3_fs_context *ctx;
	struct cifs_ses *ses = NULL;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (WARN_ON_ONCE(!mnt_ctx || !mnt_ctx->fs_ctx)) {
		rc = -EINVAL;
		goto out;
	}
	ctx = mnt_ctx->fs_ctx;

	/* get a reference to a tcp session */
	server = cifs_get_tcp_session(ctx, NULL);
	if (IS_ERR(server)) {
		rc = PTR_ERR(server);
		server = NULL;
		goto out;
	}

	/* get a reference to a SMB session */
	ses = cifs_get_smb_ses(server, ctx);
	if (IS_ERR(ses)) {
		rc = PTR_ERR(ses);
		ses = NULL;
		goto out;
	}

	/*
	 * Mount asked for persistent handles but the server cannot provide
	 * them; fail here, the caller unwinds via cifs_mount_put_conns().
	 */
	if ((ctx->persistent == true) && (!(ses->server->capabilities &
					    SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
		cifs_server_dbg(VFS, "persistent handles not supported by server\n");
		rc = -EOPNOTSUPP;
	}

out:
	/* store results even on error so the caller can clean up */
	mnt_ctx->xid = xid;
	mnt_ctx->server = server;
	mnt_ctx->ses = ses;
	mnt_ctx->tcon = NULL;

	return rc;
}
3589
/*
 * Acquire (or find) the tcon for this mount and apply share-wide
 * settings to the superblock: POSIX path handling, legacy Unix
 * capabilities, informational FS queries, negotiated I/O sizes and
 * fscache.  On success mnt_ctx->tcon holds a reference; on error it is
 * NULL.  Returns 0 or a negative errno.
 */
int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
{
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon = NULL;
	struct cifs_sb_info *cifs_sb;
	struct smb3_fs_context *ctx;
	unsigned int sbflags;
	int rc = 0;

	if (WARN_ON_ONCE(!mnt_ctx))
		return -EINVAL;
	if (WARN_ON_ONCE(!mnt_ctx->server || !mnt_ctx->ses ||
			 !mnt_ctx->fs_ctx || !mnt_ctx->cifs_sb)) {
		mnt_ctx->tcon = NULL;
		return -EINVAL;
	}
	server = mnt_ctx->server;
	ctx = mnt_ctx->fs_ctx;
	cifs_sb = mnt_ctx->cifs_sb;
	sbflags = cifs_sb_flags(cifs_sb);

	/* search for existing tcon to this server share */
	tcon = cifs_get_tcon(mnt_ctx->ses, ctx);
	if (IS_ERR(tcon)) {
		rc = PTR_ERR(tcon);
		tcon = NULL;
		goto out;
	}

	/*
	 * if new SMB3.11 POSIX extensions are supported, do not change anything in the
	 * path (i.e., do not remap / and \ and do not map any special characters)
	 */
	if (tcon->posix_extensions) {
		sbflags |= CIFS_MOUNT_POSIX_PATHS;
		sbflags &= ~(CIFS_MOUNT_MAP_SFM_CHR |
			     CIFS_MOUNT_MAP_SPECIAL_CHR);
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	/* tell server which Unix caps we support */
	if (cap_unix(tcon->ses)) {
		/*
		 * reset of caps checks mount to see if unix extensions disabled
		 * for just this mount.
		 */
		reset_cifs_unix_caps(mnt_ctx->xid, tcon, cifs_sb, ctx);
		spin_lock(&tcon->ses->server->srv_lock);
		/*
		 * refuse if a reconnect would require mandatory transport
		 * encryption, which is not supported on this path
		 */
		if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
		    (le64_to_cpu(tcon->fsUnixInfo.Capability) &
		     CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
			spin_unlock(&tcon->ses->server->srv_lock);
			rc = -EACCES;
			goto out;
		}
		spin_unlock(&tcon->ses->server->srv_lock);
	} else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		tcon->unix_ext = 0; /* server does not support them */

	/* do not care if a following call succeed - informational */
	if (!tcon->pipe && server->ops->qfs_tcon) {
		server->ops->qfs_tcon(mnt_ctx->xid, tcon, cifs_sb);
		if (sbflags & CIFS_MOUNT_RO_CACHE) {
			if (tcon->fsDevInfo.DeviceCharacteristics &
			    cpu_to_le32(FILE_READ_ONLY_DEVICE))
				cifs_dbg(VFS, "mounted to read only share\n");
			else if (!(sbflags & CIFS_MOUNT_RW_CACHE))
				cifs_dbg(VFS, "read only mount of RW share\n");
			/* no need to log a RW mount of a typical RW share */
		}
	}

	cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
	/*
	 * The cookie is initialized from volume info returned above.
	 * Inside cifs_fscache_get_super_cookie it checks
	 * that we do not get super cookie twice.
	 */
	if (sbflags & CIFS_MOUNT_FSCACHE)
		cifs_fscache_get_super_cookie(tcon);

out:
	mnt_ctx->tcon = tcon;
	/* publish flag changes made above (e.g. POSIX paths) */
	atomic_set(&cifs_sb->mnt_cifs_flags, sbflags);
	return rc;
}
3677
/*
 * Create the master tcon_link for a new superblock, insert it into the
 * sb's tlink tree, hook the sb onto the tcon's superblock list, and arm
 * the periodic tlink pruning work.  Returns 0 or -ENOMEM.
 */
static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
			     struct cifs_tcon *tcon)
{
	struct tcon_link *tlink;

	/* hang the tcon off of the superblock */
	tlink = kzalloc_obj(*tlink);
	if (tlink == NULL)
		return -ENOMEM;

	tlink->tl_uid = ses->linux_uid;
	tlink->tl_tcon = tcon;
	tlink->tl_time = jiffies;
	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);

	cifs_sb->master_tlink = tlink;
	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* make the superblock reachable from the tcon (see cifs_umount) */
	spin_lock(&tcon->sb_list_lock);
	list_add(&cifs_sb->tcon_sb_link, &tcon->cifs_sb_list);
	spin_unlock(&tcon->sb_list_lock);

	/* start expiring idle non-master tlinks in the background */
	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
			   TLINK_IDLE_EXPIRE);
	return 0;
}
3707
/*
 * Verify that every ancestor directory of @full_path (starting from the
 * share root) can be queried via ->is_path_accessible().
 *
 * @added_treename: nonzero if the tree name was prepended to
 * @full_path, in which case the first component is skipped.
 *
 * Note: @full_path is temporarily modified in place (a NUL is written
 * at each component boundary and restored afterwards).
 * Returns 0 if all components are accessible, else the first error.
 */
static int
cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
					unsigned int xid,
					struct cifs_tcon *tcon,
					struct cifs_sb_info *cifs_sb,
					char *full_path,
					int added_treename)
{
	int rc;
	char *s;
	char sep, tmp;
	int skip = added_treename ? 1 : 0;

	sep = CIFS_DIR_SEP(cifs_sb);
	s = full_path;

	/* probe the share root first */
	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
	while (rc == 0) {
		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		/* next separator */
		while (*s && *s != sep)
			s++;
		/*
		 * if the treename is added, we then have to skip the first
		 * part within the separators
		 */
		if (skip) {
			skip = 0;
			continue;
		}
		/*
		 * temporarily null-terminate the path at the end of
		 * the current component
		 */
		tmp = *s;
		*s = 0;
		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
						     full_path);
		*s = tmp;
	}
	return rc;
}
3754
3755 /*
3756 * Check if path is remote (i.e. a DFS share).
3757 *
3758 * Return -EREMOTE if it is, otherwise 0 or -errno.
3759 */
cifs_is_path_remote(struct cifs_mount_ctx * mnt_ctx)3760 int cifs_is_path_remote(struct cifs_mount_ctx *mnt_ctx)
3761 {
3762 int rc;
3763 struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
3764 struct TCP_Server_Info *server = mnt_ctx->server;
3765 unsigned int xid = mnt_ctx->xid;
3766 struct cifs_tcon *tcon = mnt_ctx->tcon;
3767 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
3768 char *full_path;
3769
3770 if (!server->ops->is_path_accessible)
3771 return -EOPNOTSUPP;
3772
3773 /*
3774 * cifs_build_path_to_root works only when we have a valid tcon
3775 */
3776 full_path = cifs_build_path_to_root(ctx, cifs_sb, tcon,
3777 tcon->Flags & SMB_SHARE_IS_IN_DFS);
3778 if (full_path == NULL)
3779 return -ENOMEM;
3780
3781 cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
3782
3783 rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
3784 full_path);
3785 if (rc != 0 && rc != -EREMOTE)
3786 goto out;
3787
3788 if (rc != -EREMOTE) {
3789 rc = cifs_are_all_path_components_accessible(server, xid, tcon,
3790 cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
3791 if (rc != 0) {
3792 cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
3793 atomic_or(CIFS_MOUNT_USE_PREFIX_PATH,
3794 &cifs_sb->mnt_cifs_flags);
3795 rc = 0;
3796 }
3797 }
3798
3799 out:
3800 kfree(full_path);
3801 return rc;
3802 }
3803
3804 static struct mchan_mount *
mchan_mount_alloc(struct cifs_ses * ses)3805 mchan_mount_alloc(struct cifs_ses *ses)
3806 {
3807 struct mchan_mount *mchan_mount;
3808
3809 mchan_mount = kzalloc_obj(*mchan_mount);
3810 if (!mchan_mount)
3811 return ERR_PTR(-ENOMEM);
3812
3813 INIT_WORK(&mchan_mount->work, mchan_mount_work_fn);
3814
3815 spin_lock(&cifs_tcp_ses_lock);
3816 cifs_smb_ses_inc_refcount(ses);
3817 spin_unlock(&cifs_tcp_ses_lock);
3818 mchan_mount->ses = ses;
3819
3820 return mchan_mount;
3821 }
3822
/* Drop the session reference taken in mchan_mount_alloc() and free. */
static void
mchan_mount_free(struct mchan_mount *mchan_mount)
{
	cifs_put_smb_ses(mchan_mount->ses);
	kfree(mchan_mount);
}
3829
/*
 * Deferred work: bring up the extra channels for a multichannel mount
 * outside of the mount path itself, then release the context (and the
 * session reference it holds).
 */
static void
mchan_mount_work_fn(struct work_struct *work)
{
	struct mchan_mount *mchan_mount = container_of(work, struct mchan_mount, work);

	smb3_update_ses_channels(mchan_mount->ses,
				 mchan_mount->ses->server,
				 false /* from_reconnect */,
				 false /* disable_mchan */);

	mchan_mount_free(mchan_mount);
}
3842
#ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Mount entry point (DFS-enabled build): resolve the share through the
 * DFS machinery, then attach the resulting tcon to the superblock.
 * For multichannel mounts, extra channels are established asynchronously
 * via mchan_mount_work_fn() after the mount has succeeded.
 */
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
	struct mchan_mount *mchan_mount = NULL;
	int rc;

	rc = dfs_mount_share(&mnt_ctx);
	if (rc)
		goto error;

	/* allocate up front so a later failure is still fully unwindable */
	if (ctx->multichannel) {
		mchan_mount = mchan_mount_alloc(mnt_ctx.ses);
		if (IS_ERR(mchan_mount)) {
			rc = PTR_ERR(mchan_mount);
			goto error;
		}
	}

	if (!ctx->dfs_conn)
		goto out;

	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	atomic_or(CIFS_MOUNT_USE_PREFIX_PATH, &cifs_sb->mnt_cifs_flags);
	kfree(cifs_sb->prepath);
	/* take ownership of the prefix path from the fs context */
	cifs_sb->prepath = ctx->prepath;
	ctx->prepath = NULL;

out:
	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
	if (rc)
		goto error;

	/* mount succeeded: kick off channel setup in the background */
	if (ctx->multichannel)
		queue_work(cifsiod_wq, &mchan_mount->work);

	free_xid(mnt_ctx.xid);
	return rc;

error:
	if (ctx->multichannel && !IS_ERR_OR_NULL(mchan_mount))
		mchan_mount_free(mchan_mount);
	cifs_mount_put_conns(&mnt_ctx);
	return rc;
}
#else
/*
 * Mount entry point (non-DFS build): connect server, session and tcon
 * directly, verify the full path is reachable (DFS referrals are not
 * supported here, so -EREMOTE becomes -EOPNOTSUPP), then attach the
 * tcon to the superblock.  Multichannel channels are established
 * asynchronously via mchan_mount_work_fn() after success.
 */
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
	int rc = 0;
	struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
	struct mchan_mount *mchan_mount = NULL;

	rc = cifs_mount_get_session(&mnt_ctx);
	if (rc)
		goto error;

	rc = cifs_mount_get_tcon(&mnt_ctx);
	if (!rc) {
		/*
		 * Prevent superblock from being created with any missing
		 * connections.
		 */
		if (WARN_ON(!mnt_ctx.server))
			rc = -EHOSTDOWN;
		else if (WARN_ON(!mnt_ctx.ses))
			rc = -EACCES;
		else if (WARN_ON(!mnt_ctx.tcon))
			rc = -ENOENT;
	}
	if (rc)
		goto error;

	rc = cifs_is_path_remote(&mnt_ctx);
	/* a DFS referral cannot be chased without CONFIG_CIFS_DFS_UPCALL */
	if (rc == -EREMOTE)
		rc = -EOPNOTSUPP;
	if (rc)
		goto error;

	/* allocate before tlink setup so a failure is still unwindable */
	if (ctx->multichannel) {
		mchan_mount = mchan_mount_alloc(mnt_ctx.ses);
		if (IS_ERR(mchan_mount)) {
			rc = PTR_ERR(mchan_mount);
			goto error;
		}
	}

	rc = mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
	if (rc)
		goto error;

	/* mount succeeded: kick off channel setup in the background */
	if (ctx->multichannel)
		queue_work(cifsiod_wq, &mchan_mount->work);

	free_xid(mnt_ctx.xid);
	return rc;

error:
	if (ctx->multichannel && !IS_ERR_OR_NULL(mchan_mount))
		mchan_mount_free(mchan_mount);
	cifs_mount_put_conns(&mnt_ctx);
	return rc;
}
#endif
3954
delayed_free(struct rcu_head * p)3955 static void delayed_free(struct rcu_head *p)
3956 {
3957 struct cifs_sb_info *cifs_sb = container_of(p, struct cifs_sb_info, rcu);
3958
3959 unload_nls(cifs_sb->local_nls);
3960 smb3_cleanup_fs_context(cifs_sb->ctx);
3961 kfree(cifs_sb);
3962 }
3963
/*
 * Tear down a cifs superblock: stop tlink pruning, detach from the
 * master tcon's superblock list, release every tcon_link in the tree,
 * and defer freeing of the cifs_sb itself to an RCU grace period
 * (see delayed_free()).
 */
void
cifs_umount(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon = NULL;

	/* no more background pruning while the tree is dismantled */
	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);

	if (cifs_sb->master_tlink) {
		tcon = cifs_sb->master_tlink->tl_tcon;
		if (tcon) {
			spin_lock(&tcon->sb_list_lock);
			list_del_init(&cifs_sb->tcon_sb_link);
			spin_unlock(&tcon->sb_list_lock);
		}
	}

	spin_lock(&cifs_sb->tlink_tree_lock);
	while ((node = rb_first(root))) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(node, root);

		/*
		 * The tree lock is dropped around the put; presumably
		 * cifs_put_tlink() can block (NOTE(review): confirm).
		 */
		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	kfree(cifs_sb->prepath);
	call_rcu(&cifs_sb->rcu, delayed_free);
}
3999
/*
 * Send the protocol NEGOTIATE for @server if one is still needed,
 * driving the srv_lock-protected tcpStatus state machine:
 * CifsNew/CifsNeedNegotiate -> CifsInNegotiate -> CifsGood on success,
 * back to CifsNeedNegotiate on failure.  A single -EAGAIN from the
 * negotiate op is retried once.  Returns 0, -ENOSYS if the dialect has
 * no negotiate ops, or a negative errno (-EHOSTDOWN if the connection
 * state changed underneath us).
 */
int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
			struct TCP_Server_Info *server)
{
	bool in_retry = false;
	int rc = 0;

	if (!server->ops->need_neg || !server->ops->negotiate)
		return -ENOSYS;

retry:
	/* only send once per connect */
	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsGood &&
	    server->tcpStatus != CifsNew &&
	    server->tcpStatus != CifsNeedNegotiate) {
		spin_unlock(&server->srv_lock);
		return -EHOSTDOWN;
	}

	/* an already-good connection with a valid negotiation: nothing to do */
	if (!server->ops->need_neg(server) &&
	    server->tcpStatus == CifsGood) {
		spin_unlock(&server->srv_lock);
		return 0;
	}

	server->tcpStatus = CifsInNegotiate;
	server->neg_start = jiffies;
	spin_unlock(&server->srv_lock);

	rc = server->ops->negotiate(xid, ses, server);
	if (rc == -EAGAIN) {
		/* Allow one retry attempt */
		if (!in_retry) {
			in_retry = true;
			goto retry;
		}
		rc = -EHOSTDOWN;
	}
	if (rc == 0) {
		spin_lock(&server->srv_lock);
		/* only promote to Good if nobody changed the state meanwhile */
		if (server->tcpStatus == CifsInNegotiate)
			server->tcpStatus = CifsGood;
		else
			rc = -EHOSTDOWN;
		spin_unlock(&server->srv_lock);
	} else {
		spin_lock(&server->srv_lock);
		if (server->tcpStatus == CifsInNegotiate)
			server->tcpStatus = CifsNeedNegotiate;
		spin_unlock(&server->srv_lock);
	}

	return rc;
}
4055
4056 int
cifs_setup_session(const unsigned int xid,struct cifs_ses * ses,struct TCP_Server_Info * server,struct nls_table * nls_info)4057 cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
4058 struct TCP_Server_Info *server,
4059 struct nls_table *nls_info)
4060 {
4061 int rc = 0;
4062 struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4063 struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
4064 struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
4065 bool is_binding = false;
4066 bool new_ses;
4067
4068 spin_lock(&ses->ses_lock);
4069 new_ses = ses->ses_status == SES_NEW;
4070 cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
4071 __func__, ses->chans_need_reconnect);
4072
4073 if (ses->ses_status != SES_GOOD &&
4074 ses->ses_status != SES_NEW &&
4075 ses->ses_status != SES_NEED_RECON) {
4076 spin_unlock(&ses->ses_lock);
4077 return -EHOSTDOWN;
4078 }
4079
4080 /* only send once per connect */
4081 spin_lock(&ses->chan_lock);
4082 if (CIFS_ALL_CHANS_GOOD(ses)) {
4083 if (ses->ses_status == SES_NEED_RECON)
4084 ses->ses_status = SES_GOOD;
4085 spin_unlock(&ses->chan_lock);
4086 spin_unlock(&ses->ses_lock);
4087 return 0;
4088 }
4089
4090 cifs_chan_set_in_reconnect(ses, server);
4091 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
4092 spin_unlock(&ses->chan_lock);
4093
4094 if (!is_binding) {
4095 ses->ses_status = SES_IN_SETUP;
4096
4097 /* force iface_list refresh */
4098 spin_lock(&ses->iface_lock);
4099 ses->iface_last_update = 0;
4100 spin_unlock(&ses->iface_lock);
4101 }
4102 spin_unlock(&ses->ses_lock);
4103
4104 /* update ses ip_addr only for primary chan */
4105 if (server == pserver) {
4106 if (server->dstaddr.ss_family == AF_INET6)
4107 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr);
4108 else
4109 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI4", &addr->sin_addr);
4110 }
4111
4112 if (!is_binding) {
4113 ses->capabilities = server->capabilities;
4114 if (!linuxExtEnabled)
4115 ses->capabilities &= (~server->vals->cap_unix);
4116
4117 /*
4118 * Check if the server supports specified encoding mode.
4119 * Zero value in vals->cap_unicode indidcates that chosen
4120 * protocol dialect does not support non-UNICODE mode.
4121 */
4122 if (ses->unicode == 1 && server->vals->cap_unicode != 0 &&
4123 !(server->capabilities & server->vals->cap_unicode)) {
4124 cifs_dbg(VFS, "Server does not support mounting in UNICODE mode\n");
4125 rc = -EOPNOTSUPP;
4126 } else if (ses->unicode == 0 && server->vals->cap_unicode == 0) {
4127 cifs_dbg(VFS, "Server does not support mounting in non-UNICODE mode\n");
4128 rc = -EOPNOTSUPP;
4129 } else if (ses->unicode == 0) {
4130 /*
4131 * When UNICODE mode was explicitly disabled then
4132 * do not announce client UNICODE capability.
4133 */
4134 ses->capabilities &= (~server->vals->cap_unicode);
4135 }
4136
4137 if (ses->auth_key.response) {
4138 cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
4139 ses->auth_key.response);
4140 kfree_sensitive(ses->auth_key.response);
4141 ses->auth_key.response = NULL;
4142 ses->auth_key.len = 0;
4143 }
4144 }
4145
4146 cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
4147 server->sec_mode, server->capabilities, server->timeAdj);
4148
4149 if (!rc) {
4150 if (server->ops->sess_setup)
4151 rc = server->ops->sess_setup(xid, ses, server, nls_info);
4152 else
4153 rc = -ENOSYS;
4154 }
4155
4156 if (rc) {
4157 if (new_ses) {
4158 cifs_server_dbg(VFS, "failed to create a new SMB session with %s: %d\n",
4159 get_security_type_str(ses->sectype), rc);
4160 }
4161 spin_lock(&ses->ses_lock);
4162 if (ses->ses_status == SES_IN_SETUP)
4163 ses->ses_status = SES_NEED_RECON;
4164 spin_lock(&ses->chan_lock);
4165 cifs_chan_clear_in_reconnect(ses, server);
4166 spin_unlock(&ses->chan_lock);
4167 spin_unlock(&ses->ses_lock);
4168 } else {
4169 spin_lock(&ses->ses_lock);
4170 if (ses->ses_status == SES_IN_SETUP)
4171 ses->ses_status = SES_GOOD;
4172 spin_lock(&ses->chan_lock);
4173 cifs_chan_clear_in_reconnect(ses, server);
4174 cifs_chan_clear_need_reconnect(ses, server);
4175 spin_unlock(&ses->chan_lock);
4176 spin_unlock(&ses->ses_lock);
4177 }
4178
4179 return rc;
4180 }
4181
4182 static int
cifs_set_vol_auth(struct smb3_fs_context * ctx,struct cifs_ses * ses)4183 cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
4184 {
4185 ctx->sectype = ses->sectype;
4186
4187 /* krb5 is special, since we don't need username or pw */
4188 if (ctx->sectype == Kerberos)
4189 return 0;
4190
4191 return cifs_set_cifscreds(ctx, ses);
4192 }
4193
/*
 * For a multiuser mount, construct a new tcon authenticated as @fsuid,
 * sharing the master tcon's TCP connection and tree name but using the
 * calling user's credentials.  Returns the new tcon or an ERR_PTR().
 */
static struct cifs_tcon *
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
	int rc;
	struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
	struct cifs_ses *ses;
	struct cifs_tcon *tcon = NULL;
	struct smb3_fs_context *ctx;
	char *origin_fullpath = NULL;

	/* build a throwaway fs context cloned from the master tcon ... */
	ctx = kzalloc_obj(*ctx);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ctx->local_nls = cifs_sb->local_nls;
	ctx->linux_uid = fsuid;		/* ... but owned by the calling user */
	ctx->cred_uid = fsuid;
	ctx->UNC = master_tcon->tree_name;
	ctx->retry = master_tcon->retry;
	ctx->nocase = master_tcon->nocase;
	ctx->nohandlecache = master_tcon->nohandlecache;
	ctx->local_lease = master_tcon->local_lease;
	ctx->no_lease = master_tcon->no_lease;
	ctx->resilient = master_tcon->use_resilient;
	ctx->persistent = master_tcon->use_persistent;
	ctx->handle_timeout = master_tcon->handle_timeout;
	ctx->no_linux_ext = !master_tcon->unix_ext;
	ctx->linux_ext = master_tcon->posix_extensions;
	ctx->sectype = master_tcon->ses->sectype;
	ctx->sign = master_tcon->ses->sign;
	ctx->seal = master_tcon->seal;
	ctx->witness = master_tcon->use_witness;
	ctx->unicode = master_tcon->ses->unicode;
	ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;

	rc = cifs_set_vol_auth(ctx, master_tcon->ses);
	if (rc) {
		tcon = ERR_PTR(rc);
		goto out;
	}

	/* get a reference for the same TCP session */
	spin_lock(&cifs_tcp_ses_lock);
	++master_tcon->ses->server->srv_count;
	spin_unlock(&cifs_tcp_ses_lock);

	ses = cifs_get_smb_ses(master_tcon->ses->server, ctx);
	if (IS_ERR(ses)) {
		tcon = ERR_CAST(ses);
		/* drop the server reference we took just above */
		cifs_put_tcp_session(master_tcon->ses->server, 0);
		goto out;
	}

#ifdef CONFIG_CIFS_DFS_UPCALL
	/* if the master tcon came from a DFS referral, carry the path over */
	spin_lock(&master_tcon->tc_lock);
	if (master_tcon->origin_fullpath) {
		spin_unlock(&master_tcon->tc_lock);
		origin_fullpath = dfs_get_path(cifs_sb, cifs_sb->ctx->source);
		if (IS_ERR(origin_fullpath)) {
			tcon = ERR_CAST(origin_fullpath);
			origin_fullpath = NULL;
			cifs_put_smb_ses(ses);
			goto out;
		}
	} else {
		spin_unlock(&master_tcon->tc_lock);
	}
#endif

	tcon = cifs_get_tcon(ses, ctx);
	if (IS_ERR(tcon)) {
		cifs_put_smb_ses(ses);
		goto out;
	}

#ifdef CONFIG_CIFS_DFS_UPCALL
	if (origin_fullpath) {
		/* hand ownership of the DFS path over to the new tcon */
		spin_lock(&tcon->tc_lock);
		tcon->origin_fullpath = origin_fullpath;
		spin_unlock(&tcon->tc_lock);
		origin_fullpath = NULL;
		queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
				   dfs_cache_get_ttl() * HZ);
	}
#endif

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(ses))
		reset_cifs_unix_caps(0, tcon, NULL, ctx);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

out:
	/* the ctx owns its own credential copies — presumably duplicated by
	 * cifs_set_cifscreds(); freed here on both success and failure */
	kfree(ctx->username);
	kfree(ctx->domainname);
	kfree_sensitive(ctx->password);
	kfree(origin_fullpath);
	kfree(ctx);

	return tcon;
}
4294
/* Return the tcon behind this superblock's master (mount-owner) tlink. */
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{
	struct tcon_link *master = cifs_sb_master_tlink(cifs_sb);

	return tlink_tcon(master);
}
4300
4301 /* find and return a tlink with given uid */
4302 static struct tcon_link *
tlink_rb_search(struct rb_root * root,kuid_t uid)4303 tlink_rb_search(struct rb_root *root, kuid_t uid)
4304 {
4305 struct rb_node *node = root->rb_node;
4306 struct tcon_link *tlink;
4307
4308 while (node) {
4309 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
4310
4311 if (uid_gt(tlink->tl_uid, uid))
4312 node = node->rb_left;
4313 else if (uid_lt(tlink->tl_uid, uid))
4314 node = node->rb_right;
4315 else
4316 return tlink;
4317 }
4318 return NULL;
4319 }
4320
4321 /* insert a tcon_link into the tree */
4322 static void
tlink_rb_insert(struct rb_root * root,struct tcon_link * new_tlink)4323 tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
4324 {
4325 struct rb_node **new = &(root->rb_node), *parent = NULL;
4326 struct tcon_link *tlink;
4327
4328 while (*new) {
4329 tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
4330 parent = *new;
4331
4332 if (uid_gt(tlink->tl_uid, new_tlink->tl_uid))
4333 new = &((*new)->rb_left);
4334 else
4335 new = &((*new)->rb_right);
4336 }
4337
4338 rb_link_node(&new_tlink->tl_rbnode, parent, new);
4339 rb_insert_color(&new_tlink->tl_rbnode, root);
4340 }
4341
4342 /*
4343 * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
4344 * current task.
4345 *
4346 * If the superblock doesn't refer to a multiuser mount, then just return
4347 * the master tcon for the mount.
4348 *
4349 * First, search the rbtree for an existing tcon for this fsuid. If one
4350 * exists, then check to see if it's pending construction. If it is then wait
4351 * for construction to complete. Once it's no longer pending, check to see if
4352 * it failed and either return an error or retry construction, depending on
4353 * the timeout.
4354 *
4355 * If one doesn't exist then insert a new tcon_link struct into the tree and
4356 * try to construct a new one.
4357 *
4358 * REMEMBER to call cifs_put_tlink() after successful calls to cifs_sb_tlink,
4359 * to avoid refcount issues
4360 */
struct tcon_link *
cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
{
	struct tcon_link *tlink, *newtlink;
	kuid_t fsuid = current_fsuid();
	int err;

	/* non-multiuser mounts always use the master tcon */
	if (!(cifs_sb_flags(cifs_sb) & CIFS_MOUNT_MULTIUSER))
		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));

	spin_lock(&cifs_sb->tlink_tree_lock);
	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
	if (tlink)
		cifs_get_tlink(tlink);
	spin_unlock(&cifs_sb->tlink_tree_lock);

	if (tlink == NULL) {
		/* start pending (EACCES until construction succeeds) */
		newtlink = kzalloc_obj(*tlink);
		if (newtlink == NULL)
			return ERR_PTR(-ENOMEM);
		newtlink->tl_uid = fsuid;
		newtlink->tl_tcon = ERR_PTR(-EACCES);
		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
		cifs_get_tlink(newtlink);

		spin_lock(&cifs_sb->tlink_tree_lock);
		/* was one inserted after previous search? */
		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
		if (tlink) {
			/* lost the race; use the winner's tlink instead */
			cifs_get_tlink(tlink);
			spin_unlock(&cifs_sb->tlink_tree_lock);
			kfree(newtlink);
			goto wait_for_construction;
		}
		tlink = newtlink;
		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
		spin_unlock(&cifs_sb->tlink_tree_lock);
	} else {
wait_for_construction:
		/* block until whoever owns TCON_LINK_PENDING finishes */
		err = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
				  TASK_INTERRUPTIBLE);
		if (err) {
			/* interrupted by a signal */
			cifs_put_tlink(tlink);
			return ERR_PTR(-ERESTARTSYS);
		}

		/* if it's good, return it */
		if (!IS_ERR(tlink->tl_tcon))
			return tlink;

		/* return error if we tried this already recently */
		if (time_before(jiffies, tlink->tl_time + TLINK_ERROR_EXPIRE)) {
			err = PTR_ERR(tlink->tl_tcon);
			cifs_put_tlink(tlink);
			return ERR_PTR(err);
		}

		/* claim construction; if someone beat us to it, wait again */
		if (test_and_set_bit(TCON_LINK_PENDING, &tlink->tl_flags))
			goto wait_for_construction;
	}

	/* we own TCON_LINK_PENDING: build the tcon and wake any waiters */
	tlink->tl_tcon = cifs_construct_tcon(cifs_sb, fsuid);
	clear_bit(TCON_LINK_PENDING, &tlink->tl_flags);
	wake_up_bit(&tlink->tl_flags, TCON_LINK_PENDING);

	if (IS_ERR(tlink->tl_tcon)) {
		err = PTR_ERR(tlink->tl_tcon);
		/* report missing-key failures as a plain permission error */
		if (err == -ENOKEY)
			err = -EACCES;
		cifs_put_tlink(tlink);
		return ERR_PTR(err);
	}

	return tlink;
}
4437
4438 /*
4439 * periodic workqueue job that scans tcon_tree for a superblock and closes
4440 * out tcons.
4441 */
static void
cifs_prune_tlinks(struct work_struct *work)
{
	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
						    prune_tlinks.work);
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct rb_node *tmp;
	struct tcon_link *tlink;

	/*
	 * Because we drop the spinlock in the loop in order to put the tlink
	 * it's not guarded against removal of links from the tree. The only
	 * places that remove entries from the tree are this function and
	 * umounts. Because this function is non-reentrant and is canceled
	 * before umount can proceed, this is safe.
	 */
	spin_lock(&cifs_sb->tlink_tree_lock);
	node = rb_first(root);
	while (node != NULL) {
		/* grab the successor first; we may erase the current node */
		tmp = node;
		node = rb_next(tmp);
		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);

		/* keep the master tlink, in-use links, and recently-used ones */
		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
		    atomic_read(&tlink->tl_count) != 0 ||
		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
			continue;

		cifs_get_tlink(tlink);
		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
		rb_erase(tmp, root);

		spin_unlock(&cifs_sb->tlink_tree_lock);
		cifs_put_tlink(tlink);
		spin_lock(&cifs_sb->tlink_tree_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	/* reschedule ourselves for the next idle-scan interval */
	queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
			   TLINK_IDLE_EXPIRE);
}
4484
4485 #ifndef CONFIG_CIFS_DFS_UPCALL
/*
 * Non-DFS build variant of cifs_tree_connect(): (re)send a tree connect
 * for @tcon when needed, serialized through the TID_* status machine so
 * only one task issues the request per reconnect.
 */
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon)
{
	const struct smb_version_operations *ops = tcon->ses->server->ops;
	int rc;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);

	/* if tcon is marked for needing reconnect, update state */
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_TCON;

	if (tcon->status == TID_GOOD) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}

	if (tcon->status != TID_NEW &&
	    tcon->status != TID_NEED_TCON) {
		spin_unlock(&tcon->tc_lock);
		return -EHOSTDOWN;
	}

	tcon->status = TID_IN_TCON;
	spin_unlock(&tcon->tc_lock);

	rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name,
			       tcon, tcon->ses->local_nls);
	if (rc) {
		/* request failed: let a later caller try again */
		spin_lock(&tcon->tc_lock);
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_NEED_TCON;
		spin_unlock(&tcon->tc_lock);
	} else {
		spin_lock(&tcon->tc_lock);
		/* only mark good if no one changed the state underneath us */
		if (tcon->status == TID_IN_TCON)
			tcon->status = TID_GOOD;
		tcon->need_reconnect = false;
		spin_unlock(&tcon->tc_lock);
	}

	return rc;
}
4529 #endif
4530