xref: /linux/fs/smb/server/connection.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
4  *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
5  */
6 
7 #include <linux/mutex.h>
8 #include <linux/freezer.h>
9 #include <linux/module.h>
10 
11 #include "server.h"
12 #include "smb_common.h"
13 #include "mgmt/ksmbd_ida.h"
14 #include "connection.h"
15 #include "transport_tcp.h"
16 #include "transport_rdma.h"
17 #include "misc.h"
18 
19 static DEFINE_MUTEX(init_lock);
20 
21 static struct ksmbd_conn_ops default_conn_ops;
22 
23 DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
24 DECLARE_RWSEM(conn_list_lock);
25 
26 #ifdef CONFIG_PROC_FS
27 static struct proc_dir_entry *proc_clients;
28 
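/*
 * Seq_file show callback for the ksmbd "clients" proc entry: print one line
 * per live connection (peer address, dialect, credits, open file count,
 * in-flight requests and last-active time) while holding conn_list_lock.
 */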
29 static int proc_show_clients(struct seq_file *m, void *v)
30 {
31 	struct ksmbd_conn *conn;
32 	struct timespec64 now, t;
33 	int i;
34 
35 	seq_printf(m, "#%-20s %-10s %-10s %-10s %-10s %-10s\n",
36 			"<name>", "<dialect>", "<credits>", "<open files>",
37 			"<requests>", "<last active>");
38 
39 	down_read(&conn_list_lock);
40 	hash_for_each(conn_list, i, conn, hlist) {
41 		jiffies_to_timespec64(jiffies - conn->last_active, &t);
42 		ktime_get_real_ts64(&now);
43 		t = timespec64_sub(now, t);
44 #if IS_ENABLED(CONFIG_IPV6)
45 		if (!conn->inet_addr)
46 			seq_printf(m, "%-20pI6c", &conn->inet6_addr);
47 		else
48 #endif
49 			seq_printf(m, "%-20pI4", &conn->inet_addr);
50 		seq_printf(m, "   0x%-10x %-10u %-12d %-10d %ptT\n",
51 			   conn->dialect,
52 			   conn->total_credits,
53 			   atomic_read(&conn->stats.open_files_count),
54 			   atomic_read(&conn->req_running),
55 			   &t);
56 	}
57 	up_read(&conn_list_lock);
58 	return 0;
59 }
60 
61 static int create_proc_clients(void)
62 {
63 	proc_clients = ksmbd_proc_create("clients",
64 					 proc_show_clients, NULL);
65 	if (!proc_clients)
66 		return -ENOMEM;
67 	return 0;
68 }
69 
70 static void delete_proc_clients(void)
71 {
72 	if (proc_clients) {
73 		proc_remove(proc_clients);
74 		proc_clients = NULL;
75 	}
76 }
77 #else
78 static int create_proc_clients(void) { return 0; }
79 static void delete_proc_clients(void) {}
80 #endif
81 
82 /**
83  * ksmbd_conn_free() - free resources of the connection instance
84  *
85  * @conn:	connection instance to be cleaned up
86  *
87  * When the connection thread terminates, the conn instance's resources
88  * (socket/memory) are released and finally the conn object itself is freed.
89  */
90 void ksmbd_conn_free(struct ksmbd_conn *conn)
91 {
92 	down_write(&conn_list_lock);
93 	hash_del(&conn->hlist);
94 	up_write(&conn_list_lock);
95 
96 	xa_destroy(&conn->sessions);
97 	kvfree(conn->request_buf);
98 	kfree(conn->preauth_info);
99 	if (atomic_dec_and_test(&conn->refcnt)) {
100 		conn->transport->ops->free_transport(conn->transport);
101 		kfree(conn);
102 	}
103 }
104 
105 /**
106  * ksmbd_conn_alloc() - initialize a new connection instance
107  *
108  * Return:	ksmbd_conn struct on success, otherwise NULL
109  */
110 struct ksmbd_conn *ksmbd_conn_alloc(void)
111 {
112 	struct ksmbd_conn *conn;
113 
114 	conn = kzalloc_obj(struct ksmbd_conn, KSMBD_DEFAULT_GFP);
115 	if (!conn)
116 		return NULL;
117 
118 	conn->need_neg = true;
119 	ksmbd_conn_set_new(conn);
120 	conn->local_nls = load_nls("utf8");
121 	if (!conn->local_nls)
122 		conn->local_nls = load_nls_default();
123 	if (IS_ENABLED(CONFIG_UNICODE))
124 		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
125 	else
126 		conn->um = ERR_PTR(-EOPNOTSUPP);
127 	if (IS_ERR(conn->um))
128 		conn->um = NULL;
129 	atomic_set(&conn->req_running, 0);
130 	atomic_set(&conn->r_count, 0);
131 	atomic_set(&conn->refcnt, 1);
132 	conn->total_credits = 1;
133 	conn->outstanding_credits = 0;
134 
135 	init_waitqueue_head(&conn->req_running_q);
136 	init_waitqueue_head(&conn->r_count_q);
137 	INIT_LIST_HEAD(&conn->requests);
138 	INIT_LIST_HEAD(&conn->async_requests);
139 	spin_lock_init(&conn->request_lock);
140 	spin_lock_init(&conn->credits_lock);
141 	ida_init(&conn->async_ida);
142 	xa_init(&conn->sessions);
143 
144 	spin_lock_init(&conn->llist_lock);
145 	INIT_LIST_HEAD(&conn->lock_list);
146 
147 	init_rwsem(&conn->session_lock);
148 
149 	return conn;
150 }
151 
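/*
 * ksmbd_conn_lookup_dialect() - return true if a connection whose ClientGUID
 * matches @c's is present in conn_list.
 */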
152 bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
153 {
154 	struct ksmbd_conn *t;
155 	int bkt;
156 	bool ret = false;
157 
158 	down_read(&conn_list_lock);
159 	hash_for_each(conn_list, bkt, t, hlist) {
160 		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
161 			continue;
162 
163 		ret = true;
164 		break;
165 	}
166 	up_read(&conn_list_lock);
167 	return ret;
168 }
169 
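/*
 * ksmbd_conn_enqueue_request() - account a new in-flight request: bump
 * conn->req_running and, for everything except SMB2 CANCEL, link the work
 * item onto conn->requests under request_lock.
 */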
170 void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
171 {
172 	struct ksmbd_conn *conn = work->conn;
173 	struct list_head *requests_queue = NULL;
174 
175 	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
176 		requests_queue = &conn->requests;
177 
178 	atomic_inc(&conn->req_running);
179 	if (requests_queue) {
180 		spin_lock(&conn->request_lock);
181 		list_add_tail(&work->request_entry, requests_queue);
182 		spin_unlock(&conn->request_lock);
183 	}
184 }
185 
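/*
 * ksmbd_conn_try_dequeue_request() - undo the accounting done by
 * ksmbd_conn_enqueue_request(): drop req_running, unlink the work item if it
 * was queued, release its async id if needed, and wake req_running_q waiters.
 */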
186 void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
187 {
188 	struct ksmbd_conn *conn = work->conn;
189 
190 	atomic_dec(&conn->req_running);
191 	if (waitqueue_active(&conn->req_running_q))
192 		wake_up(&conn->req_running_q);
193 
194 	if (list_empty(&work->request_entry) &&
195 	    list_empty(&work->async_request_entry))
196 		return;
197 
198 	spin_lock(&conn->request_lock);
199 	list_del_init(&work->request_entry);
200 	spin_unlock(&conn->request_lock);
201 	if (work->asynchronous)
202 		release_async_work(work);
203 
204 	wake_up_all(&conn->req_running_q);
205 }
206 
207 void ksmbd_conn_lock(struct ksmbd_conn *conn)
208 {
209 	mutex_lock(&conn->srv_mutex);
210 }
211 
212 void ksmbd_conn_unlock(struct ksmbd_conn *conn)
213 {
214 	mutex_unlock(&conn->srv_mutex);
215 }
216 
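/*
 * ksmbd_all_conn_set_status() - set @status on every connection that is
 * binding or that holds a session with @sess_id.
 */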
217 void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
218 {
219 	struct ksmbd_conn *conn;
220 	int bkt;
221 
222 	down_read(&conn_list_lock);
223 	hash_for_each(conn_list, bkt, conn, hlist) {
224 		if (conn->binding || xa_load(&conn->sessions, sess_id))
225 			WRITE_ONCE(conn->status, status);
226 	}
227 	up_read(&conn_list_lock);
228 }
229 
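/* Wait until at most one request (the caller's own) remains in flight. */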
230 void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
231 {
232 	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
233 }
234 
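/*
 * ksmbd_conn_wait_idle_sess_id() - wait until every connection tied to
 * @sess_id has no other request in flight (one extra request is allowed on
 * @curr_conn, the caller's own).  Retries in one-second slices and gives up
 * with -EIO after max_timeout (120) attempts.
 */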
235 int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
236 {
237 	struct ksmbd_conn *conn;
238 	int rc, retry_count = 0, max_timeout = 120;
239 	int rcount = 1, bkt;
240 
241 retry_idle:
242 	if (retry_count >= max_timeout)
243 		return -EIO;
244 
245 	down_read(&conn_list_lock);
246 	hash_for_each(conn_list, bkt, conn, hlist) {
247 		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
248 			if (conn == curr_conn)
249 				rcount = 2;
250 			if (atomic_read(&conn->req_running) >= rcount) {
251 				rc = wait_event_timeout(conn->req_running_q,
252 					atomic_read(&conn->req_running) < rcount,
253 					HZ);
254 				if (!rc) {
255 					up_read(&conn_list_lock);
256 					retry_count++;
257 					goto retry_idle;
258 				}
259 			}
260 		}
261 	}
262 	up_read(&conn_list_lock);
263 
264 	return 0;
265 }
266 
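/*
 * ksmbd_conn_write() - send the response iovecs of @work over the
 * connection's transport, serialized by the connection mutex.  Returns 0 on
 * success or a negative error code.
 */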
267 int ksmbd_conn_write(struct ksmbd_work *work)
268 {
269 	struct ksmbd_conn *conn = work->conn;
270 	int sent;
271 
272 	if (!work->response_buf) {
273 		pr_err("NULL response header\n");
274 		return -EINVAL;
275 	}
276 
277 	if (work->send_no_response)
278 		return 0;
279 
280 	if (!work->iov_idx)
281 		return -EINVAL;
282 
283 	ksmbd_conn_lock(conn);
284 	sent = conn->transport->ops->writev(conn->transport, work->iov,
285 			work->iov_cnt,
286 			get_rfc1002_len(work->iov[0].iov_base) + 4,
287 			work->need_invalidate_rkey,
288 			work->remote_key);
289 	ksmbd_conn_unlock(conn);
290 
291 	if (sent < 0) {
292 		pr_err("Failed to send message: %d\n", sent);
293 		return sent;
294 	}
295 
296 	return 0;
297 }
298 
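/*
 * Thin wrappers around the transport's optional RDMA read/write hooks;
 * they return -EINVAL when the transport (e.g. plain TCP) does not
 * provide them.
 */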
299 int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
300 			 void *buf, unsigned int buflen,
301 			 struct smbdirect_buffer_descriptor_v1 *desc,
302 			 unsigned int desc_len)
303 {
304 	int ret = -EINVAL;
305 
306 	if (conn->transport->ops->rdma_read)
307 		ret = conn->transport->ops->rdma_read(conn->transport,
308 						      buf, buflen,
309 						      desc, desc_len);
310 	return ret;
311 }
312 
313 int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
314 			  void *buf, unsigned int buflen,
315 			  struct smbdirect_buffer_descriptor_v1 *desc,
316 			  unsigned int desc_len)
317 {
318 	int ret = -EINVAL;
319 
320 	if (conn->transport->ops->rdma_write)
321 		ret = conn->transport->ops->rdma_write(conn->transport,
322 						       buf, buflen,
323 						       desc, desc_len);
324 	return ret;
325 }
326 
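/*
 * ksmbd_conn_alive() - decide whether the handler loop should keep running:
 * false once the server stops, the connection is exiting, the thread is asked
 * to stop, or an idle connection with no open files exceeds the configured
 * deadtime.
 */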
327 bool ksmbd_conn_alive(struct ksmbd_conn *conn)
328 {
329 	if (!ksmbd_server_running())
330 		return false;
331 
332 	if (ksmbd_conn_exiting(conn))
333 		return false;
334 
335 	if (kthread_should_stop())
336 		return false;
337 
338 	if (atomic_read(&conn->stats.open_files_count) > 0)
339 		return true;
340 
341 	/*
342 	 * Stop the current session if the time since the last request from
343 	 * the client exceeds the user-configured deadtime and the open file
344 	 * count is zero.
345 	 */
346 	if (server_conf.deadtime > 0 &&
347 	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
348 		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
349 			    server_conf.deadtime / SMB_ECHO_INTERVAL);
350 		return false;
351 	}
352 	return true;
353 }
354 
355 /* "+2" for BCC field (ByteCount, 2 bytes) */
356 #define SMB1_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb_hdr) + 2)
357 #define SMB2_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb2_pdu))
358 
359 /**
360  * ksmbd_conn_handler_loop() - per-connection thread that receives SMB requests
361  * @p:		connection instance
362  *
363  * One such thread runs for each connection
364  *
365  * Return:	0 on success
366  */
367 int ksmbd_conn_handler_loop(void *p)
368 {
369 	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
370 	struct ksmbd_transport *t = conn->transport;
371 	unsigned int pdu_size, max_allowed_pdu_size, max_req;
372 	char hdr_buf[4] = {0,};
373 	int size;
374 
375 	mutex_init(&conn->srv_mutex);
376 	__module_get(THIS_MODULE);
377 
378 	if (t->ops->prepare && t->ops->prepare(t))
379 		goto out;
380 
381 	max_req = server_conf.max_inflight_req;
382 	conn->last_active = jiffies;
383 	set_freezable();
384 	while (ksmbd_conn_alive(conn)) {
385 		if (try_to_freeze())
386 			continue;
387 
388 		kvfree(conn->request_buf);
389 		conn->request_buf = NULL;
390 
391 recheck:
392 		if (atomic_read(&conn->req_running) + 1 > max_req) {
393 			wait_event_interruptible(conn->req_running_q,
394 				atomic_read(&conn->req_running) < max_req);
395 			goto recheck;
396 		}
397 
398 		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
399 		if (size != sizeof(hdr_buf))
400 			break;
401 
402 		pdu_size = get_rfc1002_len(hdr_buf);
403 		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);
404 
405 		if (ksmbd_conn_good(conn))
406 			max_allowed_pdu_size =
407 				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
408 		else
409 			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
410 
411 		if (pdu_size > max_allowed_pdu_size) {
412 			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
413 					pdu_size, max_allowed_pdu_size,
414 					READ_ONCE(conn->status));
415 			break;
416 		}
417 
418 		/*
419 		 * Check against the maximum PDU size (0x00FFFFFF).
420 		 */
421 		if (pdu_size > MAX_STREAM_PROT_LEN)
422 			break;
423 
424 		if (pdu_size < SMB1_MIN_SUPPORTED_PDU_SIZE)
425 			break;
426 
427 		/* 4 for rfc1002 length field */
428 		/* 1 for implied bcc[0] */
429 		size = pdu_size + 4 + 1;
430 		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
431 		if (!conn->request_buf)
432 			break;
433 
434 		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
435 
436 		/*
437 		 * We already read 4 bytes to find out PDU size, now
438 		 * read in PDU
439 		 */
440 		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
441 		if (size < 0) {
442 			pr_err("sock_read failed: %d\n", size);
443 			break;
444 		}
445 
446 		if (size != pdu_size) {
447 			pr_err("PDU error. Read: %d, Expected: %d\n",
448 			       size, pdu_size);
449 			continue;
450 		}
451 
452 		if (!ksmbd_smb_request(conn))
453 			break;
454 
455 		if (((struct smb2_hdr *)smb_get_msg(conn->request_buf))->ProtocolId ==
456 		    SMB2_PROTO_NUMBER) {
457 			if (pdu_size < SMB2_MIN_SUPPORTED_PDU_SIZE)
458 				break;
459 		}
460 
461 		if (!default_conn_ops.process_fn) {
462 			pr_err("No connection request callback\n");
463 			break;
464 		}
465 
466 		if (default_conn_ops.process_fn(conn)) {
467 			pr_err("Cannot handle request\n");
468 			break;
469 		}
470 	}
471 
472 out:
473 	ksmbd_conn_set_releasing(conn);
474 	/* Wait until all pending requests have dropped their reference to the conn */
475 	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
476 	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
477 
478 	if (IS_ENABLED(CONFIG_UNICODE))
479 		utf8_unload(conn->um);
480 	unload_nls(conn->local_nls);
481 	if (default_conn_ops.terminate_fn)
482 		default_conn_ops.terminate_fn(conn);
483 	t->ops->disconnect(t);
484 	module_put(THIS_MODULE);
485 	return 0;
486 }
487 
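/*
 * ksmbd_conn_init_server_callbacks() - install the server's process/terminate
 * callbacks used by the connection handler loop.
 */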
488 void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
489 {
490 	default_conn_ops.process_fn = ops->process_fn;
491 	default_conn_ops.terminate_fn = ops->terminate_fn;
492 }
493 
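/*
 * ksmbd_conn_r_count_inc()/ksmbd_conn_r_count_dec() - track requests that
 * still reference the connection.  The dec side takes a temporary refcnt
 * reference so the final wake_up() cannot race with ksmbd_conn_free()
 * tearing the object down underneath it.
 */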
494 void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
495 {
496 	atomic_inc(&conn->r_count);
497 }
498 
499 void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
500 {
501 	/*
502 	 * Wake up anyone waiting for pending requests to drain on
503 	 * disconnection. waitqueue_active() is safe here because the wait
504 	 * condition is updated with atomic operations.
505 	 */
506 	atomic_inc(&conn->refcnt);
507 	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
508 		wake_up(&conn->r_count_q);
509 
510 	if (atomic_dec_and_test(&conn->refcnt))
511 		kfree(conn);
512 }
513 
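/*
 * ksmbd_conn_transport_init() - bring up the TCP and RDMA transports under
 * init_lock and create the "clients" proc entry.
 */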
514 int ksmbd_conn_transport_init(void)
515 {
516 	int ret;
517 
518 	mutex_lock(&init_lock);
519 	ret = ksmbd_tcp_init();
520 	if (ret) {
521 		pr_err("Failed to init TCP subsystem: %d\n", ret);
522 		goto out;
523 	}
524 
525 	ret = ksmbd_rdma_init();
526 	if (ret) {
527 		pr_err("Failed to init RDMA subsystem: %d\n", ret);
528 		goto out;
529 	}
530 out:
531 	mutex_unlock(&init_lock);
532 	create_proc_clients();
533 	return ret;
534 }
535 
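/*
 * stop_sessions() - mark every connection as exiting, ask its transport to
 * shut down, and poll until conn_list is empty (i.e. all handler threads have
 * freed their connections).
 */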
536 static void stop_sessions(void)
537 {
538 	struct ksmbd_conn *conn;
539 	struct ksmbd_transport *t;
540 	int bkt;
541 
542 again:
543 	down_read(&conn_list_lock);
544 	hash_for_each(conn_list, bkt, conn, hlist) {
545 		t = conn->transport;
546 		ksmbd_conn_set_exiting(conn);
547 		if (t->ops->shutdown) {
548 			up_read(&conn_list_lock);
549 			t->ops->shutdown(t);
550 			down_read(&conn_list_lock);
551 		}
552 	}
553 	up_read(&conn_list_lock);
554 
555 	if (!hash_empty(conn_list)) {
556 		msleep(100);
557 		goto again;
558 	}
559 }
560 
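/*
 * ksmbd_conn_transport_destroy() - inverse of ksmbd_conn_transport_init():
 * remove the proc entry, stop listening, force remaining sessions down and
 * tear the transports down.
 */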
561 void ksmbd_conn_transport_destroy(void)
562 {
563 	delete_proc_clients();
564 	mutex_lock(&init_lock);
565 	ksmbd_tcp_destroy();
566 	ksmbd_rdma_stop_listening();
567 	stop_sessions();
568 	ksmbd_rdma_destroy();
569 	mutex_unlock(&init_lock);
570 }
571