// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"
#include "misc.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

DEFINE_HASHTABLE(conn_list, CONN_HASH_BITS);
DECLARE_RWSEM(conn_list_lock);

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_clients;

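/*
 * Print one line per live connection: peer address, negotiated dialect,
 * granted credits, open file count, in-flight requests and the wall-clock
 * time of the last activity. Iterates conn_list under conn_list_lock.
 */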
static int proc_show_clients(struct seq_file *m, void *v)
{
	struct ksmbd_conn *conn;
	struct timespec64 now, t;
	int i;

	seq_printf(m, "#%-20s %-10s %-10s %-10s %-10s %-10s\n",
		   "<name>", "<dialect>", "<credits>", "<open files>",
		   "<requests>", "<last active>");

	down_read(&conn_list_lock);
	hash_for_each(conn_list, i, conn, hlist) {
		jiffies_to_timespec64(jiffies - conn->last_active, &t);
		ktime_get_real_ts64(&now);
		t = timespec64_sub(now, t);
#if IS_ENABLED(CONFIG_IPV6)
		if (!conn->inet_addr)
			seq_printf(m, "%-20pI6c", &conn->inet6_addr);
		else
#endif
			seq_printf(m, "%-20pI4", &conn->inet_addr);
		seq_printf(m, " 0x%-10x %-10u %-12d %-10d %ptT\n",
			   conn->dialect,
			   conn->total_credits,
			   atomic_read(&conn->stats.open_files_count),
			   atomic_read(&conn->req_running),
			   &t);
	}
	up_read(&conn_list_lock);
	return 0;
}

static int create_proc_clients(void)
{
	proc_clients = ksmbd_proc_create("clients",
					 proc_show_clients, NULL);
	if (!proc_clients)
		return -ENOMEM;
	return 0;
}

static void delete_proc_clients(void)
{
	if (proc_clients) {
		proc_remove(proc_clients);
		proc_clients = NULL;
	}
}
#else
static int create_proc_clients(void) { return 0; }
static void delete_proc_clients(void) {}
#endif

static struct workqueue_struct *ksmbd_conn_wq;

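/*
 * Create the unbound, reclaim-safe workqueue that hosts
 * __ksmbd_conn_release_work(); see ksmbd_conn_put() for why the final
 * connection teardown is deferred to process context.
 */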
int ksmbd_conn_wq_init(void)
{
	ksmbd_conn_wq = alloc_workqueue("ksmbd-conn-release",
					WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!ksmbd_conn_wq)
		return -ENOMEM;
	return 0;
}

void ksmbd_conn_wq_destroy(void)
{
	if (ksmbd_conn_wq) {
		destroy_workqueue(ksmbd_conn_wq);
		ksmbd_conn_wq = NULL;
	}
}

/*
 * __ksmbd_conn_release_work() - perform the final, once-per-struct cleanup
 * of a ksmbd_conn whose refcount has just dropped to zero.
 *
 * This is the common release path used by ksmbd_conn_put() for the embedded
 * state that outlives the connection thread: async_ida and the attached
 * transport (which owns the socket and iov for TCP). Called from a workqueue
 * so that sleep-allowed teardown (sock_release -> tcp_close ->
 * lock_sock_nested) never runs from an RCU softirq callback (free_opinfo_rcu)
 * or any other non-sleeping putter context.
 */
static void __ksmbd_conn_release_work(struct work_struct *work)
{
	struct ksmbd_conn *conn =
		container_of(work, struct ksmbd_conn, release_work);

	ida_destroy(&conn->async_ida);
	conn->transport->ops->free_transport(conn->transport);
	kfree(conn);
}

/**
 * ksmbd_conn_get() - take a reference on @conn and return it.
 *
 * @conn: connection instance to get a reference to
 *
 * Returns @conn unchanged so callers can write
 * "fp->conn = ksmbd_conn_get(work->conn);" in one expression. Returns NULL
 * if @conn is NULL.
 */
struct ksmbd_conn *ksmbd_conn_get(struct ksmbd_conn *conn)
{
	if (!conn)
		return NULL;

	atomic_inc(&conn->refcnt);
	return conn;
}

/**
 * ksmbd_conn_put() - drop a reference and, if it was the last, queue the
 * release onto ksmbd_conn_wq so it runs from process context.
 *
 * @conn: connection instance to put a reference to
 *
 * Callable from any context including RCU softirq callbacks and non-sleeping
 * locks; the actual release is deferred to the workqueue. ksmbd_conn_wq is
 * created in ksmbd_server_init() before any conn can be allocated and is
 * destroyed in ksmbd_server_exit() after rcu_barrier(), so it is always
 * non-NULL while a conn reference is held.
 */
void ksmbd_conn_put(struct ksmbd_conn *conn)
{
	if (!conn)
		return;

	if (atomic_dec_and_test(&conn->refcnt))
		queue_work(ksmbd_conn_wq, &conn->release_work);
}

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn: connection instance to be cleaned up
 *
 * When the connection thread terminates, the corresponding conn instance
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	down_write(&conn_list_lock);
	hash_del(&conn->hlist);
	up_write(&conn_list_lock);

	/*
	 * request_buf / preauth_info / mechToken are only ever accessed by the
	 * connection handler thread that owns @conn. ksmbd_conn_free() is
	 * called from the transport free_transport() path when that thread is
	 * exiting, so it is safe to release them unconditionally even when
	 * ksmbd_conn_put() below is not the final putter (oplock / ksmbd_file
	 * holders only retain the conn pointer, not these per-thread buffers).
	 */
	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn->preauth_info);
	kfree(conn->mechToken);
	ksmbd_conn_put(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc_obj(struct ksmbd_conn, KSMBD_DEFAULT_GFP);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	ksmbd_conn_set_new(conn);
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	if (IS_ENABLED(CONFIG_UNICODE))
		conn->um = utf8_load(UNICODE_AGE(12, 1, 0));
	else
		conn->um = ERR_PTR(-EOPNOTSUPP);
	if (IS_ERR(conn->um))
		conn->um = NULL;
	INIT_WORK(&conn->release_work, __ksmbd_conn_release_work);
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	atomic_set(&conn->refcnt, 1);
	conn->total_credits = 1;
	conn->outstanding_credits = 0;

	init_waitqueue_head(&conn->req_running_q);
	init_waitqueue_head(&conn->r_count_q);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	ida_init(&conn->async_ida);
	xa_init(&conn->sessions);

	spin_lock_init(&conn->llist_lock);
	INIT_LIST_HEAD(&conn->lock_list);

	init_rwsem(&conn->session_lock);

	return conn;
}

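/*
 * Check whether any connection already on conn_list shares @c's
 * ClientGUID, scanning under conn_list_lock.
 */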
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	int bkt;
	bool ret = false;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, t, hlist) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	up_read(&conn_list_lock);
	return ret;
}

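/*
 * Bump conn->req_running for @work and, unless the command is SMB2 CANCEL,
 * link it on conn->requests so a later cancel can find it.
 */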
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
		requests_queue = &conn->requests;

	atomic_inc(&conn->req_running);
	if (requests_queue) {
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

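/*
 * Drop the in-flight accounting taken in ksmbd_conn_enqueue_request(),
 * unlink @work from the request list if it was queued there, release its
 * async state if it was asynchronous, and wake waiters on req_running_q.
 */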
void ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;

	atomic_dec(&conn->req_running);
	if (waitqueue_active(&conn->req_running_q))
		wake_up(&conn->req_running_q);

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return;

	spin_lock(&conn->request_lock);
	list_del_init(&work->request_entry);
	spin_unlock(&conn->request_lock);
	if (work->asynchronous)
		release_async_work(work);

	wake_up_all(&conn->req_running_q);
}

void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

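/*
 * Apply @status to every connection that is currently binding or that has
 * session @sess_id attached, so a session-wide state change (for example at
 * logoff) reaches all of its channels.
 */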
void ksmbd_all_conn_set_status(u64 sess_id, u32 status)
{
	struct ksmbd_conn *conn;
	int bkt;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id))
			WRITE_ONCE(conn->status, status);
	}
	up_read(&conn_list_lock);
}

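/*
 * Wait until the caller's request is the only one still running on @conn
 * (req_running drops below 2).
 */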
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

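/*
 * Wait for every connection carrying session @sess_id (and any binding
 * connection) to drain its in-flight requests, allowing one still-running
 * request on @curr_conn itself. Retries in one-second slices and gives up
 * with -EIO after max_timeout attempts.
 */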
int ksmbd_conn_wait_idle_sess_id(struct ksmbd_conn *curr_conn, u64 sess_id)
{
	struct ksmbd_conn *conn;
	int rc, retry_count = 0, max_timeout = 120;
	int rcount, bkt;

retry_idle:
	if (retry_count >= max_timeout)
		return -EIO;

	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		if (conn->binding || xa_load(&conn->sessions, sess_id)) {
			rcount = (conn == curr_conn) ? 2 : 1;
			if (atomic_read(&conn->req_running) >= rcount) {
				rc = wait_event_timeout(conn->req_running_q,
					atomic_read(&conn->req_running) < rcount,
					HZ);
				if (!rc) {
					up_read(&conn_list_lock);
					retry_count++;
					goto retry_idle;
				}
			}
		}
	}
	up_read(&conn_list_lock);

	return 0;
}

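/*
 * Send the response that @work has assembled in its iov array through the
 * connection's transport, serialized against other writers by srv_mutex.
 * Returns 0 on success or a negative error from the transport.
 */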
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int sent;

	if (!work->response_buf) {
		pr_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->send_no_response)
		return 0;

	if (!work->iov_idx)
		return -EINVAL;

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, work->iov,
					    work->iov_cnt,
					    get_rfc1002_len(work->iov[0].iov_base) + 4,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		pr_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

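/*
 * RDMA read/write helpers: forward the buffer descriptors to the
 * transport's rdma_read()/rdma_write() hooks; transports without RDMA
 * support leave the hook NULL and the caller gets -EINVAL.
 */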
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 struct smbdirect_buffer_descriptor_v1 *desc,
			 unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      desc, desc_len);
	return ret;
}

int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  struct smbdirect_buffer_descriptor_v1 *desc,
			  unsigned int desc_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       desc, desc_len);
	return ret;
}

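/*
 * Decide whether the handler loop should keep servicing @conn: the server
 * must still be running, the connection must not be exiting, the kthread
 * must not have been asked to stop, and an idle connection (no open files)
 * must not have exceeded the configured deadtime.
 */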
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (ksmbd_conn_exiting(conn))
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last request from
	 * the client exceeds the user-configured deadtime and the open file
	 * count is zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

/* "+2" for BCC field (ByteCount, 2 bytes) */
#define SMB1_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb_hdr) + 2)
#define SMB2_MIN_SUPPORTED_PDU_SIZE (sizeof(struct smb2_pdu))

/**
 * ksmbd_conn_handler_loop() - session thread that listens for new SMB requests
 * @p: connection instance
 *
 * One thread each per connection
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size, max_allowed_pdu_size, max_req;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	max_req = server_conf.max_inflight_req;
	conn->last_active = jiffies;
	set_freezable();
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		kvfree(conn->request_buf);
		conn->request_buf = NULL;

recheck:
		if (atomic_read(&conn->req_running) + 1 > max_req) {
			wait_event_interruptible(conn->req_running_q,
				atomic_read(&conn->req_running) < max_req);
			goto recheck;
		}

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf), -1);
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		if (ksmbd_conn_good(conn))
			max_allowed_pdu_size =
				SMB3_MAX_MSGSIZE + conn->vals->max_write_size;
		else
			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;

		if (pdu_size > max_allowed_pdu_size) {
			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
					   pdu_size, max_allowed_pdu_size,
					   READ_ONCE(conn->status));
			break;
		}

		/*
		 * Check maximum pdu size(0x00FFFFFF).
		 */
		if (pdu_size > MAX_STREAM_PROT_LEN)
			break;

		if (pdu_size < SMB1_MIN_SUPPORTED_PDU_SIZE)
			break;

		/* 4 for rfc1002 length field */
		/* 1 for implied bcc[0] */
		size = pdu_size + 4 + 1;
		conn->request_buf = kvmalloc(size, KSMBD_DEFAULT_GFP);
		if (!conn->request_buf)
			break;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size, 2);
		if (size < 0) {
			pr_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			pr_err("PDU error. Read: %d, Expected: %d\n",
			       size, pdu_size);
			continue;
		}

		if (!ksmbd_smb_request(conn))
			break;

		if (((struct smb2_hdr *)smb_get_msg(conn->request_buf))->ProtocolId ==
		    SMB2_PROTO_NUMBER) {
			if (pdu_size < SMB2_MIN_SUPPORTED_PDU_SIZE)
				break;
		}

		if (!default_conn_ops.process_fn) {
			pr_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			pr_err("Cannot handle request\n");
			break;
		}
	}

	ksmbd_conn_set_releasing(conn);
	/* Wait till all references held by pending requests are dropped */
	ksmbd_debug(CONN, "Wait for all pending requests(%d)\n", atomic_read(&conn->r_count));
	wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);

	if (IS_ENABLED(CONFIG_UNICODE))
		utf8_unload(conn->um);
	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

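/*
 * r_count tracks requests still being processed on behalf of @conn.
 * The handler loop waits on r_count_q until it reaches zero before tearing
 * the connection down; ksmbd_conn_r_count_dec() briefly takes an extra conn
 * reference so the wake_up() cannot race with the final put of @conn.
 */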
void ksmbd_conn_r_count_inc(struct ksmbd_conn *conn)
{
	atomic_inc(&conn->r_count);
}

void ksmbd_conn_r_count_dec(struct ksmbd_conn *conn)
{
	/*
	 * Checking the waitqueue lets the disconnect path drop pending
	 * requests; waitqueue_active() is safe here because the wait
	 * condition is checked with an atomic operation.
	 */
	atomic_inc(&conn->refcnt);
	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
		wake_up(&conn->r_count_q);

	ksmbd_conn_put(conn);
}

int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	create_proc_clients();
	return ret;
}

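/*
 * Shut down every remaining connection: pick one not-yet-stopped conn at a
 * time, mark it exiting, shut its transport down, and drop the reference
 * taken for the scan; keep looping (with a short sleep) until conn_list is
 * empty.
 */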
static void stop_sessions(void)
{
	struct ksmbd_conn *conn, *target;
	struct ksmbd_transport *t;
	bool any;
	int bkt;

	/*
	 * Serialised via init_lock; no concurrent stop_sessions() can
	 * touch conn->stop_called, so writing it under the read lock is
	 * safe.
	 */
again:
	target = NULL;
	any = false;
	down_read(&conn_list_lock);
	hash_for_each(conn_list, bkt, conn, hlist) {
		any = true;
		if (conn->stop_called)
			continue;
		atomic_inc(&conn->refcnt);
		conn->stop_called = true;
		/*
		 * Mark the connection EXITING while still holding the
		 * read lock so the selection and the status transition
		 * happen together. Do not regress a connection that has
		 * already advanced to RELEASING on its own (e.g. the
		 * handler exited its receive loop for an unrelated
		 * reason).
		 */
		if (READ_ONCE(conn->status) != KSMBD_SESS_RELEASING)
			ksmbd_conn_set_exiting(conn);
		target = conn;
		break;
	}
	up_read(&conn_list_lock);

	if (target) {
		t = target->transport;
		if (t->ops->shutdown)
			t->ops->shutdown(t);
		if (atomic_dec_and_test(&target->refcnt)) {
			ida_destroy(&target->async_ida);
			t->ops->free_transport(t);
			kfree(target);
		}
		goto again;
	}

	if (any) {
		msleep(100);
		goto again;
	}
}

void ksmbd_conn_transport_destroy(void)
{
	delete_proc_clients();
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_stop_listening();
	stop_sessions();
	mutex_unlock(&init_lock);
}