xref: /linux/fs/smb/server/ksmbd_work.c (revision 3571e8b091f4270d869dda7a6cc43616c6ad6897)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "server.h"
#include "connection.h"
#include "ksmbd_work.h"
#include "mgmt/ksmbd_ida.h"

static struct kmem_cache *work_cache;
static struct workqueue_struct *ksmbd_wq;

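/*
 * Allocate a zeroed ksmbd_work from the slab cache and set it up:
 * the compound fids start out as KSMBD_NO_FID, the list heads are
 * initialised, and a small response iov array (4 entries) is
 * preallocated.  Returns NULL if either allocation fails.
 */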
struct ksmbd_work *ksmbd_alloc_work_struct(void)
{
	struct ksmbd_work *work = kmem_cache_zalloc(work_cache, KSMBD_DEFAULT_GFP);

	if (work) {
		work->compound_fid = KSMBD_NO_FID;
		work->compound_pfid = KSMBD_NO_FID;
		INIT_LIST_HEAD(&work->request_entry);
		INIT_LIST_HEAD(&work->async_request_entry);
		INIT_LIST_HEAD(&work->fp_entry);
		INIT_LIST_HEAD(&work->aux_read_list);
		work->iov_alloc_cnt = 4;
		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
				    KSMBD_DEFAULT_GFP);
		if (!work->iov) {
			kmem_cache_free(work_cache, work);
			work = NULL;
		}
	}
	return work;
}

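/*
 * Release everything attached to a ksmbd_work: the response and request
 * buffers, any auxiliary read buffers queued on aux_read_list, the
 * transform buffer, the iov array, and the async id (if one was
 * assigned), then return the work struct to the slab cache.
 */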
void ksmbd_free_work_struct(struct ksmbd_work *work)
{
	struct aux_read *ar, *tmp;

	WARN_ON(work->saved_cred != NULL);

	kvfree(work->response_buf);

	list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
		kvfree(ar->buf);
		list_del(&ar->entry);
		kfree(ar);
	}

	kfree(work->tr_buf);
	kvfree(work->request_buf);
	kfree(work->iov);

	if (work->async_id)
		ksmbd_release_id(&work->conn->async_ida, work->async_id);
	kmem_cache_free(work_cache, work);
}

void ksmbd_work_pool_destroy(void)
{
	kmem_cache_destroy(work_cache);
}

int ksmbd_work_pool_init(void)
{
	work_cache = kmem_cache_create("ksmbd_work_cache",
				       sizeof(struct ksmbd_work), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!work_cache)
		return -ENOMEM;
	return 0;
}

int ksmbd_workqueue_init(void)
{
	ksmbd_wq = alloc_workqueue("ksmbd-io", 0, 0);
	if (!ksmbd_wq)
		return -ENOMEM;
	return 0;
}

void ksmbd_workqueue_destroy(void)
{
	destroy_workqueue(ksmbd_wq);
	ksmbd_wq = NULL;
}

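/*
 * Queue the work item on the ksmbd-io workqueue.  Returns true if it
 * was queued, false if it was already pending.
 */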
bool ksmbd_queue_work(struct ksmbd_work *work)
{
	return queue_work(ksmbd_wq, &work->work);
}

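/*
 * Append one buffer to the response iov array.  The caller must have
 * ensured there is a free slot (see __ksmbd_iov_pin_rsp below).
 */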
static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
				   unsigned int ib_len)
{
	work->iov[++work->iov_idx].iov_base = ib;
	work->iov[work->iov_idx].iov_len = ib_len;
	work->iov_cnt++;
}

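/*
 * Pin a response buffer (and optionally an auxiliary read buffer) into
 * the work's iov array.  The array is grown in steps of four entries
 * when needed.  The first pinned buffer also reserves iov[0] for the
 * 4-byte RFC 1001 length field at the start of response_buf, and every
 * pinned length is added to that field.  Auxiliary buffers are tracked
 * on aux_read_list so they can be freed along with the work.
 */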
static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
			       void *aux_buf, unsigned int aux_size)
{
	struct aux_read *ar = NULL;
	int need_iov_cnt = 1;

	if (aux_size) {
		need_iov_cnt++;
		ar = kmalloc(sizeof(struct aux_read), KSMBD_DEFAULT_GFP);
		if (!ar)
			return -ENOMEM;
	}

	if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
		struct kvec *new;

		work->iov_alloc_cnt += 4;
		new = krealloc(work->iov,
			       sizeof(struct kvec) * work->iov_alloc_cnt,
			       KSMBD_DEFAULT_GFP | __GFP_ZERO);
		if (!new) {
			kfree(ar);
			work->iov_alloc_cnt -= 4;
			return -ENOMEM;
		}
		work->iov = new;
	}

	/* Plus rfc_length size on first iov */
	if (!work->iov_idx) {
		work->iov[work->iov_idx].iov_base = work->response_buf;
		*(__be32 *)work->iov[0].iov_base = 0;
		work->iov[work->iov_idx].iov_len = 4;
		work->iov_cnt++;
	}

	__ksmbd_iov_pin(work, ib, len);
	inc_rfc1001_len(work->iov[0].iov_base, len);

	if (aux_size) {
		__ksmbd_iov_pin(work, aux_buf, aux_size);
		inc_rfc1001_len(work->iov[0].iov_base, aux_size);

		ar->buf = aux_buf;
		list_add(&ar->entry, &work->aux_read_list);
	}

	return 0;
}

int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
{
	return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
}

int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
			   void *aux_buf, unsigned int aux_size)
{
	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
}

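/*
 * Allocate a zeroed MAX_CIFS_SMALL_BUFFER_SIZE response buffer for an
 * interim response and record its size in work->response_sz.
 */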
int allocate_interim_rsp_buf(struct ksmbd_work *work)
{
	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, KSMBD_DEFAULT_GFP);
	if (!work->response_buf)
		return -ENOMEM;
	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
	return 0;
}
175