// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

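/*
 * A slot_map hands out indices into a shared bitmap of buffer slots.
 * ->c is the number of free slots while a map is installed and -1 while
 * no map is installed.  During shutdown mark_killed() drops ->c by
 * ->count + 1, so ->c climbs back to exactly -1 once every outstanding
 * slot has been returned by put().  ->q.lock doubles as the lock
 * protecting all of the fields.
 */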
struct slot_map {
        int c;
        wait_queue_head_t q;
        int count;
        unsigned long *map;
};

static struct slot_map rw_map = {
        .c = -1,
        .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
        .c = -1,
        .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

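/* make a freshly allocated bitmap available and wake anyone waiting on it */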
static void install(struct slot_map *m, int count, unsigned long *map)
{
        spin_lock(&m->q.lock);
        m->c = m->count = count;
        m->map = map;
        wake_up_all_locked(&m->q);
        spin_unlock(&m->q.lock);
}

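/*
 * Begin tearing a map down: push ->c below zero so that no new slot can
 * be handed out, and so that put() can tell when the last outstanding
 * slot has come back (->c reaches -1 again).
 */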
static void mark_killed(struct slot_map *m)
{
        spin_lock(&m->q.lock);
        m->c -= m->count + 1;
        spin_unlock(&m->q.lock);
}

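/*
 * Wait (uninterruptibly) until every slot handed out by get() has been
 * returned, i.e. until ->c climbs back to -1, then detach the bitmap.
 */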
static void run_down(struct slot_map *m)
{
        DEFINE_WAIT(wait);
        spin_lock(&m->q.lock);
        if (m->c != -1) {
                for (;;) {
                        if (likely(list_empty(&wait.entry)))
                                __add_wait_queue_entry_tail(&m->q, &wait);
                        set_current_state(TASK_UNINTERRUPTIBLE);

                        if (m->c == -1)
                                break;

                        spin_unlock(&m->q.lock);
                        schedule();
                        spin_lock(&m->q.lock);
                }
                __remove_wait_queue(&m->q, &wait);
                __set_current_state(TASK_RUNNING);
        }
        m->map = NULL;
        spin_unlock(&m->q.lock);
}

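/*
 * Return a slot to the map.  Wake one exclusive waiter when a slot
 * becomes free, or everyone (e.g. run_down()) once a dying map has
 * fully drained.
 */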
static void put(struct slot_map *m, int slot)
{
        int v;
        spin_lock(&m->q.lock);
        __clear_bit(slot, m->map);
        v = ++m->c;
        if (v > 0)
                wake_up_locked(&m->q);
        if (unlikely(v == -1))  /* finished dying */
                wake_up_all_locked(&m->q);
        spin_unlock(&m->q.lock);
}

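/*
 * Sleep until a slot frees up or slot_timeout_secs expires.  "left" is
 * the total budget in jiffies; while no map is installed (->c < 0) each
 * individual sleep is capped at ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS so a
 * missing client-core is noticed promptly.  Called with ->q.lock held;
 * returns with it held.
 */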
static int wait_for_free(struct slot_map *m)
{
        long left = slot_timeout_secs * HZ;
        DEFINE_WAIT(wait);

        do {
                long n = left, t;
                if (likely(list_empty(&wait.entry)))
                        __add_wait_queue_entry_tail_exclusive(&m->q, &wait);
                set_current_state(TASK_INTERRUPTIBLE);

                if (m->c > 0)
                        break;

                if (m->c < 0) {
                        /* we are waiting for map to be installed */
                        /* it had better be there soon, or we give up */
                        if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
                                n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
                }
                spin_unlock(&m->q.lock);
                t = schedule_timeout(n);
                spin_lock(&m->q.lock);
                if (unlikely(!t) && n != left && m->c < 0)
                        left = t;
                else
                        left = t + (left - n);
                if (signal_pending(current))
                        left = -EINTR;
        } while (left > 0);

        if (!list_empty(&wait.entry))
                list_del(&wait.entry);
        else if (left <= 0 && waitqueue_active(&m->q))
                __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
        __set_current_state(TASK_RUNNING);

        if (likely(left > 0))
                return 0;

        return left < 0 ? -EINTR : -ETIMEDOUT;
}

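/* grab a free slot index, sleeping in wait_for_free() if none is available */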
static int get(struct slot_map *m)
{
        int res = 0;
        spin_lock(&m->q.lock);
        if (unlikely(m->c <= 0))
                res = wait_for_free(m);
        if (likely(!res)) {
                m->c--;
                res = find_first_zero_bit(m->map, m->count);
                __set_bit(res, m->map);
        }
        spin_unlock(&m->q.lock);
        return res;
}

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
        void __user *uaddr;             /* user space address pointer */
        struct page **page_array;       /* array of mapped pages */
        int array_count;                /* size of above arrays */
        struct list_head list_link;
};

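/*
 * The single buffer mapping shared with the userspace client-core.
 * There is at most one instance at a time; the __orangefs_bufmap
 * pointer is protected by orangefs_bufmap_lock.
 */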
static struct orangefs_bufmap {
        int desc_size;
        int desc_shift;
        int desc_count;
        int total_size;
        int page_count;

        struct page **page_array;
        struct orangefs_bufmap_desc *desc_array;

        /* array to track usage of buffer descriptors */
        unsigned long *buffer_index_array;

        /* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
        unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

static DEFINE_SPINLOCK(orangefs_bufmap_lock);

static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
        unpin_user_pages(bufmap->page_array, bufmap->page_count);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
        kfree(bufmap->page_array);
        kfree(bufmap->desc_array);
        bitmap_free(bufmap->buffer_index_array);
        kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
        struct orangefs_bufmap *bufmap;
        int size = 0;
        spin_lock(&orangefs_bufmap_lock);
        bufmap = __orangefs_bufmap;
        if (bufmap)
                size = bufmap->desc_size;
        spin_unlock(&orangefs_bufmap_lock);
        return size;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

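/*
 * Allocate the bookkeeping for a mapping described by user_desc.  The
 * user pages themselves are pinned later, in orangefs_bufmap_map().
 */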
static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
        struct orangefs_bufmap *bufmap;

        bufmap = kzalloc_obj(*bufmap);
        if (!bufmap)
                goto out;

        bufmap->total_size = user_desc->total_size;
        bufmap->desc_count = user_desc->count;
        bufmap->desc_size = user_desc->size;
        bufmap->desc_shift = ilog2(bufmap->desc_size);

        bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
        if (!bufmap->buffer_index_array)
                goto out_free_bufmap;

        bufmap->desc_array =
                kzalloc_objs(struct orangefs_bufmap_desc, bufmap->desc_count);
        if (!bufmap->desc_array)
                goto out_free_index_array;

        bufmap->page_count = bufmap->total_size / PAGE_SIZE;

        /* allocate storage to track our page mappings */
        bufmap->page_array =
                kzalloc_objs(struct page *, bufmap->page_count);
        if (!bufmap->page_array)
                goto out_free_desc_array;

        return bufmap;

out_free_desc_array:
        kfree(bufmap->desc_array);
out_free_index_array:
        bitmap_free(bufmap->buffer_index_array);
out_free_bufmap:
        kfree(bufmap);
out:
        return NULL;
}

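/*
 * Pin the client-core's buffer in place with pin_user_pages_fast() and
 * carve the pinned pages into desc_count descriptors of desc_size bytes
 * (pages_per_desc pages) each.
 */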
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
                struct ORANGEFS_dev_map_desc *user_desc)
{
        int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
        int offset = 0, ret, i;

        /* map the pages */
        ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
                        bufmap->page_count, FOLL_WRITE, bufmap->page_array);

        if (ret < 0)
                return ret;

        if (ret != bufmap->page_count) {
                gossip_err("orangefs error: asked for %d pages, only got %d.\n",
                                bufmap->page_count, ret);

                for (i = 0; i < ret; i++)
                        unpin_user_page(bufmap->page_array[i]);
                return -ENOMEM;
        }

        /*
         * ideally we want to get kernel space pointers for each page, but
         * we can't kmap that many pages at once if highmem is being used.
         * so instead, we just kmap/kunmap the page address each time the
         * kaddr is needed.
         */
        for (i = 0; i < bufmap->page_count; i++)
                flush_dcache_page(bufmap->page_array[i]);

        /* build a list of available descriptors */
        for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
                bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
                bufmap->desc_array[i].array_count = pages_per_desc;
                bufmap->desc_array[i].uaddr =
                        (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
                offset += pages_per_desc;
        }

        return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
        struct orangefs_bufmap *bufmap;
        int ret = -EINVAL;

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_initialize: called (ptr (%p) sz (%d) cnt(%d).\n",
                     user_desc->ptr,
                     user_desc->size,
                     user_desc->count);

        if (user_desc->total_size < 0 ||
            user_desc->size < 0 ||
            user_desc->count < 0)
                goto out;

        /*
         * sanity check alignment and size of buffer that caller wants to
         * work with
         */
        if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
            (unsigned long)user_desc->ptr) {
                gossip_err("orangefs error: memory alignment (front). %p\n",
                           user_desc->ptr);
                goto out;
        }

        if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
            != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
                gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
                           user_desc->ptr,
                           user_desc->total_size);
                goto out;
        }

        if (user_desc->total_size != (user_desc->size * user_desc->count)) {
                gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
                           user_desc->total_size,
                           user_desc->size,
                           user_desc->count);
                goto out;
        }

        if ((user_desc->size % PAGE_SIZE) != 0) {
                gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
                           user_desc->size);
                goto out;
        }

        ret = -ENOMEM;
        bufmap = orangefs_bufmap_alloc(user_desc);
        if (!bufmap)
                goto out;

        ret = orangefs_bufmap_map(bufmap, user_desc);
        if (ret)
                goto out_free_bufmap;

        spin_lock(&orangefs_bufmap_lock);
        if (__orangefs_bufmap) {
                spin_unlock(&orangefs_bufmap_lock);
                gossip_err("orangefs: error: bufmap already initialized.\n");
                ret = -EINVAL;
                goto out_unmap_bufmap;
        }
        __orangefs_bufmap = bufmap;
        install(&rw_map,
                bufmap->desc_count,
                bufmap->buffer_index_array);
        install(&readdir_map,
                ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
                bufmap->readdir_index_array);
        spin_unlock(&orangefs_bufmap_lock);

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_initialize: exiting normally\n");
        return 0;

out_unmap_bufmap:
        orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
        orangefs_bufmap_free(bufmap);
out:
        return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
        struct orangefs_bufmap *bufmap = __orangefs_bufmap;
        if (!bufmap)
                return;
        gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
        mark_killed(&rw_map);
        mark_killed(&readdir_map);
        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "orangefs_bufmap_finalize: exiting normally\n");
}

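/*
 * orangefs_bufmap_run_down()
 *
 * waits until all outstanding buffer slots have been returned, then
 * unpins the user pages and frees the bufmap
 *
 * no return value
 */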
void orangefs_bufmap_run_down(void)
{
        struct orangefs_bufmap *bufmap = __orangefs_bufmap;
        if (!bufmap)
                return;
        run_down(&rw_map);
        run_down(&readdir_map);
        spin_lock(&orangefs_bufmap_lock);
        __orangefs_bufmap = NULL;
        spin_unlock(&orangefs_bufmap_lock);
        orangefs_bufmap_unmap(bufmap);
        orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
        return get(&rw_map);
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
        put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time.  Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
        return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
        put(&readdir_map, buffer_index);
}

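/*
 * Note: callers of the two copy helpers below are expected to hold a
 * slot obtained from orangefs_bufmap_get() as "buffer_index" for the
 * duration of the copy; nothing here revalidates __orangefs_bufmap.
 */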
/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
                                    int buffer_index,
                                    size_t size)
{
        struct orangefs_bufmap_desc *to;
        int i;

        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "%s: buffer_index:%d: size:%zu:\n",
                     __func__, buffer_index, size);

        to = &__orangefs_bufmap->desc_array[buffer_index];
        for (i = 0; size; i++) {
                struct page *page = to->page_array[i];
                size_t n = size;
                if (n > PAGE_SIZE)
                        n = PAGE_SIZE;
                if (copy_page_from_iter(page, 0, n, iter) != n)
                        return -EFAULT;
                size -= n;
        }
        return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
                                  int buffer_index,
                                  size_t size)
{
        struct orangefs_bufmap_desc *from;
        int i;

        from = &__orangefs_bufmap->desc_array[buffer_index];
        gossip_debug(GOSSIP_BUFMAP_DEBUG,
                     "%s: buffer_index:%d: size:%zu:\n",
                     __func__, buffer_index, size);

        for (i = 0; size; i++) {
                struct page *page = from->page_array[i];
                size_t n = size;
                if (n > PAGE_SIZE)
                        n = PAGE_SIZE;
                n = copy_page_to_iter(page, 0, n, iter);
                if (!n)
                        return -EFAULT;
                size -= n;
        }
        return 0;
}