// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		if (bm->dma_dir != DMA_NONE) {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
			}
		} else {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				ClearPageReserved(virt_to_page(buf->virt_addr));
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	async->prealloc_bufsz = 0;
	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
		     unsigned int n_pages)
{
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned int i;

	bm = kzalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm)
		return NULL;

	kref_init(&bm->refcount);
	bm->dma_dir = dma_dir;
	if (bm->dma_dir != DMA_NONE) {
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);
	}

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (!bm->page_list)
		goto err;

	if (bm->dma_dir != DMA_NONE) {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr =
			    dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
					       &buf->dma_addr, GFP_KERNEL);
			if (!buf->virt_addr)
				break;
		}
	} else {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
			if (!buf->virt_addr)
				break;

			SetPageReserved(virt_to_page(buf->virt_addr));
		}
	}
	bm->n_pages = i;
	if (i < n_pages)
		goto err;

	return bm;

err:
	comedi_buf_map_put(bm);
	return NULL;
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
	if (!bm)
		return;

	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	async->prealloc_bufsz = n_pages << PAGE_SHIFT;
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}
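
/*
 * Example (illustrative sketch, not something defined in this file): code
 * that holds a comedi_buf_map reference for an mmap'ed buffer can service
 * ptrace-style accesses to the mapping by forwarding a vm_operations_struct
 * .access handler here.  The handler name and the use of vm_private_data to
 * carry the comedi_buf_map are assumptions made for the example.
 *
 *	static int example_vm_access(struct vm_area_struct *vma,
 *				     unsigned long addr, void *buf,
 *				     int len, int write)
 *	{
 *		struct comedi_buf_map *bm = vma->vm_private_data;
 *		unsigned long offset =
 *			addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
 *
 *		if (len > vma->vm_end - addr)
 *			len = vma->vm_end - addr;
 *		return comedi_buf_map_access(bm, offset, buf, len, write);
 *	}
 */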

/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	lockdep_assert_held(&dev->mutex);

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);
		if (!async->prealloc_bufsz)
			return -ENOMEM;
	}

	return 0;
}

void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice. The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
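
/*
 * Example (illustrative sketch): a driver whose hardware streams into the
 * buffer by DMA can reserve space up front when the command starts, for
 * instance the whole ring, and only report it as written once the data has
 * actually arrived (see comedi_buf_write_free() below).  This is a usage
 * sketch, not a requirement of the API.
 *
 *	In the driver's (*do_cmd)() setup:
 *
 *		comedi_buf_write_alloc(s, s->async->prealloc_bufsz);
 */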

/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
		async->munge_count += num_bytes;
		return num_bytes;
	}

	/* don't munge partial samples */
	num_bytes -= num_bytes % num_sample_bytes;
	while (count < num_bytes) {
		/*
		 * Do not munge beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = async->munge_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(async->munge_ptr);
		unsigned int block_size =
			min(num_bytes - count, PAGE_SIZE - offset);

		s->munge(s->device, s, buf_page_list[page].virt_addr + offset,
			 block_size, async->munge_chan);

		/*
		 * ensure data is munged in buffer before the
		 * async buffer munge_count is incremented
		 */
		smp_wmb();

		async->munge_chan += block_size / num_sample_bytes;
		async->munge_chan %= async->cmd.chanlist_len;
		async->munge_count += block_size;
		async->munge_ptr += block_size;
		if (async->munge_ptr == async->prealloc_bufsz)
			async->munge_ptr = 0;
		count += block_size;
	}

	return count;
}
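
/*
 * Example (illustrative sketch): a driver's (*munge)() hook converts raw
 * hardware data in place into COMEDI's unsigned sample format.  Each chunk
 * it is handed contains whole samples and never crosses a page boundary.
 * The conversion below assumes 16-bit two's-complement hardware samples and
 * flips the sign bit to get offset binary; the function name is hypothetical.
 *
 *	static void example_ai_munge(struct comedi_device *dev,
 *				     struct comedi_subdevice *s, void *data,
 *				     unsigned int num_bytes,
 *				     unsigned int start_chan_index)
 *	{
 *		unsigned short *array = data;
 *		unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
 *		unsigned int i;
 *
 *		for (i = 0; i < nsamples; i++)
 *			array[i] ^= 0x8000;
 *	}
 */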

unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice. The amount of
 * space freed is limited to the amount that was reserved. The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here. The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);
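
/*
 * Example (illustrative sketch): continuing the DMA usage sketched above,
 * the driver's interrupt handler reports the bytes the controller has
 * actually deposited and then lets the core process the resulting events.
 * "my_dma_bytes_done" is a hypothetical driver helper.
 *
 *	unsigned int nbytes = my_dma_bytes_done(dev);
 *
 *	comedi_buf_write_free(s, nbytes);
 *	comedi_inc_scan_progress(s, nbytes);
 *	s->async->events |= COMEDI_CB_BLOCK;
 *	comedi_handle_events(dev, s);
 */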

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice. The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice. The amount reserved is limited to the space available. The
 * reader can read from the reserved space and then free it. A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice. The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
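
/*
 * Example (illustrative sketch): a consumer of buffered data, such as an
 * output path feeding a device from the buffer, pairs the read-side calls
 * above: check what is available, reserve it, copy it out, then free it.
 * "my_copy_out" is a hypothetical driver helper; most drivers simply use
 * comedi_buf_read_samples() below, which wraps this sequence.
 *
 *	unsigned int n = comedi_buf_read_n_available(s);
 *
 *	n = comedi_buf_read_alloc(s, n);
 *	my_copy_out(dev, s, n);
 *	comedi_buf_read_free(s, n);
 */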

static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		/*
		 * Do not copy beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = write_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(write_ptr);
		unsigned int block_size = min(num_bytes, PAGE_SIZE - offset);

		memcpy(buf_page_list[page].virt_addr + offset,
		       data, block_size);

		data += block_size;
		num_bytes -= block_size;
		write_ptr += block_size;
		if (write_ptr == async->prealloc_bufsz)
			write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		/*
		 * Do not copy beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = read_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(read_ptr);
		unsigned int block_size = min(nbytes, PAGE_SIZE - offset);

		memcpy(dest, buf_page_list[page].virt_addr + offset,
		       block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr += block_size;
		if (read_ptr == async->prealloc_bufsz)
			read_ptr = 0;
	}
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress. If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error. Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_write_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_to(s, data, nbytes);
	comedi_buf_write_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
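
/*
 * Example (illustrative sketch): an analog input driver's interrupt handler
 * typically pushes each sample it pulls from the hardware with this helper
 * and then asks the core to process the resulting events.  "my_read_fifo"
 * is a hypothetical register read.
 *
 *	unsigned short val = my_read_fifo(dev);
 *
 *	comedi_buf_write_samples(s, &val, 1);
 *	comedi_handle_events(dev, s);
 */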

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress. Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = comedi_buf_read_alloc(s,
				       comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	comedi_buf_read_free(s, nbytes);
	comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
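
/*
 * Example (illustrative sketch): an analog output driver refills its hardware
 * FIFO from the buffer with this helper; a short read simply means no more
 * data is ready yet.  "my_fifo_has_room" and "my_write_fifo" are hypothetical
 * driver helpers.
 *
 *	unsigned short val;
 *
 *	while (my_fifo_has_room(dev)) {
 *		if (!comedi_buf_read_samples(s, &val, 1))
 *			break;
 *		my_write_fifo(dev, val);
 *	}
 *	comedi_handle_events(dev, s);
 */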