// SPDX-License-Identifier: GPL-2.0+
/*
 * comedi_buf.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
 * Copyright (C) 2002 Frank Mori Hess <fmhess@users.sourceforge.net>
 */

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"

#ifdef PAGE_KERNEL_NOCACHE
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL_NOCACHE
#else
#define COMEDI_PAGE_PROTECTION		PAGE_KERNEL
#endif

static void comedi_buf_map_kref_release(struct kref *kref)
{
	struct comedi_buf_map *bm =
		container_of(kref, struct comedi_buf_map, refcount);
	struct comedi_buf_page *buf;
	unsigned int i;

	if (bm->page_list) {
		if (bm->dma_dir != DMA_NONE) {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				dma_free_coherent(bm->dma_hw_dev, PAGE_SIZE,
						  buf->virt_addr,
						  buf->dma_addr);
			}
		} else {
			for (i = 0; i < bm->n_pages; i++) {
				buf = &bm->page_list[i];
				ClearPageReserved(virt_to_page(buf->virt_addr));
				free_page((unsigned long)buf->virt_addr);
			}
		}
		vfree(bm->page_list);
	}
	if (bm->dma_dir != DMA_NONE)
		put_device(bm->dma_hw_dev);
	kfree(bm);
}

static void __comedi_buf_free(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	async->prealloc_bufsz = 0;
	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	async->buf_map = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	comedi_buf_map_put(bm);
}

static struct comedi_buf_map *
comedi_buf_map_alloc(struct comedi_device *dev, enum dma_data_direction dma_dir,
		     unsigned int n_pages)
{
	struct comedi_buf_map *bm;
	struct comedi_buf_page *buf;
	unsigned int i;

	bm = kzalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm)
		return NULL;

	kref_init(&bm->refcount);
	bm->dma_dir = dma_dir;
	if (bm->dma_dir != DMA_NONE) {
		/* Need ref to hardware device to free buffer later. */
		bm->dma_hw_dev = get_device(dev->hw_dev);
	}

	bm->page_list = vzalloc(sizeof(*buf) * n_pages);
	if (!bm->page_list)
		goto err;

	if (bm->dma_dir != DMA_NONE) {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr =
			    dma_alloc_coherent(bm->dma_hw_dev, PAGE_SIZE,
					       &buf->dma_addr, GFP_KERNEL);
			if (!buf->virt_addr)
				break;
		}
	} else {
		for (i = 0; i < n_pages; i++) {
			buf = &bm->page_list[i];
			buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
			if (!buf->virt_addr)
				break;

			SetPageReserved(virt_to_page(buf->virt_addr));
		}
	}
	bm->n_pages = i;
	if (i < n_pages)
		goto err;

	return bm;

err:
	comedi_buf_map_put(bm);
	return NULL;
}

static void __comedi_buf_alloc(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       unsigned int n_pages)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm;
	unsigned long flags;

	if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
		dev_err(dev->class_dev,
			"dma buffer allocation not supported\n");
		return;
	}

	bm = comedi_buf_map_alloc(dev, s->async_dma_dir, n_pages);
	if (!bm)
		return;

	spin_lock_irqsave(&s->spin_lock, flags);
	async->buf_map = bm;
	spin_unlock_irqrestore(&s->spin_lock, flags);
	async->prealloc_bufsz = n_pages << PAGE_SHIFT;
}

void comedi_buf_map_get(struct comedi_buf_map *bm)
{
	if (bm)
		kref_get(&bm->refcount);
}

int comedi_buf_map_put(struct comedi_buf_map *bm)
{
	if (bm)
		return kref_put(&bm->refcount, comedi_buf_map_kref_release);
	return 1;
}

/* helper for "access" vm operation */
int comedi_buf_map_access(struct comedi_buf_map *bm, unsigned long offset,
			  void *buf, int len, int write)
{
	unsigned int pgoff = offset_in_page(offset);
	unsigned long pg = offset >> PAGE_SHIFT;
	int done = 0;

	while (done < len && pg < bm->n_pages) {
		int l = min_t(int, len - done, PAGE_SIZE - pgoff);
		void *b = bm->page_list[pg].virt_addr + pgoff;

		if (write)
			memcpy(b, buf, l);
		else
			memcpy(buf, b, l);
		buf += l;
		done += l;
		pg++;
		pgoff = 0;
	}
	return done;
}

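/*
 * Illustrative sketch (not part of this file): comedi_buf_map_access() is
 * meant to back the ->access() method of the vm_operations_struct used for
 * mmapped buffers (see comedi_fops.c).  A handler delegating to it could look
 * roughly like this; the offset arithmetic shown is an assumption for
 * illustration only:
 *
 *	static int example_vm_access(struct vm_area_struct *vma,
 *				     unsigned long addr, void *buf,
 *				     int len, int write)
 *	{
 *		struct comedi_buf_map *bm = vma->vm_private_data;
 *		unsigned long offset =
 *			addr - vma->vm_start + (vma->vm_pgoff << PAGE_SHIFT);
 *
 *		if (len > vma->vm_end - addr)
 *			len = vma->vm_end - addr;
 *		return comedi_buf_map_access(bm, offset, buf, len, write);
 *	}
 */
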
/* returns s->async->buf_map and increments its kref refcount */
struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_map *bm = NULL;
	unsigned long flags;

	if (!async)
		return NULL;

	spin_lock_irqsave(&s->spin_lock, flags);
	bm = async->buf_map;
	/* only want it if buffer pages allocated */
	if (bm && bm->n_pages)
		comedi_buf_map_get(bm);
	else
		bm = NULL;
	spin_unlock_irqrestore(&s->spin_lock, flags);

	return bm;
}

bool comedi_buf_is_mmapped(struct comedi_subdevice *s)
{
	struct comedi_buf_map *bm = s->async->buf_map;

	return bm && (kref_read(&bm->refcount) > 1);
}

int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
		     unsigned long new_size)
{
	struct comedi_async *async = s->async;

	lockdep_assert_held(&dev->mutex);

	/* Round up new_size to multiple of PAGE_SIZE */
	new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;

	/* if no change is required, do nothing */
	if (async->prealloc_bufsz == new_size)
		return 0;

	/* deallocate old buffer */
	__comedi_buf_free(dev, s);

	/* allocate new buffer */
	if (new_size) {
		unsigned int n_pages = new_size >> PAGE_SHIFT;

		__comedi_buf_alloc(dev, s, n_pages);
		if (!async->prealloc_bufsz)
			return -ENOMEM;
	}

	return 0;
}

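/*
 * Worked example (assuming 4 KiB pages): a request for 20000 bytes is rounded
 * up to 20480 bytes (5 pages) above.  A later request for 20000 bytes is then
 * a no-op, since the rounded size already matches async->prealloc_bufsz, and
 * a request for 0 bytes simply frees the existing buffer.
 */
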
void comedi_buf_reset(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	async->buf_write_alloc_count = 0;
	async->buf_write_count = 0;
	async->buf_read_alloc_count = 0;
	async->buf_read_count = 0;

	async->buf_write_ptr = 0;
	async->buf_read_ptr = 0;

	async->cur_chan = 0;
	async->scans_done = 0;
	async->scan_progress = 0;
	async->munge_chan = 0;
	async->munge_count = 0;
	async->munge_ptr = 0;

	async->events = 0;
}

static unsigned int comedi_buf_write_n_unalloc(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_alloc_count;
}

unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int free_end = async->buf_read_count + async->prealloc_bufsz;

	return free_end - async->buf_write_count;
}

unsigned int _comedi_buf_write_alloc(struct comedi_subdevice *s,
				     unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int unalloc = comedi_buf_write_n_unalloc(s);

	if (nbytes > unalloc)
		nbytes = unalloc;

	async->buf_write_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read and updated
	 * before we write data to the write-alloc'ed buffer space
	 */
	smp_mb();

	return nbytes;
}

/**
 * comedi_buf_write_alloc() - Reserve buffer space for writing
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
 * data buffer associated with the subdevice.  The amount reserved is limited
 * by the space available.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_write_alloc(s, nbytes);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);

/*
 * munging is applied to data by core as it passes between user
 * and kernel space
 */
static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
				     unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int count = 0;
	const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);

	if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) ||
	    async->cmd.chanlist_len == 0) {
		async->munge_count += num_bytes;
		return num_bytes;
	}

	/* don't munge partial samples */
	num_bytes -= num_bytes % num_sample_bytes;
	while (count < num_bytes) {
		/*
		 * Do not munge beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = async->munge_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(async->munge_ptr);
		unsigned int block_size =
			     min(num_bytes - count, PAGE_SIZE - offset);

		s->munge(s->device, s, buf_page_list[page].virt_addr + offset,
			 block_size, async->munge_chan);

		/*
		 * ensure data is munged in buffer before the
		 * async buffer munge_count is incremented
		 */
		smp_wmb();

		async->munge_chan += block_size / num_sample_bytes;
		async->munge_chan %= async->cmd.chanlist_len;
		async->munge_count += block_size;
		async->munge_ptr += block_size;
		if (async->munge_ptr == async->prealloc_bufsz)
			async->munge_ptr = 0;
		count += block_size;
	}

	return count;
}

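/*
 * Illustrative sketch (hypothetical driver, not part of this file): s->munge
 * is a low-level driver callback.  A typical analog-input munge converts raw
 * hardware samples into the unsigned format COMEDI expects, e.g. by flipping
 * the sign bit of two's complement 16-bit samples:
 *
 *	static void mydrv_ai_munge(struct comedi_device *dev,
 *				   struct comedi_subdevice *s, void *data,
 *				   unsigned int num_bytes,
 *				   unsigned int start_chan_index)
 *	{
 *		unsigned short *array = data;
 *		unsigned int nsamples = num_bytes / sizeof(*array);
 *		unsigned int i;
 *
 *		for (i = 0; i < nsamples; i++)
 *			array[i] ^= 0x8000;
 *	}
 *
 * The driver installs it at attach time with "s->munge = mydrv_ai_munge;" and
 * comedi_buf_munge() above then calls it in page-bounded, whole-sample blocks.
 */
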
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;

	return async->buf_write_alloc_count - async->buf_write_count;
}

unsigned int _comedi_buf_write_free(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated = comedi_buf_write_n_allocated(s);

	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_write_count += nbytes;
	async->buf_write_ptr += nbytes;
	comedi_buf_munge(s, async->buf_write_count - async->munge_count);
	if (async->buf_write_ptr >= async->prealloc_bufsz)
		async->buf_write_ptr %= async->prealloc_bufsz;

	return nbytes;
}

/**
 * comedi_buf_write_free() - Free buffer space after it is written
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of space previously reserved for writing in the
 * COMEDI acquisition data buffer associated with the subdevice.  The amount of
 * space freed is limited to the amount that was reserved.  The freed space is
 * assumed to have been filled with sample data by the writer.
 *
 * If the samples in the freed space need to be "munged", do so here.  The
 * freed space becomes available for allocation by the reader.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_write_free(s, nbytes);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_free);

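/*
 * Illustrative sketch (hypothetical DMA-style driver, not part of this file):
 * comedi_buf_write_alloc() and comedi_buf_write_free() are used as a pair.
 * The driver reserves space before letting the hardware fill it, then commits
 * only what was actually transferred; desired_xfer_size and bytes_transferred
 * below are placeholders:
 *
 *	// before starting a transfer
 *	unsigned int nbytes = comedi_buf_write_alloc(s, desired_xfer_size);
 *	// ... program the DMA engine to deposit up to nbytes into the buffer ...
 *
 *	// in the transfer-complete handler
 *	comedi_buf_write_free(s, bytes_transferred);
 *	comedi_handle_events(dev, s);
 */
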
unsigned int _comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	struct comedi_async *async = s->async;
	unsigned int num_bytes;

	if (!async)
		return 0;

	num_bytes = async->munge_count - async->buf_read_count;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the buffer
	 */
	smp_rmb();

	return num_bytes;
}

/**
 * comedi_buf_read_n_available() - Determine amount of readable buffer space
 * @s: COMEDI subdevice.
 *
 * Determine the amount of readable buffer space in the COMEDI acquisition data
 * buffer associated with the subdevice.  The readable buffer space is that
 * which has been freed by the writer and "munged" to the sample data format
 * expected by COMEDI if necessary.
 *
 * Return: The amount of readable buffer space.
 */
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
	unsigned int num_bytes;

	if (comedi_get_is_subdevice_running(s)) {
		num_bytes = _comedi_buf_read_n_available(s);
		comedi_put_is_subdevice_running(s);
	} else {
		num_bytes = 0;
	}
	return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);

unsigned int _comedi_buf_read_alloc(struct comedi_subdevice *s,
				    unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int available;

	available = async->munge_count - async->buf_read_alloc_count;
	if (nbytes > available)
		nbytes = available;

	async->buf_read_alloc_count += nbytes;

	/*
	 * ensure the async buffer 'counts' are read before we
	 * attempt to read data from the read-alloc'ed buffer space
	 */
	smp_rmb();

	return nbytes;
}

/**
 * comedi_buf_read_alloc() - Reserve buffer space for reading
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to reserve in bytes.
 *
 * Reserve up to @nbytes bytes of previously written and "munged" buffer space
 * for reading in the COMEDI acquisition data buffer associated with the
 * subdevice.  The amount reserved is limited to the space available.  The
 * reader can read from the reserved space and then free it.  A reader is also
 * allowed to read from the space before reserving it as long as it determines
 * the amount of readable data available, but the space needs to be marked as
 * reserved before it can be freed.
 *
 * Return: The amount of space reserved in bytes.
 */
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_read_alloc(s, nbytes);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);

static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
{
	return async->buf_read_alloc_count - async->buf_read_count;
}

unsigned int _comedi_buf_read_free(struct comedi_subdevice *s,
				   unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	unsigned int allocated;

	/*
	 * ensure data has been read out of buffer before
	 * the async read count is incremented
	 */
	smp_mb();

	allocated = comedi_buf_read_n_allocated(async);
	if (nbytes > allocated)
		nbytes = allocated;

	async->buf_read_count += nbytes;
	async->buf_read_ptr += nbytes;
	async->buf_read_ptr %= async->prealloc_bufsz;
	return nbytes;
}

/**
 * comedi_buf_read_free() - Free buffer space after it has been read
 * @s: COMEDI subdevice.
 * @nbytes: Maximum space to free in bytes.
 *
 * Free up to @nbytes bytes of buffer space previously reserved for reading in
 * the COMEDI acquisition data buffer associated with the subdevice.  The
 * amount of space freed is limited to the amount that was reserved.
 *
 * The freed space becomes available for allocation by the writer.
 *
 * Return: The amount of space freed in bytes.
 */
unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
				  unsigned int nbytes)
{
	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_read_free(s, nbytes);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);

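/*
 * The read-side calls pair up as follows (sketch; _comedi_buf_read_samples()
 * below is the canonical in-file user of this sequence):
 *
 *	n = comedi_buf_read_n_available(s);	// munged bytes ready to read
 *	n = comedi_buf_read_alloc(s, n);	// reserve them for this reader
 *	// copy out page by page starting at s->async->buf_read_ptr,
 *	// as comedi_buf_memcpy_from() below does
 *	comedi_buf_read_free(s, n);		// hand the space back to the writer
 */
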
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
				 const void *data, unsigned int num_bytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int write_ptr = async->buf_write_ptr;

	while (num_bytes) {
		/*
		 * Do not copy beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = write_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(write_ptr);
		unsigned int block_size = min(num_bytes, PAGE_SIZE - offset);

		memcpy(buf_page_list[page].virt_addr + offset,
		       data, block_size);

		data += block_size;
		num_bytes -= block_size;
		write_ptr += block_size;
		if (write_ptr == async->prealloc_bufsz)
			write_ptr = 0;
	}
}

static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
				   void *dest, unsigned int nbytes)
{
	struct comedi_async *async = s->async;
	struct comedi_buf_page *buf_page_list = async->buf_map->page_list;
	unsigned int read_ptr = async->buf_read_ptr;

	while (nbytes) {
		/*
		 * Do not copy beyond page boundary.
		 * Note: prealloc_bufsz is a multiple of PAGE_SIZE.
		 */
		unsigned int page = read_ptr >> PAGE_SHIFT;
		unsigned int offset = offset_in_page(read_ptr);
		unsigned int block_size = min(nbytes, PAGE_SIZE - offset);

		memcpy(dest, buf_page_list[page].virt_addr + offset,
		       block_size);
		nbytes -= block_size;
		dest += block_size;
		read_ptr += block_size;
		if (read_ptr == async->prealloc_bufsz)
			read_ptr = 0;
	}
}

static unsigned int _comedi_buf_write_samples(struct comedi_subdevice *s,
					      const void *data,
					      unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/*
	 * Make sure there is enough room in the buffer for all the samples.
	 * If not, clamp the nsamples to the number that will fit, flag the
	 * buffer overrun and add the samples that fit.
	 */
	max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
	if (nsamples > max_samples) {
		dev_warn(s->device->class_dev, "buffer overrun\n");
		s->async->events |= COMEDI_CB_OVERFLOW;
		nsamples = max_samples;
	}

	if (nsamples == 0)
		return 0;

	nbytes = comedi_samples_to_bytes(s, nsamples);
	nbytes = _comedi_buf_write_alloc(s, nbytes);
	comedi_buf_memcpy_to(s, data, nbytes);
	_comedi_buf_write_free(s, nbytes);
	_comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}

/**
 * comedi_buf_write_samples() - Write sample data to COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to source samples.
 * @nsamples: Number of samples to write.
 *
 * Write up to @nsamples samples to the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as written and update the
 * acquisition scan progress.  If there is not enough room for the specified
 * number of samples, the number of samples written is limited to the number
 * that will fit and the %COMEDI_CB_OVERFLOW event flag is set to cause the
 * acquisition to terminate with an overrun error.  Set the %COMEDI_CB_BLOCK
 * event flag if any samples are written to cause waiting tasks to be woken
 * when the event flags are processed.
 *
 * Return: The amount of data written in bytes.
 */
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
				      const void *data, unsigned int nsamples)
{
	unsigned int nbytes;

	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_write_samples(s, data, nsamples);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_write_samples);

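/*
 * Illustrative sketch (hypothetical driver, not part of this file): an
 * acquisition interrupt handler typically drains the hardware FIFO into the
 * async buffer with comedi_buf_write_samples() and then lets the core process
 * the accumulated event flags.  mydrv_fifo_not_empty() and mydrv_read_fifo()
 * are placeholders for device-specific register accesses:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *d)
 *	{
 *		struct comedi_device *dev = d;
 *		struct comedi_subdevice *s = dev->read_subdev;
 *		unsigned short sample;
 *
 *		while (mydrv_fifo_not_empty(dev)) {
 *			sample = mydrv_read_fifo(dev);
 *			comedi_buf_write_samples(s, &sample, 1);
 *		}
 *
 *		comedi_handle_events(dev, s);
 *		return IRQ_HANDLED;
 *	}
 */
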
static unsigned int _comedi_buf_read_samples(struct comedi_subdevice *s,
					    void *data, unsigned int nsamples)
{
	unsigned int max_samples;
	unsigned int nbytes;

	/* clamp nsamples to the number of full samples available */
	max_samples = comedi_bytes_to_samples(s,
					      _comedi_buf_read_n_available(s));
	if (nsamples > max_samples)
		nsamples = max_samples;

	if (nsamples == 0)
		return 0;

	nbytes = _comedi_buf_read_alloc(s,
					comedi_samples_to_bytes(s, nsamples));
	comedi_buf_memcpy_from(s, data, nbytes);
	_comedi_buf_read_free(s, nbytes);
	_comedi_inc_scan_progress(s, nbytes);
	s->async->events |= COMEDI_CB_BLOCK;

	return nbytes;
}

/**
 * comedi_buf_read_samples() - Read sample data from COMEDI buffer
 * @s: COMEDI subdevice.
 * @data: Pointer to destination.
 * @nsamples: Maximum number of samples to read.
 *
 * Read up to @nsamples samples from the COMEDI acquisition data buffer
 * associated with the subdevice, mark it as read and update the acquisition
 * scan progress.  Limit the number of samples read to the number available.
 * Set the %COMEDI_CB_BLOCK event flag if any samples are read to cause waiting
 * tasks to be woken when the event flags are processed.
 *
 * Return: The amount of data read in bytes.
 */
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
				     void *data, unsigned int nsamples)
{
	unsigned int nbytes;

	if (comedi_get_is_subdevice_running(s)) {
		nbytes = _comedi_buf_read_samples(s, data, nsamples);
		comedi_put_is_subdevice_running(s);
	} else {
		nbytes = 0;
	}
	return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
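
/*
 * Illustrative sketch (hypothetical driver, not part of this file): on the
 * output side, a driver refilling its hardware FIFO pulls samples out of the
 * async buffer with comedi_buf_read_samples().  mydrv_write_fifo() is a
 * placeholder for a device-specific register write:
 *
 *	static void mydrv_ao_fifo_load(struct comedi_device *dev,
 *				       struct comedi_subdevice *s,
 *				       unsigned int nsamples)
 *	{
 *		unsigned short sample;
 *
 *		while (nsamples-- &&
 *		       comedi_buf_read_samples(s, &sample, 1) > 0)
 *			mydrv_write_fifo(dev, sample);
 *
 *		comedi_handle_events(dev, s);
 *	}
 */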
739