/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>

#include <linux/iio/iio.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

/**
 * iio_buffer_read_first_n_outer() - chrdev read for buffer access
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 **/
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
				      size_t n, loff_t *f_ps)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	if (!rb || !rb->access->read_first_n)
		return -EINVAL;
	return rb->access->read_first_n(rb, n, buf);
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 */
unsigned int iio_buffer_poll(struct file *filp,
			     struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filp->private_data;
	struct iio_buffer *rb = indio_dev->buffer;

	poll_wait(filp, &rb->pollq, wait);
	if (rb->stufftoread)
		return POLLIN | POLLRDNORM;
	/* need a way of knowing if there may be enough data... */
	return 0;
}
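/*
 * Illustrative sketch (not part of this file): a userspace consumer of
 * the handlers above would typically block in poll() on the character
 * device and then read() whatever is available.  The device path and
 * buffer size here are hypothetical.
 *
 *	struct pollfd pfd = {
 *		.fd = open("/dev/iio:device0", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	char buf[512];
 *
 *	while (poll(&pfd, 1, -1) > 0)
 *		read(pfd.fd, buf, sizeof(buf));
 */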
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sprintf(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	u8 type = this_attr->c->scan_type.endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	return sprintf(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       this_attr->c->scan_type.sign,
		       this_attr->c->scan_type.realbits,
		       this_attr->c->scan_type.storagebits,
		       this_attr->c->scan_type.shift);
}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	ret = test_bit(to_iio_dev_attr(attr)->address,
		       indio_dev->buffer->scan_mask);

	return sprintf(buf, "%d\n", ret);
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", indio_dev->buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
		goto error_ret;
	}
	indio_dev->buffer->scan_timestamp = state;
	indio_dev->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
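/*
 * Illustrative sketch (not part of this file): the show/store helpers
 * above surface per-channel scan controls under sysfs.  A hypothetical
 * device and channel would be configured from the shell roughly as:
 *
 *	# enable channel 0 and the timestamp in the scan
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en
 *	echo 1 > /sys/bus/iio/devices/iio:device0/scan_elements/in_timestamp_en
 *	# query the sample layout, e.g. "le:s12/16>>4"
 *	cat /sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_type
 */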
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     &buffer->scan_el_dev_attr_list);
	if (ret)
		goto error_ret;
	attrcount++;
	ret = attrcount;
error_ret:
	return ret;
}

static void iio_buffer_remove_and_free_scan_dev_attr(struct iio_dev *indio_dev,
						     struct iio_dev_attr *p)
{
	kfree(p->dev_attr.attr.name);
	kfree(p);
}

static void __iio_buffer_attr_cleanup(struct iio_dev *indio_dev)
{
	struct iio_dev_attr *p, *n;
	struct iio_buffer *buffer = indio_dev->buffer;

	list_for_each_entry_safe(p, n,
				 &buffer->scan_el_dev_attr_list, l)
		iio_buffer_remove_and_free_scan_dev_attr(indio_dev, p);
}

static const char * const iio_scan_elements_group_name = "scan_elements";

int iio_buffer_register(struct iio_dev *indio_dev,
			const struct iio_chan_spec *channels,
			int num_channels)
{
	struct iio_dev_attr *p;
	struct attribute **attr;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, i, attrn, attrcount, attrcount_orig = 0;

	if (buffer->attrs)
		indio_dev->groups[indio_dev->groupcounter++] = buffer->attrs;

	if (buffer->scan_el_attrs != NULL) {
		attr = buffer->scan_el_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	INIT_LIST_HEAD(&buffer->scan_el_dev_attr_list);
	if (channels) {
		/* Dynamically create the scan element attributes */
		for (i = 0; i < num_channels; i++) {
			/* Establish necessary mask length */
			if (channels[i].scan_index >
			    (int)indio_dev->masklength - 1)
				indio_dev->masklength
					= channels[i].scan_index + 1;

			ret = iio_buffer_add_channel_sysfs(indio_dev,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				indio_dev->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask =
				kcalloc(BITS_TO_LONGS(indio_dev->masklength),
					sizeof(*buffer->scan_mask),
					GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	buffer->scan_el_group.name = iio_scan_elements_group_name;

	buffer->scan_el_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(buffer->scan_el_group.attrs[0]),
			GFP_KERNEL);
	if (buffer->scan_el_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}
	if (buffer->scan_el_attrs)
		memcpy(buffer->scan_el_group.attrs,
		       buffer->scan_el_attrs->attrs,
		       sizeof(buffer->scan_el_group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;

	list_for_each_entry(p, &buffer->scan_el_dev_attr_list, l)
		buffer->scan_el_group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &buffer->scan_el_group;

	return 0;

error_free_scan_mask:
	kfree(buffer->scan_mask);
error_cleanup_dynamic:
	__iio_buffer_attr_cleanup(indio_dev);

	return ret;
}
EXPORT_SYMBOL(iio_buffer_register);
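/*
 * Illustrative sketch (not part of this file): a driver would normally
 * call this from its probe() path once the channel array and buffer
 * are set up, and unwind with iio_buffer_unregister() on remove.  The
 * error label is hypothetical.
 *
 *	ret = iio_buffer_register(indio_dev,
 *				  indio_dev->channels,
 *				  indio_dev->num_channels);
 *	if (ret)
 *		goto error_free_buffer;
 */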
void iio_buffer_unregister(struct iio_dev *indio_dev)
{
	kfree(indio_dev->buffer->scan_mask);
	kfree(indio_dev->buffer->scan_el_group.attrs);
	__iio_buffer_attr_cleanup(indio_dev);
}
EXPORT_SYMBOL(iio_buffer_unregister);

ssize_t iio_buffer_read_length(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	if (buffer->access->get_length)
		return sprintf(buf, "%d\n",
			       buffer->access->get_length(buffer));

	return 0;
}
EXPORT_SYMBOL(iio_buffer_read_length);

ssize_t iio_buffer_write_length(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	unsigned long val;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	if (buffer->access->get_length)
		if (val == buffer->access->get_length(buffer))
			return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev)) {
		ret = -EBUSY;
	} else {
		if (buffer->access->set_length)
			buffer->access->set_length(buffer, val);
		ret = 0;
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
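/*
 * Illustrative sketch (not part of this file): the length and enable
 * attributes land under the device's buffer directory, so a
 * hypothetical capture session from the shell looks roughly like:
 *
 *	echo 128 > /sys/bus/iio/devices/iio:device0/buffer/length
 *	echo 1   > /sys/bus/iio/devices/iio:device0/buffer/enable
 *	...
 *	echo 0   > /sys/bus/iio/devices/iio:device0/buffer/enable
 */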
ssize_t iio_buffer_store_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf,
				size_t len)
{
	int ret;
	bool requested_state, current_state;
	int previous_mode;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = indio_dev->buffer;

	mutex_lock(&indio_dev->mlock);
	previous_mode = indio_dev->currentmode;
	requested_state = !(buf[0] == '0');
	current_state = iio_buffer_enabled(indio_dev);
	if (current_state == requested_state) {
		printk(KERN_INFO "iio-buffer, current state requested again\n");
		goto done;
	}
	if (requested_state) {
		if (indio_dev->setup_ops->preenable) {
			ret = indio_dev->setup_ops->preenable(indio_dev);
			if (ret) {
				printk(KERN_ERR
				       "Buffer not started: buffer preenable failed\n");
				goto error_ret;
			}
		}
		if (buffer->access->request_update) {
			ret = buffer->access->request_update(buffer);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: buffer parameter update failed\n");
				goto error_ret;
			}
		}
		/* Definitely possible for devices to support both of these. */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
			if (!indio_dev->trig) {
				printk(KERN_INFO
				       "Buffer not started: no trigger\n");
				ret = -EINVAL;
				goto error_ret;
			}
			indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
		} else if (indio_dev->modes & INDIO_BUFFER_HARDWARE) {
			indio_dev->currentmode = INDIO_BUFFER_HARDWARE;
		} else { /* Should never be reached */
			ret = -EINVAL;
			goto error_ret;
		}

		if (indio_dev->setup_ops->postenable) {
			ret = indio_dev->setup_ops->postenable(indio_dev);
			if (ret) {
				printk(KERN_INFO
				       "Buffer not started: postenable failed\n");
				indio_dev->currentmode = previous_mode;
				if (indio_dev->setup_ops->postdisable)
					indio_dev->setup_ops->
						postdisable(indio_dev);
				goto error_ret;
			}
		}
	} else {
		if (indio_dev->setup_ops->predisable) {
			ret = indio_dev->setup_ops->predisable(indio_dev);
			if (ret)
				goto error_ret;
		}
		indio_dev->currentmode = INDIO_DIRECT_MODE;
		if (indio_dev->setup_ops->postdisable) {
			ret = indio_dev->setup_ops->postdisable(indio_dev);
			if (ret)
				goto error_ret;
		}
	}
done:
	mutex_unlock(&indio_dev->mlock);
	return len;

error_ret:
	mutex_unlock(&indio_dev->mlock);
	return ret;
}
EXPORT_SYMBOL(iio_buffer_store_enable);

ssize_t iio_buffer_show_enable(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);

	return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);

/* NULL is used as the error indicator as no valid mask can be NULL. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (bitmap_subset(mask, av_masks, masklength))
			return av_masks;
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
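/*
 * Illustrative sketch (not part of this file): available_scan_masks is
 * a zero-terminated array of bitmaps, one BITS_TO_LONGS(masklength)
 * chunk per entry.  A hypothetical device that can only capture
 * channels {0,1} or {0,1,2,3} together would declare:
 *
 *	static const unsigned long my_scan_masks[] = { 0x3, 0xf, 0 };
 *
 * iio_scan_mask_match() returns the first entry of which the requested
 * mask is a subset, so asking for just channel 1 selects 0x3.
 */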
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask,
				  bool timestamp)
{
	const struct iio_chan_spec *ch;
	unsigned bytes = 0;
	int length, i;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		ch = iio_find_channel_from_si(indio_dev, i);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	if (timestamp) {
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		bytes = ALIGN(bytes, length);
		bytes += length;
	}
	return bytes;
}

int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
{
	struct iio_buffer *buffer = indio_dev->buffer;

	dev_dbg(&indio_dev->dev, "%s\n", __func__);

	/* How much space will the demuxed element take? */
	indio_dev->scan_bytes =
		iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);
	buffer->access->set_bytes_per_datum(buffer, indio_dev->scan_bytes);

	/* What scan mask do we actually have? */
	if (indio_dev->available_scan_masks)
		indio_dev->active_scan_mask =
			iio_scan_mask_match(indio_dev->available_scan_masks,
					    indio_dev->masklength,
					    buffer->scan_mask);
	else
		indio_dev->active_scan_mask = buffer->scan_mask;
	iio_update_demux(indio_dev);

	if (indio_dev->info->update_scan_mode)
		return indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
	return 0;
}
EXPORT_SYMBOL(iio_sw_buffer_preenable);

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 **/
int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scan mask prior to registering buffer\n");
		return -EINVAL;
	}
	trialmask = kmalloc(sizeof(*trialmask) *
			    BITS_TO_LONGS(indio_dev->masklength),
			    GFP_KERNEL);
	if (trialmask == NULL)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask);
		if (!mask) {
			kfree(trialmask);
			return -EINVAL;
		}
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	kfree(trialmask);

	return 0;
}
EXPORT_SYMBOL_GPL(iio_scan_mask_set);

int iio_scan_mask_query(struct iio_dev *indio_dev,
			struct iio_buffer *buffer, int bit)
{
	if (bit >= indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	return test_bit(bit, buffer->scan_mask);
}
EXPORT_SYMBOL_GPL(iio_scan_mask_query);

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned from;
	unsigned to;
	unsigned length;
	struct list_head l;
};

static unsigned char *iio_demux(struct iio_buffer *buffer,
				unsigned char *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}
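/*
 * Illustrative worked example (not part of this file): with two 16-bit
 * channels enabled plus the 64-bit timestamp, iio_compute_scan_bytes()
 * walks 0 -> ALIGN(0,2)+2 = 2 -> ALIGN(2,2)+2 = 4, then aligns the
 * timestamp to 8 bytes: ALIGN(4,8)+8 = 16.  Each buffered scan is
 * therefore 16 bytes, with 4 bytes of padding before the timestamp.
 */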
int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
		       s64 timestamp)
{
	unsigned char *dataout = iio_demux(buffer, data);

	return buffer->access->store_to(buffer, dataout, timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffer);

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

int iio_update_demux(struct iio_dev *indio_dev)
{
	const struct iio_chan_spec *ch;
	struct iio_buffer *buffer = indio_dev->buffer;
	int ret, in_ind = -1, out_ind, length;
	unsigned in_loc = 0, out_loc = 0;
	struct iio_demux_table *p;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			/* Skip a channel that was captured but not wanted */
			ch = iio_find_channel_from_si(indio_dev, in_ind);
			length = ch->scan_type.storagebits / 8;
			/* Make sure we are aligned */
			in_loc += length;
			if (in_loc % length)
				in_loc += length - in_loc % length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev, in_ind);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (p == NULL) {
			ret = -ENOMEM;
			goto error_clear_mux_table;
		}
		ch = iio_find_channel_from_si(indio_dev,
					      indio_dev->scan_index_timestamp);
		length = ch->scan_type.storagebits / 8;
		if (out_loc % length)
			out_loc += length - out_loc % length;
		if (in_loc % length)
			in_loc += length - in_loc % length;
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
		list_add_tail(&p->l, &buffer->demux_list);
		out_loc += length;
		in_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (buffer->demux_bounce == NULL) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_demux);
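/*
 * Illustrative worked example (not part of this file): suppose the
 * hardware can only capture channels {0,1,2} (active_scan_mask) while
 * userspace asked for {0,2} (scan_mask), all 16-bit.  The table built
 * above ends up with two entries, { .from = 0, .to = 0, .length = 2 }
 * and { .from = 4, .to = 2, .length = 2 }, so iio_demux() copies bytes
 * 0-1 and 4-5 of each captured scan into a 4-byte bounce buffer.
 */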