Lines Matching refs:trig (references to the identifier trig in the IIO trigger core, drivers/iio/industrialio-trigger.c)

53 struct iio_trigger *trig = to_iio_trigger(dev); in name_show() local
55 return sysfs_emit(buf, "%s\n", trig->name); in name_show()
114 int iio_trigger_set_immutable(struct iio_dev *indio_dev, struct iio_trigger *trig) in iio_trigger_set_immutable() argument
118 if (!indio_dev || !trig) in iio_trigger_set_immutable()
125 indio_dev->trig = iio_trigger_get(trig); in iio_trigger_set_immutable()
158 struct iio_trigger *trig = container_of(work, struct iio_trigger, in iio_reenable_work_fn() local
165 trig->ops->reenable(trig); in iio_reenable_work_fn()
182 static void iio_trigger_notify_done_atomic(struct iio_trigger *trig) in iio_trigger_notify_done_atomic() argument
184 if (atomic_dec_and_test(&trig->use_count) && trig->ops && in iio_trigger_notify_done_atomic()
185 trig->ops->reenable) in iio_trigger_notify_done_atomic()
186 schedule_work(&trig->reenable_work); in iio_trigger_notify_done_atomic()
195 void iio_trigger_poll(struct iio_trigger *trig) in iio_trigger_poll() argument
199 if (!atomic_read(&trig->use_count)) { in iio_trigger_poll()
200 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_poll()
203 if (trig->subirqs[i].enabled) in iio_trigger_poll()
204 generic_handle_irq(trig->subirq_base + i); in iio_trigger_poll()
206 iio_trigger_notify_done_atomic(trig); in iio_trigger_poll()
226 void iio_trigger_poll_nested(struct iio_trigger *trig) in iio_trigger_poll_nested() argument
230 if (!atomic_read(&trig->use_count)) { in iio_trigger_poll_nested()
231 atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_poll_nested()
234 if (trig->subirqs[i].enabled) in iio_trigger_poll_nested()
235 handle_nested_irq(trig->subirq_base + i); in iio_trigger_poll_nested()
237 iio_trigger_notify_done(trig); in iio_trigger_poll_nested()
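The two poll entry points above are what a trigger provider calls when its event source fires: iio_trigger_poll() fans the event out through generic_handle_irq() and is meant for hard interrupt context, while iio_trigger_poll_nested() uses handle_nested_irq() from a sleepable (threaded) context. A minimal sketch of the two handler flavours in a hypothetical trigger driver (the my_trig_* names are made up):

#include <linux/interrupt.h>
#include <linux/iio/trigger.h>

/* Hard-IRQ event source: dispatch to all consumers without sleeping. */
static irqreturn_t my_trig_hard_handler(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll(trig);
	return IRQ_HANDLED;
}

/* Threaded event source: the nested variant may sleep. */
static irqreturn_t my_trig_thread_handler(int irq, void *private)
{
	struct iio_trigger *trig = private;

	iio_trigger_poll_nested(trig);
	return IRQ_HANDLED;
}

In such a driver the trigger pointer would typically be passed as the dev_id cookie when the interrupt is requested.
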
243 void iio_trigger_notify_done(struct iio_trigger *trig) in iio_trigger_notify_done() argument
245 if (atomic_dec_and_test(&trig->use_count) && trig->ops && in iio_trigger_notify_done()
246 trig->ops->reenable) in iio_trigger_notify_done()
247 trig->ops->reenable(trig); in iio_trigger_notify_done()
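On the consumer side, iio_trigger_notify_done() is what a poll function's threaded handler calls once it has pushed its data; when the last consumer reports done, the core re-enables the trigger through trig->ops->reenable() (or, on the atomic path above, by scheduling reenable_work). A sketch of a typical buffer handler; my_scan and the zeroed sample data stand in for a real hardware read:

#include <linux/interrupt.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>

/* Hypothetical scan layout: three channels plus an aligned timestamp. */
struct my_scan {
	s16 chans[3];
	s64 timestamp __aligned(8);
};

static irqreturn_t my_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct my_scan scan = { };	/* a real driver reads hardware here */

	iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);

	/* Drop this consumer's use_count reference; the last consumer to
	 * finish is what lets the core call trig->ops->reenable(). */
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}
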
252 static int iio_trigger_get_irq(struct iio_trigger *trig) in iio_trigger_get_irq() argument
256 scoped_guard(mutex, &trig->pool_lock) { in iio_trigger_get_irq()
257 ret = bitmap_find_free_region(trig->pool, in iio_trigger_get_irq()
264 return ret + trig->subirq_base; in iio_trigger_get_irq()
267 static void iio_trigger_put_irq(struct iio_trigger *trig, int irq) in iio_trigger_put_irq() argument
269 guard(mutex)(&trig->pool_lock); in iio_trigger_put_irq()
270 clear_bit(irq - trig->subirq_base, trig->pool); in iio_trigger_put_irq()
280 int iio_trigger_attach_poll_func(struct iio_trigger *trig, in iio_trigger_attach_poll_func() argument
285 bitmap_empty(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func()
292 pf->irq = iio_trigger_get_irq(trig); in iio_trigger_attach_poll_func()
295 trig->name, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in iio_trigger_attach_poll_func()
307 if (trig->ops && trig->ops->set_trigger_state && notinuse) { in iio_trigger_attach_poll_func()
308 ret = trig->ops->set_trigger_state(trig, true); in iio_trigger_attach_poll_func()
318 if (!iio_validate_own_trigger(pf->indio_dev, trig)) in iio_trigger_attach_poll_func()
319 trig->attached_own_device = true; in iio_trigger_attach_poll_func()
326 iio_trigger_put_irq(trig, pf->irq); in iio_trigger_attach_poll_func()
332 int iio_trigger_detach_poll_func(struct iio_trigger *trig, in iio_trigger_detach_poll_func() argument
337 bitmap_weight(trig->pool, CONFIG_IIO_CONSUMERS_PER_TRIGGER) == 1; in iio_trigger_detach_poll_func()
340 if (trig->ops && trig->ops->set_trigger_state && no_other_users) { in iio_trigger_detach_poll_func()
341 ret = trig->ops->set_trigger_state(trig, false); in iio_trigger_detach_poll_func()
345 if (pf->indio_dev->dev.parent == trig->dev.parent) in iio_trigger_detach_poll_func()
346 trig->attached_own_device = false; in iio_trigger_detach_poll_func()
347 iio_trigger_put_irq(trig, pf->irq); in iio_trigger_detach_poll_func()
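iio_trigger_attach_poll_func() and iio_trigger_detach_poll_func() call trig->ops->set_trigger_state() only when the first consumer attaches or the last one leaves, so a trigger driver sees a single on/off transition. A sketch of the ops a provider might register, assuming a hypothetical my_trig_state kept in the trigger's drvdata:

#include <linux/iio/trigger.h>

/* Hypothetical per-trigger state. */
struct my_trig_state {
	bool armed;
};

/* state = true when the first poll function attaches,
 * state = false when the last one detaches. */
static int my_trig_set_state(struct iio_trigger *trig, bool state)
{
	struct my_trig_state *st = iio_trigger_get_drvdata(trig);

	st->armed = state;	/* a real driver would arm/disarm hardware */
	return 0;
}

static const struct iio_trigger_ops my_trig_ops = {
	.set_trigger_state = my_trig_set_state,
};
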
419 if (indio_dev->trig) in current_trigger_show()
420 return sysfs_emit(buf, "%s\n", indio_dev->trig->name); in current_trigger_show()
444 struct iio_trigger *oldtrig = indio_dev->trig; in current_trigger_store()
445 struct iio_trigger *trig; in current_trigger_store() local
455 trig = iio_trigger_acquire_by_name(buf); in current_trigger_store()
456 if (oldtrig == trig) { in current_trigger_store()
461 if (trig && indio_dev->info->validate_trigger) { in current_trigger_store()
462 ret = indio_dev->info->validate_trigger(indio_dev, trig); in current_trigger_store()
467 if (trig && trig->ops && trig->ops->validate_device) { in current_trigger_store()
468 ret = trig->ops->validate_device(trig, indio_dev); in current_trigger_store()
473 indio_dev->trig = trig; in current_trigger_store()
481 if (indio_dev->trig) { in current_trigger_store()
483 iio_trigger_attach_poll_func(indio_dev->trig, in current_trigger_store()
490 if (trig) in current_trigger_store()
491 iio_trigger_put(trig); in current_trigger_store()
509 struct iio_trigger *trig = to_iio_trigger(device); in iio_trig_release() local
512 if (trig->subirq_base) { in iio_trig_release()
514 irq_modify_status(trig->subirq_base + i, in iio_trig_release()
517 irq_set_chip(trig->subirq_base + i, in iio_trig_release()
519 irq_set_handler(trig->subirq_base + i, in iio_trig_release()
523 irq_free_descs(trig->subirq_base, in iio_trig_release()
526 kfree(trig->name); in iio_trig_release()
527 kfree(trig); in iio_trig_release()
538 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); in iio_trig_subirqmask() local
540 trig->subirqs[d->irq - trig->subirq_base].enabled = false; in iio_trig_subirqmask()
546 struct iio_trigger *trig = container_of(chip, struct iio_trigger, subirq_chip); in iio_trig_subirqunmask() local
548 trig->subirqs[d->irq - trig->subirq_base].enabled = true; in iio_trig_subirqunmask()
557 struct iio_trigger *trig; in viio_trigger_alloc() local
560 trig = kzalloc(sizeof(*trig), GFP_KERNEL); in viio_trigger_alloc()
561 if (!trig) in viio_trigger_alloc()
564 trig->dev.parent = parent; in viio_trigger_alloc()
565 trig->dev.type = &iio_trig_type; in viio_trigger_alloc()
566 trig->dev.bus = &iio_bus_type; in viio_trigger_alloc()
567 device_initialize(&trig->dev); in viio_trigger_alloc()
568 INIT_WORK(&trig->reenable_work, iio_reenable_work_fn); in viio_trigger_alloc()
570 mutex_init(&trig->pool_lock); in viio_trigger_alloc()
571 trig->subirq_base = irq_alloc_descs(-1, 0, in viio_trigger_alloc()
574 if (trig->subirq_base < 0) in viio_trigger_alloc()
577 trig->name = kvasprintf(GFP_KERNEL, fmt, vargs); in viio_trigger_alloc()
578 if (trig->name == NULL) in viio_trigger_alloc()
581 INIT_LIST_HEAD(&trig->list); in viio_trigger_alloc()
583 trig->owner = this_mod; in viio_trigger_alloc()
585 trig->subirq_chip.name = trig->name; in viio_trigger_alloc()
586 trig->subirq_chip.irq_mask = &iio_trig_subirqmask; in viio_trigger_alloc()
587 trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask; in viio_trigger_alloc()
589 irq_set_chip(trig->subirq_base + i, &trig->subirq_chip); in viio_trigger_alloc()
590 irq_set_handler(trig->subirq_base + i, &handle_simple_irq); in viio_trigger_alloc()
591 irq_modify_status(trig->subirq_base + i, in viio_trigger_alloc()
595 return trig; in viio_trigger_alloc()
598 irq_free_descs(trig->subirq_base, CONFIG_IIO_CONSUMERS_PER_TRIGGER); in viio_trigger_alloc()
600 kfree(trig); in viio_trigger_alloc()
619 struct iio_trigger *trig; in __iio_trigger_alloc() local
623 trig = viio_trigger_alloc(parent, this_mod, fmt, vargs); in __iio_trigger_alloc()
626 return trig; in __iio_trigger_alloc()
630 void iio_trigger_free(struct iio_trigger *trig) in iio_trigger_free() argument
632 if (trig) in iio_trigger_free()
633 put_device(&trig->dev); in iio_trigger_free()
661 struct iio_trigger **ptr, *trig; in __devm_iio_trigger_alloc() local
671 trig = viio_trigger_alloc(parent, this_mod, fmt, vargs); in __devm_iio_trigger_alloc()
673 if (trig) { in __devm_iio_trigger_alloc()
674 *ptr = trig; in __devm_iio_trigger_alloc()
680 return trig; in __devm_iio_trigger_alloc()
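viio_trigger_alloc() and __devm_iio_trigger_alloc() back the public iio_trigger_alloc()/devm_iio_trigger_alloc() helpers, with the format arguments becoming trig->name. A sketch of how a device driver might allocate, register and then pin its own trigger with iio_trigger_set_immutable() (referenced near the top of the listing); my_register_own_trigger is a placeholder name and the ops are passed in by the caller:

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static int my_register_own_trigger(struct iio_dev *indio_dev,
				   const struct iio_trigger_ops *ops)
{
	struct device *dev = indio_dev->dev.parent;
	struct iio_trigger *trig;
	int ret;

	/* Managed allocation; the format arguments become trig->name. */
	trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
				      iio_device_id(indio_dev));
	if (!trig)
		return -ENOMEM;

	trig->ops = ops;
	iio_trigger_set_drvdata(trig, indio_dev);

	ret = devm_iio_trigger_register(dev, trig);
	if (ret)
		return ret;

	/* Optional: pin the trigger so userspace cannot rewire
	 * current_trigger away from it. */
	return iio_trigger_set_immutable(indio_dev, trig);
}
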
717 return indio_dev->trig->attached_own_device; in iio_trigger_using_own()
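iio_trigger_using_own() lets a device driver check, for example at buffer-enable time, whether the currently attached trigger is the one it registered itself, so it can decide whether a hardware data-ready fast path applies. A brief sketch with a hypothetical use_hw_datardy flag in the driver's private state:

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>

struct my_state {
	bool use_hw_datardy;	/* hypothetical driver flag */
};

static int my_buffer_postenable(struct iio_dev *indio_dev)
{
	struct my_state *st = iio_priv(indio_dev);

	/* Only take the hardware data-ready path when our own trigger
	 * is the one currently attached. */
	st->use_hw_datardy = iio_trigger_using_own(indio_dev);
	return 0;
}
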
733 int iio_validate_own_trigger(struct iio_dev *idev, struct iio_trigger *trig) in iio_validate_own_trigger() argument
735 if (idev->dev.parent != trig->dev.parent) in iio_validate_own_trigger()
753 int iio_trigger_validate_own_device(struct iio_trigger *trig, in iio_trigger_validate_own_device() argument
756 if (indio_dev->dev.parent != trig->dev.parent) in iio_trigger_validate_own_device()
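iio_validate_own_trigger() and iio_trigger_validate_own_device() are mirror-image checks that simply compare parent devices, and both are exported so drivers can plug them straight into the corresponding callbacks. A sketch of both sides (the my_-prefixed struct names are placeholders):

#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>

/* Device side: only accept a trigger that shares this device's parent. */
static const struct iio_info my_dev_info = {
	.validate_trigger = iio_validate_own_trigger,
};

/* Trigger side: only accept consumer devices sharing the trigger's parent. */
static const struct iio_trigger_ops my_own_trig_ops = {
	.validate_device = iio_trigger_validate_own_device,
};
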
771 if (indio_dev->trig) in iio_device_unregister_trigger_consumer()
772 iio_trigger_put(indio_dev->trig); in iio_device_unregister_trigger_consumer()