/linux/include/linux/

rwbase_rt.h
  12  atomic_t readers;  [member]
  18  .readers = ATOMIC_INIT(READER_BIAS), \
  25  atomic_set(&(rwbase)->readers, READER_BIAS); \
  31  return atomic_read(&rwb->readers) != READER_BIAS;  [in rw_base_is_locked()]
  36  return atomic_read(&rwb->readers) == WRITER_BIAS;  [in rw_base_is_write_locked()]
  41  return atomic_read(&rwb->readers) > 0;  [in rw_base_is_contended()]
rcu_sync.h
  16  /* Structure to mediate between updaters and fastpath-using readers. */
  26   * rcu_sync_is_idle() - Are readers permitted to use their fastpaths?
  29   * Returns true if readers are permitted to use their fastpaths. Must be
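The header comment above describes the contract between rcu_sync and fastpath-using readers. A minimal sketch of a reader gating its fastpath on rcu_sync_is_idle(), loosely modeled on percpu-rwsem; my_sync and my_fast_count are hypothetical names, not part of the rcu_sync API:

    #include <linux/percpu.h>
    #include <linux/rcu_sync.h>
    #include <linux/rcupdate.h>

    static struct rcu_sync my_sync;                     /* hypothetical */
    static DEFINE_PER_CPU(unsigned int, my_fast_count);

    /* Returns true if the lock-free fastpath was taken; on false the
     * caller must fall back to its slowpath, since an updater is active. */
    static bool my_read_enter_fast(void)
    {
            bool fast;

            rcu_read_lock();           /* rcu_sync_is_idle() requires this */
            fast = rcu_sync_is_idle(&my_sync);
            if (fast)
                    this_cpu_inc(my_fast_count);  /* no shared atomics */
            rcu_read_unlock();
            return fast;
    }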
rculist.h
  14   * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
  18   * cleanup tasks, when readers have no access to the list being initialized.
  19   * However, if the list being initialized is visible to readers, you
 195   * the perspective of concurrent readers. It is the caller's responsibility
 238   * "first" and "last" tracking list, so initialize it. RCU readers  [in __list_splice_init_rcu()]
 247   * Wait for any readers to finish using the list before splicing  [in __list_splice_init_rcu()]
 248   * the list body into the new list. Any new readers will see  [in __list_splice_init_rcu()]
 257   * Readers are finished with the source list, so perform splice.  [in __list_splice_init_rcu()]
 259   * to concurrent RCU readers. Note that RCU readers are not  [in __list_splice_init_rcu()]
 526   * the perspective of concurrent readers. It is the caller's responsibility
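These comments describe the general RCU update pattern for lists: unlink under the writer's lock, wait a grace period, then free. A minimal sketch; struct foo, foo_list, and foo_lock are hypothetical:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo {
            struct list_head list;
            int data;
    };

    static LIST_HEAD(foo_list);
    static DEFINE_SPINLOCK(foo_lock);

    static void foo_del(struct foo *p)
    {
            spin_lock(&foo_lock);
            list_del_rcu(&p->list);  /* new readers can no longer find p */
            spin_unlock(&foo_lock);
            synchronize_rcu();       /* wait for readers already using p */
            kfree(p);                /* no reader can now hold a reference */
    }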
/linux/Documentation/RCU/

checklist.rst
  30  One final exception is where RCU readers are used to prevent
  40  RCU does allow *readers* to run (almost) naked, but *writers* must
  86  The whole point of RCU is to permit readers to run without
  87  any locks or atomic operations. This means that readers will
 100  locks (that are acquired by both readers and writers)
 101  that guard per-element state. Fields that the readers
 107  c. Make updates appear atomic to readers. For example,
 111  appear to be atomic to RCU readers, nor will sequences
 119  d. Carefully order the updates and the reads so that readers
 139  a. Readers must maintain proper ordering of their memory
  [all …]
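Item c. above is the classic read-copy-update pattern: build a complete new version off to the side and publish it with a single pointer assignment, so readers see the whole old or the whole new element, never a mix. A hedged sketch with hypothetical names (global_cfg, cfg_mutex):

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg { int a, b; };
    static struct cfg __rcu *global_cfg;   /* hypothetical */
    static DEFINE_MUTEX(cfg_mutex);        /* serializes updaters */

    static int cfg_update(int a, int b)
    {
            struct cfg *old, *new;

            new = kmalloc(sizeof(*new), GFP_KERNEL);
            if (!new)
                    return -ENOMEM;
            new->a = a;                    /* fill in the private copy... */
            new->b = b;
            mutex_lock(&cfg_mutex);
            old = rcu_dereference_protected(global_cfg,
                                            lockdep_is_held(&cfg_mutex));
            rcu_assign_pointer(global_cfg, new); /* ...publish in one store */
            mutex_unlock(&cfg_mutex);
            synchronize_rcu();             /* wait out readers of 'old' */
            kfree(old);
            return 0;
    }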
whatisRCU.rst
  56  Section 1, though most readers will profit by reading this section at
  79  new versions of these data items), and can run concurrently with readers.
  81  readers is the semantics of modern CPUs guarantee that readers will see
  85  removal phase. Because reclaiming data items can disrupt any readers
  87  not start until readers no longer hold references to those data items.
  91  reclamation phase until all readers active during the removal phase have
  93  callback that is invoked after they finish. Only readers that are active
 101  readers cannot gain a reference to it.
 103  b. Wait for all previous readers to complete their RCU read-side
 106  c. At this point, there cannot be any readers who hold references
  [all …]
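The a/b/c sequence above can also run asynchronously: instead of blocking in step b., the updater registers a callback that RCU invokes once all readers active during the removal phase have finished. A sketch; struct foo and its locking are hypothetical:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            struct list_head list;
            struct rcu_head rcu;
    };

    static void foo_reclaim(struct rcu_head *head)     /* step c, deferred */
    {
            kfree(container_of(head, struct foo, rcu));
    }

    /* Caller holds the update-side lock protecting the list. */
    static void foo_remove(struct foo *p)
    {
            list_del_rcu(&p->list);          /* step a: removal */
            call_rcu(&p->rcu, foo_reclaim);  /* step b: wait, asynchronously */
    }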
rcu.rst
  10  must be long enough that any readers accessing the item being deleted have
  21  The advantage of RCU's two-part approach is that RCU readers need
  26  in read-mostly situations. The fact that RCU readers need not
  30  if the RCU readers give no indication when they are done?
  32  Just as with spinlocks, RCU readers are not permitted to
  42  same effect, but require that the readers manipulate CPU-local
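A minimal read side matching the text above: the critical section takes no locks, and, as with spinlocks, it must not block; gp is a hypothetical RCU-protected pointer:

    #include <linux/rcupdate.h>

    struct foo { int a; };
    static struct foo __rcu *gp;    /* hypothetical */

    static int foo_read_a(void)
    {
            struct foo *p;
            int ret = -1;

            rcu_read_lock();        /* never blocks, may not sleep inside */
            p = rcu_dereference(gp);
            if (p)
                    ret = p->a;
            rcu_read_unlock();      /* signals "this reader is done" */
            return ret;
    }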
lockdep.rst
  43  invoked by both RCU readers and updaters.
  47  is invoked by both RCU-bh readers and updaters.
  51  is invoked by both RCU-sched readers and updaters.
  55  is invoked by both SRCU readers and updaters.
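Because each flavor is checked separately, an access that may be protected either by the reader flavor or by the update-side lock should say so explicitly. A hedged sketch; my_lock and gp are hypothetical:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct foo { int a; };
    static DEFINE_SPINLOCK(my_lock);
    static struct foo __rcu *gp;

    /* Legal from an rcu_read_lock() section or with my_lock held;
     * lockdep warns at runtime if neither protection is in effect. */
    static struct foo *my_deref(void)
    {
            return rcu_dereference_check(gp, lockdep_is_held(&my_lock));
    }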
listRCU.rst
  62  list. Readers using ``for_each_process()`` are not protected with the
  63  ``tasklist_lock``. To prevent readers from noticing changes in the list
  67  any readers traversing the list will see valid ``p->tasks.next`` pointers
  71  all existing readers finish, which guarantees that the ``task_struct``
  73  of all RCU readers that might possibly have a reference to that object.
 219  need for writers to exclude readers.
 226  readers to fail spectacularly.
 228  So, when readers can tolerate stale data and when entries are either added or
  [all …]
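The reader side that the text above relies on: traversal under rcu_read_lock() with no list lock, accepting that an entry may be stale or concurrently unlinked. A sketch with hypothetical names:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>

    struct foo {
            struct list_head list;
            int key, data;
    };
    static LIST_HEAD(foo_list);

    static int foo_lookup(int key)
    {
            struct foo *p;
            int data = -1;

            rcu_read_lock();
            list_for_each_entry_rcu(p, &foo_list, list) {
                    if (p->key == key) {
                            data = p->data;  /* possibly stale: tolerated */
                            break;
                    }
            }
            rcu_read_unlock();
            return data;
    }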
/linux/Documentation/locking/

lockdep-design.rst
 405  spin_lock() or write_lock()), non-recursive readers (i.e. shared lockers, like
 406  down_read()) and recursive readers (recursive shared lockers, like rcu_read_lock()).
 410  r: stands for non-recursive readers.
 411  R: stands for recursive readers.
 412  S: stands for all readers (non-recursive + recursive), as both are shared lockers.
 413  N: stands for writers and non-recursive readers, as both are not recursive.
 417  Recursive readers, as their name indicates, are the lockers allowed to acquire
 421  While non-recursive readers will cause a self deadlock if trying to acquire inside
 424  The difference between recursive readers and non-recursive readers is because:
 425  recursive readers get blocked only by a write lock *holder*, while non-recursive
  [all …]
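The distinction in a sketch: a recursive reader may nest freely, while nesting a non-recursive reader can deadlock as soon as a writer queues between the two acquisitions:

    #include <linux/rcupdate.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(my_rwsem);

    static void nesting_example(void)
    {
            /* Recursive reader: nesting is always safe. */
            rcu_read_lock();
            rcu_read_lock();
            rcu_read_unlock();
            rcu_read_unlock();

            /* Non-recursive reader: an inner down_read() here could
             * deadlock, because a down_write() queued by another task
             * in between would block the inner read until we release
             * the outer read, which we never would. */
            down_read(&my_rwsem);
            /* down_read(&my_rwsem);  <-- potential self deadlock */
            up_read(&my_rwsem);
    }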
locktypes.rst
  95  readers.
 135  rw_semaphore is a multiple readers and single writer lock mechanism.
 141  exist special-purpose interfaces that allow non-owner release for readers.
 151  readers, a preempted low-priority reader will continue holding its lock,
 152  thus starving even high-priority writers. In contrast, because readers
 155  writer from starving readers.
 299  rwlock_t is a multiple readers and single writer lock mechanism.
 314  readers, a preempted low-priority reader will continue holding its lock,
 315  thus starving even high-priority writers. In contrast, because readers
 318  preventing that writer from starving readers.
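Basic rw_semaphore usage for reference; both sides may sleep, readers can run concurrently, and a writer holds the lock exclusively:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(cfg_rwsem);
    static int cfg_value;

    static int cfg_get(void)
    {
            int v;

            down_read(&cfg_rwsem);    /* multiple readers may hold this */
            v = cfg_value;
            up_read(&cfg_rwsem);
            return v;
    }

    static void cfg_set(int v)
    {
            down_write(&cfg_rwsem);   /* single writer, excludes readers */
            cfg_value = v;
            up_write(&cfg_rwsem);
    }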
/linux/kernel/rcu/

sync.c
  42   * If it is called by rcu_sync_enter() it signals that all the readers were
  51   * readers back onto their fastpaths (after a grace period). If both
  54   * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
  91   * rcu_sync_enter() - Force readers onto slowpath
  94   * This function is used by updaters who need readers to make use of
  97   * tells readers to stay off their fastpaths. A later call to
 143   * rcu_sync_exit() - Allow readers back onto fast path after grace period
 147   * now allow readers to make use of their fastpaths after a grace period
 149   * calls to rcu_sync_is_idle() will return true, which tells readers that
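The updater-side counterpart to rcu_sync_is_idle(): a hedged sketch of bracketing an exclusive section so readers are forced onto, then released from, their slowpaths; my_sync is hypothetical and must have been set up with rcu_sync_init():

    #include <linux/rcu_sync.h>

    static struct rcu_sync my_sync;    /* rcu_sync_init(&my_sync) at init */

    static void my_update(void)
    {
            rcu_sync_enter(&my_sync);  /* waits a grace period; after this,
                                        * rcu_sync_is_idle() returns false */
            /* ... every reader is now taking its slowpath ... */
            rcu_sync_exit(&my_sync);   /* fastpaths resume after a later GP */
    }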
/linux/kernel/locking/

percpu-rwsem.c
  60   * Conversely, any readers that increment their sem->read_count after  [in __percpu_down_read_trylock()]
 113   * We use EXCLUSIVE for both readers and writers to preserve FIFO order,
 114   * and play games with the return value to allow waking multiple readers.
 116   * Specifically, we wake readers until we've woken a single writer, or until a
 138  return !reader; /* wake (readers until) 1 writer */  [in percpu_rwsem_wake_function()]
 204   * newly arriving readers increment a given counter, they will immediately
 231  /* Notify readers to take the slow path. */  [in percpu_down_write()]
 236   * Having sem->block set makes new readers block.  [in percpu_down_write()]
 252  /* Wait for all active readers to complete. */  [in percpu_down_write()]
 267   * that new readers might fail to see the results of this writer's  [in percpu_up_write()]
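Typical usage of the per-CPU rwsem these comments describe: the read side is normally just a per-CPU increment, and the write side sets sem->block and then drains all active readers:

    #include <linux/percpu-rwsem.h>

    static DEFINE_STATIC_PERCPU_RWSEM(my_psem);

    static void my_read_section(void)
    {
            percpu_down_read(&my_psem);   /* usually a per-CPU increment */
            /* ... read-side critical section ... */
            percpu_up_read(&my_psem);
    }

    static void my_write_section(void)
    {
            percpu_down_write(&my_psem);  /* block new readers, drain old */
            /* ... exclusive critical section ... */
            percpu_up_write(&my_psem);
    }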
rwsem.c
  38   * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint)
  55   * is involved. Ideally we would like to track all the readers that own
 109   * 1) rwsem_mark_wake() for readers -- set, clear
 294   * The lock is owned by readers when
 299   * Having some reader bits set is not enough to guarantee a readers owned
 300   * lock as the readers may be in the process of backing out from the count
 348  RWSEM_WAKE_READERS, /* Wake readers only */
 360   * Magic number to batch-wakeup waiting readers, even when writers are
 407   * Implies rwsem_del_waiter() for all woken readers.
 431   * Readers, on the other hand, will block as they  [in rwsem_mark_wake()]
  [all …]
qrwlock.c
  24   * Readers come here when they cannot get the lock without waiting  [in queued_read_lock_slowpath()]
  28   * Readers in interrupt context will get the lock immediately  [in queued_read_lock_slowpath()]
  80  /* Set the waiting flag to notify readers that a writer is pending */  [in queued_write_lock_slowpath()]
  83  /* When no more readers or writers, set the locked flag */  [in queued_write_lock_slowpath()]
/linux/fs/bcachefs/

six.c
 104  read_count += *per_cpu_ptr(lock->readers, cpu);  [in pcpu_read_count()]
 154  if (type == SIX_LOCK_read && lock->readers) {  [in __do_six_trylock()]
 156  this_cpu_inc(*lock->readers); /* signal that we own lock */  [in __do_six_trylock()]
 163  this_cpu_sub(*lock->readers, !ret);  [in __do_six_trylock()]
 171  } else if (type == SIX_LOCK_write && lock->readers) {  [in __do_six_trylock()]
 576  lock->readers) {  [in do_six_unlock_type()]
 578  this_cpu_dec(*lock->readers);  [in do_six_unlock_type()]
 664  if (!lock->readers) {  [in six_lock_tryupgrade()]
 672  if (lock->readers)  [in six_lock_tryupgrade()]
 673  this_cpu_dec(*lock->readers);  [in six_lock_tryupgrade()]
  [all …]
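A sketch of the per-CPU reader-count scheme visible above: read lock and unlock touch only the local CPU's counter, and only a prospective writer pays to sum across all CPUs. Names are illustrative, not the six.c API, and the real code adds memory barriers and retry logic omitted here:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    struct pcpu_rlock {
            u64 __percpu *readers;      /* allocated with alloc_percpu() */
    };

    static void pcpu_read_lock(struct pcpu_rlock *lock)
    {
            this_cpu_inc(*lock->readers);   /* contention-free */
    }

    static void pcpu_read_unlock(struct pcpu_rlock *lock)
    {
            this_cpu_dec(*lock->readers);
    }

    static u64 pcpu_read_count(struct pcpu_rlock *lock)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)      /* writer side: O(nr_cpus) */
                    sum += *per_cpu_ptr(lock->readers, cpu);
            return sum;
    }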
/linux/drivers/misc/ibmasm/

event.c
  30  list_for_each_entry(reader, &sp->event_buffer->readers, node)  [in wake_up_event_readers()]
  39   * event readers.
  40   * There is no reader marker in the buffer, therefore readers are
  73   * Called by event readers (initiated from user space through the file
 123  list_add(&reader->node, &sp->event_buffer->readers);  [in ibmasm_event_reader_register()]
 153  INIT_LIST_HEAD(&buffer->readers);  [in ibmasm_event_buffer_init()]
/linux/drivers/misc/cardreader/

Kconfig
   9  Alcor Micro card readers support access to many types of memory cards,
  21  Realtek card readers support access to many types of memory cards,
  30  Select this option to get support for Realtek USB 2.0 card readers
/linux/drivers/hid/

hid-roccat.c
  18   * It is inspired by hidraw, but uses only one circular buffer for all readers.
  47  struct list_head readers;  [member]
  48  /* protects modifications of readers list */
  52   * circular_buffer has one writer and multiple readers with their own
 191  list_add_tail(&reader->node, &device->readers);  [in roccat_open()]
 239   * roccat_report_event() - output data to readers
 270  list_for_each_entry(reader, &device->readers, node) {  [in roccat_report_event()]
 339  INIT_LIST_HEAD(&device->readers);  [in roccat_connect()]
/linux/arch/x86/include/asm/

spinlock.h
  30   * Read-write spinlocks, allowing multiple readers
  33   * NOTE! it is quite common to have readers in interrupts
  36   * irq-safe write-lock, but readers can get non-irqsafe
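The pattern this comment describes, sketched with the generic rwlock API: interrupt-context readers take the lock without irq protection, so the process-context writer must disable interrupts, or a reader arriving in an irq on the same CPU would spin forever on the held write lock:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(ev_lock);

    static void ev_irq_handler(void)    /* interrupt context */
    {
            read_lock(&ev_lock);        /* non-irqsafe read lock is fine */
            /* ... read shared state ... */
            read_unlock(&ev_lock);
    }

    static void ev_update(void)         /* process context */
    {
            write_lock_irq(&ev_lock);   /* irq-safe write lock */
            /* ... modify shared state ... */
            write_unlock_irq(&ev_lock);
    }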
/linux/tools/testing/selftests/kvm/lib/

userfaultfd_util.c
 125  uffd_desc->readers = calloc(sizeof(pthread_t), num_readers);  [in uffd_setup_demand_paging()]
 126  TEST_ASSERT(uffd_desc->readers, "Failed to alloc reader threads");  [in uffd_setup_demand_paging()]
 170  pthread_create(&uffd_desc->readers[i], NULL, uffd_handler_thread_fn,  [in uffd_setup_demand_paging()]
 190  TEST_ASSERT(!pthread_join(uffd->readers[i], NULL),  [in uffd_stop_demand_paging()]
 201  free(uffd->readers);  [in uffd_stop_demand_paging()]
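The thread-pool shape of the snippet above in plain POSIX C. One aside: calloc's conventional argument order is (nmemb, size); the selftest passes them swapped, which is harmless since calloc multiplies them. A hedged sketch, reader_fn hypothetical:

    #include <pthread.h>
    #include <stdlib.h>

    static void *reader_fn(void *arg)
    {
            /* block on the event source until told to stop */
            return NULL;
    }

    static pthread_t *start_readers(int num_readers)
    {
            pthread_t *readers = calloc(num_readers, sizeof(pthread_t));

            if (!readers)
                    return NULL;
            for (int i = 0; i < num_readers; i++)
                    if (pthread_create(&readers[i], NULL, reader_fn, NULL))
                            return NULL;    /* cleanup omitted for brevity */
            return readers;
    }

    static void stop_readers(pthread_t *readers, int num_readers)
    {
            for (int i = 0; i < num_readers; i++)
                    pthread_join(readers[i], NULL);  /* reap each reader */
            free(readers);
    }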
/linux/arch/sh/include/asm/

spinlock-cas.h
  44   * Read-write spinlocks, allowing multiple readers but only one writer.
  46   * NOTE! it is quite common to have readers in interrupts but no interrupt
  48   * needs to get a irq-safe write-lock, but readers can get non-irqsafe

spinlock-llsc.h
  82   * Read-write spinlocks, allowing multiple readers but only one writer.
  84   * NOTE! it is quite common to have readers in interrupts but no interrupt
  86   * needs to get a irq-safe write-lock, but readers can get non-irqsafe
/linux/fs/

pipe.c
  72   * FIFOs and Pipes now generate SIGIO for both readers and writers.
 424  !READ_ONCE(pipe->readers);  [in pipe_writable()]
 457  if (!pipe->readers) {  [in pipe_write()]
 498  if (!pipe->readers) {  [in pipe_write()]
 571   * space. We wake up any readers if necessary, and then  [in pipe_write()]
 700  if (!pipe->readers)  [in pipe_poll()]
 729  pipe->readers--;  [in pipe_release()]
 734  if (!pipe->readers != !pipe->writers) {  [in pipe_release()]
 896  pipe->readers = pipe->writers = 1;  [in get_pipe_inode()]
1055   * but that requires that we wake up any other readers/writers
  [all …]
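What the pipe->readers accounting above means for user space: once the last read end is closed, a write fails with EPIPE (and raises SIGPIPE unless ignored). A small runnable demonstration:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fds[2];

            if (pipe(fds))
                    return 1;
            signal(SIGPIPE, SIG_IGN);  /* get the error, not the signal */
            close(fds[0]);             /* kernel: pipe->readers drops to 0 */
            if (write(fds[1], "x", 1) < 0 && errno == EPIPE)
                    puts("no readers left: EPIPE");
            return 0;
    }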
/linux/drivers/ptp/

ptp_private.h
  50  int defunct; /* tells readers to go away when clock is being removed */
  82   * The function queue_cnt() is safe for readers to call without
  83   * holding q->lock. Readers use this function to verify that the queue
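The shape of the lockless check described above, assuming a fixed-capacity head/tail ring: reading each index exactly once yields a count that may be slightly stale but never corrupt, which is all a reader needs for an "is data available" test. A hedged sketch, not the driver's exact code:

    #include <linux/compiler.h>

    #define MAX_EVENTS 128

    struct ev_queue {
            int head;   /* consumer index, advanced under q->lock */
            int tail;   /* producer index, advanced under q->lock */
    };

    static inline int queue_cnt(const struct ev_queue *q)
    {
            int cnt = READ_ONCE(q->tail) - READ_ONCE(q->head);

            return cnt < 0 ? cnt + MAX_EVENTS : cnt;
    }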
/linux/tools/testing/selftests/kvm/include/

userfaultfd_util.h
  29  /* Holds the write ends of the pipes for killing the readers. */
  31  pthread_t *readers;  [member]
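One conventional way the "write ends of the pipes" shutdown above is wired: each reader polls both its event fd and the read end of a private pipe, and the controller stops it by writing to (or closing) the write end. A hedged POSIX sketch, not the selftest's exact code:

    #include <poll.h>

    /* Returns 1 if an event is ready, 0 if told to exit, -1 on error. */
    static int wait_event_or_exit(int event_fd, int exit_pipe_rd)
    {
            struct pollfd pfd[2] = {
                    { .fd = event_fd,     .events = POLLIN },
                    { .fd = exit_pipe_rd, .events = POLLIN },
            };

            if (poll(pfd, 2, -1) < 0)
                    return -1;
            if (pfd[1].revents & POLLIN)
                    return 0;       /* controller wrote: shut down */
            return 1;
    }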