/linux/tools/lib/
  list_sort.c
    16   struct list_head *head, **tail = &head;  in merge() (local)
    21   *tail = a;  in merge()
    22   tail = &a->next;  in merge()
    25   *tail = b;  in merge()
    29   *tail = b;  in merge()
    30   tail = &b->next;  in merge()
    33   *tail = a;  in merge()
    52   struct list_head *tail = head;  in merge_final() (local)
    57   tail->next = a;  in merge_final()
    58   a->prev = tail;  in merge_final()
    [all …]
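
The merge() hits above show the pointer-to-pointer tail idiom: `tail` always addresses the link field that will receive the next node, so appending the first node needs no special case. A minimal standalone sketch of the same idiom, using a hypothetical singly linked `struct node` rather than the kernel's doubly linked `list_head`:

    #include <stddef.h>

    struct node {
        int key;
        struct node *next;
    };

    /* Merge two sorted singly linked lists. "tail" always points at the
     * link field that should receive the next smallest node, so no
     * special case is needed for the first element. */
    static struct node *merge(struct node *a, struct node *b)
    {
        struct node *head, **tail = &head;

        while (a && b) {
            if (a->key <= b->key) {
                *tail = a;
                tail = &a->next;
                a = a->next;
            } else {
                *tail = b;
                tail = &b->next;
                b = b->next;
            }
        }
        *tail = a ? a : b;      /* append whichever list is left over */
        return head;
    }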
|
/linux/Documentation/translations/zh_CN/core-api/
  circular-buffers.rst
    55   (2) The 'tail' index - the point at which the consumer finds the next item in the buffer.
    57   Typically, the buffer is empty when the tail pointer equals the head pointer, and full when the head pointer is one less than the tail pointer.
    60   To add an item, increment the head index; to remove an item, increment the tail index. The tail index must never overtake the head index.
    111  …the producer controls the head index, but the consumer may still be emptying the buffer on another CPU and moving the tail index.
    116  …the consumer controls the tail index, but the producer may still be filling the buffer on another CPU and moving the head index.
    146  unsigned long tail = READ_ONCE(buffer->tail);
    148  if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
    181  unsigned long tail = buffer->tail;
    183  if (CIRC_CNT(head, tail, buffer->size) >= 1) {
    186  struct item *item = buffer[tail];
    [all …]
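
The excerpted rules (empty when head == tail, full when head is one slot behind tail, power-of-two size with masked indices) fit in a few lines. A single-threaded sketch with invented names; the kernel version additionally pairs READ_ONCE() with smp_store_release() so producer and consumer may run on different CPUs, as the excerpt's code lines show:

    #include <stdbool.h>

    #define BUF_SIZE 16                     /* power of two */
    #define MASK     (BUF_SIZE - 1)

    struct ring {
        int           items[BUF_SIZE];
        unsigned long head;                 /* written by the producer */
        unsigned long tail;                 /* written by the consumer */
    };

    /* One slot is sacrificed so that head == tail means "empty" and can
     * never also mean "full". */
    static bool ring_put(struct ring *r, int v)
    {
        if (((r->head + 1) & MASK) == r->tail)
            return false;                   /* full */
        r->items[r->head] = v;
        r->head = (r->head + 1) & MASK;
        return true;
    }

    static bool ring_get(struct ring *r, int *v)
    {
        if (r->head == r->tail)
            return false;                   /* empty */
        *v = r->items[r->tail];
        r->tail = (r->tail + 1) & MASK;
        return true;
    }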
|
/linux/drivers/rpmsg/
  qcom_glink_smem.c
    53   __le32 *tail;  (member)
    70   u32 tail;  in glink_smem_rx_avail() (local)
    86   tail = le32_to_cpu(*pipe->tail);  in glink_smem_rx_avail()
    88   if (head < tail)  in glink_smem_rx_avail()
    89   return pipe->native.length - tail + head;  in glink_smem_rx_avail()
    91   return head - tail;  in glink_smem_rx_avail()
    99   u32 tail;  in glink_smem_rx_peek() (local)
    101  tail = le32_to_cpu(*pipe->tail);  in glink_smem_rx_peek()
    102  tail += offset;  in glink_smem_rx_peek()
    103  if (tail >= pipe->native.length)  in glink_smem_rx_peek()
    [all …]
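
glink_smem_rx_avail() handles a ring whose length is not necessarily a power of two, so instead of masking it compares head and tail directly and adds the buffer length back when the data wraps. A self-contained sketch of that computation (a hypothetical ring_rx_avail(), not part of the driver):

    /* Bytes available to read in a ring of arbitrary length, given
     * absolute head/tail offsets in [0, length). When head has wrapped
     * below tail, the data spans the end of the buffer. */
    static unsigned int ring_rx_avail(unsigned int head, unsigned int tail,
                                      unsigned int length)
    {
        if (head < tail)
            return length - tail + head;
        return head - tail;
    }

For example, with length 100, tail 90 and head 10, the data wraps and 100 - 90 + 10 = 20 bytes are readable.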
|
  qcom_glink_rpm.c
    51   void __iomem *tail;  (member)
    73   unsigned int tail;  in glink_rpm_rx_avail() (local)
    76   tail = readl(pipe->tail);  in glink_rpm_rx_avail()
    78   if (head < tail)  in glink_rpm_rx_avail()
    79   return pipe->native.length - tail + head;  in glink_rpm_rx_avail()
    81   return head - tail;  in glink_rpm_rx_avail()
    88   unsigned int tail;  in glink_rpm_rx_peek() (local)
    91   tail = readl(pipe->tail);  in glink_rpm_rx_peek()
    92   tail += offset;  in glink_rpm_rx_peek()
    93   if (tail >= pipe->native.length)  in glink_rpm_rx_peek()
    [all …]
|
/linux/drivers/net/ethernet/intel/fm10k/
  fm10k_mbx.c
    17   fifo->tail = 0;  in fm10k_fifo_init()
    28   return fifo->tail - fifo->head;  in fm10k_fifo_used()
    39   return fifo->size + fifo->head - fifo->tail;  in fm10k_fifo_unused()
    50   return fifo->head == fifo->tail;  in fm10k_fifo_empty()
    74   return (fifo->tail + offset) & (fifo->size - 1);  in fm10k_fifo_tail_offset()
    120  fifo->head = fifo->tail;  in fm10k_fifo_drop_all()
    132  static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail)  in fm10k_mbx_index_len() (argument)
    134  u16 len = tail - head;  in fm10k_mbx_index_len()
    137  if (len > tail)  in fm10k_mbx_index_len()
    153  u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1);  in fm10k_mbx_tail_add() (local)
    [all …]
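
fm10k lets its FIFO head and tail run freely as u16 counters and only masks when a slot is actually addressed (fm10k_fifo_tail_offset()); occupancy then falls out of plain unsigned subtraction, and dropping everything is just head = tail. A sketch under those assumptions (hypothetical struct fifo, size a power of two no larger than 65536):

    #include <stdint.h>

    struct fifo {
        uint32_t *buffer;
        uint16_t head;      /* free-running read counter  */
        uint16_t tail;      /* free-running write counter */
        uint16_t size;      /* power of two */
    };

    /* With free-running indices, unsigned subtraction gives the element
     * count directly; wraparound of the u16 counters is harmless. */
    static uint16_t fifo_used(const struct fifo *f)
    {
        return f->tail - f->head;
    }

    static uint16_t fifo_unused(const struct fifo *f)
    {
        return f->size + f->head - f->tail;
    }

    /* Physical slot for a logical offset from the tail: mask only on
     * access, never on the counters themselves. */
    static uint16_t fifo_tail_offset(const struct fifo *f, uint16_t offset)
    {
        return (f->tail + offset) & (f->size - 1);
    }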
|
/linux/drivers/infiniband/sw/rdmavt/
  cq.c
    35   u32 tail;  in rvt_cq_enter() (local)
    43   tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);  in rvt_cq_enter()
    48   tail = k_wc->tail;  in rvt_cq_enter()
    62   if (unlikely(next == tail || cq->cq_full)) {  in rvt_cq_enter()
    321  RDMA_READ_UAPI_ATOMIC(cq->queue->tail))  in rvt_req_notify_cq()
    324  if (cq->kqueue->head != cq->kqueue->tail)  in rvt_req_notify_cq()
    343  u32 head, tail, n;  in rvt_resize_cq() (local)
    388  tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);  in rvt_resize_cq()
    392  tail = old_k_wc->tail;  in rvt_resize_cq()
    397  if (tail > (u32)cq->ibcq.cqe)  in rvt_resize_cq()
    [all …]
|
  srq.c
    137  u32 sz, size, n, head, tail;  in rvt_modify_srq() (local)
    175  tail = RDMA_READ_UAPI_ATOMIC(owq->tail);  in rvt_modify_srq()
    179  tail = okwq->tail;  in rvt_modify_srq()
    181  if (head >= srq->rq.size || tail >= srq->rq.size) {  in rvt_modify_srq()
    186  if (n < tail)  in rvt_modify_srq()
    187  n += srq->rq.size - tail;  in rvt_modify_srq()
    189  n -= tail;  in rvt_modify_srq()
    196  while (tail != head) {  in rvt_modify_srq()
    200  wqe = rvt_get_rwqe_ptr(&srq->rq, tail);  in rvt_modify_srq()
    207  if (++tail >= srq->rq.size)  in rvt_modify_srq()
    [all …]
|
  rc.c
    66   u32 tail;  in rvt_compute_aeth() (local)
    73   tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);  in rvt_compute_aeth()
    76   tail = READ_ONCE(qp->r_rq.kwq->tail);  in rvt_compute_aeth()
    80   if (tail >= qp->r_rq.size)  in rvt_compute_aeth()
    81   tail = 0;  in rvt_compute_aeth()
    88   credits = rvt_get_rq_count(&qp->r_rq, head, tail);  in rvt_compute_aeth()
|
/linux/include/linux/
  circ_buf.h
    12   int tail;  (member)
    16   #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))  (argument)
    21   #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))  (argument)
    26   #define CIRC_CNT_TO_END(head,tail,size) \  (argument)
    27   ({int end = (size) - (tail); \
    32   #define CIRC_SPACE_TO_END(head,tail,size) \  (argument)
    34   int n = (end + (tail)) & ((size)-1); \
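
A quick demonstration of the two core macros above; the values are arbitrary and chosen so that head has already wrapped past the end of an 8-slot buffer:

    #include <stdio.h>

    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        int size = 8, head = 2, tail = 6;   /* head has wrapped */

        /* 4 items (slots 6, 7, 0, 1) and 3 free slots: CIRC_SPACE
         * deliberately keeps one slot empty so a full buffer stays
         * distinguishable from an empty one. */
        printf("count = %d\n", CIRC_CNT(head, tail, size));    /* 4 */
        printf("space = %d\n", CIRC_SPACE(head, tail, size));  /* 3 */
        return 0;
    }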
|
  pipe_fs_i.h
    57   pipe_index_t tail;  (member)
    175  static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)  in pipe_occupancy() (argument)
    177  return (pipe_index_t)(head - tail);  in pipe_occupancy()
    185  static inline bool pipe_empty(unsigned int head, unsigned int tail)  in pipe_empty() (argument)
    187  return !pipe_occupancy(head, tail);  in pipe_empty()
    196  static inline bool pipe_full(unsigned int head, unsigned int tail,  in pipe_full() (argument)
    199  return pipe_occupancy(head, tail) >= limit;  in pipe_full()
    208  return pipe_full(pipe->head, pipe->tail, pipe->max_usage);  in pipe_is_full()
    217  return pipe_empty(pipe->head, pipe->tail);  in pipe_is_empty()
    226  return pipe_occupancy(pipe->head, pipe->tail);  in pipe_buf_usage()
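
pipe_occupancy() relies on head and tail being free-running counters of type pipe_index_t: casting the difference back to that width keeps the count correct across wraparound. A sketch that assumes a 16-bit pipe_index_t purely for illustration:

    #include <assert.h>

    typedef unsigned short pipe_index_t;   /* assumed width, for illustration */

    static unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
    {
        /* The cast truncates both counters to the same width, so the
         * difference is correct even after either index wraps. */
        return (pipe_index_t)(head - tail);
    }

    int main(void)
    {
        /* tail just below the wrap point, head just past it: 3 slots. */
        assert(pipe_occupancy(0x10001, 0xfffe) == 3);
        assert(pipe_occupancy(5, 5) == 0);             /* empty */
        return 0;
    }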
|
/linux/arch/arm/kernel/
  perf_callchain.c
    34   user_backtrace(struct frame_tail __user *tail,  in user_backtrace() (argument)
    40   if (!access_ok(tail, sizeof(buftail)))  in user_backtrace()
    44   err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));  in user_backtrace()
    56   if (tail + 1 >= buftail.fp)  in user_backtrace()
    65   struct frame_tail __user *tail;  in perf_callchain_user() (local)
    72   tail = (struct frame_tail __user *)regs->ARM_fp - 1;  in perf_callchain_user()
    75   tail && !((unsigned long)tail & 0x3))  in perf_callchain_user()
    76   tail = user_backtrace(tail, entry);  in perf_callchain_user()
|
/linux/drivers/net/wireguard/
  queueing.c
    56   queue->head = queue->tail = STUB(queue);  in wg_prev_queue_init()
    82   struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));  in wg_prev_queue_dequeue() (local)
    84   if (tail == STUB(queue)) {  in wg_prev_queue_dequeue()
    87   queue->tail = next;  in wg_prev_queue_dequeue()
    88   tail = next;  in wg_prev_queue_dequeue()
    92   queue->tail = next;  in wg_prev_queue_dequeue()
    94   return tail;  in wg_prev_queue_dequeue()
    96   if (tail != READ_ONCE(queue->head))  in wg_prev_queue_dequeue()
    99   next = smp_load_acquire(&NEXT(tail));  in wg_prev_queue_dequeue()
    101  queue->tail = next;  in wg_prev_queue_dequeue()
    [all …]
|
/linux/drivers/gpu/drm/i915/gt/
  intel_ring.h
    25   void intel_ring_reset(struct intel_ring *ring, u32 tail);
    90   assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)  in assert_ring_tail_valid() (argument)
    94   GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));  in assert_ring_tail_valid()
    111  GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);  in assert_ring_tail_valid()
    116  intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)  in intel_ring_set_tail() (argument)
    124  assert_ring_tail_valid(ring, tail);  in intel_ring_set_tail()
    125  ring->tail = tail;  in intel_ring_set_tail()
    126  return tail;  in intel_ring_set_tail()
    130  __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)  in __intel_ring_space() (argument)
    138  return (head - tail - CACHELINE_BYTES) & (size - 1);  in __intel_ring_space()
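
__intel_ring_space() is a stock power-of-two ring computation with one wrinkle: it under-reports free space by a cacheline, so the write pointer (tail) can never fully catch up with the read pointer (head). A standalone sketch, with the cacheline size assumed to be 64 bytes:

    #define CACHELINE_BYTES 64   /* assumed value, for illustration */

    /* Free bytes in a power-of-two ring, deliberately reported one
     * cacheline short so that "full" never collapses into "empty". */
    static unsigned int ring_space(unsigned int head, unsigned int tail,
                                   unsigned int size)
    {
        return (head - tail - CACHELINE_BYTES) & (size - 1);
    }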
|
/linux/kernel/locking/
  qspinlock.c
    46   * In particular; where the traditional MCS lock consists of a tail pointer
    48   * unlock the next pending (next->locked), we compress both these: {tail,
    54   * we can encode the tail by combining the 2-bit nesting level with the cpu
    55   * number. With one byte for the lock value and 3 bytes for the tail, only a
    114  * (queue tail, pending bit, lock value)
    118  u32 tail;  in encode_tail() (local)
    126  decode_tail(u32 tail)  in decode_tail() (argument)
    133  u32 old, tail;
    177  xchg_tail(struct qspinlock *lock, u32 tail)  in xchg_tail() (argument)
    217  tail = encode_tail(smp_processor_id(), idx);
    221  xchg_tail(struct qspinlock *lock, u32 tail)  in xchg_tail() (argument)
    265  * publish the updated tail via xchg_tail() and potentially link
    271  * Publish the updated tail.  in __pv_wait_node()
    277  old = xchg_tail(lock, tail);  in __pv_wait_head_or_lock()
    316  u32 old, tail;  in queued_spin_lock_slowpath() (local)
    [all …]
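
The comments above describe packing the MCS queue tail into a few bytes: a CPU number biased by 1 (so that tail == 0 means "no queue") plus a 2-bit nesting index. A round-trip sketch; the offsets are illustrative rather than the kernel's exact layout, and the bpf_arena_spin_lock.h entry further down shows the same scheme in tree:

    #include <assert.h>

    #define _Q_TAIL_IDX_OFFSET  16   /* illustrative values; the kernel */
    #define _Q_TAIL_CPU_OFFSET  18   /* derives these from the lock layout */
    #define _Q_TAIL_IDX_MASK    (0x3U << _Q_TAIL_IDX_OFFSET)

    /* CPU numbers are biased by 1 so that tail == 0 means "no queue". */
    static unsigned int encode_tail(int cpu, int idx)
    {
        return ((cpu + 1) << _Q_TAIL_CPU_OFFSET) | (idx << _Q_TAIL_IDX_OFFSET);
    }

    static void decode_tail(unsigned int tail, int *cpu, int *idx)
    {
        *cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        *idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
    }

    int main(void)
    {
        int cpu, idx;

        decode_tail(encode_tail(7, 2), &cpu, &idx);
        assert(cpu == 7 && idx == 2);
        return 0;
    }
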
/linux/Documentation/trace/
  ring-buffer-design.rst
    21   tail
    191  It is possible that the page swapped is the commit page and the tail page,
    196  reader page  commit page  tail page
    227  tail page
    253  +---------+ <--- tail pointer
    278  +---------+ <--- tail pointer
    291  +---------+ <--- tail pointer
    302  +---------+ <--(last full commit and tail pointer)
    311  The tail page points to the page with the last write (before
    314  The tail page is always equal to or after the commit page. It may
    [all …]
|
/linux/net/sunrpc/
  xdr.c
    151  const struct kvec *tail = xdr->tail;  in xdr_buf_to_bvec() (local)
    176  if (tail->iov_len) {  in xdr_buf_to_bvec()
    177  bvec_set_virt(bvec, tail->iov_base, tail->iov_len);  in xdr_buf_to_bvec()
    204  struct kvec *tail = xdr->tail;  in xdr_inline_pages() (local)
    214  tail->iov_base = buf + offset;  in xdr_inline_pages()
    215  tail->iov_len = buflen - offset;  in xdr_inline_pages()
    474  xdr_buf_iov_zero(buf->tail, pgbase - buf->page_len, len);  in xdr_buf_pages_zero()
    478  xdr_buf_iov_zero(buf->tail, 0, pgbase + len - buf->page_len);  in xdr_buf_pages_zero()
    534  struct kvec *tail = buf->tail;  in xdr_buf_try_expand() (local)
    535  unsigned int sum = head->iov_len + buf->page_len + tail->iov_len;  in xdr_buf_try_expand()
    [all …]
|
/linux/include/drm/
  spsc_queue.h
    43   atomic_long_t tail;  (member)
    51   atomic_long_set(&queue->tail, (long)&queue->head);  in spsc_queue_init()
    67   struct spsc_node **tail;  in spsc_queue_push() (local)
    76   tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);  in spsc_queue_push()
    77   WRITE_ONCE(*tail, node);  in spsc_queue_push()
    87   return tail == &queue->head;  in spsc_queue_push()
    109  if (atomic_long_cmpxchg(&queue->tail,  in spsc_queue_pop()
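
spsc_queue_push() claims the current tail link with an atomic exchange and then stores the new node through it; returning whether the queue was empty tells the caller a consumer wakeup may be needed. A C11 sketch of the same shape (the kernel's barrier annotations and exact types are simplified away):

    #include <stdatomic.h>
    #include <stddef.h>

    struct spsc_node {
        struct spsc_node *next;
    };

    struct spsc_queue {
        struct spsc_node *head;               /* consumer end */
        _Atomic(struct spsc_node **) tail;    /* producer end: &last->next */
    };

    static void spsc_queue_init(struct spsc_queue *q)
    {
        q->head = NULL;
        atomic_init(&q->tail, &q->head);      /* empty: tail link is &head */
    }

    /* Atomically claim the current tail link, then publish the node
     * through it. Returns nonzero when the queue was empty before the
     * push. */
    static int spsc_queue_push(struct spsc_queue *q, struct spsc_node *node)
    {
        struct spsc_node **tail;

        node->next = NULL;
        tail = atomic_exchange(&q->tail, &node->next);
        *tail = node;
        return tail == &q->head;
    }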
|
/linux/tools/testing/selftests/bpf/progs/
  bpf_arena_spin_lock.h
    42   u16 tail;  (member)
    46   u16 tail;  (member)
    114  u32 tail;  in encode_tail() (local)
    116  tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET;  in encode_tail()
    117  tail |= idx << _Q_TAIL_IDX_OFFSET;  /* assume < 4 */  in encode_tail()
    119  return tail;  in encode_tail()
    122  static inline struct arena_mcs_spinlock __arena *decode_tail(u32 tail)  in decode_tail() (argument)
    124  u32 cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;  in decode_tail()
    125  u32 idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;  in decode_tail()
    148  static __always_inline u32 xchg_tail(arena_spinlock_t __arena *lock, u32 tail)  in xchg_tail() (argument)
    [all …]
|
/linux/net/sched/
  sch_choke.c
    67   unsigned int tail;  (member)
    77   return (q->tail - q->head) & q->tab_mask;  in choke_len()
    97   if (q->head == q->tail)  in choke_zap_head_holes()
    106  q->tail = (q->tail - 1) & q->tab_mask;  in choke_zap_tail_holes()
    107  if (q->head == q->tail)  in choke_zap_tail_holes()
    109  } while (q->tab[q->tail] == NULL);  in choke_zap_tail_holes()
    123  if (idx == q->tail)  in choke_drop_by_idx()
    205  if (q->head == q->tail)  in choke_match_random()
    268  q->tab[q->tail] = skb;  in choke_enqueue()
    269  q->tail = (q->tail + 1) & q->tab_mask;  in choke_enqueue()
    [all …]
|
/linux/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_ring.h
    89   const __u32 tail = atomic_read(&r->prod_tail);  in pvrdma_idx_ring_has_space() (local)
    92   if (pvrdma_idx_valid(tail, max_elems) &&  in pvrdma_idx_ring_has_space()
    94   *out_tail = tail & (max_elems - 1);  in pvrdma_idx_ring_has_space()
    95   return tail != (head ^ max_elems);  in pvrdma_idx_ring_has_space()
    103  const __u32 tail = atomic_read(&r->prod_tail);  in pvrdma_idx_ring_has_data() (local)
    106  if (pvrdma_idx_valid(tail, max_elems) &&  in pvrdma_idx_ring_has_data()
    109  return tail != head;  in pvrdma_idx_ring_has_data()
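
pvrdma rings let indices run over twice the ring size, so one extra bit acts as a wrap flag: head == tail means empty, while tail == head ^ max_elems means the same slot with opposite wrap bits, i.e. full. No slot is wasted. A sketch with invented names:

    #include <assert.h>
    #include <stdbool.h>

    /* Indices run over [0, 2 * max_elems); the slot is
     * idx & (max_elems - 1) and the top bit tracks the wrap. */
    static bool ring_has_space(unsigned int prod_tail,
                               unsigned int cons_head,
                               unsigned int max_elems)
    {
        return prod_tail != (cons_head ^ max_elems);
    }

    int main(void)
    {
        unsigned int max = 8;

        assert(ring_has_space(0, 0, max));     /* empty */
        assert(!ring_has_space(8, 0, max));    /* 8 queued: full */
        return 0;
    }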
|
/linux/arch/arm64/kernel/
  stacktrace.c
    516  unwind_user_frame(struct frame_tail __user *tail, void *cookie,  in unwind_user_frame() (argument)
    524  if (!access_ok(tail, sizeof(buftail)))  in unwind_user_frame()
    528  err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));  in unwind_user_frame()
    543  if (tail >= buftail.fp)  in unwind_user_frame()
    565  unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,  in unwind_compat_user_frame() (argument)
    572  if (!access_ok(tail, sizeof(buftail)))  in unwind_compat_user_frame()
    576  err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));  in unwind_compat_user_frame()
    589  if (tail + 1 >= (struct compat_frame_tail __user *)  in unwind_compat_user_frame()
    606  struct frame_tail __user *tail;  in arch_stack_walk_user() (local)
    608  tail = (struct frame_tail __user *)regs->regs[29];  in arch_stack_walk_user()
    [all …]
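
Both unwinders walk a chain of {fp, lr} frame records, refusing any step that does not move strictly up the stack so a corrupt frame cannot loop forever. A userspace analogue of the walk, assuming AArch64 frame records and a build with -fno-omit-frame-pointer; the kernel version must go through access_ok() and __copy_from_user_inatomic() instead of dereferencing, because the records live in user memory:

    #include <stdint.h>
    #include <stdio.h>

    struct frame_record {       /* AArch64 layout: {previous fp, lr} */
        uint64_t fp;
        uint64_t lr;
    };

    static void backtrace_self(void)
    {
        struct frame_record *frame = __builtin_frame_address(0);

        while (frame) {
            struct frame_record rec = *frame;

            printf("return address: %#llx\n", (unsigned long long)rec.lr);
            /* Insist the chain moves strictly up the stack, mirroring
             * the "tail >= buftail.fp" check in the excerpt. */
            if ((struct frame_record *)rec.fp <= frame)
                break;
            frame = (struct frame_record *)rec.fp;
        }
    }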
|
/linux/net/rxrpc/
  call_accept.c
    45   unsigned int head, tail, call_head, call_tail;  in rxrpc_service_prealloc_one() (local)
    69   tail = READ_ONCE(b->peer_backlog_tail);  in rxrpc_service_prealloc_one()
    70   if (CIRC_CNT(head, tail, size) < max) {  in rxrpc_service_prealloc_one()
    82   tail = READ_ONCE(b->conn_backlog_tail);  in rxrpc_service_prealloc_one()
    83   if (CIRC_CNT(head, tail, size) < max) {  in rxrpc_service_prealloc_one()
    183  unsigned int size = RXRPC_BACKLOG_MAX, head, tail;  in rxrpc_discard_prealloc() (local)
    196  tail = b->peer_backlog_tail;  in rxrpc_discard_prealloc()
    197  while (CIRC_CNT(head, tail, size) > 0) {  in rxrpc_discard_prealloc()
    198  struct rxrpc_peer *peer = b->peer_backlog[tail];  in rxrpc_discard_prealloc()
    201  tail = (tail + 1) & (size - 1);  in rxrpc_discard_prealloc()
    [all …]
|
/linux/drivers/tty/
  n_tty.c
    161  static void tty_copy(const struct tty_struct *tty, void *to, size_t tail,  in tty_copy() (argument)
    165  size_t size = N_TTY_BUF_SIZE - tail;  in tty_copy()
    166  void *from = read_buf_addr(ldata, tail);  in tty_copy()
    560  static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,  in n_tty_process_echo_ops() (argument)
    570  if (MASK(ldata->echo_commit) == MASK(*tail + 1))  in n_tty_process_echo_ops()
    577  op = echo_buf(ldata, *tail + 1);  in n_tty_process_echo_ops()
    583  if (MASK(ldata->echo_commit) == MASK(*tail + 2))  in n_tty_process_echo_ops()
    586  num_chars = echo_buf(ldata, *tail + 2);  in n_tty_process_echo_ops()
    608  *tail += 3;  in n_tty_process_echo_ops()
    613  *tail += 2;  in n_tty_process_echo_ops()
    [all …]
|
/linux/fs/
  splice.c
    201  unsigned int tail = pipe->tail;  in splice_to_pipe() (local)
    215  while (!pipe_full(head, tail, pipe->max_usage)) {  in splice_to_pipe()
    248  unsigned int tail = pipe->tail;  in add_to_pipe() (local)
    254  } else if (pipe_full(head, tail, pipe->max_usage)) {  in add_to_pipe()
    447  unsigned int tail = pipe->tail;  in splice_from_pipe_feed() (local)
    450  while (!pipe_empty(head, tail)) {  in splice_from_pipe_feed()
    451  struct pipe_buffer *buf = pipe_buf(pipe, tail);  in splice_from_pipe_feed()
    478  tail++;  in splice_from_pipe_feed()
    479  pipe->tail = tail;  in splice_from_pipe_feed()
    494  unsigned int tail = pipe->tail;  in eat_empty_buffer() (local)
    [all …]
|
/linux/fs/affs/
  inode.c
    22   struct affs_tail *tail;  in affs_iget() (local)
    50   tail = AFFS_TAIL(sb, bh);  in affs_iget()
    51   prot = be32_to_cpu(tail->protect);  in affs_iget()
    76   id = be16_to_cpu(tail->uid);  in affs_iget()
    84   id = be16_to_cpu(tail->gid);  in affs_iget()
    92   switch (be32_to_cpu(tail->stype)) {  in affs_iget()
    98   if (be32_to_cpu(tail->stype) == ST_USERDIR ||  in affs_iget()
    127  size = be32_to_cpu(tail->size);  in affs_iget()
    136  if (tail->link_chain)  in affs_iget()
    153  …ode_set_ctime(inode, (be32_to_cpu(tail->change.days) * 86400LL + be32_to_cpu(tail->change.mins) * …  in affs_iget()
    [all …]
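
The truncated ctime line above computes seconds from the AFFS on-disk triple days/mins/ticks, where a tick is 1/50 s. The excerpt cuts off before any epoch adjustment; a sketch of the full conversion, assuming the Amiga epoch of 1978-01-01 that AFFS uses elsewhere (8 years ahead of the Unix epoch, two of them leap):

    #include <stdint.h>
    #include <time.h>

    /* Hypothetical helper, not the kernel function. 1972 and 1976 were
     * leap years, hence the +2 days in the epoch offset. */
    #define AFFS_EPOCH_OFFSET ((8 * 365 + 2) * 86400LL)

    static time_t affs_to_unix(uint32_t days, uint32_t mins, uint32_t ticks)
    {
        return (time_t)(days * 86400LL + mins * 60 + ticks / 50
                        + AFFS_EPOCH_OFFSET);
    }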
|