
Searched refs:tail (Results 1 – 25 of 723) sorted by relevance


/linux/tools/lib/
list_sort.c
16 struct list_head *head, **tail = &head; in merge() local
21 *tail = a; in merge()
22 tail = &a->next; in merge()
25 *tail = b; in merge()
29 *tail = b; in merge()
30 tail = &b->next; in merge()
33 *tail = a; in merge()
52 struct list_head *tail = head; in merge_final() local
57 tail->next = a; in merge_final()
58 a->prev = tail; in merge_final()
[all …]
/linux/Documentation/translations/zh_CN/core-api/
circular-buffers.rst
55 (2) The 'tail' index - the point at which the consumer finds the next item in the buffer.
57 Typically, when the tail pointer is equal to the head pointer, the buffer is empty; and when the head pointer is one less than the tail pointer,
60 When adding items, increment the head index; when removing items, increment the tail index. The tail index should never jump over the head index,
111 The producer controls the head index, but the consumer may still be depleting the buffer on another CPU and moving the tail index.
116 The consumer controls the tail index, but the producer may still be filling the buffer on another CPU and moving the head index.
146 unsigned long tail = READ_ONCE(buffer->tail);
148 if (CIRC_SPACE(head, tail, buffer->size) >= 1) {
181 unsigned long tail = buffer->tail;
183 if (CIRC_CNT(head, tail, buffer->size) >= 1) {
186 struct item *item = buffer[tail];
[all …]
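The translated documentation above describes the convention the kernel's circular buffers follow: a power-of-two size, the producer advancing head, the consumer advancing tail, and one slot always left empty so that full and empty stay distinguishable. The following single-threaded sketch mirrors only that convention (all names here are invented; real kernel code additionally needs the READ_ONCE()/barrier pairing the document goes on to require):

    #include <stdio.h>

    /* Power-of-two size, one slot always left empty.  These helpers mirror
     * CIRC_CNT()/CIRC_SPACE() from include/linux/circ_buf.h (quoted further
     * down this page). */
    #define BUF_SIZE 8
    #define CNT(head, tail)   (((head) - (tail)) & (BUF_SIZE - 1))
    #define SPACE(head, tail) CNT((tail), ((head) + 1))

    static int buf[BUF_SIZE];
    static unsigned int head, tail;   /* producer advances head, consumer advances tail */

    static int produce(int v)
    {
        if (SPACE(head, tail) < 1)
            return -1;                        /* full: head is one behind tail (mod size) */
        buf[head] = v;
        head = (head + 1) & (BUF_SIZE - 1);   /* kernel code publishes this with a barrier */
        return 0;
    }

    static int consume(int *v)
    {
        if (CNT(head, tail) < 1)
            return -1;                        /* empty: head == tail */
        *v = buf[tail];
        tail = (tail + 1) & (BUF_SIZE - 1);
        return 0;
    }

    int main(void)
    {
        int v;

        while (produce(42) == 0)
            ;                                 /* fills BUF_SIZE - 1 slots, then reports full */
        while (consume(&v) == 0)
            printf("%d\n", v);
        return 0;
    }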
/linux/drivers/rpmsg/
qcom_glink_smem.c
53 __le32 *tail; member
70 u32 tail; in glink_smem_rx_avail() local
86 tail = le32_to_cpu(*pipe->tail); in glink_smem_rx_avail()
88 if (head < tail) in glink_smem_rx_avail()
89 return pipe->native.length - tail + head; in glink_smem_rx_avail()
91 return head - tail; in glink_smem_rx_avail()
99 u32 tail; in glink_smem_rx_peek() local
101 tail = le32_to_cpu(*pipe->tail); in glink_smem_rx_peek()
102 tail += offset; in glink_smem_rx_peek()
103 if (tail >= pipe->native.length) in glink_smem_rx_peek()
[all …]
qcom_glink_rpm.c
51 void __iomem *tail; member
73 unsigned int tail; in glink_rpm_rx_avail() local
76 tail = readl(pipe->tail); in glink_rpm_rx_avail()
78 if (head < tail) in glink_rpm_rx_avail()
79 return pipe->native.length - tail + head; in glink_rpm_rx_avail()
81 return head - tail; in glink_rpm_rx_avail()
88 unsigned int tail; in glink_rpm_rx_peek() local
91 tail = readl(pipe->tail); in glink_rpm_rx_peek()
92 tail += offset; in glink_rpm_rx_peek()
93 if (tail >= pipe->native.length) in glink_rpm_rx_peek()
[all …]
/linux/drivers/net/ethernet/intel/fm10k/
fm10k_mbx.c
17 fifo->tail = 0; in fm10k_fifo_init()
28 return fifo->tail - fifo->head; in fm10k_fifo_used()
39 return fifo->size + fifo->head - fifo->tail; in fm10k_fifo_unused()
50 return fifo->head == fifo->tail; in fm10k_fifo_empty()
74 return (fifo->tail + offset) & (fifo->size - 1); in fm10k_fifo_tail_offset()
120 fifo->head = fifo->tail; in fm10k_fifo_drop_all()
132 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) in fm10k_mbx_index_len() argument
134 u16 len = tail - head; in fm10k_mbx_index_len()
137 if (len > tail) in fm10k_mbx_index_len()
153 u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); in fm10k_mbx_tail_add() local
[all …]
/linux/drivers/infiniband/sw/rdmavt/
cq.c
35 u32 tail; in rvt_cq_enter() local
43 tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail); in rvt_cq_enter()
48 tail = k_wc->tail; in rvt_cq_enter()
62 if (unlikely(next == tail || cq->cq_full)) { in rvt_cq_enter()
321 RDMA_READ_UAPI_ATOMIC(cq->queue->tail)) in rvt_req_notify_cq()
324 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
343 u32 head, tail, n; in rvt_resize_cq() local
388 tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail); in rvt_resize_cq()
392 tail = old_k_wc->tail; in rvt_resize_cq()
397 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
[all …]
srq.c
137 u32 sz, size, n, head, tail; in rvt_modify_srq() local
175 tail = RDMA_READ_UAPI_ATOMIC(owq->tail); in rvt_modify_srq()
179 tail = okwq->tail; in rvt_modify_srq()
181 if (head >= srq->rq.size || tail >= srq->rq.size) { in rvt_modify_srq()
186 if (n < tail) in rvt_modify_srq()
187 n += srq->rq.size - tail; in rvt_modify_srq()
189 n -= tail; in rvt_modify_srq()
196 while (tail != head) { in rvt_modify_srq()
200 wqe = rvt_get_rwqe_ptr(&srq->rq, tail); in rvt_modify_srq()
207 if (++tail >= srq->rq.size) in rvt_modify_srq()
[all …]
/linux/include/linux/
circ_buf.h
12 int tail; member
16 #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1)) argument
21 #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size)) argument
26 #define CIRC_CNT_TO_END(head,tail,size) \ argument
27 ({int end = (size) - (tail); \
32 #define CIRC_SPACE_TO_END(head,tail,size) \ argument
34 int n = (end + (tail)) & ((size)-1); \
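Because the size is a power of two, the subtract-and-mask in CIRC_CNT() stays correct after head wraps back past tail: the subtraction underflows and the mask reduces the result modulo size. A small standalone check of that arithmetic, reusing the two macros exactly as quoted above (the test values are arbitrary):

    #include <assert.h>
    #include <stdio.h>

    /* Copied from the circ_buf.h lines quoted above. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
        unsigned int size = 8;

        /* head has wrapped back to 1 while tail is still at 6: 3 items are queued */
        assert(CIRC_CNT(1, 6, size) == 3);

        /* one slot is kept empty, so count + space == size - 1 */
        assert(CIRC_SPACE(1, 6, size) == 4);

        printf("cnt=%u space=%u\n", CIRC_CNT(1, 6, size), CIRC_SPACE(1, 6, size));
        return 0;
    }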
objpool.h
55 uint32_t tail; member
195 uint32_t head, tail; in __objpool_try_add_slot() local
198 tail = READ_ONCE(slot->tail); in __objpool_try_add_slot()
203 WARN_ON_ONCE(tail - head > pool->nr_objs); in __objpool_try_add_slot()
204 } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1)); in __objpool_try_add_slot()
207 WRITE_ONCE(slot->entries[tail & slot->mask], obj); in __objpool_try_add_slot()
209 smp_store_release(&slot->last, tail + 1); in __objpool_try_add_slot()
pipe_fs_i.h
57 pipe_index_t tail; member
95 pipe_index_t tail; member
184 static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail) in pipe_occupancy() argument
186 return (pipe_index_t)(head - tail); in pipe_occupancy()
194 static inline bool pipe_empty(unsigned int head, unsigned int tail) in pipe_empty() argument
196 return !pipe_occupancy(head, tail); in pipe_empty()
205 static inline bool pipe_full(unsigned int head, unsigned int tail, in pipe_full() argument
208 return pipe_occupancy(head, tail) >= limit; in pipe_full()
217 return pipe_full(pipe->head, pipe->tail, pipe->max_usage); in pipe_is_full()
226 return pipe_empty(pipe->head, pipe->tail); in pipe_is_empty()
[all …]
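Unlike circ_buf.h, the pipe head and tail above are free-running counters, and pipe_occupancy() relies on the cast to pipe_index_t to truncate the difference to the index type's width so the count stays right once head wraps. A standalone sketch of that arithmetic only; ring_index_t is an invented stand-in, and the real width of pipe_index_t is whatever pipe_fs_i.h defines:

    #include <assert.h>

    typedef unsigned short ring_index_t;   /* stand-in for pipe_index_t, width assumed */

    /* Same shape as pipe_occupancy(): truncate the difference, not the inputs. */
    static unsigned int occupancy(unsigned int head, unsigned int tail)
    {
        return (ring_index_t)(head - tail);
    }

    int main(void)
    {
        unsigned int tail = 65534;                     /* close to the 16-bit wrap point */
        unsigned int head = (ring_index_t)(tail + 4);  /* head has wrapped around to 2 */

        assert(head == 2);
        assert(occupancy(head, tail) == 4);            /* still reports 4 buffers in flight */
        assert(occupancy(tail, tail) == 0);            /* head == tail means empty */
        return 0;
    }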
/linux/arch/arm/kernel/
perf_callchain.c
34 user_backtrace(struct frame_tail __user *tail, in user_backtrace() argument
40 if (!access_ok(tail, sizeof(buftail))) in user_backtrace()
44 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in user_backtrace()
56 if (tail + 1 >= buftail.fp) in user_backtrace()
65 struct frame_tail __user *tail; in perf_callchain_user() local
72 tail = (struct frame_tail __user *)regs->ARM_fp - 1; in perf_callchain_user()
75 tail && !((unsigned long)tail & 0x3)) in perf_callchain_user()
76 tail = user_backtrace(tail, entry); in perf_callchain_user()
/linux/drivers/net/wireguard/
queueing.c
56 queue->head = queue->tail = STUB(queue); in wg_prev_queue_init()
82 struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); in wg_prev_queue_dequeue() local
84 if (tail == STUB(queue)) { in wg_prev_queue_dequeue()
87 queue->tail = next; in wg_prev_queue_dequeue()
88 tail = next; in wg_prev_queue_dequeue()
92 queue->tail = next; in wg_prev_queue_dequeue()
94 return tail; in wg_prev_queue_dequeue()
96 if (tail != READ_ONCE(queue->head)) in wg_prev_queue_dequeue()
99 next = smp_load_acquire(&NEXT(tail)); in wg_prev_queue_dequeue()
101 queue->tail = next; in wg_prev_queue_dequeue()
[all …]
/linux/drivers/gpu/drm/i915/gt/
intel_ring.h
25 void intel_ring_reset(struct intel_ring *ring, u32 tail);
90 assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) in assert_ring_tail_valid() argument
94 GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); in assert_ring_tail_valid()
111 GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head); in assert_ring_tail_valid()
116 intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) in intel_ring_set_tail() argument
124 assert_ring_tail_valid(ring, tail); in intel_ring_set_tail()
125 ring->tail = tail; in intel_ring_set_tail()
126 return tail; in intel_ring_set_tail()
130 __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) in __intel_ring_space() argument
138 return (head - tail - CACHELINE_BYTES) & (size - 1); in __intel_ring_space()
/linux/kernel/locking/
qspinlock.c
118 u32 tail; in encode_tail() local
120 tail = (cpu + 1) << _Q_TAIL_CPU_OFFSET; in encode_tail()
121 tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */ in encode_tail()
123 return tail; in encode_tail()
126 static inline __pure struct mcs_spinlock *decode_tail(u32 tail) in decode_tail() argument
128 int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1; in decode_tail()
129 int idx = (tail & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; in decode_tail()
177 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail) in xchg_tail() argument
183 return (u32)xchg_relaxed(&lock->tail, in xchg_tail()
184 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET; in xchg_tail()
[all …]
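encode_tail()/decode_tail() above pack a CPU number and a per-CPU MCS node index into one tail word, storing cpu + 1 so that a tail of zero can mean "no waiter queued". A round-trip sketch of that packing; the field offsets below are chosen for illustration and are not taken from the real qspinlock_types.h:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative layout only; the real offsets and masks live in qspinlock_types.h. */
    #define TAIL_IDX_OFFSET 16
    #define TAIL_IDX_BITS   2
    #define TAIL_IDX_MASK   (((1u << TAIL_IDX_BITS) - 1) << TAIL_IDX_OFFSET)
    #define TAIL_CPU_OFFSET (TAIL_IDX_OFFSET + TAIL_IDX_BITS)

    static uint32_t encode_tail(int cpu, int idx)
    {
        /* cpu is stored +1 so that an all-zero tail means "no waiter queued" */
        return ((uint32_t)(cpu + 1) << TAIL_CPU_OFFSET) |
               ((uint32_t)idx << TAIL_IDX_OFFSET);
    }

    static void decode_tail(uint32_t tail, int *cpu, int *idx)
    {
        *cpu = (int)(tail >> TAIL_CPU_OFFSET) - 1;
        *idx = (int)((tail & TAIL_IDX_MASK) >> TAIL_IDX_OFFSET);
    }

    int main(void)
    {
        int cpu, idx;

        decode_tail(encode_tail(5, 3), &cpu, &idx);
        assert(cpu == 5 && idx == 3);

        /* even CPU 0 with node index 0 encodes to a non-zero tail */
        assert(encode_tail(0, 0) != 0);
        return 0;
    }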
/linux/drivers/media/test-drivers/vidtv/
vidtv_channel.c
165 struct vidtv_psi_table_eit_event *tail = NULL; in vidtv_channel_eit_event_cat_into_new() local
182 tail = vidtv_psi_eit_event_init(tail, event_id); in vidtv_channel_eit_event_cat_into_new()
183 if (!tail) { in vidtv_channel_eit_event_cat_into_new()
189 vidtv_psi_desc_assign(&tail->descriptor, desc); in vidtv_channel_eit_event_cat_into_new()
192 head = tail; in vidtv_channel_eit_event_cat_into_new()
211 struct vidtv_psi_table_sdt_service *tail = NULL; in vidtv_channel_sdt_serv_cat_into_new() local
229 tail = vidtv_psi_sdt_service_init(tail, in vidtv_channel_sdt_serv_cat_into_new()
233 if (!tail) in vidtv_channel_sdt_serv_cat_into_new()
239 vidtv_psi_desc_assign(&tail->descriptor, desc); in vidtv_channel_sdt_serv_cat_into_new()
242 head = tail; in vidtv_channel_sdt_serv_cat_into_new()
[all …]
/linux/Documentation/trace/
ring-buffer-design.rst
21 tail
191 It is possible that the page swapped is the commit page and the tail page,
196 reader page commit page tail page
227 tail page
253 +---------+ <--- tail pointer
278 +---------+ <--- tail pointer
291 +---------+ <--- tail pointer
302 +---------+ <--(last full commit and tail pointer)
311 The tail page points to the page with the last write (before
314 The tail page is always equal to or after the commit page. It may
[all …]
/linux/net/sunrpc/
xdr.c
179 const struct kvec *tail = xdr->tail; in xdr_buf_to_bvec() local
204 if (tail->iov_len) { in xdr_buf_to_bvec()
205 bvec_set_virt(bvec, tail->iov_base, tail->iov_len); in xdr_buf_to_bvec()
231 struct kvec *tail = xdr->tail; in xdr_inline_pages() local
241 tail->iov_base = buf + offset; in xdr_inline_pages()
242 tail->iov_len = buflen - offset; in xdr_inline_pages()
501 xdr_buf_iov_zero(buf->tail, pgbas in xdr_buf_pages_zero()
561 struct kvec *tail = buf->tail; in xdr_buf_try_expand() local
588 const struct kvec *tail = buf->tail; in xdr_buf_tail_copy_right() local
602 const struct kvec *tail = buf->tail; in xdr_buf_pages_copy_right() local
636 const struct kvec *tail = buf->tail; in xdr_buf_head_copy_right() local
682 const struct kvec *tail = buf->tail; in xdr_buf_tail_shift_right() local
727 const struct kvec *tail = buf->tail; in xdr_buf_tail_copy_left() local
1189 struct kvec *tail = buf->tail; in xdr_truncate_encode() local
1294 struct kvec *tail = buf->tail; in xdr_write_pages() local
[all …]
/linux/include/drm/
spsc_queue.h
43 atomic_long_t tail; member
51 atomic_long_set(&queue->tail, (long)&queue->head); in spsc_queue_init()
67 struct spsc_node **tail; in spsc_queue_push() local
73 tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next); in spsc_queue_push()
74 WRITE_ONCE(*tail, node); in spsc_queue_push()
85 return tail == &queue->head; in spsc_queue_push()
107 if (atomic_long_cmpxchg(&queue->tail, in spsc_queue_pop()
/linux/net/sched/
sch_choke.c
67 unsigned int tail; member
77 return (q->tail - q->head) & q->tab_mask; in choke_len()
97 if (q->head == q->tail) in choke_zap_head_holes()
106 q->tail = (q->tail - 1) & q->tab_mask; in choke_zap_tail_holes()
107 if (q->head == q->tail) in choke_zap_tail_holes()
109 } while (q->tab[q->tail] == NULL); in choke_zap_tail_holes()
123 if (idx == q->tail) in choke_drop_by_idx()
205 if (q->head == q->tail) in choke_match_random()
268 q->tab[q->tail] = skb; in choke_enqueue()
269 q->tail = (q->tail + 1) & q->tab_mask; in choke_enqueue()
[all …]
/linux/fs/netfs/
misc.c
23 struct folio_queue *tail = *_buffer, *p; in netfs_alloc_folioq_buffer() local
29 if (tail) in netfs_alloc_folioq_buffer()
30 while (tail->next) in netfs_alloc_folioq_buffer()
31 tail = tail->next; in netfs_alloc_folioq_buffer()
37 if (!tail || folioq_full(tail)) { in netfs_alloc_folioq_buffer()
41 if (tail) { in netfs_alloc_folioq_buffer()
42 tail->next = p; in netfs_alloc_folioq_buffer()
43 p->prev = tail; in netfs_alloc_folioq_buffer()
47 tail = p; in netfs_alloc_folioq_buffer()
63 slot = folioq_append_mark(tail, folio); in netfs_alloc_folioq_buffer()
[all …]
/linux/arch/arm64/kernel/
stacktrace.c
483 unwind_user_frame(struct frame_tail __user *tail, void *cookie, in unwind_user_frame() argument
491 if (!access_ok(tail, sizeof(buftail))) in unwind_user_frame()
495 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in unwind_user_frame()
510 if (tail >= buftail.fp) in unwind_user_frame()
532 unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie, in unwind_compat_user_frame() argument
539 if (!access_ok(tail, sizeof(buftail))) in unwind_compat_user_frame()
543 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); in unwind_compat_user_frame()
556 if (tail + 1 >= (struct compat_frame_tail __user *) in unwind_compat_user_frame()
573 struct frame_tail __user *tail; in arch_stack_walk_user() local
575 tail = (struct frame_tail __user *)regs->regs[29]; in arch_stack_walk_user()
[all …]
/linux/drivers/gpu/drm/xe/
xe_trace_guc.h
62 TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
63 TP_ARGS(xe, gt_id, action, len, _head, tail),
70 __field(u32, tail)
79 __entry->tail = tail;
85 __entry->tail, __entry->_head)
89 TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
90 TP_ARGS(xe, gt_id, action, len, _head, tail)
94 TP_PROTO(struct xe_device *xe, u8 gt_id, u32 action, u32 len, u32 _head, u32 tail),
95 TP_ARGS(xe, gt_id, action, len, _head, tail),
99 __entry->tail, __entry->_head)
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_ring.h
89 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_space() local
92 if (pvrdma_idx_valid(tail, max_elems) && in pvrdma_idx_ring_has_space()
94 *out_tail = tail & (max_elems - 1); in pvrdma_idx_ring_has_space()
95 return tail != (head ^ max_elems); in pvrdma_idx_ring_has_space()
103 const __u32 tail = atomic_read(&r->prod_tail); in pvrdma_idx_ring_has_data() local
106 if (pvrdma_idx_valid(tail, max_elems) && in pvrdma_idx_ring_has_data()
109 return tail != head; in pvrdma_idx_ring_has_data()
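The full test above, tail != (head ^ max_elems), appears to rely on the common trick of letting the producer and consumer indices run over [0, 2 * max_elems): the ring is full when both point at the same slot but differ in the extra "lap" bit, and for a power-of-two max_elems flipping that bit is the same as advancing by max_elems modulo 2 * max_elems. A quick standalone check of that equivalence (values invented):

    #include <assert.h>

    int main(void)
    {
        const unsigned int max_elems = 64;   /* ring size, must be a power of two */

        for (unsigned int head = 0; head < 2 * max_elems; head++) {
            /* the tail position that would make the ring full for this head */
            unsigned int full_tail = (head + max_elems) % (2 * max_elems);

            assert(full_tail == (head ^ max_elems));
        }
        return 0;
    }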
/linux/fs/
splice.c
201 unsigned int tail = pipe->tail; in splice_to_pipe() local
216 while (!pipe_full(head, tail, pipe->max_usage)) { in splice_to_pipe()
249 unsigned int tail = pipe->tail; in add_to_pipe() local
256 } else if (pipe_full(head, tail, pipe->max_usage)) { in add_to_pipe()
449 unsigned int tail = pipe->tail; in splice_from_pipe_feed() local
453 while (!pipe_empty(head, tail)) { in splice_from_pipe_feed()
454 struct pipe_buffer *buf = &pipe->bufs[tail & mask]; in splice_from_pipe_feed()
481 tail++; in splice_from_pipe_feed()
482 pipe->tail = tail; in splice_from_pipe_feed()
497 unsigned int tail = pipe->tail; in eat_empty_buffer() local
[all …]
/linux/fs/affs/
inode.c
22 struct affs_tail *tail; in affs_iget() local
50 tail = AFFS_TAIL(sb, bh); in affs_iget()
51 prot = be32_to_cpu(tail->protect); in affs_iget()
76 id = be16_to_cpu(tail->uid); in affs_iget()
84 id = be16_to_cpu(tail->gid); in affs_iget()
92 switch (be32_to_cpu(tail->stype)) { in affs_iget()
98 if (be32_to_cpu(tail->stype) == ST_USERDIR || in affs_iget()
127 size = be32_to_cpu(tail->size); in affs_iget()
136 if (tail->link_chain) in affs_iget()
153 …ode_set_ctime(inode, (be32_to_cpu(tail->change.days) * 86400LL + be32_to_cpu(tail->change.mins) * … in affs_iget()
[all …]
