/linux/drivers/gpu/drm/vc4/

vc4_trace.h:
    18  TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
    19  TP_ARGS(dev, seqno, timeout),
    23  __field(u64, seqno)
    29  __entry->seqno = seqno;
    34  __entry->dev, __entry->seqno, __entry->timeout)
    38  TP_PROTO(struct drm_device *dev, uint64_t seqno),
    39  TP_ARGS(dev, seqno),
    43  __field(u64, seqno)
    48  __entry->seqno = seqno;
    52  __entry->dev, __entry->seqno)
    [all …]
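The fragments above are pieces of kernel tracepoint definitions. A sketch of how such an event fits together end to end, reconstructed to be consistent with the fragments (kernel-context code, not standalone; the exact field layout lives in vc4_trace.h):

/* Sketch of a seqno tracepoint consistent with the fragments above.
 * Belongs in a trace header compiled with CREATE_TRACE_POINTS. */
TRACE_EVENT(vc4_wait_for_seqno_begin,
	TP_PROTO(struct drm_device *dev, uint64_t seqno, uint64_t timeout),
	TP_ARGS(dev, seqno, timeout),

	TP_STRUCT__entry(
		__field(u32, dev)
		__field(u64, seqno)
		__field(u64, timeout)
	),

	TP_fast_assign(
		__entry->dev = dev->primary->index;
		__entry->seqno = seqno;
		__entry->timeout = timeout;
	),

	TP_printk("dev=%u, seqno=%llu, timeout=%llu",
		  __entry->dev, __entry->seqno, __entry->timeout)
);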
vc4_gem.c:
   384  vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,  in vc4_wait_for_seqno() argument
   395  if (vc4->finished_seqno >= seqno)  in vc4_wait_for_seqno()
   403  trace_vc4_wait_for_seqno_begin(dev, seqno, timeout_ns);  in vc4_wait_for_seqno()
   414  if (vc4->finished_seqno >= seqno)  in vc4_wait_for_seqno()
   429  trace_vc4_wait_for_seqno_end(dev, seqno);  in vc4_wait_for_seqno()
   497  trace_vc4_submit_cl(dev, false, exec->seqno, exec->ct0ca,  in vc4_submit_next_bin_job()
   536  trace_vc4_submit_cl(dev, true, exec->seqno, exec->ct1ca, exec->ct1ea);  in vc4_submit_next_render_job()
   555  vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)  in vc4_update_bo_seqnos() argument
   562  bo->seqno = seqno;  in vc4_update_bo_seqnos()
   569  bo->seqno = seqno;  in vc4_update_bo_seqnos()
   [all …]
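These hits show the common driver shape: a monotonically increasing finished_seqno that waiters compare against, with trace events bracketing the wait. A minimal standalone userspace sketch of that pattern (pthread analog of the kernel's wait queues; all names here are illustrative, not vc4's):

/* Minimal userspace sketch of the seqno-wait pattern above: a
 * completion side advances finished_seqno; waiters block until
 * their target seqno is reached or a deadline expires. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static uint64_t finished_seqno;

/* Called from the completion ("IRQ") side. */
void seqno_signal(uint64_t seqno)
{
	pthread_mutex_lock(&lock);
	if (seqno > finished_seqno)
		finished_seqno = seqno;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
}

/* Returns true if `seqno` completed before the deadline. */
bool seqno_wait(uint64_t seqno, const struct timespec *deadline)
{
	bool done;

	pthread_mutex_lock(&lock);
	while (finished_seqno < seqno) {
		if (pthread_cond_timedwait(&cond, &lock, deadline))
			break;	/* ETIMEDOUT or error: stop waiting */
	}
	done = finished_seqno >= seqno;
	pthread_mutex_unlock(&lock);
	return done;
}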
/linux/drivers/media/pci/saa7164/

saa7164-cmd.c:
    22  ret = dev->cmds[i].seqno;  in saa7164_cmd_alloc_seqno()
    31  static void saa7164_cmd_free_seqno(struct saa7164_dev *dev, u8 seqno)  in saa7164_cmd_free_seqno() argument
    34  if ((dev->cmds[seqno].inuse == 1) &&  in saa7164_cmd_free_seqno()
    35  (dev->cmds[seqno].seqno == seqno)) {  in saa7164_cmd_free_seqno()
    36  dev->cmds[seqno].inuse = 0;  in saa7164_cmd_free_seqno()
    37  dev->cmds[seqno].signalled = 0;  in saa7164_cmd_free_seqno()
    38  dev->cmds[seqno].timeout = 0;  in saa7164_cmd_free_seqno()
    43  static void saa7164_cmd_timeout_seqno(struct saa7164_dev *dev, u8 seqno)  in saa7164_cmd_timeout_seqno() argument
    46  if ((dev->cmds[seqno].inuse == 1) &&  in saa7164_cmd_timeout_seqno()
    47  (dev->cmds[seqno].seqno == seqno)) {  in saa7164_cmd_timeout_seqno()
    [all …]
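Here the seqno doubles as an index into a fixed command-slot table, so alloc and free reduce to flipping an in-use flag after checking the slot really holds that seqno. A standalone sketch of the scheme (sizes and names are illustrative, not saa7164's):

/* Standalone sketch of the seqno-as-slot-index scheme above. */
#include <stdbool.h>
#include <stdint.h>

#define NUM_CMDS 8

struct cmd_slot {
	uint8_t seqno;		/* equals its own table index */
	bool inuse;
	bool signalled;
};

static struct cmd_slot cmds[NUM_CMDS];

void cmd_table_init(void)
{
	for (uint8_t i = 0; i < NUM_CMDS; i++)
		cmds[i].seqno = i;
}

/* Returns a free seqno, or -1 if all slots are busy. */
int cmd_alloc_seqno(void)
{
	for (int i = 0; i < NUM_CMDS; i++) {
		if (!cmds[i].inuse) {
			cmds[i].inuse = true;
			return cmds[i].seqno;
		}
	}
	return -1;
}

void cmd_free_seqno(uint8_t seqno)
{
	if (seqno < NUM_CMDS && cmds[seqno].inuse &&
	    cmds[seqno].seqno == seqno) {
		cmds[seqno].inuse = false;
		cmds[seqno].signalled = false;
	}
}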
/linux/drivers/gpu/drm/v3d/

v3d_trace.h:
    39  uint64_t seqno,
    41  TP_ARGS(dev, is_render, seqno, ctnqba, ctnqea),
    46  __field(u64, seqno)
    54  __entry->seqno = seqno;
    62  __entry->seqno,
    69  uint64_t seqno),
    70  TP_ARGS(dev, seqno),
    74  __field(u64, seqno)
    79  __entry->seqno = seqno;
    84  __entry->seqno)
    [all …]
/linux/drivers/dma-buf/

dma-fence-chain.c:
    90  int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)  in dma_fence_chain_find_seqno() argument
    94  if (!seqno)  in dma_fence_chain_find_seqno()
    98  if (!chain || chain->base.seqno < seqno)  in dma_fence_chain_find_seqno()
   103  to_dma_fence_chain(*pfence)->prev_seqno < seqno)  in dma_fence_chain_find_seqno()
   244  uint64_t seqno)  in dma_fence_chain_init() argument
   255  if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {  in dma_fence_chain_init()
   257  chain->prev_seqno = prev->seqno;  in dma_fence_chain_init()
   262  seqno = max(prev->seqno, seqno);  in dma_fence_chain_init()
   266  &chain->lock, context, seqno);  in dma_fence_chain_init()
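dma_fence_chain_find_seqno() walks a fence chain looking for the node that covers a given timeline point, as the selftests below exercise. A kernel-context sketch of the calling convention (not standalone; the helper and its reference-handling semantics are assumed from the snippet and the selftest usage):

/* Kernel-context sketch: look up the fence for timeline point
 * `point` in a dma_fence_chain. `head` is a referenced chain head. */
#include <linux/dma-fence-chain.h>
#include <linux/err.h>

static struct dma_fence *lookup_point(struct dma_fence *head, u64 point)
{
	struct dma_fence *fence = dma_fence_get(head);
	int err;

	/* On success, `fence` is replaced with a reference to the
	 * node covering `point` (NULL if that point has already
	 * signaled); -EINVAL means the chain does not reach it. */
	err = dma_fence_chain_find_seqno(&fence, point);
	if (err) {
		dma_fence_put(fence);
		return ERR_PTR(err);
	}
	return fence;
}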
st-dma-fence-chain.c:
    63  u64 seqno)  in mock_chain() argument
    72  seqno);  in mock_chain()
   281  fence->seqno);  in find_signaled()
   326  fence ? fence->seqno : 0);  in find_out_of_order()
   363  fence->seqno,  in find_gap()
   401  int seqno;  in __find_race() local
   403  seqno = get_random_u32_inclusive(1, data->fc.chain_length);  in __find_race()
   405  err = dma_fence_chain_find_seqno(&fence, seqno);  in __find_race()
   408  seqno);  in __find_race()
   419  if (fence->seqno == seqno) {  in __find_race()
   [all …]
/linux/drivers/gpu/drm/i915/selftests/

i915_syncmap.c:
   146  static int check_seqno(struct i915_syncmap *leaf, unsigned int idx, u32 seqno)  in check_seqno() argument
   154  if (__sync_seqno(leaf)[idx] != seqno) {  in check_seqno()
   156  __func__, idx, __sync_seqno(leaf)[idx], seqno);  in check_seqno()
   163  static int check_one(struct i915_syncmap **sync, u64 context, u32 seqno)  in check_one() argument
   167  err = i915_syncmap_set(sync, context, seqno);  in check_one()
   189  err = check_seqno((*sync), ilog2((*sync)->bitmap), seqno);  in check_one()
   193  if (!i915_syncmap_is_later(sync, context, seqno)) {  in check_one()
   195  context, seqno);  in check_one()
   238  static int check_leaf(struct i915_syncmap **sync, u64 context, u32 seqno)  in check_leaf() argument
   242  err = i915_syncmap_set(sync, context, seqno);  in check_leaf()
   [all …]
igt_spinner.c:
    89  if (!spin->seqno) {  in igt_spinner_pin()
    94  spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);  in igt_spinner_pin()
   178  *batch++ = rq->fence.seqno;  in igt_spinner_create_request()
   222  u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);  in hws_seqno() local
   224  return READ_ONCE(*seqno);  in hws_seqno()
   246  if (spin->seqno) {  in igt_spinner_fini()
   259  rq->fence.seqno),  in igt_wait_for_spinner()
   262  rq->fence.seqno),  in igt_wait_for_spinner()
/linux/drivers/gpu/drm/radeon/

radeon_trace.h:
   127  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
   129  TP_ARGS(dev, ring, seqno),
   134  __field(u32, seqno)
   140  __entry->seqno = seqno;
   144  __entry->dev, __entry->ring, __entry->seqno)
   149  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
   151  TP_ARGS(dev, ring, seqno)
   156  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
   158  TP_ARGS(dev, ring, seqno)
   163  TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
   [all …]
/linux/include/trace/events/

rpcgss.h:
   322  __field(u32, seqno)
   329  __entry->seqno = gc->gc_seq;
   334  __entry->xid, __entry->seqno)
   403  __field(u32, seqno)
   412  __entry->seqno = rqst->rq_seqno;
   417  __entry->xid, __entry->seqno)
   434  __field(u32, seqno)
   443  __entry->seqno = task->tk_rqstp->rq_seqno;
   450  __entry->xid, __entry->seqno, __entry->seq_xmit,
   492  u32 seqno
   [all …]
dma_fence.h:
    22  __field(unsigned int, seqno)
    29  __entry->seqno = fence->seqno;
    32  TP_printk("driver=%s timeline=%s context=%u seqno=%u",
    34  __entry->seqno)
/linux/drivers/gpu/drm/i915/gt/

selftest_timeline.c:
   199  u32 seqno;  member
   211  if (__intel_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {  in __igt_sync()
   213  name, p->name, ctx, p->seqno, str_yes_no(p->expected));  in __igt_sync()
   218  ret = __intel_timeline_sync_set(tl, ctx, p->seqno);  in __igt_sync()
   396  u32 seqno = prandom_u32_state(&prng);  in bench_sync() local
   398  if (!__intel_timeline_sync_is_later(&tl, id, seqno))  in bench_sync()
   399  __intel_timeline_sync_set(&tl, id, seqno);  in bench_sync()
   496  if (READ_ONCE(*tl->hwsp_seqno) != tl->seqno) {  in checked_tl_write()
   498  *tl->hwsp_seqno, tl->seqno);  in checked_tl_write()
   697  u32 seqno[2];  in live_hwsp_wrap() local
   [all …]
intel_timeline.h:
    46  u64 context, u32 seqno)  in __intel_timeline_sync_set() argument
    48  return i915_syncmap_set(&tl->sync, context, seqno);  in __intel_timeline_sync_set()
    54  return __intel_timeline_sync_set(tl, fence->context, fence->seqno);  in intel_timeline_sync_set()
    58  u64 context, u32 seqno)  in __intel_timeline_sync_is_later() argument
    60  return i915_syncmap_is_later(&tl->sync, context, seqno);  in __intel_timeline_sync_is_later()
    66  return __intel_timeline_sync_is_later(tl, fence->context, fence->seqno);  in intel_timeline_sync_is_later()
    74  u32 *seqno);
intel_tlb.c:
   113  static bool tlb_seqno_passed(const struct intel_gt *gt, u32 seqno)  in tlb_seqno_passed() argument
   118  return (s32)(cur - ALIGN(seqno, 2)) > 0;  in tlb_seqno_passed()
   121  void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)  in intel_gt_invalidate_tlb_full() argument
   131  if (tlb_seqno_passed(gt, seqno))  in intel_gt_invalidate_tlb_full()
   138  if (tlb_seqno_passed(gt, seqno))  in intel_gt_invalidate_tlb_full()
   154  write_seqcount_invalidate(&gt->tlb.seqno);  in intel_gt_invalidate_tlb_full()
   163  seqcount_mutex_init(&gt->tlb.seqno, &gt->tlb.invalidate_lock);  in intel_gt_init_tlb()
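tlb_seqno_passed() above is the classic wraparound-safe sequence-number comparison: subtract in unsigned arithmetic, then reinterpret the difference as signed, so a 32-bit counter can wrap without breaking ordering. A standalone illustration:

/* Standalone demo of wraparound-safe seqno comparison, the trick
 * used by tlb_seqno_passed() above: the unsigned difference, read
 * as signed, gives the right ordering as long as the two values
 * are less than 2^31 apart. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seqno_passed(uint32_t current, uint32_t target)
{
	return (int32_t)(current - target) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 50));		/* plainly later */
	assert(!seqno_passed(50, 100));		/* plainly earlier */
	/* Across the 32-bit wrap: 5 is "after" 0xfffffffb. */
	assert(seqno_passed(5, UINT32_C(0xfffffffb)));
	return 0;
}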
intel_timeline.c:
   230  WRITE_ONCE(*hwsp_seqno, tl->seqno);  in intel_timeline_reset_seqno()
   302  GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);  in timeline_advance()
   304  return tl->seqno += 1 + tl->has_initial_breadcrumb;  in timeline_advance()
   309  u32 *seqno)  in __intel_timeline_get_seqno() argument
   321  *seqno = timeline_advance(tl);  in __intel_timeline_get_seqno()
   322  GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));  in __intel_timeline_get_seqno()
   328  u32 *seqno)  in intel_timeline_get_seqno() argument
   330  *seqno = timeline_advance(tl);  in intel_timeline_get_seqno()
   333  if (unlikely(!*seqno && tl->has_initial_breadcrumb))  in intel_timeline_get_seqno()
   334  return __intel_timeline_get_seqno(tl, seqno);  in intel_timeline_get_seqno()
   [all …]
/linux/drivers/gpu/drm/i915/

i915_syncmap.c:
    79  DECLARE_FLEX_ARRAY(u32, seqno);
    99  return p->seqno;  in __sync_seqno()
   151  bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno)  in i915_syncmap_is_later() argument
   192  return seqno_later(__sync_seqno(p)[idx], seqno);  in i915_syncmap_is_later()
   200  p = kmalloc(struct_size(p, seqno, KSYNCMAP), GFP_KERNEL);  in __sync_alloc_leaf()
   211  static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)  in __sync_set_seqno() argument
   216  __sync_seqno(p)[idx] = seqno;  in __sync_set_seqno()
   227  static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)  in __sync_set() argument
   332  __sync_set_seqno(p, id, seqno);  in __sync_set()
   350  int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno)  in i915_syncmap_set() argument
   [all …]
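i915_syncmap is a compressed radix tree mapping 64-bit context ids to the last seqno recorded, used to skip redundant waits. A kernel-context sketch of the check-then-record pattern, mirroring the intel_timeline.h wrappers above (not standalone; the syncmap is assumed to be initialised elsewhere):

/* Kernel-context sketch: remember the last seqno synced against
 * per fence context, and skip fences already covered. */
static struct i915_syncmap *sync;	/* from i915_syncmap_init(&sync) */

static bool already_synced(const struct dma_fence *fence)
{
	return i915_syncmap_is_later(&sync, fence->context, fence->seqno);
}

static int record_sync(const struct dma_fence *fence)
{
	/* May allocate tree nodes, hence the error return. */
	return i915_syncmap_set(&sync, fence->context, fence->seqno);
}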
i915_trace.h:
   275  __field(u32, seqno)
   284  __entry->seqno = rq->fence.seqno;
   290  __entry->ctx, __entry->seqno, __entry->flags)
   302  __field(u32, seqno)
   311  __entry->seqno = rq->fence.seqno;
   317  __entry->ctx, __entry->seqno, __entry->tail)
   350  __field(u32, seqno)
   360  __entry->seqno = rq->fence.seqno;
   367  __entry->ctx, __entry->seqno,
   380  __field(u32, seqno)
   [all …]
/linux/net/dccp/

ackvec.c:
    51  int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)  in dccp_ackvec_update_records() argument
    59  avr->avr_ack_seqno = seqno;  in dccp_ackvec_update_records()
   132  u64 seqno, enum dccp_ackvec_states state)  in dccp_ackvec_update_old() argument
   163  (unsigned long long)seqno, state);  in dccp_ackvec_update_old()
   197  u64 seqno, enum dccp_ackvec_states state)  in dccp_ackvec_add_new() argument
   237  av->av_buf_ackno = seqno;  in dccp_ackvec_add_new()
   250  u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;  in dccp_ackvec_input() local
   254  dccp_ackvec_add_new(av, 1, seqno, state);  in dccp_ackvec_input()
   255  av->av_tail_ackno = seqno;  in dccp_ackvec_input()
   258  s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);  in dccp_ackvec_input()
   [all …]
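DCCP sequence numbers are 48 bits wide, so dccp_delta_seqno() must compute a signed distance modulo 2^48. A standalone sketch of that arithmetic (the sign-extension shift is the standard trick; the kernel's exact helper may differ):

/* Standalone sketch of a signed delta between two 48-bit sequence
 * numbers. Shifting the 48-bit difference into the top of a 64-bit
 * word and arithmetic-shifting back sign-extends it. Illustrative,
 * not the kernel's exact implementation. */
#include <assert.h>
#include <stdint.h>

#define SEQ48_MASK ((UINT64_C(1) << 48) - 1)

static int64_t delta_seqno48(uint64_t from, uint64_t to)
{
	uint64_t diff = (to - from) & SEQ48_MASK;

	return ((int64_t)(diff << 16)) >> 16;	/* sign-extend bit 47 */
}

int main(void)
{
	assert(delta_seqno48(10, 15) == 5);
	assert(delta_seqno48(5, 2) == -3);
	/* Wrap: from near the top of the 48-bit space forward to 2. */
	assert(delta_seqno48(SEQ48_MASK - 1, 2) == 4);
	return 0;
}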
/linux/drivers/net/wireless/mediatek/mt76/

agg-rx.c:
    84  mt76_rx_aggr_release_frames(tid, frames, status->seqno);  in mt76_rx_aggr_check_release()
   126  u16 seqno;  in mt76_rx_aggr_check_ctl() local
   135  seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));  in mt76_rx_aggr_check_ctl()
   142  mt76_rx_aggr_release_frames(tid, frames, seqno);  in mt76_rx_aggr_check_ctl()
   155  u16 seqno, head, size, idx;  in mt76_rx_aggr_reorder() local
   187  seqno = status->seqno;  in mt76_rx_aggr_reorder()
   189  sn_less = ieee80211_sn_less(seqno, head);  in mt76_rx_aggr_reorder()
   204  if (seqno == head) {  in mt76_rx_aggr_reorder()
   217  if (!ieee80211_sn_less(seqno, head + size)) {  in mt76_rx_aggr_reorder()
   218  head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));  in mt76_rx_aggr_reorder()
   [all …]
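802.11 sequence numbers live in a 12-bit space (modulo 4096), so the reorder buffer compares them with modular arithmetic: a is "less than" b when the modular distance from a to b is under half the space. A standalone sketch of the helpers this code relies on (mac80211 provides the real ieee80211_sn_less() and friends; the boundary handling here is a simplified illustration):

/* Standalone sketch of 12-bit 802.11 sequence-number arithmetic,
 * modeled on mac80211's ieee80211_sn_less()/_inc()/_sub(). */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SN_MODULO 0x1000	/* 2^12 */
#define SN_MASK   (SN_MODULO - 1)

static uint16_t sn_sub(uint16_t a, uint16_t b) { return (a - b) & SN_MASK; }
static uint16_t sn_inc(uint16_t sn)            { return (sn + 1) & SN_MASK; }

/* a < b iff a trails b by less than half the sequence space. */
static bool sn_less(uint16_t a, uint16_t b)
{
	uint16_t d = sn_sub(b, a);

	return d != 0 && d < (SN_MODULO >> 1);
}

int main(void)
{
	assert(sn_less(10, 20));
	assert(!sn_less(20, 10));
	assert(sn_less(4090, 5));	/* across the 12-bit wrap */
	assert(sn_inc(SN_MASK) == 0);
	return 0;
}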
/linux/drivers/gpu/drm/vmwgfx/

vmwgfx_irq.c:
   120  static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)  in vmw_fifo_idle() argument
   128  uint32_t seqno = vmw_fence_read(dev_priv);  in vmw_update_seqno() local
   130  if (dev_priv->last_read_seqno != seqno) {  in vmw_update_seqno()
   131  dev_priv->last_read_seqno = seqno;  in vmw_update_seqno()
   137  uint32_t seqno)  in vmw_seqno_passed() argument
   141  if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))  in vmw_seqno_passed()
   145  if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))  in vmw_seqno_passed()
   148  if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))  in vmw_seqno_passed()
   156  ret = ((atomic_read(&dev_priv->marker_seq) - seqno)  in vmw_seqno_passed()
   165  uint32_t seqno,  in vmw_fallback_wait() argument
   [all …]
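vmw_seqno_passed() uses the unsigned-window variant of the wraparound test: last_read_seqno - seqno, computed modulo 2^32, is small only when seqno is at or behind the last value read from the device. A standalone illustration (the window constant here is illustrative; the real VMW_FENCE_WRAP bounds how far ahead fences may be issued):

/* Standalone demo of the unsigned-window seqno test used above. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FENCE_WRAP (UINT32_C(1) << 24)	/* illustrative window */

static bool seqno_passed(uint32_t last_read, uint32_t seqno)
{
	return last_read - seqno < FENCE_WRAP;
}

int main(void)
{
	assert(seqno_passed(100, 90));		/* already completed */
	assert(!seqno_passed(90, 100));		/* still pending */
	/* Works across the 32-bit wrap of the hardware counter. */
	assert(seqno_passed(5, UINT32_C(0xffffff00)));
	return 0;
}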
/linux/net/dccp/ccids/lib/

packet_history.h:
    38  u64 seqno;  member
    43  tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)  in tfrc_tx_hist_find_entry() argument
    45  while (head != NULL && head->seqno != seqno)  in tfrc_tx_hist_find_entry()
    50  int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
/linux/drivers/gpu/drm/msm/

msm_gpu_trace.h:
    40  __field(u32, seqno)
    47  __entry->seqno = submit->seqno;
    51  __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
    64  __field(u32, seqno)
    74  __entry->seqno = submit->seqno;
    81  __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
/linux/include/uapi/linux/

batadv_packet.h:
   221  __be32 seqno;  member
   247  __be32 seqno;  member
   267  __be32 seqno;  member
   320  __be16 seqno;  member
   350  __be32 seqno;  member
   388  __be16 seqno;  member
   471  __be16 seqno;  member
   489  __be32 seqno;  member
/linux/net/batman-adv/

fragmentation.c:
   107  u16 seqno)  in batadv_frag_init_chain() argument
   111  if (chain->seqno == seqno)  in batadv_frag_init_chain()
   118  chain->seqno = seqno;  in batadv_frag_init_chain()
   145  u16 seqno, hdr_size = sizeof(struct batadv_frag_packet);  in batadv_frag_insert_packet() local
   156  seqno = ntohs(frag_packet->seqno);  in batadv_frag_insert_packet()
   157  bucket = seqno % BATADV_FRAG_BUFFER_COUNT;  in batadv_frag_insert_packet()
   172  if (batadv_frag_init_chain(chain, seqno)) {  in batadv_frag_insert_packet()
   487  frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));  in batadv_frag_send_packet()
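The on-wire seqno (big-endian, per batadv_packet.h above) is converted to host order and hashed into a small array of reassembly chains; a chain is reused when its stored seqno matches, otherwise reinitialised for the new seqno. A standalone sketch of that bucket-selection step (names and sizes are illustrative, not batman-adv's):

/* Standalone sketch of seqno-keyed fragment buckets, as in
 * batadv_frag_insert_packet() above. */
#include <arpa/inet.h>	/* ntohs() */
#include <stdbool.h>
#include <stdint.h>

#define FRAG_BUFFER_COUNT 8	/* illustrative bucket count */

struct frag_chain {
	uint16_t seqno;		/* seqno this chain reassembles */
	bool in_use;
};

static struct frag_chain chains[FRAG_BUFFER_COUNT];

/* Returns the chain for a wire-order seqno, claiming (and
 * implicitly resetting) the bucket if it held another seqno. */
struct frag_chain *frag_get_chain(uint16_t seqno_be)
{
	uint16_t seqno = ntohs(seqno_be);
	struct frag_chain *chain = &chains[seqno % FRAG_BUFFER_COUNT];

	if (!chain->in_use || chain->seqno != seqno) {
		chain->in_use = true;	/* new chain: old fragments dropped */
		chain->seqno = seqno;
	}
	return chain;
}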
/linux/drivers/gpu/drm/xe/

xe_ring_ops.c:
   242  u64 batch_addr, u32 seqno)  in __emit_job_gen12_simple() argument
   253  seqno, true, dw, i);  in __emit_job_gen12_simple()
   257  seqno, dw, i);  in __emit_job_gen12_simple()
   269  i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);  in __emit_job_gen12_simple()
   293  u64 batch_addr, u32 seqno)  in __emit_job_gen12_video() argument
   315  seqno, true, dw, i);  in __emit_job_gen12_video()
   321  seqno, dw, i);  in __emit_job_gen12_video()
   332  i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);  in __emit_job_gen12_video()
   343  u64 batch_addr, u32 seqno)  in __emit_job_gen12_render_compute() argument
   370  seqno, dw, i);  in __emit_job_gen12_render_compute()
   [all …]
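These helpers build a job's ring commands in a dword buffer, each emit appending at index i and returning the new i; the final flush stores the job's seqno to a known GGTT address so completion can later be detected by comparing memory against the expected value. A standalone sketch of that emit-into-dword-buffer convention (opcode values and names are invented for illustration, not Xe's command encoding):

/* Standalone sketch of the dword-emission convention above: each
 * helper appends commands at index i and returns the new i; the
 * last command stores the seqno that waiters will poll for. */
#include <stdint.h>
#include <stdio.h>

#define OP_BATCH_START 0x1	/* invented opcode */
#define OP_STORE_DWORD 0x2	/* invented opcode */

static int emit_batch_start(uint64_t batch_addr, uint32_t *dw, int i)
{
	dw[i++] = OP_BATCH_START;
	dw[i++] = (uint32_t)batch_addr;		/* address, low bits */
	dw[i++] = (uint32_t)(batch_addr >> 32);	/* address, high bits */
	return i;
}

static int emit_store_seqno(uint64_t status_addr, uint32_t seqno,
			    uint32_t *dw, int i)
{
	dw[i++] = OP_STORE_DWORD;
	dw[i++] = (uint32_t)status_addr;
	dw[i++] = (uint32_t)(status_addr >> 32);
	dw[i++] = seqno;	/* value the waiter polls for */
	return i;
}

int main(void)
{
	uint32_t dw[16];
	int i = 0;

	i = emit_batch_start(0x100000, dw, i);
	i = emit_store_seqno(0x200000, 42, dw, i);
	printf("emitted %d dwords\n", i);
	return 0;
}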