/linux/include/trace/events/

scmi.h
    39   bool poll, int inflight),
    40   TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll, inflight),
    48   __field(int, inflight)
    57   __entry->inflight = inflight;
    60   TP_printk("pt=%02X msg_id=%02X seq=%04X transfer_id=%X poll=%u inflight=%d",
    62   __entry->transfer_id, __entry->poll, __entry->inflight)
    95   int status, int inflight),
    96   TP_ARGS(transfer_id, msg_id, protocol_id, seq, status, inflight),
   104   __field(int, inflight)
   113   __entry->inflight = inflight;
   [all …]

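All three trace headers in this directory follow the same TRACE_EVENT() pattern for their inflight field. A minimal sketch, with a hypothetical event name (foo_msg_xfer) and trimmed-down fields, of how the count travels from the tracepoint arguments into the ring-buffer record and out through the format string:

/* Hypothetical event; only the TRACE_EVENT() machinery itself is real. */
TRACE_EVENT(foo_msg_xfer,

    TP_PROTO(int transfer_id, u8 msg_id, int inflight),

    TP_ARGS(transfer_id, msg_id, inflight),

    TP_STRUCT__entry(
        __field(int, transfer_id)   /* layout of the ring-buffer record */
        __field(u8, msg_id)
        __field(int, inflight)
    ),

    TP_fast_assign(
        __entry->transfer_id = transfer_id; /* copied at trace time */
        __entry->msg_id = msg_id;
        __entry->inflight = inflight;
    ),

    TP_printk("msg_id=%02X transfer_id=%X inflight=%d",
              __entry->msg_id, __entry->transfer_id, __entry->inflight)
);
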
wbt.h
   127   * @inflight: tracked writes inflight
   132   int step, unsigned int inflight),
   134   TP_ARGS(bdi, status, step, inflight),
   140   __field(unsigned int, inflight)
   148   __entry->inflight = inflight;
   151   TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name,
   152   __entry->status, __entry->step, __entry->inflight)

page_pool.h
    17   s32 inflight, u32 hold, u32 release),
    19   TP_ARGS(pool, inflight, hold, release),
    23   __field(s32, inflight)
    31   __entry->inflight = inflight;
    37   TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
    38   __entry->pool, __entry->inflight, __entry->hold,

/linux/net/unix/

garbage.c
   129   /* If the receiver socket is not inflight, no cyclic in unix_update_graph()
   209   struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges() local
   212   if (!inflight) in unix_add_edges()
   216   edge->predecessor = inflight; in unix_add_edges()
   229   fpl->inflight = true; in unix_add_edges()
   260   fpl->inflight = false; in unix_del_edges()
   267   * inflight graph, and GC will not see it, so no lock needed. in unix_update_edges()
   309   if (fpl->inflight) in unix_destroy_fpl()
   325   /* The vertex's fd can be received by a non-inflight socket. */ in unix_vertex_dead()
   329   /* The vertex's fd can be received by an inflight socket in in unix_vertex_dead()
   [all …]

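For context, an fd becomes "inflight" the moment it is queued as an SCM_RIGHTS control message: the file reference then lives in a socket receive queue instead of any process's fd table. A userspace sketch (the helper name is mine) of queuing an fd this way; sending an AF_UNIX socket through itself like this is exactly what creates the reference cycles unix_update_graph() has to track:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd_inflight(int sock, int fd_to_send)
{
    char data = 'x';
    struct iovec iov = { .iov_base = &data, .iov_len = 1 };
    char cbuf[CMSG_SPACE(sizeof(int))];
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cbuf,
        .msg_controllen = sizeof(cbuf),
    };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

    /* On return, fd_to_send is inflight until the peer receives it. */
    return sendmsg(sock, &msg, 0);
}
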
/linux/drivers/crypto/chelsio/

chcr_core.c
    56   if (atomic_read(&dev->inflight)) { in detach_work_fn()
    59   pr_debug("Request Inflight Count %d\n", in detach_work_fn()
    60   atomic_read(&dev->inflight)); in detach_work_fn()
    65   atomic_read(&dev->inflight)); in detach_work_fn()
   103   atomic_set(&dev->inflight, 0); in chcr_dev_add()
   122   atomic_set(&dev->inflight, 0); in chcr_dev_init()
   232   if (atomic_read(&dev->inflight) != 0) { in chcr_detach_device()

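The detach path above is a common quiesce idiom: refuse to tear down while an atomic inflight counter is non-zero, and re-arm a delayed work to poll again later. A sketch under assumed names (struct my_dev, do_detach() and the 100 ms poll interval are illustrative, not chcr's):

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct my_dev {
    atomic_t inflight;              /* requests submitted, not yet done */
    struct delayed_work detach_work;
};

static void do_detach(struct my_dev *dev);  /* actual teardown, elided */

static void detach_work_fn(struct work_struct *work)
{
    struct my_dev *dev = container_of(work, struct my_dev,
                                      detach_work.work);

    if (atomic_read(&dev->inflight)) {
        /* Still busy: check again in a little while. */
        schedule_delayed_work(&dev->detach_work,
                              msecs_to_jiffies(100));
        return;
    }

    do_detach(dev);     /* quiesced: safe to detach now */
}
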
/linux/net/core/

page_pool.c
   641   s32 inflight; in page_pool_inflight() local
   643   inflight = _distance(hold_cnt, release_cnt); in page_pool_inflight()
   646   trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
   647   WARN(inflight < 0, "Negative(%d) inflight packet-pages", in page_pool_inflight()
   648   inflight); in page_pool_inflight()
   650   inflight = max(0, inflight); in page_pool_inflight()
   653   return inflight; in page_pool_inflight()
   686   /* Always account for inflight pages, even if we didn't in __page_pool_release_netmem_dma()
  1153   int inflight; in page_pool_release() local
  1156   inflight = page_pool_inflight(pool, true); in page_pool_release()
   [all …]

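The accounting above hinges on one trick: hold_cnt and release_cnt are free-running u32 counters, and the pages still outstanding are their wrap-safe signed difference. A condensed sketch of that calculation (the _distance() name mirrors the hits above; the surrounding function is simplified):

#include <linux/kernel.h>
#include <linux/types.h>

static s32 _distance(u32 a, u32 b)
{
    /* Signed difference stays correct even after the u32 counters wrap. */
    return (s32)(a - b);
}

static s32 pool_inflight(u32 hold_cnt, u32 release_cnt)
{
    s32 inflight = _distance(hold_cnt, release_cnt);

    /* More releases than holds would indicate a refcounting bug. */
    WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

    return max(0, inflight);
}
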
page_pool_user.c
   219   size_t inflight, refsz; in page_pool_nl_fill() local
   240   inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
   242   if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || in page_pool_nl_fill()
   244   inflight * refsz)) in page_pool_nl_fill()

/linux/block/

genhd.c
   129   unsigned int inflight[2], bool mq_driver) in bdev_count_inflight_rw()
   136   blk_mq_in_driver_rw(part, inflight); in bdev_count_inflight_rw()
   148   * causing the inflight number to be negative. in bdev_count_inflight_rw()
   150   inflight[READ] = read > 0 ? read : 0; in bdev_count_inflight_rw()
   151   inflight[WRITE] = write > 0 ? write : 0; in bdev_count_inflight_rw()
   155   * bdev_count_inflight - get the number of inflight IOs for a block device.
   159   * Inflight here means started IO accounting, from bdev_start_io_acct() for
   165   unsigned int inflight[2] = {0}; in bdev_count_inflight() local
   167   bdev_count_inflight_rw(part, inflight, false); in bdev_count_inflight()
   169   return inflight[READ] + inflight[WRITE]; in bdev_count_inflight()
   [all …]

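The clamp at lines 150-151 above exists because the per-cpu start/done counters are summed without locking: a completion observed on one CPU before its matching start is read on another can push the raw sum below zero. A sketch of that summation with an assumed counter layout (struct disk_io_stat is mine, not genhd's):

#include <linux/percpu.h>

struct disk_io_stat {
    unsigned long started[2];       /* [READ]/[WRITE] IOs started */
    unsigned long completed[2];     /* IOs finished */
};

static unsigned int count_inflight(struct disk_io_stat __percpu *stat, int rw)
{
    long sum = 0;
    int cpu;

    for_each_possible_cpu(cpu) {
        struct disk_io_stat *s = per_cpu_ptr(stat, cpu);

        sum += s->started[rw] - s->completed[rw];
    }

    /* Unsynchronized snapshots can transiently go negative: clamp. */
    return sum > 0 ? sum : 0;
}
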
blk-wbt.c
   199   int inflight, limit; in wbt_rqw_done() local
   201   inflight = atomic_dec_return(&rqw->inflight); in wbt_rqw_done()
   219   if (inflight && inflight >= limit) in wbt_rqw_done()
   223   int diff = limit - inflight; in wbt_rqw_done()
   225   if (!inflight || diff >= rwb->wb_background / 2) in wbt_rqw_done()
   293   ret += atomic_read(&rwb->rq_wait[i].inflight); in wbt_inflight()
   427   unsigned int inflight = wbt_inflight(rwb); in wb_timer_fn() local
   435   trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight); in wb_timer_fn()
   477   if (rqd->scale_step || inflight) in wb_timer_fn()
   825   seq_printf(m, "%d: inflight %d\n", i, in wbt_inflight_show()
   [all …]

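wbt_rqw_done() above encodes a throttling decision worth spelling out: after a completion, waiters stay parked while inflight is still at or over the limit, and are only woken once the queue is idle or enough headroom (half the background budget) has opened up. A stripped-down sketch with the relevant state passed in as assumed parameters:

#include <linux/atomic.h>
#include <linux/wait.h>

static void rqw_done(atomic_t *inflight, wait_queue_head_t *wait,
                     int limit, int wb_background)
{
    int left = atomic_dec_return(inflight);

    /* Still at or over the limit: no waiting writer can proceed yet. */
    if (left && left >= limit)
        return;

    /* Wake when idle, or once half the background budget is free. */
    if (!left || (limit - left) >= wb_background / 2)
        wake_up_all(wait);
}
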
/linux/net/ipv4/

tcp_bbr.c
    42   * it briefly enters PROBE_RTT to cut inflight to a minimum value to re-probe
    85   BBR_PROBE_RTT, /* cut inflight to min to probe min_rtt */
   405   /* Ensure gain cycling gets inflight above BDP even for small BDPs. */ in bbr_quantization_budget()
   412   /* Find inflight based on min RTT and the estimated bottleneck bandwidth. */
   415   u32 inflight; in bbr_inflight() local
   417   inflight = bbr_bdp(sk, bw, gain); in bbr_inflight()
   418   inflight = bbr_quantization_budget(sk, inflight); in bbr_inflight()
   420   return inflight; in bbr_inflight()
   427   * inflight level that it estimates has already been "baked in" by previous
   432   * If we're increasing inflight, then we want to know if the transmit of the
   [all …]

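bbr_inflight() above is the heart of BBR's pacing/cwnd target: inflight should match the bandwidth-delay product scaled by the current gain, after which bbr_quantization_budget() pads the result so gain cycling can still probe above the BDP even when the BDP is tiny. A back-of-envelope version, with simplified units and an 8.8 fixed-point gain (the real code uses different internal scaling):

#include <linux/math64.h>
#include <linux/time.h>
#include <linux/types.h>

static u32 bbr_target_inflight(u64 bw_pkts_per_sec, u32 min_rtt_us,
                               u32 gain_x256)
{
    /* BDP: packets needed in flight to fill the pipe for one min RTT. */
    u64 bdp = div_u64(bw_pkts_per_sec * min_rtt_us, USEC_PER_SEC);

    /* Apply the pacing/cwnd gain in 8.8 fixed point. */
    return (u32)((bdp * gain_x256) >> 8);
}

The quantization budget on top of this (not shown) accounts for TSO/aggregation granularity, so the target never rounds down to a value too small to keep the pipe busy.
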
/linux/tools/testing/selftests/net/af_unix/

scm_rights.c
   270   int inflight, int receiver) in TEST_F()
   284   self->fd[inflight * 2], in TEST_F()
   285   self->fd[inflight * 2], in TEST_F()
   316   #define send_fd(inflight, receiver) \
   317   __send_fd(_metadata, self, variant, inflight, receiver)
   200   __send_fd(struct __test_metadata * _metadata, const FIXTURE_DATA (scm_rights)* self, const FIXTURE_VARIANT (scm_rights)* variant, int inflight, int receiver) __send_fd() argument
   240   send_fd(inflight, receiver) global() argument

/linux/drivers/vhost/

scsi.c
   107   /* Refcount for the inflight reqs */
   143   /* Used to track inflight cmd */
   144   struct vhost_scsi_inflight *inflight; member
   217   * Reference counting for inflight reqs, used for flush operation. At
   223   * Indicate current inflight in use, protected by vq->mutex.
   262   struct vhost_scsi_inflight *inflight; member
   296   struct vhost_scsi_inflight *inflight; in vhost_scsi_done_inflight() local
   298   inflight = container_of(kref, struct vhost_scsi_inflight, kref); in vhost_scsi_done_inflight()
   299   complete(&inflight->comp); in vhost_scsi_done_inflight()
   314   /* store old inflight */ in vhost_scsi_init_inflight()
   [all …]

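The vhost_scsi snippets above show a flush built from a kref plus a completion: every outstanding command holds a reference on the current inflight tracker, and flush drops the initial reference and sleeps until the last command releases it. A self-contained sketch of the pattern (the struct and function names are mine):

#include <linux/completion.h>
#include <linux/kref.h>

struct inflight_tracker {
    struct kref kref;           /* one ref per outstanding command */
    struct completion comp;     /* fired when the last ref drops */
};

static void inflight_release(struct kref *kref)
{
    struct inflight_tracker *t =
        container_of(kref, struct inflight_tracker, kref);

    complete(&t->comp);
}

static void inflight_init(struct inflight_tracker *t)
{
    kref_init(&t->kref);        /* holds the "initial" reference */
    init_completion(&t->comp);
}

static void inflight_flush(struct inflight_tracker *t)
{
    kref_put(&t->kref, inflight_release);   /* drop the initial ref */
    wait_for_completion(&t->comp);          /* block until cmds drain */
}

In the real driver a fresh tracker is swapped in first (vhost_scsi_init_inflight's "store old inflight"), so new commands land on the new tracker while the flush waits out the old one.
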
/linux/net/atm/

pppoatm.c
    64   atomic_t inflight; member
    75   * inflight == -2 represents an empty queue, -1 one packet, and zero means
   139   atomic_dec(&pvcc->inflight); in pppoatm_pop()
   244   atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
   250   * *must* be set before we do the atomic_inc() on pvcc->inflight. in pppoatm_may_send()
   274   atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
   406   atomic_set(&pvcc->inflight, NONE_INFLIGHT); in pppoatm_assign_vcc()

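The pppoatm counter above is deliberately biased so that atomic_inc_not_zero() can test for queue space and claim a slot in a single atomic step: -2 means empty, -1 means one packet queued, 0 means full, and the increment simply fails at 0. A sketch of the idiom (function names are illustrative):

#include <linux/atomic.h>
#include <linux/types.h>

#define NONE_INFLIGHT   -2  /* biased "empty queue" starting value */

static bool tx_claim_slot(atomic_t *inflight)
{
    /* -2 -> -1 and -1 -> 0 succeed; at 0 (full) the increment fails. */
    return atomic_inc_not_zero(inflight);
}

static void tx_complete(atomic_t *inflight)
{
    atomic_dec(inflight);   /* free the slot from the TX-done path */
}
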
/linux/drivers/infiniband/ulp/rtrs/

README
   100   corresponding path is disconnected, all the inflight IO are failed over to a
   131   inflight IO and for the error code.
   149   inflight IO and for the error code. The new rkey is sent back using
   171   outstanding inflight IO and the error code.
   192   outstanding inflight IO and the error code. The new rkey is sent back using

rtrs-clt-stats.c
   102   atomic_read(&stats->inflight), sum.failover_cnt); in rtrs_clt_stats_rdma_to_str()
   158   atomic_set(&s->inflight, 0); in rtrs_clt_reset_all_stats()
   182   atomic_inc(&stats->inflight); in rtrs_clt_update_all_stats()

/linux/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/

instruction.json
   105   …scription": "Prefetch response received but was dropped since we don't support inflight upgrades.",
   108   …escription": "Prefetch response received but was dropped since we don't support inflight upgrades."

/linux/drivers/gpu/drm/i915/gt/

intel_execlists_submission.c
   336   * If the inflight context did not trigger the preemption, then maybe in need_preempt()
   529   old = ce->inflight; in execlists_schedule_in()
   532   WRITE_ONCE(ce->inflight, ptr_inc(old)); in execlists_schedule_in()
   558   * before we clear ce->inflight make sure that the context has been in kick_siblings()
   593   GEM_BUG_ON(ce->inflight != engine); in __execlists_schedule_out()
   638   WRITE_ONCE(ce->inflight, NULL); in __execlists_schedule_out()
   648   GEM_BUG_ON(!ce->inflight); in execlists_schedule_out()
   649   ce->inflight = ptr_dec(ce->inflight); in execlists_schedule_out()
   650   if (!__intel_context_inflight_count(ce->inflight)) in execlists_schedule_out()
   991   const struct intel_engine_cs *inflight; in virtual_matches() local
   [all …]

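The ptr_inc()/ptr_dec() calls above work because ce->inflight is not a plain pointer: engine pointers are aligned, so i915 reuses the low bits as a small per-context inflight count, packing "which engine" and "how many requests" into one word that can be published with WRITE_ONCE(). A sketch of the packing (the 3-bit mask is illustrative, not i915's exact width):

#include <linux/types.h>

#define INFLIGHT_BITS   0x7UL   /* low alignment bits reused as a count */

struct intel_engine_cs;

static inline void *ptr_inc(void *p)
{
    return (void *)((unsigned long)p + 1);  /* bump the packed count */
}

static inline void *ptr_dec(void *p)
{
    return (void *)((unsigned long)p - 1);
}

static inline unsigned long inflight_count(const void *p)
{
    return (unsigned long)p & INFLIGHT_BITS;
}

static inline struct intel_engine_cs *inflight_engine(const void *p)
{
    return (struct intel_engine_cs *)((unsigned long)p & ~INFLIGHT_BITS);
}
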
intel_timeline.c
   423   unsigned long count, ready, inflight; in intel_gt_show_timelines() local
   440   inflight = 0; in intel_gt_show_timelines()
   449   inflight++; in intel_gt_show_timelines()
   453   drm_printf(m, "count: %lu, ready: %lu, inflight: %lu", in intel_gt_show_timelines()
   454   count, ready, inflight); in intel_gt_show_timelines()

intel_context_types.h
    91   struct intel_engine_cs *inflight; member
    95   __intel_context_inflight(READ_ONCE((ce)->inflight))
    97   __intel_context_inflight_count(READ_ONCE((ce)->inflight))

/linux/drivers/gpu/drm/i915/

i915_scheduler_types.h
   118   * @requests: list of requests inflight on this schedule engine
   144   * However, since the we may have recorded the priority of an inflight
   183   * @bump_inflight_request_prio: update priority of an inflight request

/linux/drivers/crypto/cavium/cpt/

cpt_hw_types.h
   239   * when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
   430   * inflight:8 [7:0](RO/H) Inflight count. Counts the number of instructions
   436   * 2. [INFLIGHT] is polled until equals to zero.
   443   u64 inflight:8; member
   445   u64 inflight:8;
   634   * clearing [ENA]) only when quiescent (see CPT()_VQ()_INPROG[INFLIGHT]).
   639   * See also CPT()_PF_Q()_CTL[CONT_ERR] and CPT()_VQ()_INPROG[INFLIGHT].

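INPROG[INFLIGHT] above is an 8-bit read-only hardware count, and the documented quiesce sequence is: clear ENA, then poll INFLIGHT until it reads zero. A sketch of the polling half (the register accessor, the mapped register argument, and the lack of a timeout are all simplifying assumptions):

#include <linux/io.h>
#include <linux/processor.h>

static void cpt_vq_wait_quiescent(void __iomem *inprog_reg)
{
    u64 val;

    /* INFLIGHT occupies bits [7:0]; spin until no instructions remain. */
    do {
        cpu_relax();
        val = readq(inprog_reg);
    } while (val & 0xff);
}
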
/linux/include/linux/

bpf_mprog.h
    33   * // ensure there are no inflight users of @entry:
    57   * // ensure there are no inflight users of @entry:
   108   * assumes that for the old bpf_mprog_entry there are no inflight users
   227   * bpf_mprog ensured that there are no inflight users anymore. in bpf_mprog_complete_release()

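"Ensure there are no inflight users" in the comments above is the classic publish-then-wait pattern: swap the entry pointer, let one RCU grace period elapse so every reader that might still hold the old entry has left its read-side section, and only then recycle it. A generic sketch (struct entry and the slot layout are assumptions, not bpf_mprog's actual types):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct entry;   /* stand-in for the RCU-managed object */

static void replace_entry(struct entry __rcu **slot, struct entry *new)
{
    struct entry *old;

    /* Publish @new; readers see either the old or the new entry. */
    old = rcu_replace_pointer(*slot, new, true);

    /* After this returns, no inflight users of @old can remain. */
    synchronize_rcu();

    kfree(old);
}
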
/linux/Documentation/devicetree/bindings/net/dsa/

brcm,sf2.yaml
    74   brcm,acb-packets-inflight:
   156   brcm,acb-packets-inflight;

/linux/Documentation/netlink/specs/

netdev.yaml
   160   name: inflight
   168   name: inflight-mem
   171   Amount of memory held by inflight pages.
   614   - inflight
   615   - inflight-mem

/linux/tools/testing/selftests/net/

nl_netdev.py
   198   refs = sum([pp["inflight"] for pp in pp_list])
   210   refs = sum([pp["inflight"] for pp in pp_list if pp.get("ifindex") == nsim.ifindex])
   217   refs = sum([pp["inflight"] for pp in pp_list])