/linux/tools/lib/list_sort.c
    117: * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
    127: * pending lists. This is beautifully simple code, but rather subtle.
    135: * 2^k, which is when we have 2^k elements pending in smaller lists,
    140: * a third list of size 2^(k+1), so there are never more than two pending.
    142: * The number of pending lists of size 2^k is determined by the
    151: * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
    152: * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
    153: * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
    154: * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
    155: * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
    [all …]

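The excerpt above describes how lib/list_sort.c decides when pending sublists get merged. As a rough illustration of that counting rule (this is not the kernel's merge code), the decision can be read off the binary form of the running element count: the number of trailing one-bits selects the merge level, and a count consisting only of one-bits defers merging. A minimal standalone C sketch:

#include <stdio.h>
#include <stddef.h>

/*
 * Illustrative only: k = number of trailing one-bits of count.  If any
 * higher bit of count is set, two pending sublists of size 2^k would be
 * merged on this step; if count is all ones, no merge happens.
 */
static int merge_level(size_t count)
{
	int k = 0;

	while (count & 1) {
		count >>= 1;
		k++;
	}
	return count ? k : -1;		/* -1: merge deferred */
}

int main(void)
{
	for (size_t count = 1; count <= 16; count++) {
		int k = merge_level(count);

		if (k < 0)
			printf("count=%2zu: no merge\n", count);
		else
			printf("count=%2zu: merge two sublists of size 2^%d\n", count, k);
	}
	return 0;
}
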
/linux/fs/bcachefs/rcu_pending.c
    187: static noinline void __process_finished_items(struct rcu_pending *pending, in __process_finished_items() argument
    196: __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) { in __process_finished_items()
    208: switch ((ulong) pending->process) { in __process_finished_items()
    262: pending->process(pending, *genradix_ptr(&objs.objs, i)); in __process_finished_items()
    272: pending->process(pending, obj); in __process_finished_items()
    278: static bool process_finished_items(struct rcu_pending *pending, in process_finished_items() argument
    287: if ((p->objs.nr && __poll_state_synchronize_rcu(pending->srcu, p->objs.data[0].seq)) || in process_finished_items()
    288: (p->lists[0].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[0].seq)) || in process_finished_items()
    289: (p->lists[1].head && __poll_state_synchronize_rcu(pending->srcu, p->lists[1].seq)) || in process_finished_items()
    291: __process_finished_items(pending, p, flags); in process_finished_items()
    [all …]

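The calls above poll whether the grace period recorded for each queued object has elapsed before the object is processed. Below is a hedged userspace analog of that poll-then-process shape; it does not use the kernel RCU API, and completed_seq merely stands in for the grace-period state:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pending_obj {
	unsigned long seq;	/* sequence recorded at enqueue time */
	int payload;
};

static unsigned long completed_seq;	/* stand-in for completed grace periods */

static bool seq_done(unsigned long seq)
{
	return seq <= completed_seq;
}

/* Process objects whose recorded sequence has completed; keep the rest. */
static size_t process_finished(struct pending_obj *objs, size_t nr)
{
	size_t kept = 0;

	for (size_t i = 0; i < nr; i++) {
		if (seq_done(objs[i].seq))
			printf("processing payload %d (seq %lu)\n",
			       objs[i].payload, objs[i].seq);
		else
			objs[kept++] = objs[i];	/* still waiting */
	}
	return kept;
}

int main(void)
{
	struct pending_obj objs[] = {
		{ .seq = 1, .payload = 10 },
		{ .seq = 2, .payload = 20 },
		{ .seq = 3, .payload = 30 },
	};
	size_t nr = 3;

	completed_seq = 2;		/* pretend two grace periods elapsed */
	nr = process_finished(objs, nr);
	printf("%zu object(s) still pending\n", nr);
	return 0;
}
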
/linux/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
    446: "BriefDescription": "Read Pending Queue Full Cycles",
    452: …"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read P…
    456: "BriefDescription": "Read Pending Queue Full Cycles",
    462: …"PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read P…
    466: "BriefDescription": "Read Pending Queue Not Empty",
    472: …Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. T…
    477: "BriefDescription": "Read Pending Queue Not Empty",
    483: …Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. T…
    488: "BriefDescription": "Read Pending Queue Allocations",
    493: …"PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the R…
    [all …]

/linux/drivers/gpu/drm/mediatek/mtk_plane.c
    48: state->pending.format = DRM_FORMAT_RGB565; in mtk_plane_reset()
    49: state->pending.modifier = DRM_FORMAT_MOD_LINEAR; in mtk_plane_reset()
    65: state->pending = old_state->pending; in mtk_plane_duplicate_state()
    200: mtk_plane_state->pending.enable = true; in mtk_plane_update_new_state()
    201: mtk_plane_state->pending.pitch = pitch; in mtk_plane_update_new_state()
    202: mtk_plane_state->pending.hdr_pitch = hdr_pitch; in mtk_plane_update_new_state()
    203: mtk_plane_state->pending.format = format; in mtk_plane_update_new_state()
    204: mtk_plane_state->pending.modifier = modifier; in mtk_plane_update_new_state()
    205: mtk_plane_state->pending.addr = addr; in mtk_plane_update_new_state()
    206: mtk_plane_state->pending.hdr_addr = hdr_addr; in mtk_plane_update_new_state()
    [all …]

/linux/arch/mips/sni/pcit.c
    183: u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG; in pcit_hwint1() local
    187: irq = ffs((pending >> 16) & 0x7f); in pcit_hwint1()
    196: u32 pending = *(volatile u32 *)SNI_PCIT_INT_REG; in pcit_hwint0() local
    200: irq = ffs((pending >> 16) & 0x3f); in pcit_hwint0()
    209: u32 pending = read_c0_cause() & read_c0_status(); in sni_pcit_hwint() local
    211: if (pending & C_IRQ1) in sni_pcit_hwint()
    213: else if (pending & C_IRQ2) in sni_pcit_hwint()
    215: else if (pending & C_IRQ3) in sni_pcit_hwint()
    217: else if (pending & C_IRQ5) in sni_pcit_hwint()
    223: u32 pending = read_c0_cause() & read_c0_status(); in sni_pcit_hwint_cplus() local
    [all …]

/linux/drivers/platform/surface/aggregator/ssh_request_layer.c
    47: * SSH_RTL_MAX_PENDING - Maximum number of pending requests.
    132: spin_lock(&rtl->pending.lock); in ssh_rtl_pending_remove()
    135: spin_unlock(&rtl->pending.lock); in ssh_rtl_pending_remove()
    139: atomic_dec(&rtl->pending.count); in ssh_rtl_pending_remove()
    142: spin_unlock(&rtl->pending.lock); in ssh_rtl_pending_remove()
    151: spin_lock(&rtl->pending.lock); in ssh_rtl_tx_pending_push()
    154: spin_unlock(&rtl->pending.lock); in ssh_rtl_tx_pending_push()
    159: spin_unlock(&rtl->pending.lock); in ssh_rtl_tx_pending_push()
    163: atomic_inc(&rtl->pending.count); in ssh_rtl_tx_pending_push()
    164: list_add_tail(&ssh_request_get(rqst)->node, &rtl->pending.head); in ssh_rtl_tx_pending_push()
    [all …]

/linux/drivers/platform/surface/aggregator/ssh_packet_layer.c
    46: * - NAK received (this is equivalent to issuing re-submit for all pending
    49: * - cancel (non-pending and pending)
    54: * containing all packets scheduled for transmission, and the set of pending
    61: * - the pending set,
    71: * sets-up the timeout and adds the packet to the pending set before starting
    79: * reference to the packet from the pending set. The receiver thread will then
    82: * On receival of a NAK, the receiver thread re-submits all currently pending
    87: * checking all currently pending packets if their timeout has expired. If the
    99: * packet cannot be added to the queue, the pending set, and the timeout, or
    102: * references from the data structures (queue, pending) have to be removed.
    [all …]

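The comment excerpted above describes two structures: a queue of packets scheduled for transmission and a set of pending (transmitted, unacknowledged) packets, where an ACK removes a packet from the pending set and a NAK re-submits everything pending. A small self-contained C sketch of that bookkeeping, using plain arrays instead of the driver's locked lists (names and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_PKTS 8	/* capacity checks omitted for brevity */

struct layer {
	int queue[MAX_PKTS];   int n_queued;	/* scheduled for transmission */
	int pending[MAX_PKTS]; int n_pending;	/* transmitted, awaiting ACK  */
};

/* Queue a packet for transmission. */
static void submit(struct layer *l, int id)
{
	l->queue[l->n_queued++] = id;
}

/* Transmitter: move the head of the queue into the pending set. */
static int transmit_one(struct layer *l)
{
	int id;

	if (!l->n_queued)
		return -1;
	id = l->queue[0];
	l->n_queued--;
	memmove(l->queue, l->queue + 1, l->n_queued * sizeof(l->queue[0]));
	l->pending[l->n_pending++] = id;
	return id;
}

/* Receiver saw an ACK: drop that packet from the pending set. */
static void ack(struct layer *l, int id)
{
	for (int i = 0; i < l->n_pending; i++) {
		if (l->pending[i] == id) {
			l->pending[i] = l->pending[--l->n_pending];
			return;
		}
	}
}

/* Receiver saw a NAK: re-submit everything currently pending. */
static void nak(struct layer *l)
{
	for (int i = 0; i < l->n_pending; i++)
		submit(l, l->pending[i]);
	l->n_pending = 0;
}

int main(void)
{
	struct layer l = { 0 };

	submit(&l, 1);
	submit(&l, 2);
	transmit_one(&l);
	transmit_one(&l);
	ack(&l, 1);	/* packet 1 acknowledged */
	nak(&l);	/* packet 2 goes back onto the queue */
	printf("queued=%d pending=%d\n", l.n_queued, l.n_pending);
	return 0;
}
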
/linux/drivers/platform/surface/aggregator/ssh_request_layer.h
    52: * @pending: Set/list of pending requests.
    53: * @pending.lock: Lock for modifying the request set.
    54: * @pending.head: List-head of the pending set/list.
    55: * @pending.count: Number of currently pending requests.
    78: } pending; member
    130: bool ssh_rtl_cancel(struct ssh_request *rqst, bool pending);

/linux/drivers/gpu/drm/i915/i915_sw_fence.c
    149: atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */ in __i915_sw_fence_wake_up_all()
    196: if (!atomic_dec_and_test(&fence->pending)) in __i915_sw_fence_complete()
    224: int pending; in i915_sw_fence_await() local
    230: pending = atomic_read(&fence->pending); in i915_sw_fence_await()
    232: if (pending < 1) in i915_sw_fence_await()
    234: } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1)); in i915_sw_fence_await()
    257: atomic_set(&fence->pending, 1); in i915_sw_fence_reinit()
    344: unsigned int pending; in __i915_sw_fence_await_sw_fence() local
    361: pending = I915_SW_FENCE_FLAG_FENCE; in __i915_sw_fence_await_sw_fence()
    373: pending |= I915_SW_FENCE_FLAG_ALLOC; in __i915_sw_fence_await_sw_fence()
    [all …]

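Lines 230-234 above show the idiom of bumping a pending count only while it is still positive, via a compare-and-swap retry loop. A hedged, standalone C11 sketch of the same idiom (this is not the i915 code; the kernel's atomic_try_cmpxchg() is replaced here by the standard compare-exchange):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *pending only if it is still >= 1; fail once it has dropped
 * to 0 (completed) or below (torn down), mirroring the excerpt's loop. */
static bool await_fence(_Atomic int *pending)
{
	int old = atomic_load(pending);

	do {
		if (old < 1)
			return false;
	} while (!atomic_compare_exchange_weak(pending, &old, old + 1));
	return true;
}

int main(void)
{
	_Atomic int pending = 1;

	printf("await: %s, pending now %d\n",
	       await_fence(&pending) ? "ok" : "too late",
	       atomic_load(&pending));

	atomic_store(&pending, 0);	/* fence already completed */
	printf("await: %s\n", await_fence(&pending) ? "ok" : "too late");
	return 0;
}
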
/linux/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
    407: …Queue Inserts : Counts number of read requests allocated in the PMM Read Pending Queue. This inc…
    411: "BriefDescription": "PMM Read Pending Queue Occupancy",
    416: …"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of th…
    421: "BriefDescription": "PMM Read Pending Queue Occupancy",
    427: …"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of th…
    432: "BriefDescription": "PMM Read Pending Queue Occupancy",
    438: …"PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of th…
    484: … Write Queue Inserts : Counts number of write requests allocated in the PMM Write Pending Queue.",
    488: "BriefDescription": "PMM Write Pending Queue Occupancy",
    493: …"PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of t…
    [all …]

/linux/tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c
    20: /* State propagated through children, pending aggregation */
    21: __u64 pending; member
    57: static int create_attach_counter(__u64 cg_id, __u64 state, __u64 pending) in create_attach_counter()
    59: struct attach_counter init = {.state = state, .pending = pending}; in create_attach_counter()
    109: /* Collect pending stats from subtree */ in BPF_PROG()
    110: if (total_counter->pending) { in BPF_PROG()
    111: delta += total_counter->pending; in BPF_PROG()
    112: total_counter->pending = 0; in BPF_PROG()
    127: parent_counter->pending in BPF_PROG()
    56: create_attach_counter(__u64 cg_id,__u64 state,__u64 pending) create_attach_counter() argument
    [all …]

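The BPF selftest excerpted above aggregates per-cgroup stats rstat-style: a flush folds a node's pending contribution into its own state and forwards the same delta to its parent's pending field. A minimal userspace C sketch of that one-level-per-flush propagation (names here are illustrative, not the selftest's API):

#include <stdio.h>

struct attach_counter {
	unsigned long long state;	/* settled total */
	unsigned long long pending;	/* accumulated by children, not yet settled */
};

/* Fold our pending delta into state and push it up one level. */
static void flush_one(struct attach_counter *c, struct attach_counter *parent)
{
	unsigned long long delta = c->pending;

	if (!delta)
		return;
	c->pending = 0;
	c->state += delta;
	if (parent)
		parent->pending += delta;
}

int main(void)
{
	struct attach_counter root = { 0 }, child = { .pending = 3 };

	flush_one(&child, &root);	/* child settles; root now has pending = 3 */
	flush_one(&root, NULL);		/* root settles */
	printf("child.state=%llu root.state=%llu\n", child.state, root.state);
	return 0;
}
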
/linux/Documentation/virt/kvm/devices/xics.rst
    47: * Pending interrupt priority, 8 bits
    48: Zero is the highest priority, 255 means no interrupt is pending.
    50: * Pending IPI (inter-processor interrupt) priority, 8 bits
    51: Zero is the highest priority, 255 means no IPI is pending.
    53: * Pending interrupt source number, 24 bits
    54: Zero means no interrupt pending, 2 means an IPI is pending
    88: * Pending flag, 1 bit
    90: This bit is 1 if the source has a pending interrupt, otherwise 0.

/linux/arch/mips/cobalt/irq.c
    25: unsigned pending = read_c0_status() & read_c0_cause() & ST0_IM; in plat_irq_dispatch() local
    28: if (pending & CAUSEF_IP2) in plat_irq_dispatch()
    30: else if (pending & CAUSEF_IP6) { in plat_irq_dispatch()
    36: } else if (pending & CAUSEF_IP3) in plat_irq_dispatch()
    38: else if (pending & CAUSEF_IP4) in plat_irq_dispatch()
    40: else if (pending & CAUSEF_IP5) in plat_irq_dispatch()
    42: else if (pending & CAUSEF_IP7) in plat_irq_dispatch()

/linux/drivers/net/ethernet/intel/ixgbe/ixgbe_fw_update.c
    502: * ixgbe_get_pending_updates - Check if the component has a pending update
    504: * @pending: on return, bitmap of updates pending
    507: * Check if the device has any pending updates on any flash components.
    510: * pending with the bitmap of pending updates.
    512: int ixgbe_get_pending_updates(struct ixgbe_adapter *adapter, u8 *pending, in ixgbe_get_pending_updates() argument
    531: *pending = 0; in ixgbe_get_pending_updates()
    534: *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NVM; in ixgbe_get_pending_updates()
    537: *pending |= IXGBE_ACI_NVM_ACTIV_SEL_OROM; in ixgbe_get_pending_updates()
    540: *pending |= IXGBE_ACI_NVM_ACTIV_SEL_NETLIST; in ixgbe_get_pending_updates()
    548: * ixgbe_cancel_pending_update - Cancel any pending update for a component
    [all …]

/linux/include/asm-generic/qspinlock_types.h
    20: * pending bit, we can allow better optimization of the lock
    21: * acquisition for the pending bit holder.
    26: u8 pending; member
    39: u8 pending; member
    56: * 8: pending
    63: * 8: pending
    79: #define _Q_PENDING_MASK _Q_SET_MASK(PENDING)

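The header above builds per-field masks with _Q_SET_MASK() over a packed 32-bit lock word (locked byte, pending at bit 8, tail above). The sketch below shows the same style of mask construction for an assumed layout matching the excerpt's "8: pending" comment; the offsets are illustrative, not copied from the header, which varies them with NR_CPUS:

#include <stdio.h>

/* Assumed, illustrative layout: locked in bits 0-7, pending in bits 8-15,
 * tail in bits 16-31. */
#define Q_LOCKED_OFFSET   0
#define Q_LOCKED_BITS     8
#define Q_PENDING_OFFSET  8
#define Q_PENDING_BITS    8
#define Q_TAIL_OFFSET     16
#define Q_TAIL_BITS       16

/* Same shape as the header's _Q_SET_MASK(): a BITS-wide mask at OFFSET. */
#define Q_MASK(off, bits)	(((1U << (bits)) - 1) << (off))

#define Q_LOCKED_MASK	Q_MASK(Q_LOCKED_OFFSET, Q_LOCKED_BITS)
#define Q_PENDING_MASK	Q_MASK(Q_PENDING_OFFSET, Q_PENDING_BITS)
#define Q_TAIL_MASK	Q_MASK(Q_TAIL_OFFSET, Q_TAIL_BITS)

int main(void)
{
	unsigned int val = (3u << Q_TAIL_OFFSET) | (1u << Q_PENDING_OFFSET) | 1u;

	printf("locked=%u pending=%u tail=%u\n",
	       (val & Q_LOCKED_MASK) >> Q_LOCKED_OFFSET,
	       (val & Q_PENDING_MASK) >> Q_PENDING_OFFSET,
	       (val & Q_TAIL_MASK) >> Q_TAIL_OFFSET);
	return 0;
}
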
/linux/net/vmw_vsock/vmci_transport.c
    43: struct sock *pending,
    484: struct sock *pending; in vmci_transport_get_pending() local
    495: pending = sk_vsock(vpending); in vmci_transport_get_pending()
    496: sock_hold(pending); in vmci_transport_get_pending()
    501: pending = NULL; in vmci_transport_get_pending()
    503: return pending; in vmci_transport_get_pending()
    507: static void vmci_transport_release_pending(struct sock *pending) in vmci_transport_release_pending() argument
    509: sock_put(pending); in vmci_transport_release_pending()
    912: /* Processing of pending connections for servers goes through in vmci_transport_recv_pkt_work()
    943: struct sock *pending; in vmci_transport_recv_listen() local
    [all …]

/linux/drivers/gpu/drm/qxl/qxl_irq.c
    36: uint32_t pending; in qxl_irq_handler() local
    38: pending = xchg(&qdev->ram_header->int_pending, 0); in qxl_irq_handler()
    40: if (!pending) in qxl_irq_handler()
    45: if (pending & QXL_INTERRUPT_DISPLAY) { in qxl_irq_handler()
    50: if (pending & QXL_INTERRUPT_CURSOR) { in qxl_irq_handler()
    54: if (pending & QXL_INTERRUPT_IO_CMD) { in qxl_irq_handler()
    58: if (pending & QXL_INTERRUPT_ERROR) { in qxl_irq_handler()
    66: if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { in qxl_irq_handler()

/linux/drivers/irqchip/irq-loongson-htpic.c
    33: uint32_t pending; in htpic_irq_dispatch() local
    36: pending = readl(priv->base); in htpic_irq_dispatch()
    38: writel(pending, priv->base); in htpic_irq_dispatch()
    40: if (!pending) in htpic_irq_dispatch()
    43: while (pending) { in htpic_irq_dispatch()
    44: int bit = __ffs(pending); in htpic_irq_dispatch()
    52: pending &= ~BIT(bit); in htpic_irq_dispatch()
    66: /* Ack all possible pending IRQs */ in htpic_reg_init()

/linux/drivers/irqchip/irq-ath79-cpu.c
    36: unsigned long pending; in plat_irq_dispatch() local
    39: pending = read_c0_status() & read_c0_cause() & ST0_IM; in plat_irq_dispatch()
    41: if (!pending) { in plat_irq_dispatch()
    46: pending >>= CAUSEB_IP; in plat_irq_dispatch()
    47: while (pending) { in plat_irq_dispatch()
    48: irq = fls(pending) - 1; in plat_irq_dispatch()
    52: pending &= ~BIT(irq); in plat_irq_dispatch()

/linux/drivers/irqchip/irq-keystone.c
    83: unsigned long pending; in keystone_irq_handler() local
    88: pending = keystone_irq_readl(kirq); in keystone_irq_handler()
    89: keystone_irq_writel(kirq, pending); in keystone_irq_handler()
    91: dev_dbg(kirq->dev, "pending 0x%lx, mask 0x%x\n", pending, kirq->mask); in keystone_irq_handler()
    93: pending = (pending >> BIT_OFS) & ~kirq->mask; in keystone_irq_handler()
    95: dev_dbg(kirq->dev, "pending after mask 0x%lx\n", pending); in keystone_irq_handler()
    98: if (BIT(src) & pending) { in keystone_irq_handler()

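The irqchip and MIPS dispatch handlers excerpted above share one idiom: snapshot the pending-interrupt word, then loop over its set bits, clearing each bit as it is serviced. A standalone sketch of that loop in highest-bit-first order, as in the ath79 excerpt; it uses a compiler builtin where the kernel would use fls(), so it is illustrative only:

#include <stdio.h>

/* Highest set bit first, like the kernel's fls(); returns 0 for x == 0. */
static int fls_ulong(unsigned long x)
{
	return x ? (int)(8 * sizeof(x)) - __builtin_clzl(x) : 0;
}

/* Service every bit set in a snapshot of the pending word. */
static void dispatch(unsigned long pending)
{
	while (pending) {
		int irq = fls_ulong(pending) - 1;

		printf("handling irq %d\n", irq);
		pending &= ~(1UL << irq);
	}
}

int main(void)
{
	dispatch(0x29UL);	/* bits 5, 3, 0: serviced in that order */
	return 0;
}
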
/linux/kernel/locking/qspinlock.c
    48: * unlock the next pending (next->locked), we compress both these: {tail,
    114: * (queue tail, pending bit, lock value)
    121: * pending : (0,1,1) +--> (0,1,0) \ | : in encode_tail()
    145: * Wait for in-progress pending->locked hand-overs with a bounded
    163: * trylock || pending in clear_pending_set_locked()
    165: * 0,0,* -> 0,1,* -> 0,0,1 pending, trylock in clear_pending_set_locked()
    172: * Undo and queue; our setting of PENDING might have made the
    178: /* Undo PENDING if we set it. */ in xchg_tail()
    186: * We're pending, wait for the owner to go away.
    200: * take ownership and clear the pending bi
    [all …]

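The comments above describe the pending-bit fast path: the first contending CPU sets the pending bit rather than queueing, waits for the owner to release the locked byte, then performs the (0,1,0) -> (0,0,1) hand-over in a single store. Below is a hedged userspace C11 sketch of that hand-over only; there is no MCS queue, and threads beyond the single pending waiter simply retry, which the real qspinlock does not do:

#include <stdatomic.h>
#include <stdio.h>

#define LOCKED   0x001u		/* lock byte   */
#define PENDING  0x100u		/* pending bit */

static _Atomic unsigned int lockval;

static void lock(void)
{
	for (;;) {
		unsigned int v = 0;

		/* Uncontended fast path: (0,0,0) -> (0,0,1). */
		if (atomic_compare_exchange_strong(&lockval, &v, LOCKED))
			return;

		/* Lock held, nobody pending: claim the pending bit. */
		if (v == LOCKED &&
		    atomic_compare_exchange_strong(&lockval, &v, LOCKED | PENDING)) {
			/* We're pending: wait for the owner to go away... */
			while (atomic_load(&lockval) & LOCKED)
				;
			/* ...then take ownership and clear the pending bit:
			 * (0,1,0) -> (0,0,1). */
			atomic_store(&lockval, LOCKED);
			return;
		}
		/* Otherwise retry (the kernel queues on the MCS tail instead). */
	}
}

static void unlock(void)
{
	atomic_fetch_and(&lockval, ~LOCKED);
}

int main(void)
{
	lock();
	printf("held: lockval=0x%x\n", atomic_load(&lockval));
	unlock();
	printf("released: lockval=0x%x\n", atomic_load(&lockval));
	return 0;
}
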
/linux/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
    31: /* Wait until all pending commands complete */
    41: * pending commands list.
    44: /* If there is another pending ramrod, wait until it finishes and
    88: int state; /* "ramrod is pending" state bit */
    193: /* Commands pending for an execution. */
    196: /* Commands pending for an completion. */
    216: * Called before removing pending commands, cleaning allocated
    222: * This will try to cancel the current pending commands list
    303: bool head_exe_request; /* Pending execution request. */
    304: unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
    [all …]

/linux/kernel/irq/irq_sim.c
    17: unsigned long *pending; member
    62: *state = test_bit(hwirq, irq_ctx->work_ctx->pending); in irq_sim_get_irqchip_state()
    80: assign_bit(hwirq, irq_ctx->work_ctx->pending, state); in irq_sim_set_irqchip_state()
    136: while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) { in irq_sim_handle_irq()
    137: offset = find_next_bit(work_ctx->pending, in irq_sim_handle_irq()
    139: clear_bit(offset, work_ctx->pending); in irq_sim_handle_irq()
    210: unsigned long *pending __free(bitmap) = bitmap_zalloc(num_irqs, GFP_KERNEL); in irq_domain_create_sim_full()
    211: if (!pending) in irq_domain_create_sim_full()
    222: work_ctx->pending = no_free_ptr(pending); in irq_domain_create_sim_full()
    243: bitmap_free(work_ctx->pending); in irq_domain_remove_sim()

/linux/kernel/irq/migration.c
    9: * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
    11: * @force_clear: If set clear the move pending bit unconditionally.
    13: * last one in the pending mask.
    15: * Returns true if the pending bit was set and the pending mask contains an
    26: * The outgoing CPU might be the last online target in a pending in irq_fixup_move_pending()
    27: * interrupt move. If that's the case clear the pending move bit. in irq_fixup_move_pending()
    92: * If the there is a cleanup pending in the underlying in irq_move_masked_irq()

/linux/arch/mips/loongson32/common/irq.c
    119: /* Get pending sources, masked by current enables */ in ls1x_irq_dispatch()
    131: unsigned int pending; in plat_irq_dispatch() local
    133: pending = read_c0_cause() & read_c0_status() & ST0_IM; in plat_irq_dispatch()
    135: if (pending & CAUSEF_IP7) in plat_irq_dispatch()
    137: else if (pending & CAUSEF_IP2) in plat_irq_dispatch()
    139: else if (pending & CAUSEF_IP3) in plat_irq_dispatch()
    141: else if (pending & CAUSEF_IP4) in plat_irq_dispatch()
    143: else if (pending & CAUSEF_IP5) in plat_irq_dispatch()
    145: else if (pending & CAUSEF_IP6) in plat_irq_dispatch()
    156: /* Disable interrupts and clear pending, in ls1x_irq_init()
