
Searched full text for "deliver" (Results 1 – 25 of 365), sorted by relevance


/linux/drivers/iommu/iommufd/
eventq.c
33 list_for_each_entry_safe(group, next, &fault->common.deliver, node) { in iommufd_auto_response_faults()
70 list_for_each_entry_safe(group, next, &fault->common.deliver, node) { in iommufd_fault_destroy()
99 /* Fetch the first node out of the fault->deliver list */
103 struct list_head *list = &fault->common.deliver; in iommufd_fault_deliver_fetch()
115 /* Restore a node back to the head of the fault->deliver list */
120 list_add(&group->node, &fault->common.deliver); in iommufd_fault_deliver_restore()
231 list_for_each_entry_safe(cur, next, &eventq->deliver, node) { in iommufd_veventq_abort()
255 struct list_head *list = &eventq->deliver; in iommufd_veventq_deliver_fetch()
284 struct list_head *list = &eventq->deliver; in iommufd_veventq_deliver_restore()
365 if (!list_empty(&eventq->deliver)) in iommufd_eventq_fops_poll()
[all …]
iommufd_private.h
552 spinlock_t lock; /* protects the deliver list */
553 struct list_head deliver; member
567 * An iommufd_fault object represents an interface to deliver I/O page faults
600 struct list_head node; /* for iommufd_eventq::deliver */
609 * An iommufd_veventq object represents an interface to deliver vIOMMU events to
656 if (list_is_last(&veventq->lost_events_header.node, &eventq->deliver)) in iommufd_vevent_handler()
658 list_add_tail(&vevent->node, &eventq->deliver); in iommufd_vevent_handler()
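The iommufd hits above all revolve around one pattern: pending fault/event groups sit on a lock-protected deliver list, the read path fetches one node from the head, and a partially consumed node is restored to the head so nothing is lost. Below is a minimal userspace sketch of that fetch/restore idea, using a hand-rolled list and a pthread mutex in place of the kernel's list_head and spinlock; the names are illustrative, not the iommufd API.

    #include <pthread.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *prev, *next; int payload; };

    struct eventq {
        pthread_mutex_t lock;   /* protects the deliver list (a spinlock in the kernel) */
        struct node deliver;    /* circular list head of pending events */
    };

    static void list_init(struct node *h) { h->prev = h->next = h; }
    static int  list_empty(const struct node *h) { return h->next == h; }

    static void list_add_head(struct node *h, struct node *n)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    static void list_del(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Fetch the first node out of the deliver list, or NULL if it is empty. */
    static struct node *deliver_fetch(struct eventq *q)
    {
        struct node *n = NULL;

        pthread_mutex_lock(&q->lock);
        if (!list_empty(&q->deliver)) {
            n = q->deliver.next;
            list_del(n);
        }
        pthread_mutex_unlock(&q->lock);
        return n;
    }

    /* Restore a node to the head of the list, e.g. after a partial read. */
    static void deliver_restore(struct eventq *q, struct node *n)
    {
        pthread_mutex_lock(&q->lock);
        list_add_head(&q->deliver, n);
        pthread_mutex_unlock(&q->lock);
    }

    int main(void)
    {
        struct eventq q = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct node ev = { .payload = 42 };
        struct node *n;

        list_init(&q.deliver);
        deliver_restore(&q, &ev);   /* enqueue at the head for the demo */
        n = deliver_fetch(&q);
        printf("fetched payload %d\n", n ? n->payload : -1);
        return 0;
    }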
/linux/fs/afs/
yfsclient.c
328 * Deliver reply data to operations that just return a file status and a volume
350 * Deliver reply data to an YFS.FetchData64.
460 .deliver = yfs_deliver_fs_fetch_data64,
508 * Deliver reply data for YFS.CreateFile or YFS.MakeDir.
542 .deliver = yfs_deliver_fs_create_vnode,
593 .deliver = yfs_deliver_fs_create_vnode,
640 * Deliver reply data to a YFS.RemoveFile2 operation.
683 .deliver = yfs_deliver_fs_remove_file2,
726 * Deliver reply data to a YFS.RemoveFile or YFS.RemoveDir operation.
753 .deliver = yfs_deliver_fs_remove,
[all …]
vlclient.c
15 * Deliver reply data to a VL.GetEntryByNameU call.
102 .deliver = afs_deliver_vl_get_entry_by_name_u,
165 * Deliver reply data to a VL.GetAddrsU call.
250 .deliver = afs_deliver_vl_get_addrs_u,
315 * Deliver reply data to an VL.GetCapabilities operation.
372 .deliver = afs_deliver_vl_get_capabilities,
423 * Deliver reply data to a YFSVL.GetEndpoints call.
626 .deliver = afs_deliver_yfsvl_get_endpoints,
678 * Deliver reply data to a YFSVL.GetCellName operation.
745 .deliver = afs_deliver_yfsvl_get_cell_name,
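In the AFS/YFS hits, every RPC type has a descriptor whose .deliver member points at the function that parses that call's reply, so the generic receive path never needs to know which operation is in flight. A small sketch of that shape in plain C; the struct and function names here are invented for illustration and are not the afs_call_type API.

    #include <stdio.h>

    struct call;    /* one in-flight RPC */

    /* Per-operation descriptor: the reply parser is the deliver hook. */
    struct call_type {
        const char *name;
        int (*deliver)(struct call *call);
    };

    struct call {
        const struct call_type *type;
        int reply_len;              /* stand-in for the buffered reply data */
    };

    static int deliver_get_entry(struct call *call)
    {
        printf("%s: parsed %d reply bytes\n", call->type->name, call->reply_len);
        return 0;
    }

    static const struct call_type get_entry_call = {
        .name    = "VL.GetEntryByNameU",
        .deliver = deliver_get_entry,
    };

    /* Generic receive path: it only knows about the hook, not the operation. */
    static int deliver_reply(struct call *call)
    {
        return call->type->deliver(call);
    }

    int main(void)
    {
        struct call call = { .type = &get_entry_call, .reply_len = 128 };

        return deliver_reply(&call);
    }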
/linux/arch/x86/kvm/
xen.c
692 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_inject_pending_events()
1858 * and prod the vCPU to deliver it for itself. in kvm_xen_set_evtchn_fast()
1880 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_set_evtchn_fast()
2058 } deliver; member
2088 if (!evtchnfd->deliver.port.port || in kvm_xen_eventfd_update()
2089 evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port) in kvm_xen_eventfd_update()
2093 if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_eventfd_update()
2096 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; in kvm_xen_eventfd_update()
2097 if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) { in kvm_xen_eventfd_update()
2098 evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; in kvm_xen_eventfd_update()
[all …]
/linux/tools/testing/selftests/kvm/x86/
xen_shinfo_test.c
230 /* Attempt to deliver a *masked* interrupt */ in guest_code()
238 /* Now deliver an *unmasked* interrupt */ in guest_code()
243 /* Change memslots and deliver an interrupt */ in guest_code()
248 /* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */ in guest_code()
255 /* Our turn. Deliver event channel (to ourselves) with in guest_code()
274 /* Deliver "outbound" event channel to an eventfd which in guest_code()
590 .u.evtchn.deliver.port.port = EVTCHN_TEST1, in main()
591 .u.evtchn.deliver.port.vcpu = vcpu->id + 1, in main()
592 .u.evtchn.deliver.port.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL, in main()
598 inj.u.evtchn.deliver.port.vcpu = vcpu->id; in main()
[all …]
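The xen.c and xen_shinfo_test.c hits show the event-channel routing target as a nested deliver.port block (port number, destination vCPU, priority), with an update path that only lets the vCPU change while rejecting a different port or a non-2-level priority. The following is a compact sketch of that update rule with stand-in types; these are not the KVM uAPI structures.

    #include <stdio.h>

    enum { PRIO_2LEVEL = 0 };      /* stand-in for the only accepted priority */

    struct evtchn_target {
        unsigned int port;         /* guest event-channel port */
        unsigned int vcpu;         /* destination vCPU */
        unsigned int priority;
    };

    struct evtchnfd {
        struct { struct evtchn_target port; } deliver;
    };

    /* Mirror of the update rule in the hits above: same non-zero port,
     * 2-level priority only, and only the destination vCPU may change. */
    static int evtchnfd_update(struct evtchnfd *fd, const struct evtchn_target *req)
    {
        if (!fd->deliver.port.port || fd->deliver.port.port != req->port)
            return -1;                          /* the port may not change */
        if (req->priority != PRIO_2LEVEL)
            return -1;                          /* only 2-level delivery */
        fd->deliver.port.priority = req->priority;
        if (fd->deliver.port.vcpu != req->vcpu)
            fd->deliver.port.vcpu = req->vcpu;  /* retarget delivery */
        return 0;
    }

    int main(void)
    {
        struct evtchnfd fd = {
            .deliver.port = { .port = 1, .vcpu = 0, .priority = PRIO_2LEVEL },
        };
        struct evtchn_target req = { .port = 1, .vcpu = 1, .priority = PRIO_2LEVEL };

        printf("update: %s\n", evtchnfd_update(&fd, &req) ? "rejected" : "ok");
        return 0;
    }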
/linux/drivers/irqchip/
irq-csky-mpintc.c
143 * The csky,mpintc could support auto irq deliver, but it only in csky_irq_set_affinity()
144 * could deliver external irq to one cpu or all cpus. So it in csky_irq_set_affinity()
145 * doesn't support deliver external irq to a group of cpus in csky_irq_set_affinity()
147 * SO we only use auto deliver mode when affinity mask_val is in csky_irq_set_affinity()
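The comment in this hit describes the affinity policy: the controller can auto-deliver an external irq, but only to one CPU or to all CPUs, so an affinity mask that covers some-but-not-all CPUs has to be collapsed to a single target. The sketch below models that decision with a plain bitmask; it is a guess at the policy as described, not the driver's code, and the single-CPU choice (lowest set bit) is an assumption.

    #include <stdio.h>

    #define AUTO_DELIVER (-1)   /* stand-in for "the hardware picks the target" */

    /* Return AUTO_DELIVER when the mask covers every online CPU, otherwise
     * the lowest CPU set in the mask (or -2 for an empty mask). */
    static int pick_irq_target(unsigned int mask, unsigned int online_mask)
    {
        if ((mask & online_mask) == online_mask)
            return AUTO_DELIVER;            /* "all cpus": auto deliver mode */

        for (int cpu = 0; cpu < 32; cpu++)
            if (mask & (1u << cpu))
                return cpu;                 /* group of cpus: fall back to one */

        return -2;
    }

    int main(void)
    {
        unsigned int online = 0xF;          /* four CPUs online */

        printf("%d\n", pick_irq_target(0xF, online));   /* -1: auto deliver */
        printf("%d\n", pick_irq_target(0x6, online));   /*  1: single target */
        return 0;
    }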
/linux/net/rxrpc/
rxperf.c
66 int (*deliver)(struct rxperf_call *call);
160 call->deliver = rxperf_deliver_param_block; in rxperf_charge_preallocation()
278 * deliver messages to a call in rxperf_deliver_to_call()
301 ret = call->deliver(call); in rxperf_deliver_to_call()
405 call->deliver = rxperf_deliver_request; in rxperf_deliver_param_block()
435 return call->deliver(call);
439 * Deliver the request data. in rxperf_deliver_request()
65 int (*deliver)(struct rxperf_call *call); global() member
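rxperf drives each call through a chain of deliver stages: a generic loop keeps invoking call->deliver(call), and a stage that finishes swaps the pointer to the next stage and may tail-call it directly, as the 405/435 hits above suggest. Here is a stripped-down sketch of that stage machine with illustrative names, not the rxperf functions.

    #include <stdio.h>

    struct call {
        int (*deliver)(struct call *call);   /* current parsing stage */
    };

    static int deliver_request(struct call *call)
    {
        (void)call;
        printf("stage 2: consume the request data\n");
        return 0;                            /* done */
    }

    static int deliver_param_block(struct call *call)
    {
        printf("stage 1: parse the parameter block\n");
        call->deliver = deliver_request;     /* advance to the next stage */
        return call->deliver(call);          /* and run it straight away */
    }

    /* Generic loop: it only ever sees the current stage. */
    static int deliver_to_call(struct call *call)
    {
        return call->deliver(call);
    }

    int main(void)
    {
        struct call call = { .deliver = deliver_param_block };

        return deliver_to_call(&call);
    }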
/linux/net/ipv6/
ip6_input.c
361 * Deliver the packet to the host
514 bool deliver; in ip6_mc_input() local
530 deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL); in ip6_mc_input()
558 deliver = false; in ip6_mc_input()
570 deliver = true; in ip6_mc_input()
577 if (deliver) { in ip6_mc_input()
589 if (likely(deliver)) { in ip6_mc_input()
/linux/tools/perf/util/
ordered-events.c
245 ret = oe->deliver(oe, iter); in do_flush()
361 void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver, in ordered_events__init() argument
369 oe->deliver = deliver; in ordered_events__init()
412 ordered_events__deliver_t old_deliver = oe->deliver; in ordered_events__reinit()
ordered-events.h
47 ordered_events__deliver_t deliver; member
61 void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
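perf's ordered-events hits show the deliver callback being passed to ordered_events__init(), stored in oe->deliver, and invoked per event when the queue is flushed in do_flush(). A tiny self-contained sketch of that init-then-flush shape with stand-in types follows; it is not the perf API.

    #include <stdio.h>

    struct ordered_events;
    typedef int (*deliver_t)(struct ordered_events *oe, int event);

    struct ordered_events {
        deliver_t deliver;          /* callback supplied at init time */
        int       queued[8];
        int       nr;
    };

    static void oe_init(struct ordered_events *oe, deliver_t deliver)
    {
        oe->deliver = deliver;
        oe->nr = 0;
    }

    static void oe_queue(struct ordered_events *oe, int event)
    {
        if (oe->nr < 8)
            oe->queued[oe->nr++] = event;
    }

    /* Flush: hand every queued event to the registered deliver callback. */
    static int oe_flush(struct ordered_events *oe)
    {
        for (int i = 0; i < oe->nr; i++) {
            int ret = oe->deliver(oe, oe->queued[i]);

            if (ret)
                return ret;
        }
        oe->nr = 0;
        return 0;
    }

    static int print_event(struct ordered_events *oe, int event)
    {
        (void)oe;
        printf("delivered event %d\n", event);
        return 0;
    }

    int main(void)
    {
        struct ordered_events oe;

        oe_init(&oe, print_event);
        oe_queue(&oe, 1);
        oe_queue(&oe, 2);
        return oe_flush(&oe);
    }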
/linux/net/mctp/test/
route-test.c
229 bool deliver; member
254 if (params->deliver) { in mctp_test_route_input_sk()
278 { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 0, .deliver = true },
279 { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 1, .deliver = false },
280 { .hdr = RX_HDR(1, 10, 8, FL_S | FL_E), .type = 0, .deliver = false },
281 { .hdr = RX_HDR(1, 10, 8, FL_E | FL_TO), .type = 0, .deliver = false },
282 { .hdr = RX_HDR(1, 10, 8, FL_TO), .type = 0, .deliver = false },
283 { .hdr = RX_HDR(1, 10, 8, 0), .type = 0, .deliver = false },
434 bool deliver; member
491 if (params->deliver) in mctp_test_route_input_sk_keys()
[all …]
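The MCTP route test is table-driven: each entry pairs an input header and message type with an expected deliver boolean, and the test checks whether the packet actually reached the socket against that expectation. Below is a reduced sketch of the same pattern; the would_deliver() predicate is a placeholder, not MCTP's routing logic.

    #include <stdbool.h>
    #include <stdio.h>

    #define FL_S  0x1
    #define FL_E  0x2
    #define FL_TO 0x4

    struct test_param {
        unsigned int flags;
        int          type;
        bool         deliver;   /* expected outcome for this input */
    };

    /* Placeholder predicate: in this toy model only a single-fragment
     * (SOM+EOM), TO-flagged, type-0 message is deliverable. */
    static bool would_deliver(const struct test_param *p)
    {
        return p->type == 0 && p->flags == (FL_S | FL_E | FL_TO);
    }

    static const struct test_param tests[] = {
        { .flags = FL_S | FL_E | FL_TO, .type = 0, .deliver = true  },
        { .flags = FL_S | FL_E | FL_TO, .type = 1, .deliver = false },
        { .flags = FL_S | FL_E,         .type = 0, .deliver = false },
        { .flags = FL_TO,               .type = 0, .deliver = false },
    };

    int main(void)
    {
        int failures = 0;

        for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            if (would_deliver(&tests[i]) != tests[i].deliver) {
                printf("case %u failed\n", i);
                failures++;
            }
        }
        printf("%d failure(s)\n", failures);
        return failures != 0;
    }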
/linux/Documentation/networking/
x25-iface.rst
80 call "netif_rx" to deliver the received packets. Instead, it should
81 call "netif_receive_skb_core" from softirq context to deliver them.
/linux/net/can/
af_can.c
573 static inline void deliver(struct sk_buff *skb, struct receiver *rcv) in deliver() function
593 deliver(skb, rcv); in can_rcv_filter()
602 deliver(skb, rcv); in can_rcv_filter()
609 deliver(skb, rcv); in can_rcv_filter()
617 deliver(skb, rcv); in can_rcv_filter()
629 deliver(skb, rcv); in can_rcv_filter()
636 deliver(skb, rcv); in can_rcv_filter()
661 /* deliver the packet to sockets listening on all devices */ in can_receive()
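af_can's receive filter walks the registered receivers and calls a small deliver(skb, rcv) helper for every receiver whose filter matches, so a single frame can fan out to several sockets. Here is a compact userspace sketch of that match-then-deliver loop, using plain arrays instead of the kernel's receiver lists; it is not the af_can code itself.

    #include <stdio.h>

    struct frame {
        unsigned int can_id;
    };

    struct receiver {
        unsigned int can_id;    /* filter: the id this receiver wants */
        unsigned int can_mask;  /* which id bits the filter cares about */
        const char  *name;
    };

    /* Hand one matched frame to one receiver (a socket in the kernel). */
    static void deliver(const struct frame *f, const struct receiver *rcv)
    {
        printf("frame 0x%x delivered to %s\n", f->can_id, rcv->name);
    }

    /* Fan the frame out to every receiver whose masked id bits agree. */
    static int rcv_filter(const struct frame *f,
                          const struct receiver *rcvs, int nr_rcvs)
    {
        int matches = 0;

        for (int i = 0; i < nr_rcvs; i++) {
            if (((f->can_id ^ rcvs[i].can_id) & rcvs[i].can_mask) == 0) {
                deliver(f, &rcvs[i]);
                matches++;
            }
        }
        return matches;
    }

    int main(void)
    {
        struct receiver rcvs[] = {
            { .can_id = 0x123, .can_mask = 0x7FF, .name = "socket A" },
            { .can_id = 0x100, .can_mask = 0x700, .name = "socket B" },
        };
        struct frame f = { .can_id = 0x123 };

        return rcv_filter(&f, rcvs, 2) ? 0 : 1;
    }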
/linux/arch/um/kernel/
signal.c
102 /* Whee! Actually deliver the signal. */ in do_signal()
124 * if there's no signal to deliver, we just put the saved sigmask in do_signal()
/linux/include/linux/
mii_timestamper.h
20 * the MII time stamping device promises to deliver it using
26 * device promises to deliver it using skb_complete_tx_timestamp()
/linux/Documentation/devicetree/bindings/timer/
arm,arch_timer.yaml
17 The per-core architected timer is attached to a GIC to deliver its
19 to deliver its interrupts via SPIs.
/linux/net/ipv4/
tcp_rate.c
5 * can currently deliver outbound data packets for this flow. At a high
46 * the full time the network needs to deliver all in-flight in tcp_rate_skb_sent()
49 * able to deliver those packets completely in the sampling in tcp_rate_skb_sent()
/linux/include/xen/
events.h
94 irq will be disabled so it won't deliver an interrupt. */
98 * the irq will be disabled so it won't deliver an interrupt. */
/linux/arch/powerpc/kvm/
book3s_xics.c
76 XICS_DBG("ics deliver %#x (level: %d)\n", irq, level); in ics_deliver_irq()
144 bool deliver; in write_xive() local
153 deliver = false; in write_xive()
157 deliver = true; in write_xive()
163 return deliver; in write_xive()
342 XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority, in icp_try_to_deliver()
350 /* See if we can deliver */ in icp_try_to_deliver()
365 * If we failed to deliver we set need_resend in icp_try_to_deliver()
481 * We failed to deliver the interrupt we need to set the in icp_deliver_irq()
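The XICS hits sketch the delivery decision: an interrupt is delivered only if its priority beats what the target server is currently handling, and a failed attempt sets a resend flag so the source is retried later. The following is a simplified model of that check, where a numerically lower value is the more urgent priority; the types and names are stand-ins, not the KVM XICS code.

    #include <stdbool.h>
    #include <stdio.h>

    struct icp_state {
        unsigned char pending_pri;   /* priority currently held by this server */
        unsigned int  pending_irq;
    };

    /* Try to deliver irq at the given priority; on failure flag a resend. */
    static bool icp_try_deliver(struct icp_state *icp, unsigned int irq,
                                unsigned char priority, bool *need_resend)
    {
        if (priority < icp->pending_pri) {     /* lower number = more urgent */
            icp->pending_pri = priority;
            icp->pending_irq = irq;
            *need_resend = false;
            return true;
        }
        *need_resend = true;                   /* retry when the server frees up */
        return false;
    }

    int main(void)
    {
        struct icp_state icp = { .pending_pri = 0xFF, .pending_irq = 0 };
        bool resend;

        printf("irq 0x10 prio 5: %s\n",
               icp_try_deliver(&icp, 0x10, 5, &resend) ? "delivered" : "deferred");
        printf("irq 0x11 prio 9: %s\n",
               icp_try_deliver(&icp, 0x11, 9, &resend) ? "delivered" : "deferred");
        return 0;
    }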
/linux/arch/riscv/kernel/
signal.c
372 pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n", in setup_rt_frame()
428 * Get the signal to deliver. When running under ptrace, at this point in arch_do_signal_or_restart()
446 /* Actually deliver the signal */ in arch_do_signal_or_restart()
459 * If there is no signal to deliver, we just put the saved in arch_do_signal_or_restart()
/linux/drivers/net/ethernet/sfc/
tc.h
53 * @deliver: used to indicate a deliver action should take place
74 u16 deliver:1; member
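In the sfc TC header, boolean action flags such as deliver are packed as single-bit fields of a u16. A one-struct illustration of that layout follows; the fields other than deliver are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    /* Action flags packed as one-bit fields, in the spirit of the tc.h hit. */
    struct action_set {
        uint16_t decap    : 1;
        uint16_t vlan_pop : 1;
        uint16_t deliver  : 1;   /* a deliver action should take place */
    };

    int main(void)
    {
        struct action_set act = { .deliver = 1 };

        printf("deliver=%d sizeof=%zu\n", act.deliver, sizeof(act));
        return 0;
    }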
/linux/arch/microblaze/kernel/
signal.c
223 pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n", in setup_rt_frame()
291 /* Whee! Actually deliver the signal. */ in do_signal()
302 * If there's no signal to deliver, we just put the saved sigmask in do_signal()
/linux/net/tipc/
bcast.c
310 /* tipc_mcast_send_sync - deliver a dummy message with SYN bit
368 /* tipc_mcast_xmit - deliver message to indicated destination nodes
432 /* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
826 /* Deliver non-SYN message from other link, otherwise queue it */ in tipc_mcast_filter_msg()
852 /* Deliver subsequent non-SYN messages from same peer */ in tipc_mcast_filter_msg()
/linux/sound/core/seq/
seq_ump_convert.c
361 /* convert UMP packet from MIDI 1.0 to MIDI 2.0 and deliver it */
431 /* convert UMP packet from MIDI 2.0 to MIDI 1.0 and deliver it */
503 /* convert UMP to a legacy ALSA seq event and deliver it */
550 /* Replace UMP group field with the destination and deliver */
581 /* Convert from UMP packet and deliver */
1131 /* Convert ALSA seq event to UMP MIDI 1.0 and deliver it */
1165 /* Convert ALSA seq event to UMP MIDI 2.0 and deliver it */
1213 /* Convert sysex var event to UMP sysex7 packets and deliver them */
1272 /* Convert to UMP packet and deliver */
