Searched full:call (Results 1 – 25 of 5479) sorted by relevance

/linux/fs/afs/
rxrpc.c
29 /* asynchronous incoming call initial processing */
135 * Allocate a call.
141 struct afs_call *call; in afs_alloc_call() local
144 call = kzalloc(sizeof(*call), gfp); in afs_alloc_call()
145 if (!call) in afs_alloc_call()
148 call->type = type; in afs_alloc_call()
149 call->net = net; in afs_alloc_call()
150 call->debug_id = atomic_inc_return(&rxrpc_debug_id); in afs_alloc_call()
151 refcount_set(&call->ref, 1); in afs_alloc_call()
152 INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call); in afs_alloc_call()
[all …]
cmservice.c
105 * route an incoming cache manager call
108 bool afs_cm_incoming_call(struct afs_call *call) in afs_cm_incoming_call() argument
110 _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID); in afs_cm_incoming_call()
112 switch (call->operation_ID) { in afs_cm_incoming_call()
114 call->type = &afs_SRXCBCallBack; in afs_cm_incoming_call()
117 call->type = &afs_SRXCBInitCallBackState; in afs_cm_incoming_call()
120 call->type = &afs_SRXCBInitCallBackState3; in afs_cm_incoming_call()
123 call->type = &afs_SRXCBProbe; in afs_cm_incoming_call()
126 call->type = &afs_SRXCBProbeUuid; in afs_cm_incoming_call()
129 call->type = &afs_SRXCBTellMeAboutYourself; in afs_cm_incoming_call()
[all …]
vlclient.c
15 * Deliver reply data to a VL.GetEntryByNameU call.
17 static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) in afs_deliver_vl_get_entry_by_name_u() argument
26 ret = afs_transfer_reply(call); in afs_deliver_vl_get_entry_by_name_u()
31 uvldb = call->buffer; in afs_deliver_vl_get_entry_by_name_u()
32 entry = call->ret_vldb; in afs_deliver_vl_get_entry_by_name_u()
115 struct afs_call *call; in afs_vl_get_entry_by_name_u() local
129 call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByNameU, reqsz, in afs_vl_get_entry_by_name_u()
131 if (!call) { in afs_vl_get_entry_by_name_u()
136 call->key = vc->key; in afs_vl_get_entry_by_name_u()
137 call->ret_vldb = entry; in afs_vl_get_entry_by_name_u()
[all …]
fsclient.c
55 struct afs_call *call, in xdr_decode_AFSFetchStatus() argument
60 bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus); in xdr_decode_AFSFetchStatus()
128 afs_protocol_error(call, afs_eproto_bad_status); in xdr_decode_AFSFetchStatus()
132 static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry) in xdr_decode_expiry() argument
134 return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry; in xdr_decode_expiry()
138 struct afs_call *call, in xdr_decode_AFSCallBack() argument
145 cb->expires_at = xdr_decode_expiry(call, ntohl(*bp++)); in xdr_decode_AFSCallBack()
238 static int afs_deliver_fs_fetch_status(struct afs_call *call) in afs_deliver_fs_fetch_status() argument
240 struct afs_operation *op = call->op; in afs_deliver_fs_fetch_status()
245 ret = afs_transfer_reply(call); in afs_deliver_fs_fetch_status()
[all …]
yfsclient.c
138 static void yfs_check_req(struct afs_call *call, __be32 *bp) in yfs_check_req() argument
140 size_t len = (void *)bp - call->request; in yfs_check_req()
142 if (len > call->request_size) in yfs_check_req()
144 call->type->name, len, call->request_size); in yfs_check_req()
145 else if (len < call->request_size) in yfs_check_req()
147 call->type->name, len, call->request_size); in yfs_check_req()
174 struct afs_call *call, in xdr_decode_YFSFetchStatus() argument
220 afs_protocol_error(call, afs_eproto_bad_status); in xdr_decode_YFSFetchStatus()
228 struct afs_call *call, in xdr_decode_YFSCallBack() argument
235 cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100); in xdr_decode_YFSCallBack()
[all …]
/linux/net/rxrpc/
call_object.c
2 /* RxRPC individual remote procedure call handling
45 void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what) in rxrpc_poke_call() argument
47 struct rxrpc_local *local = call->local; in rxrpc_poke_call()
50 if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) { in rxrpc_poke_call()
52 busy = !list_empty(&call->attend_link); in rxrpc_poke_call()
53 trace_rxrpc_poke_call(call, busy, what); in rxrpc_poke_call()
54 if (!busy && !rxrpc_try_get_call(call, rxrpc_call_get_poke)) in rxrpc_poke_call()
57 list_add_tail(&call->attend_link, &local->call_attend_q); in rxrpc_poke_call()
67 struct rxrpc_call *call = from_timer(call, t, timer); in rxrpc_call_timer_expired() local
69 _enter("%d", call->debug_id); in rxrpc_call_timer_expired()
[all …]
input.c
23 static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq, in rxrpc_proto_abort() argument
26 rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why); in rxrpc_proto_abort()
32 static void rxrpc_congestion_management(struct rxrpc_call *call, in rxrpc_congestion_management() argument
36 summary->in_flight = rxrpc_tx_in_flight(call); in rxrpc_congestion_management()
38 if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) { in rxrpc_congestion_management()
40 call->cong_ssthresh = umax(summary->in_flight / 2, 2); in rxrpc_congestion_management()
41 call->cong_cwnd = 1; in rxrpc_congestion_management()
42 if (call->cong_cwnd >= call->cong_ssthresh && in rxrpc_congestion_management()
43 call->cong_ca_state == RXRPC_CA_SLOW_START) { in rxrpc_congestion_management()
44 call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE; in rxrpc_congestion_management()
[all …]
sendmsg.c
23 bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error, in rxrpc_propose_abort() argument
26 _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why); in rxrpc_propose_abort()
28 if (!call->send_abort && !rxrpc_call_is_complete(call)) { in rxrpc_propose_abort()
29 call->send_abort_why = why; in rxrpc_propose_abort()
30 call->send_abort_err = error; in rxrpc_propose_abort()
31 call->send_abort_seq = 0; in rxrpc_propose_abort()
32 trace_rxrpc_abort_call(call, abort_code); in rxrpc_propose_abort()
34 smp_store_release(&call->send_abort, abort_code); in rxrpc_propose_abort()
35 rxrpc_poke_call(call, rxrpc_call_poke_abort); in rxrpc_propose_abort()
43 * Wait for a call to become connected. Interruption here doesn't cause the
[all …]
rxperf.c
65 int (*deliver)(struct rxperf_call *call);
74 static int rxperf_deliver_param_block(struct rxperf_call *call);
75 static int rxperf_deliver_request(struct rxperf_call *call);
76 static int rxperf_process_call(struct rxperf_call *call);
82 static inline void rxperf_set_call_state(struct rxperf_call *call, in rxperf_set_call_state() argument
85 call->state = to; in rxperf_set_call_state()
88 static inline void rxperf_set_call_complete(struct rxperf_call *call, in rxperf_set_call_complete() argument
91 if (call->state != RXPERF_CALL_COMPLETE) { in rxperf_set_call_complete()
92 call->abort_code = remote_abort; in rxperf_set_call_complete()
93 call->error = error; in rxperf_set_call_complete()
[all …]
output.c
48 static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret) in rxrpc_tx_backoff() argument
51 if (call->tx_backoff < 1000) in rxrpc_tx_backoff()
52 call->tx_backoff += 100; in rxrpc_tx_backoff()
54 call->tx_backoff = 0; in rxrpc_tx_backoff()
60 * lets the far side know we're still interested in this call and helps keep
66 static void rxrpc_set_keepalive(struct rxrpc_call *call, ktime_t now) in rxrpc_set_keepalive() argument
68 ktime_t delay = ms_to_ktime(READ_ONCE(call->next_rx_timo) / 6); in rxrpc_set_keepalive()
70 call->keepalive_at = ktime_add(ktime_get_real(), delay); in rxrpc_set_keepalive()
71 trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_keepalive); in rxrpc_set_keepalive()
77 static int rxrpc_alloc_ack(struct rxrpc_call *call, size_t sack_size) in rxrpc_alloc_ack() argument
[all …]
call_state.c
2 /* Call state changing functions.
11 * Transition a call to the complete state.
13 bool rxrpc_set_call_completion(struct rxrpc_call *call, in rxrpc_set_call_completion() argument
18 if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE) in rxrpc_set_call_completion()
21 call->abort_code = abort_code; in rxrpc_set_call_completion()
22 call->error = error; in rxrpc_set_call_completion()
23 call->completion = compl; in rxrpc_set_call_completion()
25 rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE); in rxrpc_set_call_completion()
26 trace_rxrpc_call_complete(call); in rxrpc_set_call_completion()
27 wake_up(&call->waitq); in rxrpc_set_call_completion()
[all …]
/linux/include/trace/events/
rxrpc.h
51 EM(rxkad_abort_resp_call_ctr, "rxkad-resp-call-ctr") \
52 EM(rxkad_abort_resp_call_state, "rxkad-resp-call-state") \
72 EM(rxrpc_abort_call_improper_term, "call-improper-term") \
73 EM(rxrpc_abort_call_reset, "call-reset") \
74 EM(rxrpc_abort_call_sendmsg, "call-sendmsg") \
75 EM(rxrpc_abort_call_sock_release, "call-sock-rel") \
76 EM(rxrpc_abort_call_sock_release_tba, "call-sock-rel-tba") \
77 EM(rxrpc_abort_call_timeout, "call-timeout") \
89 EM(rxrpc_badmsg_zero_call, "zero-call") \
101 EM(rxrpc_eproto_no_client_call, "no-cl-call") \
[all …]
/linux/include/trace/
trace_events.h
8 * struct trace_event_raw_<call> {
114 * struct trace_event_data_offsets_<call> {
128 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
129 struct trace_event_data_offsets_##call { \
157 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
160 * struct trace_event_raw_<call> *field; <-- defined in stage 1
169 * if (entry->type != event_<call>->event.type) {
177 * return trace_output_call(iter, <call>, <TP_printk> "\n");
203 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
205 trace_raw_output_##call(struct trace_iterator *iter, int flags, \
[all …]
trace_custom_events.h
62 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
63 struct trace_custom_event_data_offsets_##call { \
77 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
79 trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
84 struct trace_custom_event_raw_##call *field; \
97 static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
98 .trace = trace_custom_raw_output_##call, \
108 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \ argument
109 static struct trace_event_fields trace_custom_event_fields_##call[] = { \
120 #define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
[all …]
bpf_probe.h
45 #define __BPF_DECLARE_TRACE(call, proto, args) \ argument
47 __bpf_trace_##call(void *__data, proto) \
53 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ argument
54 __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args))
56 #define __BPF_DECLARE_TRACE_SYSCALL(call, proto, args) \ argument
58 __bpf_trace_##call(void *__data, proto) \
67 #define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ argument
68 __BPF_DECLARE_TRACE_SYSCALL(call, PARAMS(proto), PARAMS(args))
75 #define __DEFINE_EVENT(template, call, proto, args, size) \ argument
76 static inline void bpf_test_probe_##call(void) \
[all …]
/linux/include/asm-generic/
syscall.h
3 * Access to user system call parameters and results
23 * syscall_get_nr - find what system call a task is executing
27 * If @task is executing a system call or is at system call
28 * tracing about to attempt one, returns the system call number.
29 * If @task is not executing a system call, i.e. it's blocked
33 * system call number can be meaningful. If the actual arch value
36 * It's only valid to call this when @task is known to be blocked.
41 * syscall_rollback - roll back registers after an aborted system call
42 * @task: task of interest, must be in system call exit tracing
[all …]
/linux/tools/
Makefile
67 $(call descend,power/$@)
70 $(call descend,power/$@)
73 $(call descend,$@)
76 $(call descend,$@)
79 $(call descend,lib/api)
82 $(call descend,include/nolibc)
85 $(call descend,include/nolibc,$(patsubst nolibc_%,%,$@))
96 $(call descend,sched_ext)
99 $(call descend,testing/$@)
102 $(call descend,lib/$@)
[all …]
/linux/Documentation/networking/
rxrpc.rst
64 (3) Retention of the reusable bits of the transport system set up for one call
122 (#) Each RxRPC operation is a "call". A connection may make up to four
147 explicitly sequenced per call.
158 (#) A call is complete when the request has been sent, the reply has been
162 (#) A call may be aborted by either end at any time up to its completion.
182 the last call currently using it has completed in case a new call is made
215 be used in all other sendmsgs or recvmsgs associated with that call. The
220 first sendmsg() of a call (struct msghdr::msg_name).
226 first sendmsg() of the call must specify the target address. The server's
229 (#) Once the application has received the last message associated with a call,
[all …]
/linux/include/linux/firmware/intel/
stratix10-smc.h
13 * This file defines the Secure Monitor Call (SMC) message protocol used for
29 * FAST call executes atomic operations, returns when the requested operation
31 STD call starts an operation which can be preempted by a non-secure
32 * interrupt. The call can return before the requested operation has
51 * Return values in INTEL_SIP_SMC_* call
81 * Sync call used by service driver at EL1 to request the FPGA in EL3 to
84 * Call register usage:
101 * Async call used by service driver at EL1 to provide FPGA configuration data
104 * Call register usage:
127 * Sync call used by service driver at EL1 to track the completed write
[all …]
/linux/tools/perf/util/
thread-stack.h
3 * thread-stack.h: Synthesize a thread's stack using call / return events
24 * Call/Return flags.
26 * CALL_RETURN_NO_CALL: 'return' but no matching 'call'
27 * CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
28 * CALL_RETURN_NON_CALL: a branch but not a 'call' to the start of a different
38 * struct call_return - paired call/return information.
39 * @thread: thread in which call/return occurred
40 * @comm: comm in which call/return occurred
41 * @cp: call path
42 * @call_time: timestamp of call (if known)
[all …]
/linux/tools/testing/selftests/bpf/progs/
verifier_spin_lock.c
32 call %[bpf_map_lookup_elem]; \ in spin_lock_test1_success()
38 call %[bpf_spin_lock]; \ in spin_lock_test1_success()
42 call %[bpf_spin_unlock]; \ in spin_lock_test1_success()
65 call %[bpf_map_lookup_elem]; \ in lock_test2_direct_ld_st()
71 call %[bpf_spin_lock]; \ in lock_test2_direct_ld_st()
75 call %[bpf_spin_unlock]; \ in lock_test2_direct_ld_st()
99 call %[bpf_map_lookup_elem]; \ in __flag()
105 call %[bpf_spin_lock]; \ in __flag()
109 call %[bpf_spin_unlock]; \ in __flag()
133 call %[bpf_map_lookup_elem]; \ in __flag()
[all …]
verifier_subreg.c
27 call %[bpf_get_prandom_u32]; \ in add32_reg_zero_extend_check()
44 call %[bpf_get_prandom_u32]; \ in add32_imm_zero_extend_check()
56 call %[bpf_get_prandom_u32]; \ in add32_imm_zero_extend_check()
74 call %[bpf_get_prandom_u32]; \ in sub32_reg_zero_extend_check()
91 call %[bpf_get_prandom_u32]; \ in sub32_imm_zero_extend_check()
97 call %[bpf_get_prandom_u32]; \ in sub32_imm_zero_extend_check()
115 call %[bpf_get_prandom_u32]; \ in mul32_reg_zero_extend_check()
132 call %[bpf_get_prandom_u32]; \ in mul32_imm_zero_extend_check()
138 call %[bpf_get_prandom_u32]; \ in mul32_imm_zero_extend_check()
156 call %[bpf_get_prandom_u32]; \ in div32_reg_zero_extend_check()
[all …]
/linux/arch/x86/
Makefile_32.cpu
5 tune = $(call cc-option,-mtune=$(1),$(2))
8 align := -falign-functions=0 $(call cc-option,-falign-jumps=0) $(call cc-option,-falign-loops=0)
19 cflags-$(CONFIG_MPENTIUMII) += -march=i686 $(call tune,pentium2)
20 cflags-$(CONFIG_MPENTIUMIII) += -march=i686 $(call tune,pentium3)
21 cflags-$(CONFIG_MPENTIUMM) += -march=i686 $(call tune,pentium3)
22 cflags-$(CONFIG_MPENTIUM4) += -march=i686 $(call tune,pentium4)
27 cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
29 cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)
30 cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
31 cflags-$(CONFIG_MWINCHIP3D) += $(call cc-option,-march=winchip2,-march=i586)
[all …]
/linux/Documentation/process/
adding-syscalls.rst
4 Adding a New System Call
7 This document describes what's involved in adding a new system call to the
12 System Call Alternatives
15 The first thing to consider when adding a new system call is whether one of
43 :manpage:`fcntl(2)` is a multiplexing system call that hides a lot of complexity, so
49 with :manpage:`fcntl(2)`, this system call is a complicated multiplexor so
57 A new system call forms part of the API of the kernel, and has to be supported
70 system call. To make sure that userspace programs can safely use flags
72 flags, and reject the system call (with ``EINVAL``) if it does::
109 If your new system call allows userspace to refer to a kernel object, it
[all …]
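
The adding-syscalls.rst hits above mention checking for unknown flag bits and rejecting the system call with EINVAL so that new flags can be added in later kernels. As a minimal sketch of that convention (the syscall name, flag values, and SYSCALL_DEFINE2 wrapper below are illustrative assumptions, not the document's elided example):

    #include <linux/errno.h>
    #include <linux/syscalls.h>

    /* Hypothetical flag bits for an illustrative "xyzzy" syscall. */
    #define XYZZY_FLAG_A        0x01
    #define XYZZY_FLAG_B        0x02
    #define XYZZY_VALID_FLAGS   (XYZZY_FLAG_A | XYZZY_FLAG_B)

    SYSCALL_DEFINE2(xyzzy, int, arg, unsigned int, flags)
    {
            /* Reject any flag bit this kernel does not understand, so a
             * later kernel can assign it a meaning without changing the
             * behaviour seen by existing userspace. */
            if (flags & ~XYZZY_VALID_FLAGS)
                    return -EINVAL;

            /* ... the real operation would follow ... */
            return 0;
    }

Userspace can then probe for a newer flag by issuing the call once and treating EINVAL as "not supported on this kernel".
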
/linux/scripts/
Makefile.compiler
4 # Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-)
19 # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
32 # Usage: aflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
34 as-option = $(call try-run,\
38 # Usage: aflags-y += $(call as-instr,instr,option1,option2)
40 as-instr = $(call try-run,\
44 # Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
45 __cc-option = $(call try-run,\
49 # Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
51 cc-option = $(call __cc-option, $(CC),\
[all …]
