Lines Matching +full:part +full:- +full:number

6  * Copyright (c) 2004-2009 Silicon Graphics, Inc.  All Rights Reserved.
12 * This is the part of XPC that manages the channels and sends/receives messages across them to/from other partitions.
31 lockdep_assert_held(&ch->lock); in xpc_process_connect()
33 if (!(ch->flags & XPC_C_OPENREQUEST) || in xpc_process_connect()
34 !(ch->flags & XPC_C_ROPENREQUEST)) { in xpc_process_connect()
38 DBUG_ON(!(ch->flags & XPC_C_CONNECTING)); in xpc_process_connect()
40 if (!(ch->flags & XPC_C_SETUP)) { in xpc_process_connect()
41 spin_unlock_irqrestore(&ch->lock, *irq_flags); in xpc_process_connect()
43 spin_lock_irqsave(&ch->lock, *irq_flags); in xpc_process_connect()
48 ch->flags |= XPC_C_SETUP; in xpc_process_connect()
50 if (ch->flags & XPC_C_DISCONNECTING) in xpc_process_connect()
54 if (!(ch->flags & XPC_C_OPENREPLY)) { in xpc_process_connect()
55 ch->flags |= XPC_C_OPENREPLY; in xpc_process_connect()
59 if (!(ch->flags & XPC_C_ROPENREPLY)) in xpc_process_connect()
62 if (!(ch->flags & XPC_C_OPENCOMPLETE)) { in xpc_process_connect()
63 ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED); in xpc_process_connect()
67 if (!(ch->flags & XPC_C_ROPENCOMPLETE)) in xpc_process_connect()
71 ch->number, ch->partid); in xpc_process_connect()
73 ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ in xpc_process_connect()
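For orientation, the xpc_process_connect() fragments above are driving a symmetric open handshake: each side must request, reply to, and complete the open before the channel is treated as connected. A minimal sketch of the condition being converged on, assuming the XPC_C_* flag definitions from xpc.h (the helper name itself is hypothetical, not code from this file):

	/* Hypothetical restatement of the open handshake tracked in ch->flags. */
	static bool xpc_open_handshake_done(u32 flags)
	{
		return (flags & XPC_C_OPENREQUEST)  && (flags & XPC_C_ROPENREQUEST) &&
		       (flags & XPC_C_OPENREPLY)    && (flags & XPC_C_ROPENREPLY)   &&
		       (flags & XPC_C_OPENCOMPLETE) && (flags & XPC_C_ROPENCOMPLETE);
	}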
82 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_process_disconnect() local
83 u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED); in xpc_process_disconnect()
85 lockdep_assert_held(&ch->lock); in xpc_process_disconnect()
87 if (!(ch->flags & XPC_C_DISCONNECTING)) in xpc_process_disconnect()
90 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); in xpc_process_disconnect()
94 if (atomic_read(&ch->kthreads_assigned) > 0 || in xpc_process_disconnect()
95 atomic_read(&ch->references) > 0) { in xpc_process_disconnect()
98 DBUG_ON((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && in xpc_process_disconnect()
99 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE)); in xpc_process_disconnect()
101 if (part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
103 if (xpc_arch_ops.partition_engaged(ch->partid)) in xpc_process_disconnect()
110 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) in xpc_process_disconnect()
113 if (!(ch->flags & XPC_C_CLOSEREPLY)) { in xpc_process_disconnect()
114 ch->flags |= XPC_C_CLOSEREPLY; in xpc_process_disconnect()
118 if (!(ch->flags & XPC_C_RCLOSEREPLY)) in xpc_process_disconnect()
123 if (atomic_read(&ch->n_to_notify) > 0) { in xpc_process_disconnect()
124 /* we do callout while holding ch->lock, callout can't block */ in xpc_process_disconnect()
130 if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { in xpc_process_disconnect()
131 spin_unlock_irqrestore(&ch->lock, *irq_flags); in xpc_process_disconnect()
133 spin_lock_irqsave(&ch->lock, *irq_flags); in xpc_process_disconnect()
136 DBUG_ON(atomic_read(&ch->n_to_notify) != 0); in xpc_process_disconnect()
141 ch->func = NULL; in xpc_process_disconnect()
142 ch->key = NULL; in xpc_process_disconnect()
143 ch->entry_size = 0; in xpc_process_disconnect()
144 ch->local_nentries = 0; in xpc_process_disconnect()
145 ch->remote_nentries = 0; in xpc_process_disconnect()
146 ch->kthreads_assigned_limit = 0; in xpc_process_disconnect()
147 ch->kthreads_idle_limit = 0; in xpc_process_disconnect()
155 ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); in xpc_process_disconnect()
157 atomic_dec(&part->nchannels_active); in xpc_process_disconnect()
161 "reason=%d\n", ch->number, ch->partid, ch->reason); in xpc_process_disconnect()
164 if (ch->flags & XPC_C_WDISCONNECT) { in xpc_process_disconnect()
165 /* we won't lose the CPU since we're holding ch->lock */ in xpc_process_disconnect()
166 complete(&ch->wdisconnect_wait); in xpc_process_disconnect()
167 } else if (ch->delayed_chctl_flags) { in xpc_process_disconnect()
168 if (part->act_state != XPC_P_AS_DEACTIVATING) { in xpc_process_disconnect()
170 spin_lock(&part->chctl_lock); in xpc_process_disconnect()
171 part->chctl.flags[ch->number] |= in xpc_process_disconnect()
172 ch->delayed_chctl_flags; in xpc_process_disconnect()
173 spin_unlock(&part->chctl_lock); in xpc_process_disconnect()
175 ch->delayed_chctl_flags = 0; in xpc_process_disconnect()
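The xpc_process_disconnect() fragments mirror the open path: teardown only finishes once both sides have requested and acknowledged the close, and no kthreads, references, or pending notify callouts remain. A hypothetical predicate (not kernel code) summarizing that gate, using only fields visible in the fragments above:

	static bool xpc_close_handshake_done(struct xpc_channel *ch)
	{
		return (ch->flags & XPC_C_CLOSEREQUEST)  &&
		       (ch->flags & XPC_C_RCLOSEREQUEST) &&
		       (ch->flags & XPC_C_CLOSEREPLY)    &&
		       (ch->flags & XPC_C_RCLOSEREPLY)   &&
		       atomic_read(&ch->kthreads_assigned) == 0 &&
		       atomic_read(&ch->references) == 0 &&
		       atomic_read(&ch->n_to_notify) == 0;
	}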
183 xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, in xpc_process_openclose_chctl_flags() argument
188 &part->remote_openclose_args[ch_number]; in xpc_process_openclose_chctl_flags()
189 struct xpc_channel *ch = &part->channels[ch_number]; in xpc_process_openclose_chctl_flags()
194 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_process_openclose_chctl_flags()
198 if ((ch->flags & XPC_C_DISCONNECTED) && in xpc_process_openclose_chctl_flags()
199 (ch->flags & XPC_C_WDISCONNECT)) { in xpc_process_openclose_chctl_flags()
204 ch->delayed_chctl_flags |= chctl_flags; in xpc_process_openclose_chctl_flags()
211 "from partid=%d, channel=%d\n", args->reason, in xpc_process_openclose_chctl_flags()
212 ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
220 if (ch->flags & XPC_C_RCLOSEREQUEST) { in xpc_process_openclose_chctl_flags()
221 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); in xpc_process_openclose_chctl_flags()
222 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); in xpc_process_openclose_chctl_flags()
223 DBUG_ON(!(ch->flags & XPC_C_CLOSEREPLY)); in xpc_process_openclose_chctl_flags()
224 DBUG_ON(ch->flags & XPC_C_RCLOSEREPLY); in xpc_process_openclose_chctl_flags()
228 ch->flags |= XPC_C_RCLOSEREPLY; in xpc_process_openclose_chctl_flags()
232 DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED)); in xpc_process_openclose_chctl_flags()
236 if (ch->flags & XPC_C_DISCONNECTED) { in xpc_process_openclose_chctl_flags()
238 if (part->chctl.flags[ch_number] & in xpc_process_openclose_chctl_flags()
241 DBUG_ON(ch->delayed_chctl_flags != 0); in xpc_process_openclose_chctl_flags()
242 spin_lock(&part->chctl_lock); in xpc_process_openclose_chctl_flags()
243 part->chctl.flags[ch_number] |= in xpc_process_openclose_chctl_flags()
245 spin_unlock(&part->chctl_lock); in xpc_process_openclose_chctl_flags()
251 ch->flags &= ~XPC_C_DISCONNECTED; in xpc_process_openclose_chctl_flags()
253 atomic_inc(&part->nchannels_active); in xpc_process_openclose_chctl_flags()
254 ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); in xpc_process_openclose_chctl_flags()
265 ch->flags |= XPC_C_RCLOSEREQUEST; in xpc_process_openclose_chctl_flags()
267 if (!(ch->flags & XPC_C_DISCONNECTING)) { in xpc_process_openclose_chctl_flags()
268 reason = args->reason; in xpc_process_openclose_chctl_flags()
286 "%d, channel=%d\n", ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
288 if (ch->flags & XPC_C_DISCONNECTED) { in xpc_process_openclose_chctl_flags()
289 DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); in xpc_process_openclose_chctl_flags()
293 DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); in xpc_process_openclose_chctl_flags()
295 if (!(ch->flags & XPC_C_RCLOSEREQUEST)) { in xpc_process_openclose_chctl_flags()
296 if (part->chctl.flags[ch_number] & in xpc_process_openclose_chctl_flags()
299 DBUG_ON(ch->delayed_chctl_flags != 0); in xpc_process_openclose_chctl_flags()
300 spin_lock(&part->chctl_lock); in xpc_process_openclose_chctl_flags()
301 part->chctl.flags[ch_number] |= in xpc_process_openclose_chctl_flags()
303 spin_unlock(&part->chctl_lock); in xpc_process_openclose_chctl_flags()
308 ch->flags |= XPC_C_RCLOSEREPLY; in xpc_process_openclose_chctl_flags()
310 if (ch->flags & XPC_C_CLOSEREPLY) { in xpc_process_openclose_chctl_flags()
320 "channel=%d\n", args->entry_size, args->local_nentries, in xpc_process_openclose_chctl_flags()
321 ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
323 if (part->act_state == XPC_P_AS_DEACTIVATING || in xpc_process_openclose_chctl_flags()
324 (ch->flags & XPC_C_ROPENREQUEST)) { in xpc_process_openclose_chctl_flags()
328 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { in xpc_process_openclose_chctl_flags()
329 ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; in xpc_process_openclose_chctl_flags()
332 DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | in xpc_process_openclose_chctl_flags()
334 DBUG_ON(ch->flags & (XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | in xpc_process_openclose_chctl_flags()
342 if (args->entry_size == 0 || args->local_nentries == 0) { in xpc_process_openclose_chctl_flags()
347 ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); in xpc_process_openclose_chctl_flags()
348 ch->remote_nentries = args->local_nentries; in xpc_process_openclose_chctl_flags()
350 if (ch->flags & XPC_C_OPENREQUEST) { in xpc_process_openclose_chctl_flags()
351 if (args->entry_size != ch->entry_size) { in xpc_process_openclose_chctl_flags()
357 ch->entry_size = args->entry_size; in xpc_process_openclose_chctl_flags()
360 ch->flags &= ~XPC_C_DISCONNECTED; in xpc_process_openclose_chctl_flags()
362 atomic_inc(&part->nchannels_active); in xpc_process_openclose_chctl_flags()
373 args->local_msgqueue_pa, args->local_nentries, in xpc_process_openclose_chctl_flags()
374 args->remote_nentries, ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
376 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) in xpc_process_openclose_chctl_flags()
379 if (!(ch->flags & XPC_C_OPENREQUEST)) { in xpc_process_openclose_chctl_flags()
385 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); in xpc_process_openclose_chctl_flags()
386 DBUG_ON(ch->flags & XPC_C_CONNECTED); in xpc_process_openclose_chctl_flags()
395 DBUG_ON(args->local_msgqueue_pa == 0); in xpc_process_openclose_chctl_flags()
396 DBUG_ON(args->local_nentries == 0); in xpc_process_openclose_chctl_flags()
397 DBUG_ON(args->remote_nentries == 0); in xpc_process_openclose_chctl_flags()
400 args->local_msgqueue_pa); in xpc_process_openclose_chctl_flags()
405 ch->flags |= XPC_C_ROPENREPLY; in xpc_process_openclose_chctl_flags()
407 if (args->local_nentries < ch->remote_nentries) { in xpc_process_openclose_chctl_flags()
411 args->local_nentries, ch->remote_nentries, in xpc_process_openclose_chctl_flags()
412 ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
414 ch->remote_nentries = args->local_nentries; in xpc_process_openclose_chctl_flags()
416 if (args->remote_nentries < ch->local_nentries) { in xpc_process_openclose_chctl_flags()
420 args->remote_nentries, ch->local_nentries, in xpc_process_openclose_chctl_flags()
421 ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
423 ch->local_nentries = args->remote_nentries; in xpc_process_openclose_chctl_flags()
432 "partid=%d, channel=%d\n", ch->partid, ch->number); in xpc_process_openclose_chctl_flags()
434 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) in xpc_process_openclose_chctl_flags()
437 if (!(ch->flags & XPC_C_OPENREQUEST) || in xpc_process_openclose_chctl_flags()
438 !(ch->flags & XPC_C_OPENREPLY)) { in xpc_process_openclose_chctl_flags()
444 DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); in xpc_process_openclose_chctl_flags()
445 DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY)); in xpc_process_openclose_chctl_flags()
446 DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); in xpc_process_openclose_chctl_flags()
448 ch->flags |= XPC_C_ROPENCOMPLETE; in xpc_process_openclose_chctl_flags()
455 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_process_openclose_chctl_flags()
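The cases handled in xpc_process_openclose_chctl_flags() arrive as bits in chctl_flags. A rough sketch of the dispatch, assuming the XPC_CHCTL_* bit names from xpc.h (the helper is illustrative only; comments summarize the fragments rather than quote them):

	static void xpc_describe_openclose_flags(u8 chctl_flags)
	{
		if (chctl_flags & XPC_CHCTL_CLOSEREQUEST)
			pr_debug("remote requested close (may carry a reason)\n");
		if (chctl_flags & XPC_CHCTL_CLOSEREPLY)
			pr_debug("remote acknowledged our close request\n");
		if (chctl_flags & XPC_CHCTL_OPENREQUEST)
			pr_debug("remote requested open (entry_size, nentries)\n");
		if (chctl_flags & XPC_CHCTL_OPENREPLY)
			pr_debug("remote accepted open (msgqueue PA, nentries)\n");
		if (chctl_flags & XPC_CHCTL_OPENCOMPLETE)
			pr_debug("remote finished opening the channel\n");
	}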
468 struct xpc_registration *registration = &xpc_registrations[ch->number]; in xpc_connect_channel()
470 if (mutex_trylock(&registration->mutex) == 0) in xpc_connect_channel()
473 if (!XPC_CHANNEL_REGISTERED(ch->number)) { in xpc_connect_channel()
474 mutex_unlock(&registration->mutex); in xpc_connect_channel()
478 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_connect_channel()
480 DBUG_ON(ch->flags & XPC_C_CONNECTED); in xpc_connect_channel()
481 DBUG_ON(ch->flags & XPC_C_OPENREQUEST); in xpc_connect_channel()
483 if (ch->flags & XPC_C_DISCONNECTING) { in xpc_connect_channel()
484 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_connect_channel()
485 mutex_unlock(&registration->mutex); in xpc_connect_channel()
486 return ch->reason; in xpc_connect_channel()
491 ch->kthreads_assigned_limit = registration->assigned_limit; in xpc_connect_channel()
492 ch->kthreads_idle_limit = registration->idle_limit; in xpc_connect_channel()
493 DBUG_ON(atomic_read(&ch->kthreads_assigned) != 0); in xpc_connect_channel()
494 DBUG_ON(atomic_read(&ch->kthreads_idle) != 0); in xpc_connect_channel()
495 DBUG_ON(atomic_read(&ch->kthreads_active) != 0); in xpc_connect_channel()
497 ch->func = registration->func; in xpc_connect_channel()
498 DBUG_ON(registration->func == NULL); in xpc_connect_channel()
499 ch->key = registration->key; in xpc_connect_channel()
501 ch->local_nentries = registration->nentries; in xpc_connect_channel()
503 if (ch->flags & XPC_C_ROPENREQUEST) { in xpc_connect_channel()
504 if (registration->entry_size != ch->entry_size) { in xpc_connect_channel()
511 * done with the part that required the registration in xpc_connect_channel()
516 mutex_unlock(&registration->mutex); in xpc_connect_channel()
519 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_connect_channel()
523 ch->entry_size = registration->entry_size; in xpc_connect_channel()
526 ch->flags &= ~XPC_C_DISCONNECTED; in xpc_connect_channel()
528 atomic_inc(&xpc_partitions[ch->partid].nchannels_active); in xpc_connect_channel()
531 mutex_unlock(&registration->mutex); in xpc_connect_channel()
535 ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); in xpc_connect_channel()
540 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_connect_channel()
546 xpc_process_sent_chctl_flags(struct xpc_partition *part) in xpc_process_sent_chctl_flags() argument
554 chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part); in xpc_process_sent_chctl_flags()
563 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { in xpc_process_sent_chctl_flags()
564 ch = &part->channels[ch_number]; in xpc_process_sent_chctl_flags()
572 xpc_process_openclose_chctl_flags(part, ch_number, in xpc_process_sent_chctl_flags()
576 ch_flags = ch->flags; /* need an atomic snapshot of flags */ in xpc_process_sent_chctl_flags()
579 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_process_sent_chctl_flags()
581 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_process_sent_chctl_flags()
585 if (part->act_state == XPC_P_AS_DEACTIVATING) in xpc_process_sent_chctl_flags()
603 xpc_arch_ops.process_msg_chctl_flags(part, ch_number); in xpc_process_sent_chctl_flags()
617 xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) in xpc_partition_going_down() argument
624 XPC_PARTID(part), reason); in xpc_partition_going_down()
626 if (!xpc_part_ref(part)) { in xpc_partition_going_down()
633 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { in xpc_partition_going_down()
634 ch = &part->channels[ch_number]; in xpc_partition_going_down()
637 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_partition_going_down()
641 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_partition_going_down()
645 xpc_wakeup_channel_mgr(part); in xpc_partition_going_down()
647 xpc_part_deref(part); in xpc_partition_going_down()
658 struct xpc_partition *part; in xpc_initiate_connect() local
663 part = &xpc_partitions[partid]; in xpc_initiate_connect()
665 if (xpc_part_ref(part)) { in xpc_initiate_connect()
670 xpc_wakeup_channel_mgr(part); in xpc_initiate_connect()
671 xpc_part_deref(part); in xpc_initiate_connect()
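xpc_initiate_connect() above is the back end of the consumer-facing xpc_connect() wrapper in xp.h. A minimal caller sketch, modeled loosely on how xpnet registers its channel; the channel number, payload size, queue depth, and kthread limits below are placeholders, and my_channel_func is a hypothetical name:

	#include <linux/errno.h>
	#include "xp.h"		/* xpc_connect(), enum xp_retval */

	/* Hypothetical channel callback: XPC invokes it with xpConnected once
	 * the channel comes up, xpMsgReceived for delivered payloads, and a
	 * disconnect reason when the channel goes away.
	 */
	static void my_channel_func(enum xp_retval reason, short partid,
				    int ch_number, void *data, void *key)
	{
		/* dispatch on 'reason' here */
	}

	static int my_channel_register(void)
	{
		enum xp_retval ret;

		/* placeholder channel number, payload size, nentries, limits */
		ret = xpc_connect(0, my_channel_func, NULL, 128, 64, 4, 1);
		return (ret == xpSuccess) ? 0 : -EIO;
	}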
681 if (ch->func != NULL) { in xpc_connected_callout()
682 dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " in xpc_connected_callout()
683 "partid=%d, channel=%d\n", ch->partid, ch->number); in xpc_connected_callout()
685 ch->func(xpConnected, ch->partid, ch->number, in xpc_connected_callout()
686 (void *)(u64)ch->local_nentries, ch->key); in xpc_connected_callout()
688 dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " in xpc_connected_callout()
689 "partid=%d, channel=%d\n", ch->partid, ch->number); in xpc_connected_callout()
704 * ch_number - channel # to unregister.
711 struct xpc_partition *part; in xpc_initiate_disconnect() local
718 part = &xpc_partitions[partid]; in xpc_initiate_disconnect()
720 if (xpc_part_ref(part)) { in xpc_initiate_disconnect()
721 ch = &part->channels[ch_number]; in xpc_initiate_disconnect()
724 spin_lock_irqsave(&ch->lock, irq_flags); in xpc_initiate_disconnect()
726 if (!(ch->flags & XPC_C_DISCONNECTED)) { in xpc_initiate_disconnect()
727 ch->flags |= XPC_C_WDISCONNECT; in xpc_initiate_disconnect()
733 spin_unlock_irqrestore(&ch->lock, irq_flags); in xpc_initiate_disconnect()
736 xpc_part_deref(part); in xpc_initiate_disconnect()
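The matching teardown entry point from xp.h is xpc_disconnect(); it does not return until the channel has been disconnected from each partition, which is what the XPC_C_WDISCONNECT / wdisconnect_wait machinery above implements. Sketch with a placeholder channel number:

	static void my_channel_unregister(void)
	{
		xpc_disconnect(0);	/* placeholder channel number */
	}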
756 u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); in xpc_disconnect_channel()
758 lockdep_assert_held(&ch->lock); in xpc_disconnect_channel()
760 if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) in xpc_disconnect_channel()
763 DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED))); in xpc_disconnect_channel()
766 reason, line, ch->partid, ch->number); in xpc_disconnect_channel()
770 ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING); in xpc_disconnect_channel()
772 ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY | in xpc_disconnect_channel()
779 ch->flags |= XPC_C_WASCONNECTED; in xpc_disconnect_channel()
781 spin_unlock_irqrestore(&ch->lock, *irq_flags); in xpc_disconnect_channel()
784 if (atomic_read(&ch->kthreads_idle) > 0) { in xpc_disconnect_channel()
785 wake_up_all(&ch->idle_wq); in xpc_disconnect_channel()
787 } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && in xpc_disconnect_channel()
788 !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { in xpc_disconnect_channel()
794 if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) in xpc_disconnect_channel()
795 wake_up(&ch->msg_allocate_wq); in xpc_disconnect_channel()
797 spin_lock_irqsave(&ch->lock, *irq_flags); in xpc_disconnect_channel()
809 if (ch->func != NULL) { in xpc_disconnect_callout()
810 dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, " in xpc_disconnect_callout()
811 "channel=%d\n", reason, ch->partid, ch->number); in xpc_disconnect_callout()
813 ch->func(reason, ch->partid, ch->number, NULL, ch->key); in xpc_disconnect_callout()
815 dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, " in xpc_disconnect_callout()
816 "channel=%d\n", reason, ch->partid, ch->number); in xpc_disconnect_callout()
830 if (ch->flags & XPC_C_DISCONNECTING) { in xpc_allocate_msg_wait()
831 DBUG_ON(ch->reason == xpInterrupted); in xpc_allocate_msg_wait()
832 return ch->reason; in xpc_allocate_msg_wait()
835 atomic_inc(&ch->n_on_msg_allocate_wq); in xpc_allocate_msg_wait()
836 prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); in xpc_allocate_msg_wait()
838 finish_wait(&ch->msg_allocate_wq, &wait); in xpc_allocate_msg_wait()
839 atomic_dec(&ch->n_on_msg_allocate_wq); in xpc_allocate_msg_wait()
841 if (ch->flags & XPC_C_DISCONNECTING) { in xpc_allocate_msg_wait()
842 ret = ch->reason; in xpc_allocate_msg_wait()
843 DBUG_ON(ch->reason == xpInterrupted); in xpc_allocate_msg_wait()
865 * partid - ID of partition to which the channel is connected.
866 * ch_number - channel # to send message on.
867 * flags - see xp.h for valid flags.
868 * payload - pointer to the payload which is to be sent.
869 * payload_size - size of the payload in bytes.
875 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_initiate_send() local
882 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); in xpc_initiate_send()
885 if (xpc_part_ref(part)) { in xpc_initiate_send()
886 ret = xpc_arch_ops.send_payload(&part->channels[ch_number], in xpc_initiate_send()
888 xpc_part_deref(part); in xpc_initiate_send()
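From the consumer side this path is reached through xpc_send() in xp.h. A minimal sketch; partid, the channel number, and the payload size are placeholders, and XPC_WAIT / XPC_NOWAIT are the flag values defined in xp.h:

	#include "xp.h"		/* xpc_send(), XPC_WAIT, XPC_NOWAIT */

	static enum xp_retval my_send(short partid, void *payload, u16 size)
	{
		/* XPC_WAIT blocks (in xpc_allocate_msg_wait() above) until a
		 * message entry frees up; XPC_NOWAIT returns instead of waiting.
		 */
		return xpc_send(partid, 0 /* placeholder channel */, XPC_WAIT,
				payload, size);
	}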
905 * allows the sender to free up or re-use any buffers referenced by the
913 * partid - ID of partition to which the channel is connected.
914 * ch_number - channel # to send message on.
915 * flags - see xp.h for valid flags.
916 * payload - pointer to the payload which is to be sent.
917 * payload_size - size of the payload in bytes.
918 * func - function to call with asynchronous notification of message
919 * receipt. THIS FUNCTION MUST BE NON-BLOCKING.
920 * key - user-defined key to be passed to the function when it's called.
926 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_initiate_send_notify() local
933 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); in xpc_initiate_send_notify()
937 if (xpc_part_ref(part)) { in xpc_initiate_send_notify()
938 ret = xpc_arch_ops.send_payload(&part->channels[ch_number], in xpc_initiate_send_notify()
940 xpc_part_deref(part); in xpc_initiate_send_notify()
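The notify variant pairs the send with a completion callback, which must not block (per the doc comment above). A sketch with hypothetical names; xpMsgDelivered is the success reason defined in xp.h, and my_buffer_free is a stand-in for whatever the caller uses to release its per-send buffer:

	/* Hypothetical, non-blocking completion callback. */
	static void my_send_done(enum xp_retval reason, short partid,
				 int ch_number, void *key)
	{
		void *buffer = key;	/* per-send context passed below */

		if (reason != xpMsgDelivered)
			pr_debug("send failed, reason=%d\n", reason);

		my_buffer_free(buffer);	/* safe to release either way */
	}

	static enum xp_retval my_send_async(short partid, void *buffer, u16 size)
	{
		return xpc_send_notify(partid, 0 /* placeholder channel */,
				       XPC_NOWAIT, buffer, size,
				       my_send_done, buffer);
	}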
963 atomic_inc(&ch->kthreads_active); in xpc_deliver_payload()
965 if (ch->func != NULL) { in xpc_deliver_payload()
966 dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " in xpc_deliver_payload()
967 "partid=%d channel=%d\n", payload, ch->partid, in xpc_deliver_payload()
968 ch->number); in xpc_deliver_payload()
971 ch->func(xpMsgReceived, ch->partid, ch->number, payload, in xpc_deliver_payload()
972 ch->key); in xpc_deliver_payload()
974 dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " in xpc_deliver_payload()
975 "partid=%d channel=%d\n", payload, ch->partid, in xpc_deliver_payload()
976 ch->number); in xpc_deliver_payload()
979 atomic_dec(&ch->kthreads_active); in xpc_deliver_payload()
992 * partid - ID of partition to which the channel is connected.
993 * ch_number - channel # message received on.
994 * payload - pointer to the payload area allocated via
1000 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_initiate_received() local
1004 DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); in xpc_initiate_received()
1006 ch = &part->channels[ch_number]; in xpc_initiate_received()
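xpc_deliver_payload() above hands the payload to the channel callback but keeps ownership of the message entry; the consumer returns it through xpc_received() in xp.h. Extending the hypothetical callback from the earlier connect sketch (my_handle_payload is a stand-in for the consumer's own routine):

	static void my_channel_func(enum xp_retval reason, short partid,
				    int ch_number, void *data, void *key)
	{
		if (reason == xpMsgReceived) {
			my_handle_payload(data);	/* hypothetical consumer routine */
			/* give the entry back so XPC can reuse it */
			xpc_received(partid, ch_number, data);
		}
	}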