xref: /freebsd/contrib/llvm-project/libc/shared/rpc.h (revision 8f6978f83cc64a2e644d9bdf380a6996d3acdc4b)
1*8f6978f8SDimitry Andric //===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
2*8f6978f8SDimitry Andric //
3*8f6978f8SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*8f6978f8SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*8f6978f8SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*8f6978f8SDimitry Andric //
7*8f6978f8SDimitry Andric //===----------------------------------------------------------------------===//
8*8f6978f8SDimitry Andric //
9*8f6978f8SDimitry Andric // This file implements a remote procedure call mechanism to communicate between
10*8f6978f8SDimitry Andric // heterogeneous devices that can share an address space atomically. We provide
11*8f6978f8SDimitry Andric // a client and a server to facilitate the remote call. The client makes request
12*8f6978f8SDimitry Andric // to the server using a shared communication channel. We use separate atomic
13*8f6978f8SDimitry Andric // signals to indicate which side, the client or the server is in ownership of
14*8f6978f8SDimitry Andric // the buffer.
15*8f6978f8SDimitry Andric //
16*8f6978f8SDimitry Andric //===----------------------------------------------------------------------===//
17*8f6978f8SDimitry Andric 
18*8f6978f8SDimitry Andric #ifndef LLVM_LIBC_SHARED_RPC_H
19*8f6978f8SDimitry Andric #define LLVM_LIBC_SHARED_RPC_H
20*8f6978f8SDimitry Andric 
21*8f6978f8SDimitry Andric #include "rpc_util.h"
22*8f6978f8SDimitry Andric 
23*8f6978f8SDimitry Andric namespace rpc {
24*8f6978f8SDimitry Andric 
/// Use scoped atomic variants if they are available for the target. When the
/// compiler does not provide the '__scoped_atomic_*' builtins we fall back to
/// the plain '__atomic_*' forms and drop the scope argument, which
/// conservatively widens synchronization to the whole system.
#if !__has_builtin(__scoped_atomic_load_n)
#define __scoped_atomic_load_n(src, ord, scp) __atomic_load_n(src, ord)
#define __scoped_atomic_store_n(dst, src, ord, scp)                            \
  __atomic_store_n(dst, src, ord)
#define __scoped_atomic_fetch_or(src, val, ord, scp)                           \
  __atomic_fetch_or(src, val, ord)
#define __scoped_atomic_fetch_and(src, val, ord, scp)                          \
  __atomic_fetch_and(src, val, ord)
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#define __scoped_atomic_thread_fence(ord, scp) __atomic_thread_fence(ord)
#endif
38*8f6978f8SDimitry Andric 
/// Generic codes that can be used when implementing the server.
enum Status {
  RPC_SUCCESS = 0x0,           // The handler completed successfully.
  RPC_ERROR = 0x1000,          // A generic failure occurred in the handler.
  RPC_UNHANDLED_OPCODE = 0x1001, // No handler is registered for the opcode.
};
45*8f6978f8SDimitry Andric 
/// A fixed size channel used to communicate between the RPC client and server.
/// Each lane gets one of these per transaction; the first element doubles as a
/// length field in the streaming 'send_n' / 'recv_n' protocol.
struct Buffer {
  uint64_t data[8];
};
// The shared-memory layout math below depends on this exact size.
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");
51*8f6978f8SDimitry Andric 
/// The information associated with a packet. This indicates which operations to
/// perform and which threads are active in the slots.
struct Header {
  uint64_t mask;   // Bitmap of lanes participating in this transaction.
  uint32_t opcode; // Which operation the server should perform.
};
58*8f6978f8SDimitry Andric 
/// The maximum number of parallel ports that the RPC interface can support.
/// This bounds the size of the per-process lock bitfield below.
constexpr static uint64_t MAX_PORT_COUNT = 4096;
61*8f6978f8SDimitry Andric 
/// A common process used to synchronize communication between a client and a
/// server. The process contains a read-only inbox and a write-only outbox used
/// for signaling ownership of the shared buffer between both sides. We assign
/// ownership of the buffer to the client if the inbox and outbox bits match,
/// otherwise it is owned by the server.
///
/// This process is designed to allow the client and the server to exchange data
/// using a fixed size packet in a mostly arbitrary order using the 'send' and
/// 'recv' operations. The following restrictions to this scheme apply:
///   - The client will always start with a 'send' operation.
///   - The server will always start with a 'recv' operation.
///   - Every 'send' or 'recv' call is mirrored by the other process.
template <bool Invert> struct Process {
  RPC_ATTRS Process() = default;
  RPC_ATTRS Process(const Process &) = delete;
  RPC_ATTRS Process &operator=(const Process &) = delete;
  RPC_ATTRS Process(Process &&) = default;
  RPC_ATTRS Process &operator=(Process &&) = default;
  RPC_ATTRS ~Process() = default;

  // Number of concurrently usable ports (buffer slots) in this process.
  const uint32_t port_count = 0;
  // Mailbox written by the opposing process; read-only on this side.
  const uint32_t *const inbox = nullptr;
  // Mailbox written by this process; the opposing side only reads it.
  uint32_t *const outbox = nullptr;
  // Per-port header holding the opcode and the active lane mask.
  Header *const header = nullptr;
  // Linear array of packet buffers, 'lane_size' buffers per port.
  Buffer *const packet = nullptr;

  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  // Local lock bitfield, one bit per port; manipulated with device-scope
  // atomics in set_nth / clear_nth below.
  uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  /// Carve the shared allocation in \p buffer into the inbox, outbox, header,
  /// and packet regions using the offset helpers defined below.
  RPC_ATTRS Process(uint32_t port_count, void *buffer)
      : port_count(port_count), inbox(reinterpret_cast<uint32_t *>(
                                    advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<uint32_t *>(
            advance(buffer, outbox_offset(port_count)))),
        header(reinterpret_cast<Header *>(
            advance(buffer, header_offset(port_count)))),
        packet(reinterpret_cast<Buffer *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Allocate a memory buffer sufficient to store the following equivalent
  /// representation in memory.
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Header header[port_count];
  ///   Buffer packet[port_count][lane_size];
  /// };
  RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t port_count,
                                                      uint32_t lane_size) {
    return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
  }

  /// Retrieve the inbox state from memory shared between processes. The value
  /// is broadcast so that every lane in \p lane_mask observes the same bit.
  RPC_ATTRS uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Retrieve the outbox state from memory shared between processes.
  RPC_ATTRS uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading outbox followed by store of the inverted value.
  /// The outbox is write only by this warp and tracking the value locally is
  /// cheaper than calling load_outbox to get the value to store.
  RPC_ATTRS uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    // Release fence: all buffer writes must be visible before the other side
    // observes the flipped outbox bit and takes ownership.
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
    __scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
                            __MEMORY_SCOPE_SYSTEM);
    return inverted_outbox;
  }

  // Given the current outbox and inbox values, wait until the inbox changes
  // to indicate that this thread owns the buffer element.
  RPC_ATTRS void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                    uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
    // Acquire fence pairs with the release in invert_outbox: no buffer reads
    // may be hoisted above the ownership handoff.
    __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
  }

  /// The packet is a linearly allocated array of buffers used to communicate
  /// with the other process. This function returns the appropriate slot in this
  /// array such that the process can operate on an entire warp or wavefront.
  RPC_ATTRS Buffer *get_packet(uint32_t index, uint32_t lane_size) {
    return &packet[index * lane_size];
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
  RPC_ATTRS static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock at index. Return true on lock taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of rpc::get_lane_mask()
  /// The lock is held when the n-th bit of the lock bitfield is set.
  RPC_ATTRS bool try_lock(uint64_t lane_mask, uint32_t index) {
    // On amdgpu, test and set to the nth lock bit and a sync_lane would suffice
    // On volta, need to handle differences between the threads running and
    // the threads that were detected in the previous call to get_lane_mask()
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active which are not in lane mask which must not
    // succeed in taking the lock, as otherwise it will leak. This is handled
    // by making threads which are not in lane_mask or with 0, a no-op.
    uint32_t id = rpc::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or. Possibly at the same time.
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = rpc::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    // mask==0 -> unspecified before, discarded by ballot -> 0
    // mask==1 and before==0 (success), set zero by ballot -> 0
    // mask==1 and before==1 (failure), set one by ballot -> 1
    //
    // mask != packed implies at least one of the threads got the lock.
    // Atomic semantics of fetch_or mean at most one of the threads could get
    // the lock.

    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock acquire
    // failed. This conditional branch is expected to fold in the caller after
    // inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  RPC_ATTRS void unlock(uint64_t lane_mask, uint32_t index) {
    // Do not move any writes past the unlock.
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);

    // Use exactly one thread to clear the nth bit in the lock array. Must
    // restrict to a single thread to avoid one thread dropping the lock, then
    // an unrelated warp claiming the lock, then a second thread in this warp
    // dropping the lock again.
    clear_nth(lock, index, rpc::is_first_lane(lane_mask));
    rpc::sync_lane(lane_mask);
  }

  /// Number of bytes to allocate for an inbox or outbox.
  RPC_ATTRS static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(uint32_t);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  RPC_ATTRS static constexpr uint64_t buffer_bytes(uint32_t port_count,
                                                   uint32_t lane_size) {
    return port_count * lane_size * sizeof(Buffer);
  }

  /// Offset of the inbox in memory. This is the same as the outbox if inverted.
  RPC_ATTRS static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if inverted.
  RPC_ATTRS static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the header array in memory, after the inbox and outbox.
  RPC_ATTRS static constexpr uint64_t header_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Header));
  }

  /// Offset of the buffer containing the packets after the inbox and outbox.
  RPC_ATTRS static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(header_offset(port_count) + port_count * sizeof(Header),
                    alignof(Buffer));
  }

  /// Conditionally set the n-th bit in the atomic bitfield. Returns nonzero if
  /// the bit was already set before the operation.
  RPC_ATTRS static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
                                              bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_or(&bits[slot],
                                    static_cast<uint32_t>(cond) << bit,
                                    __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }

  /// Conditionally clear the n-th bit in the atomic bitfield. Returns nonzero
  /// if the bit was set before the operation.
  RPC_ATTRS static constexpr uint32_t clear_nth(uint32_t *bits, uint32_t index,
                                                bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_and(&bits[slot],
                                     ~0u ^ (static_cast<uint32_t>(cond) << bit),
                                     __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }
};
277*8f6978f8SDimitry Andric 
278*8f6978f8SDimitry Andric /// Invokes a function across every active buffer across the total lane size.
279*8f6978f8SDimitry Andric template <typename F>
invoke_rpc(F && fn,uint32_t lane_size,uint64_t lane_mask,Buffer * slot)280*8f6978f8SDimitry Andric RPC_ATTRS static void invoke_rpc(F &&fn, uint32_t lane_size, uint64_t lane_mask,
281*8f6978f8SDimitry Andric                                  Buffer *slot) {
282*8f6978f8SDimitry Andric   if constexpr (is_process_gpu()) {
283*8f6978f8SDimitry Andric     fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
284*8f6978f8SDimitry Andric   } else {
285*8f6978f8SDimitry Andric     for (uint32_t i = 0; i < lane_size; i += rpc::get_num_lanes())
286*8f6978f8SDimitry Andric       if (lane_mask & (1ul << i))
287*8f6978f8SDimitry Andric         fn(&slot[i], i);
288*8f6978f8SDimitry Andric   }
289*8f6978f8SDimitry Andric }
290*8f6978f8SDimitry Andric 
/// The port provides the interface to communicate between the multiple
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
template <bool T> struct Port {
  RPC_ATTRS Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
                 uint32_t index, uint32_t out)
      : process(process), lane_mask(lane_mask), lane_size(lane_size),
        index(index), out(out), receive(false), owns_buffer(true) {}
  RPC_ATTRS ~Port() = default;

private:
  // Ports are move-only; only the owning Client / Server (and the optional
  // wrapper) may construct or transfer them.
  RPC_ATTRS Port(const Port &) = delete;
  RPC_ATTRS Port &operator=(const Port &) = delete;
  RPC_ATTRS Port(Port &&) = default;
  RPC_ATTRS Port &operator=(Port &&) = default;

  friend struct Client;
  friend struct Server;
  friend class rpc::optional<Port<T>>;

public:
  template <typename U> RPC_ATTRS void recv(U use);
  template <typename F> RPC_ATTRS void send(F fill);
  template <typename F, typename U> RPC_ATTRS void send_and_recv(F fill, U use);
  template <typename W> RPC_ATTRS void recv_and_send(W work);
  RPC_ATTRS void send_n(const void *const *src, uint64_t *size);
  RPC_ATTRS void send_n(const void *src, uint64_t size);
  template <typename A>
  RPC_ATTRS void recv_n(void **dst, uint64_t *size, A &&alloc);

  /// Returns the opcode stored in this port's header slot.
  RPC_ATTRS uint32_t get_opcode() const { return process.header[index].opcode; }

  /// Returns the index of this port within the process.
  RPC_ATTRS uint32_t get_index() const { return index; }

  RPC_ATTRS void close() {
    // Wait for all lanes to finish using the port.
    rpc::sync_lane(lane_mask);

    // The server is passive, if it owns the buffer when it closes we need to
    // give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  Process<T> &process;  // Underlying shared-memory process.
  uint64_t lane_mask;   // Lanes participating in this transaction.
  uint32_t lane_size;   // Total number of lanes in the warp/wavefront.
  uint32_t index;       // Which port slot this object refers to.
  uint32_t out;         // Locally tracked copy of our outbox bit.
  bool receive;         // True if the last operation was a 'recv'.
  bool owns_buffer;     // True while this side owns the shared buffer.
};
345*8f6978f8SDimitry Andric 
/// The RPC client used to make requests to the server.
struct Client {
  RPC_ATTRS Client() = default;
  RPC_ATTRS Client(const Client &) = delete;
  RPC_ATTRS Client &operator=(const Client &) = delete;
  RPC_ATTRS ~Client() = default;

  /// Construct a client over shared memory laid out for \p port_count ports.
  RPC_ATTRS Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false>;
  /// Open a port with the given compile-time opcode; blocks until available.
  template <uint32_t opcode> RPC_ATTRS Port open();

private:
  // Client side of the process: non-inverted ownership convention.
  Process<false> process;
};
362*8f6978f8SDimitry Andric 
/// The RPC server used to respond to the client.
struct Server {
  RPC_ATTRS Server() = default;
  RPC_ATTRS Server(const Server &) = delete;
  RPC_ATTRS Server &operator=(const Server &) = delete;
  RPC_ATTRS ~Server() = default;

  /// Construct a server over shared memory laid out for \p port_count ports.
  RPC_ATTRS Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true>;
  /// Non-blocking attempt to claim a port with pending work, scanning from
  /// \p start; returns an empty optional if none is available.
  RPC_ATTRS rpc::optional<Port> try_open(uint32_t lane_size,
                                         uint32_t start = 0);
  /// Blocking variant of try_open.
  RPC_ATTRS Port open(uint32_t lane_size);

  /// Bytes of shared memory required for a server with this configuration.
  /// Note the argument order is (lane_size, port_count), the reverse of
  /// Process::allocation_size.
  RPC_ATTRS static constexpr uint64_t allocation_size(uint32_t lane_size,
                                                      uint32_t port_count) {
    return Process<true>::allocation_size(port_count, lane_size);
  }

private:
  // Server side of the process: inverted ownership convention.
  Process<true> process;
};
386*8f6978f8SDimitry Andric 
/// Applies \p fill to the shared buffer and initiates a send operation.
template <bool T> template <typename F> RPC_ATTRS void Port<T>::send(F fill) {
  // If we already own the buffer the inbox value is implied by our own outbox
  // bit (XOR with the side selector T); otherwise reload it from shared memory.
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before sending.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p fill function to initialize the buffer and release the memory.
  invoke_rpc(fill, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  // Flipping the outbox hands ownership of the buffer to the other side.
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}
401*8f6978f8SDimitry Andric 
/// Applies \p use to the shared buffer and acknowledges the send.
template <bool T> template <typename U> RPC_ATTRS void Port<T>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are waiting
  // for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  // See send(): derive the inbox from our outbox bit when we own the buffer.
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before receiving.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p use function to read the memory out of the buffer.
  invoke_rpc(use, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  // Defer the outbox flip until the next operation so back-to-back receives
  // keep ownership (see the 'receive' check above).
  receive = true;
  owns_buffer = true;
}
422*8f6978f8SDimitry Andric 
423*8f6978f8SDimitry Andric /// Combines a send and receive into a single function.
424*8f6978f8SDimitry Andric template <bool T>
425*8f6978f8SDimitry Andric template <typename F, typename U>
send_and_recv(F fill,U use)426*8f6978f8SDimitry Andric RPC_ATTRS void Port<T>::send_and_recv(F fill, U use) {
427*8f6978f8SDimitry Andric   send(fill);
428*8f6978f8SDimitry Andric   recv(use);
429*8f6978f8SDimitry Andric }
430*8f6978f8SDimitry Andric 
431*8f6978f8SDimitry Andric /// Combines a receive and send operation into a single function. The \p work
432*8f6978f8SDimitry Andric /// function modifies the buffer in-place and the send is only used to initiate
433*8f6978f8SDimitry Andric /// the copy back.
434*8f6978f8SDimitry Andric template <bool T>
435*8f6978f8SDimitry Andric template <typename W>
recv_and_send(W work)436*8f6978f8SDimitry Andric RPC_ATTRS void Port<T>::recv_and_send(W work) {
437*8f6978f8SDimitry Andric   recv(work);
438*8f6978f8SDimitry Andric   send([](Buffer *, uint32_t) { /* no-op */ });
439*8f6978f8SDimitry Andric }
440*8f6978f8SDimitry Andric 
441*8f6978f8SDimitry Andric /// Helper routine to simplify the interface when sending from the GPU using
442*8f6978f8SDimitry Andric /// thread private pointers to the underlying value.
443*8f6978f8SDimitry Andric template <bool T>
send_n(const void * src,uint64_t size)444*8f6978f8SDimitry Andric RPC_ATTRS void Port<T>::send_n(const void *src, uint64_t size) {
445*8f6978f8SDimitry Andric   const void **src_ptr = &src;
446*8f6978f8SDimitry Andric   uint64_t *size_ptr = &size;
447*8f6978f8SDimitry Andric   send_n(src_ptr, size_ptr);
448*8f6978f8SDimitry Andric }
449*8f6978f8SDimitry Andric 
/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
template <bool T>
RPC_ATTRS void Port<T>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;
  // First packet: data[0] carries the total byte count, the remaining seven
  // words carry the head of the payload.
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    // Track the largest transfer across lanes so every lane loops in lockstep.
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  // 'idx' is how many payload bytes have been sent so far; the first packet
  // carried sizeof(data) minus the length word.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  // Keep sending while any lane still has bytes left; ballot keeps the warp
  // convergent even when lanes finish at different times.
  while (rpc::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Lanes that are already done still participate in the send but copy
      // nothing.
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}
478*8f6978f8SDimitry Andric 
/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
///
/// \param dst   Per-lane output pointers, filled in with \p alloc's result.
/// \param size  Per-lane byte counts, filled in from the first packet.
/// \param alloc Callback invoked with the incoming size; must return storage
///              for at least that many bytes.
template <bool T>
template <typename A>
RPC_ATTRS void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    // The first packet's leading uint64_t carries the total transfer size
    // (mirrors the layout written by send_n).
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    // Allocate the destination now that the size is known.
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    // On the GPU each lane runs this itself; otherwise one thread services
    // all lanes, so track the maximum size across lanes.
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_recvs);
    // First chunk capacity is the buffer minus the size header.
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  // Drain the remaining bytes in whole-buffer chunks; \p idx counts bytes
  // received so far.
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  // The ballot keeps lanes converged: everyone keeps issuing recvs while any
  // lane in the mask still expects data.
  while (rpc::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      // Lanes that are already done still participate but copy nothing.
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}
511*8f6978f8SDimitry Andric 
/// Continually attempts to open a port to use as the client. The client can
/// only open a port if we find an index that is in a valid sending state. That
/// is, there are send operations pending that haven't been serviced on this
/// port. Each port instance uses an associated \p opcode to tell the server
/// what to do. The Client interface provides the appropriate lane size to the
/// port using the platform's returned value.
template <uint32_t opcode> RPC_ATTRS Client::Port Client::open() {
  // Repeatedly perform a naive linear scan for a port that can be opened to
  // send data.
  for (uint32_t index = 0;; ++index) {
    // Start from the beginning if we run out of ports to check.
    if (index >= process.port_count)
      index = 0;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = rpc::get_lane_mask();
    if (!process.try_lock(lane_mask, index))
      continue;

    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    // Only the first active lane publishes the opcode and lane mask into the
    // shared header; the sync afterwards ensures all lanes observe it before
    // the port is used.
    if (rpc::is_first_lane(lane_mask)) {
      process.header[index].opcode = opcode;
      process.header[index].mask = lane_mask;
    }
    rpc::sync_lane(lane_mask);
    return Port(process, lane_mask, rpc::get_num_lanes(), index, out);
  }
}
549*8f6978f8SDimitry Andric 
/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation
///
/// \param lane_size Lane count to construct the resulting Port with.
/// \param start     Index to begin the scan from (single pass, no wrap).
/// \return The opened port, or \p rpc::nullopt if no port had pending work.
RPC_ATTRS rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = rpc::get_lane_mask();
    // Peek at the mailboxes without taking the lock first.
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // The server is passive, if there is no work pending don't bother
    // opening a port.
    if (process.buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    if (!process.try_lock(lane_mask, index))
      continue;

    // Re-read the mailboxes now that the lock is held: the state may have
    // changed between the unlocked peek above and acquiring the lock.
    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);

    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    return Port(process, lane_mask, lane_size, index, out);
  }
  return rpc::nullopt;
}
581*8f6978f8SDimitry Andric 
open(uint32_t lane_size)582*8f6978f8SDimitry Andric RPC_ATTRS Server::Port Server::open(uint32_t lane_size) {
583*8f6978f8SDimitry Andric   for (;;) {
584*8f6978f8SDimitry Andric     if (rpc::optional<Server::Port> p = try_open(lane_size))
585*8f6978f8SDimitry Andric       return rpc::move(p.value());
586*8f6978f8SDimitry Andric     sleep_briefly();
587*8f6978f8SDimitry Andric   }
588*8f6978f8SDimitry Andric }
589*8f6978f8SDimitry Andric 
590*8f6978f8SDimitry Andric #undef RPC_ATTRS
591*8f6978f8SDimitry Andric #if !__has_builtin(__scoped_atomic_load_n)
592*8f6978f8SDimitry Andric #undef __scoped_atomic_load_n
593*8f6978f8SDimitry Andric #undef __scoped_atomic_store_n
594*8f6978f8SDimitry Andric #undef __scoped_atomic_fetch_or
595*8f6978f8SDimitry Andric #undef __scoped_atomic_fetch_and
596*8f6978f8SDimitry Andric #endif
597*8f6978f8SDimitry Andric #if !__has_builtin(__scoped_atomic_thread_fence)
598*8f6978f8SDimitry Andric #undef __scoped_atomic_thread_fence
599*8f6978f8SDimitry Andric #endif
600*8f6978f8SDimitry Andric 
601*8f6978f8SDimitry Andric } // namespace rpc
602*8f6978f8SDimitry Andric 
603*8f6978f8SDimitry Andric #endif // LLVM_LIBC_SHARED_RPC_H
604