//===-- Shared memory RPC client / server interface -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a remote procedure call mechanism to communicate
// between heterogeneous devices that can share an address space atomically.
// We provide a client and a server to facilitate the remote call. The client
// makes requests to the server using a shared communication channel. We use
// separate atomic signals to indicate which side, the client or the server,
// owns the buffer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SHARED_RPC_H
#define LLVM_LIBC_SHARED_RPC_H

#include "rpc_util.h"

#include <stdint.h>

#ifndef RPC_INLINE
#define RPC_INLINE inline
#endif

namespace rpc {

/// Use scoped atomic variants if they are available for the target.
#if !__has_builtin(__scoped_atomic_load_n)
#define __scoped_atomic_load_n(src, ord, scp) __atomic_load_n(src, ord)
#define __scoped_atomic_store_n(dst, src, ord, scp)                           \
  __atomic_store_n(dst, src, ord)
#define __scoped_atomic_fetch_or(src, val, ord, scp)                          \
  __atomic_fetch_or(src, val, ord)
#define __scoped_atomic_fetch_and(src, val, ord, scp)                         \
  __atomic_fetch_and(src, val, ord)
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#define __scoped_atomic_thread_fence(ord, scp) __atomic_thread_fence(ord)
#endif

/// Generic codes that can be used when implementing the server.
enum Status {
  SUCCESS = 0x0,
  ERROR = 0x1000,
  UNHANDLED_OPCODE = 0x1001,
};

/// A fixed size channel used to communicate between the RPC client and server.
struct Buffer {
  uint64_t data[8];
};
static_assert(sizeof(Buffer) == 64, "Buffer size mismatch");

/// The information associated with a packet. This indicates which operations
/// to perform and which threads are active in the slots.
struct Header {
  uint64_t mask;
  uint32_t opcode;
};

/// The maximum number of parallel ports that the RPC interface can support.
constexpr static uint64_t MAX_PORT_COUNT = 4096;

/// A common process used to synchronize communication between a client and a
/// server. The process contains a read-only inbox and a write-only outbox used
/// for signaling ownership of the shared buffer between both sides. We assign
/// ownership of the buffer to the client if the inbox and outbox bits match,
/// otherwise it is owned by the server.
///
/// This process is designed to allow the client and the server to exchange
/// data using a fixed size packet in a mostly arbitrary order using the 'send'
/// and 'recv' operations. The following restrictions to this scheme apply:
///   - The client will always start with a 'send' operation.
///   - The server will always start with a 'recv' operation.
///   - Every 'send' or 'recv' call is mirrored by the other process.
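///
/// As a sketch of one round trip on a single port (assuming all mailbox bits
/// start at zero): the client owns the buffer while its inbox equals its
/// outbox, so it fills the packet and flips its outbox (0 -> 1). The server,
/// which owns the buffer while its inbox differs from its outbox, now sees the
/// mismatch, services the request, and flips its own outbox (0 -> 1). The
/// client's inbox then matches its outbox again and ownership returns to it.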
template <bool Invert> struct Process {
  RPC_INLINE Process() = default;
  RPC_INLINE Process(const Process &) = delete;
  RPC_INLINE Process &operator=(const Process &) = delete;
  RPC_INLINE Process(Process &&) = default;
  RPC_INLINE Process &operator=(Process &&) = default;
  RPC_INLINE ~Process() = default;

  const uint32_t port_count = 0;
  const uint32_t *const inbox = nullptr;
  uint32_t *const outbox = nullptr;
  Header *const header = nullptr;
  Buffer *const packet = nullptr;

  static constexpr uint64_t NUM_BITS_IN_WORD = sizeof(uint32_t) * 8;
  uint32_t lock[MAX_PORT_COUNT / NUM_BITS_IN_WORD] = {0};

  RPC_INLINE Process(uint32_t port_count, void *buffer)
      : port_count(port_count),
        inbox(reinterpret_cast<uint32_t *>(
            advance(buffer, inbox_offset(port_count)))),
        outbox(reinterpret_cast<uint32_t *>(
            advance(buffer, outbox_offset(port_count)))),
        header(reinterpret_cast<Header *>(
            advance(buffer, header_offset(port_count)))),
        packet(reinterpret_cast<Buffer *>(
            advance(buffer, buffer_offset(port_count)))) {}

  /// Allocate a memory buffer sufficient to store the following equivalent
  /// representation in memory.
  ///
  /// struct Equivalent {
  ///   Atomic<uint32_t> primary[port_count];
  ///   Atomic<uint32_t> secondary[port_count];
  ///   Header header[port_count];
  ///   Buffer packet[port_count][lane_size];
  /// };
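  ///
  /// As a rough worked example (a sketch, not a guaranteed value): with
  /// port_count = 64 and lane_size = 32, the two mailboxes take
  /// 2 * 64 * sizeof(uint32_t) = 512 bytes, the headers take
  /// 64 * sizeof(Header) bytes, and the packets take
  /// 64 * 32 * sizeof(Buffer) = 131072 bytes, plus any alignment padding
  /// between the regions.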
  RPC_INLINE static constexpr uint64_t allocation_size(uint32_t port_count,
                                                       uint32_t lane_size) {
    return buffer_offset(port_count) + buffer_bytes(port_count, lane_size);
  }

  /// Retrieve the inbox state from memory shared between processes.
  RPC_INLINE uint32_t load_inbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&inbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Retrieve the outbox state from memory shared between processes.
  RPC_INLINE uint32_t load_outbox(uint64_t lane_mask, uint32_t index) const {
    return rpc::broadcast_value(
        lane_mask, __scoped_atomic_load_n(&outbox[index], __ATOMIC_RELAXED,
                                          __MEMORY_SCOPE_SYSTEM));
  }

  /// Signal to the other process that this one is finished with the buffer.
  /// Equivalent to loading the outbox followed by a store of the inverted
  /// value. The outbox is written only by this warp, and tracking the value
  /// locally is cheaper than calling load_outbox to get the value to store.
  RPC_INLINE uint32_t invert_outbox(uint32_t index, uint32_t current_outbox) {
    uint32_t inverted_outbox = !current_outbox;
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_SYSTEM);
    __scoped_atomic_store_n(&outbox[index], inverted_outbox, __ATOMIC_RELAXED,
                            __MEMORY_SCOPE_SYSTEM);
    return inverted_outbox;
  }

  // Given the current outbox and inbox values, wait until the inbox changes
  // to indicate that this thread owns the buffer element.
  RPC_INLINE void wait_for_ownership(uint64_t lane_mask, uint32_t index,
                                     uint32_t outbox, uint32_t in) {
    while (buffer_unavailable(in, outbox)) {
      sleep_briefly();
      in = load_inbox(lane_mask, index);
    }
    __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
  }

  /// The packet is a linearly allocated array of buffers used to communicate
  /// with the other process. This function returns the appropriate slot in
  /// this array such that the process can operate on an entire warp or
  /// wavefront.
  RPC_INLINE Buffer *get_packet(uint32_t index, uint32_t lane_size) {
    return &packet[index * lane_size];
  }

  /// Determines if this process needs to wait for ownership of the buffer. We
  /// invert the condition on one of the processes to indicate that if one
  /// process owns the buffer then the other does not.
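  ///
  /// Put differently (a restatement of the Invert parameter, not new
  /// behavior): the client (Invert == false) may use the buffer when the
  /// inbox and outbox bits are equal, while the server (Invert == true) may
  /// use it when they differ.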
  RPC_INLINE static bool buffer_unavailable(uint32_t in, uint32_t out) {
    bool cond = in != out;
    return Invert ? !cond : cond;
  }

  /// Attempt to claim the lock at index. Return true on lock taken.
  /// lane_mask is a bitmap of the threads in the warp that would hold the
  /// single lock on success, e.g. the result of rpc::get_lane_mask().
  /// The lock is held when the n-th bit of the lock bitfield is set.
  RPC_INLINE bool try_lock(uint64_t lane_mask, uint32_t index) {
    // On amdgpu, a test-and-set on the nth lock bit and a sync_lane would
    // suffice. On volta, we need to handle differences between the threads
    // running and the threads that were detected in the previous call to
    // get_lane_mask().
    //
    // All threads in lane_mask try to claim the lock. At most one can succeed.
    // There may be threads active which are not in lane_mask and which must
    // not succeed in taking the lock, as otherwise it will leak. This is
    // handled by making the fetch_or a no-op (an or with zero) for threads
    // that are not in lane_mask.
    uint32_t id = rpc::get_lane_id();
    bool id_in_lane_mask = lane_mask & (1ul << id);

    // All threads in the warp call fetch_or, possibly at the same time.
    bool before = set_nth(lock, index, id_in_lane_mask);
    uint64_t packed = rpc::ballot(lane_mask, before);

    // If every bit set in lane_mask is also set in packed, every single thread
    // in the warp failed to get the lock. Ballot returns unset for threads not
    // in the lane mask.
    //
    // Cases, per thread:
    // mask==0 -> unspecified before, discarded by ballot -> 0
    // mask==1 and before==0 (success), set zero by ballot -> 0
    // mask==1 and before==1 (failure), set one by ballot -> 1
    //
    // mask != packed implies at least one of the threads got the lock, and the
    // atomic semantics of fetch_or mean at most one of the threads got it.

    // If holding the lock then the caller can load values knowing said loads
    // won't move past the lock. No such guarantee is needed if the lock
    // acquire failed. This conditional branch is expected to fold in the
    // caller after inlining the current function.
    bool holding_lock = lane_mask != packed;
    if (holding_lock)
      __scoped_atomic_thread_fence(__ATOMIC_ACQUIRE, __MEMORY_SCOPE_DEVICE);
    return holding_lock;
  }

  /// Unlock the lock at index. We need a lane sync to keep this function
  /// convergent, otherwise the compiler will sink the store and deadlock.
  RPC_INLINE void unlock(uint64_t lane_mask, uint32_t index) {
    // Do not move any writes past the unlock.
    __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE);

    // Use exactly one thread to clear the nth bit in the lock array. We must
    // restrict this to a single thread to avoid one thread dropping the lock,
    // then an unrelated warp claiming the lock, then a second thread in this
    // warp dropping the lock again.
    clear_nth(lock, index, rpc::is_first_lane(lane_mask));
    rpc::sync_lane(lane_mask);
  }

  /// Number of bytes to allocate for an inbox or outbox.
  RPC_INLINE static constexpr uint64_t mailbox_bytes(uint32_t port_count) {
    return port_count * sizeof(uint32_t);
  }

  /// Number of bytes to allocate for the buffer containing the packets.
  RPC_INLINE static constexpr uint64_t buffer_bytes(uint32_t port_count,
                                                    uint32_t lane_size) {
    return port_count * lane_size * sizeof(Buffer);
  }

  /// Offset of the inbox in memory. This is the same as the outbox if inverted.
  RPC_INLINE static constexpr uint64_t inbox_offset(uint32_t port_count) {
    return Invert ? mailbox_bytes(port_count) : 0;
  }

  /// Offset of the outbox in memory. This is the same as the inbox if inverted.
  RPC_INLINE static constexpr uint64_t outbox_offset(uint32_t port_count) {
    return Invert ? 0 : mailbox_bytes(port_count);
  }

  /// Offset of the header array in memory, placed after the inbox and outbox.
  RPC_INLINE static constexpr uint64_t header_offset(uint32_t port_count) {
    return align_up(2 * mailbox_bytes(port_count), alignof(Header));
  }

  /// Offset of the buffer containing the packets after the inbox and outbox.
  RPC_INLINE static constexpr uint64_t buffer_offset(uint32_t port_count) {
    return align_up(header_offset(port_count) + port_count * sizeof(Header),
                    alignof(Buffer));
  }

  /// Conditionally set the n-th bit in the atomic bitfield.
  RPC_INLINE static constexpr uint32_t set_nth(uint32_t *bits, uint32_t index,
                                               bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_or(&bits[slot],
                                    static_cast<uint32_t>(cond) << bit,
                                    __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }

  /// Conditionally clear the n-th bit in the atomic bitfield.
  RPC_INLINE static constexpr uint32_t clear_nth(uint32_t *bits,
                                                 uint32_t index, bool cond) {
    uint32_t slot = index / NUM_BITS_IN_WORD;
    uint32_t bit = index % NUM_BITS_IN_WORD;
    return __scoped_atomic_fetch_and(&bits[slot],
                                     ~0u ^ (static_cast<uint32_t>(cond) << bit),
                                     __ATOMIC_RELAXED, __MEMORY_SCOPE_DEVICE) &
           (1u << bit);
  }
};

/// Invokes a function across every active buffer across the total lane size.
template <typename F>
RPC_INLINE static void invoke_rpc(F &&fn, uint32_t lane_size,
                                  uint64_t lane_mask, Buffer *slot) {
  if constexpr (is_process_gpu()) {
    fn(&slot[rpc::get_lane_id()], rpc::get_lane_id());
  } else {
    for (uint32_t i = 0; i < lane_size; i += rpc::get_num_lanes())
      if (lane_mask & (1ul << i))
        fn(&slot[i], i);
  }
}

/// The port provides the interface to communicate between the multiple
/// processes. A port is conceptually an index into the memory provided by the
/// underlying process that is guarded by a lock bit.
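///
/// A typical lifetime, described here only as a sketch: obtain a port from
/// Client::open() or Server::try_open(), perform a matched sequence of send()
/// and recv() calls with the other side, and then call close() to release the
/// lock so the index can be reused.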
template <bool T> struct Port {
  RPC_INLINE Port(Process<T> &process, uint64_t lane_mask, uint32_t lane_size,
                  uint32_t index, uint32_t out)
      : process(process), lane_mask(lane_mask), lane_size(lane_size),
        index(index), out(out), receive(false), owns_buffer(true) {}
  RPC_INLINE ~Port() = default;

private:
  RPC_INLINE Port(const Port &) = delete;
  RPC_INLINE Port &operator=(const Port &) = delete;
  RPC_INLINE Port(Port &&) = default;
  RPC_INLINE Port &operator=(Port &&) = default;

  friend struct Client;
  friend struct Server;
  friend class rpc::optional<Port<T>>;

public:
  template <typename U> RPC_INLINE void recv(U use);
  template <typename F> RPC_INLINE void send(F fill);
  template <typename F, typename U>
  RPC_INLINE void send_and_recv(F fill, U use);
  template <typename W> RPC_INLINE void recv_and_send(W work);
  RPC_INLINE void send_n(const void *const *src, uint64_t *size);
  RPC_INLINE void send_n(const void *src, uint64_t size);
  template <typename A>
  RPC_INLINE void recv_n(void **dst, uint64_t *size, A &&alloc);

  RPC_INLINE uint32_t get_opcode() const {
    return process.header[index].opcode;
  }

  RPC_INLINE uint32_t get_index() const { return index; }

  RPC_INLINE void close() {
    // Wait for all lanes to finish using the port.
    rpc::sync_lane(lane_mask);

    // The server is passive, so if it owns the buffer when it closes we need
    // to give ownership back to the client.
    if (owns_buffer && T)
      out = process.invert_outbox(index, out);
    process.unlock(lane_mask, index);
  }

private:
  Process<T> &process;
  uint64_t lane_mask;
  uint32_t lane_size;
  uint32_t index;
  uint32_t out;
  bool receive;
  bool owns_buffer;
};

/// The RPC client used to make requests to the server.
struct Client {
  RPC_INLINE Client() = default;
  RPC_INLINE Client(const Client &) = delete;
  RPC_INLINE Client &operator=(const Client &) = delete;
  RPC_INLINE ~Client() = default;

  RPC_INLINE Client(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<false>;
  template <uint32_t opcode> RPC_INLINE Port open();

private:
  Process<false> process;
};
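
/// A minimal sketch of device-side client usage. The opcode value, the payload
/// layout, and the 'port_count'/'shared_buffer' names are illustrative
/// assumptions, not part of this interface.
///
/// \code
///   rpc::Client client(port_count, shared_buffer);
///   rpc::Client::Port port = client.open<0x42>();
///   port.send([](rpc::Buffer *buffer, uint32_t) {
///     buffer->data[0] = 1234; // Request payload chosen for illustration.
///   });
///   port.recv([](rpc::Buffer *buffer, uint32_t) {
///     // Read the server's reply out of buffer->data here.
///   });
///   port.close();
/// \endcode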

/// The RPC server used to respond to the client.
struct Server {
  RPC_INLINE Server() = default;
  RPC_INLINE Server(const Server &) = delete;
  RPC_INLINE Server &operator=(const Server &) = delete;
  RPC_INLINE ~Server() = default;

  RPC_INLINE Server(uint32_t port_count, void *buffer)
      : process(port_count, buffer) {}

  using Port = rpc::Port<true>;
  RPC_INLINE rpc::optional<Port> try_open(uint32_t lane_size,
                                          uint32_t start = 0);
  RPC_INLINE Port open(uint32_t lane_size);

  RPC_INLINE static uint64_t allocation_size(uint32_t lane_size,
                                             uint32_t port_count) {
    return Process<true>::allocation_size(port_count, lane_size);
  }

private:
  Process<true> process;
};
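
/// A minimal sketch of host-side server usage. The handler body and the
/// 'port_count'/'shared_buffer'/'lane_size' names are illustrative
/// assumptions rather than a prescribed implementation.
///
/// \code
///   rpc::Server server(port_count, shared_buffer);
///   for (;;) {
///     rpc::optional<rpc::Server::Port> port = server.try_open(lane_size);
///     if (!port)
///       continue;
///     // Dispatch on port.value().get_opcode() here; this sketch simply
///     // echoes the first word of the request back to the client.
///     port.value().recv_and_send([](rpc::Buffer *buffer, uint32_t) {
///       buffer->data[0] += 1;
///     });
///     port.value().close();
///   }
/// \endcode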

/// Applies \p fill to the shared buffer and initiates a send operation.
template <bool T> template <typename F> RPC_INLINE void Port<T>::send(F fill) {
  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before sending.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p fill function to initialize the buffer and release the
  // memory.
  invoke_rpc(fill, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  out = process.invert_outbox(index, out);
  owns_buffer = false;
  receive = false;
}

/// Applies \p use to the shared buffer and acknowledges the send.
template <bool T> template <typename U> RPC_INLINE void Port<T>::recv(U use) {
  // We only exchange ownership of the buffer during a receive if we are
  // waiting for a previous receive to finish.
  if (receive) {
    out = process.invert_outbox(index, out);
    owns_buffer = false;
  }

  uint32_t in = owns_buffer ? out ^ T : process.load_inbox(lane_mask, index);

  // We need to wait until we own the buffer before receiving.
  process.wait_for_ownership(lane_mask, index, out, in);

  // Apply the \p use function to read the memory out of the buffer.
  invoke_rpc(use, lane_size, process.header[index].mask,
             process.get_packet(index, lane_size));
  receive = true;
  owns_buffer = true;
}

/// Combines a send and receive into a single function.
template <bool T>
template <typename F, typename U>
RPC_INLINE void Port<T>::send_and_recv(F fill, U use) {
  send(fill);
  recv(use);
}

/// Combines a receive and send operation into a single function. The \p work
/// function modifies the buffer in-place and the send is only used to initiate
/// the copy back.
template <bool T>
template <typename W>
RPC_INLINE void Port<T>::recv_and_send(W work) {
  recv(work);
  send([](Buffer *, uint32_t) { /* no-op */ });
}

/// Helper routine to simplify the interface when sending from the GPU using
/// thread-private pointers to the underlying value.
template <bool T>
RPC_INLINE void Port<T>::send_n(const void *src, uint64_t size) {
  const void **src_ptr = &src;
  uint64_t *size_ptr = &size;
  send_n(src_ptr, size_ptr);
}

/// Sends an arbitrarily sized data buffer \p src across the shared channel in
/// multiples of the packet length.
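///
/// The first packet carries the total size in the leading uint64_t of the
/// buffer, with the payload starting immediately after it; any remaining bytes
/// are then streamed in subsequent full-buffer sends.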
template <bool T>
RPC_INLINE void Port<T>::send_n(const void *const *src, uint64_t *size) {
  uint64_t num_sends = 0;
  send([&](Buffer *buffer, uint32_t id) {
    reinterpret_cast<uint64_t *>(buffer->data)[0] = lane_value(size, id);
    num_sends = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_sends);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(&buffer->data[1], lane_value(src, id), len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_sends)) {
    send([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(buffer->data, advance(lane_value(src, id), idx), len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Receives an arbitrarily sized data buffer across the shared channel in
/// multiples of the packet length. The \p alloc function is called with the
/// size of the data so that we can initialize the size of the \p dst buffer.
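///
/// The first packet is expected to carry the total size in its leading
/// uint64_t, mirroring send_n; \p alloc is then invoked with that size for
/// each lane and must return storage large enough for the incoming bytes.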
template <bool T>
template <typename A>
RPC_INLINE void Port<T>::recv_n(void **dst, uint64_t *size, A &&alloc) {
  uint64_t num_recvs = 0;
  recv([&](Buffer *buffer, uint32_t id) {
    lane_value(size, id) = reinterpret_cast<uint64_t *>(buffer->data)[0];
    lane_value(dst, id) =
        reinterpret_cast<uint8_t *>(alloc(lane_value(size, id)));
    num_recvs = is_process_gpu() ? lane_value(size, id)
                                 : rpc::max(lane_value(size, id), num_recvs);
    uint64_t len =
        lane_value(size, id) > sizeof(Buffer::data) - sizeof(uint64_t)
            ? sizeof(Buffer::data) - sizeof(uint64_t)
            : lane_value(size, id);
    rpc_memcpy(lane_value(dst, id), &buffer->data[1], len);
  });
  uint64_t idx = sizeof(Buffer::data) - sizeof(uint64_t);
  uint64_t mask = process.header[index].mask;
  while (rpc::ballot(mask, idx < num_recvs)) {
    recv([=](Buffer *buffer, uint32_t id) {
      uint64_t len = lane_value(size, id) - idx > sizeof(Buffer::data)
                         ? sizeof(Buffer::data)
                         : lane_value(size, id) - idx;
      if (idx < lane_value(size, id))
        rpc_memcpy(advance(lane_value(dst, id), idx), buffer->data, len);
    });
    idx += sizeof(Buffer::data);
  }
}

/// Continually attempts to open a port to use as the client. The client can
/// only open a port if we find an index that is in a valid sending state. That
/// is, there are send operations pending that haven't been serviced on this
/// port. Each port instance uses an associated \p opcode to tell the server
/// what to do. The Client interface provides the appropriate lane size to the
/// port using the platform's returned value.
template <uint32_t opcode> RPC_INLINE Client::Port Client::open() {
  // Repeatedly perform a naive linear scan for a port that can be opened to
  // send data.
  for (uint32_t index = 0;; ++index) {
    // Start from the beginning if we run out of ports to check.
    if (index >= process.port_count)
      index = 0;

    // Attempt to acquire the lock on this index.
    uint64_t lane_mask = rpc::get_lane_mask();
    if (!process.try_lock(lane_mask, index))
      continue;

    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // Once we acquire the index we need to check if we are in a valid sending
    // state.
    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    if (rpc::is_first_lane(lane_mask)) {
      process.header[index].opcode = opcode;
      process.header[index].mask = lane_mask;
    }
    rpc::sync_lane(lane_mask);
    return Port(process, lane_mask, rpc::get_num_lanes(), index, out);
  }
}

/// Attempts to open a port to use as the server. The server can only open a
/// port if it has a pending receive operation.
RPC_INLINE rpc::optional<typename Server::Port>
Server::try_open(uint32_t lane_size, uint32_t start) {
  // Perform a naive linear scan for a port that has a pending request.
  for (uint32_t index = start; index < process.port_count; ++index) {
    uint64_t lane_mask = rpc::get_lane_mask();
    uint32_t in = process.load_inbox(lane_mask, index);
    uint32_t out = process.load_outbox(lane_mask, index);

    // The server is passive, so if there is no work pending don't bother
    // opening a port.
    if (process.buffer_unavailable(in, out))
      continue;

    // Attempt to acquire the lock on this index.
    if (!process.try_lock(lane_mask, index))
      continue;

    in = process.load_inbox(lane_mask, index);
    out = process.load_outbox(lane_mask, index);

    if (process.buffer_unavailable(in, out)) {
      process.unlock(lane_mask, index);
      continue;
    }

    return Port(process, lane_mask, lane_size, index, out);
  }
  return rpc::nullopt;
}

RPC_INLINE Server::Port Server::open(uint32_t lane_size) {
  for (;;) {
    if (rpc::optional<Server::Port> p = try_open(lane_size))
      return rpc::move(p.value());
    sleep_briefly();
  }
}

#if !__has_builtin(__scoped_atomic_load_n)
#undef __scoped_atomic_load_n
#undef __scoped_atomic_store_n
#undef __scoped_atomic_fetch_or
#undef __scoped_atomic_fetch_and
#endif
#if !__has_builtin(__scoped_atomic_thread_fence)
#undef __scoped_atomic_thread_fence
#endif

} // namespace rpc

#endif // LLVM_LIBC_SHARED_RPC_H