//===-- tsan_fd.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"

#include <sanitizer_common/sanitizer_atomic.h>

#include "tsan_interceptors.h"
#include "tsan_rtl.h"

namespace __tsan {

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
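// The fd descriptor table is a lazily-populated two-level array: an fd maps
// to tab[fd / kTableSizeL2][fd % kTableSizeL2], so e.g. fd 1500 resides in
// leaf 1 (1500 / 1024) at index 476 (1500 % 1024). See fddesc() below.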

struct FdSync {
  atomic_uint64_t rc;
};

struct FdDesc {
  FdSync *sync;
  // This is used to establish write -> epoll_wait synchronization
  // where epoll_wait receives notification about the write.
  atomic_uintptr_t aux_sync;  // FdSync*
  Tid creation_tid;
  StackID creation_stack;
};

struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

static bool bogusfd(int fd) {
  // Apparently a bogus fd value.
  return fd < 0 || fd >= kTableSize;
}

static FdSync *allocsync(ThreadState *thr, uptr pc) {
  FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
      kDefaultAlignment, false);
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

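// FdSync objects are reference-counted; rc == (u64)-1 is a sentinel marking
// the statically allocated globsync/filesync/socksync objects (see FdInit),
// which ref/unref leave untouched and which are never freed.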
static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      user_free(thr, pc, s, false);
    }
  }
}

static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p, false);
  }
  FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
  return &fds[fd % kTableSizeL2];
}

// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
    bool write = true) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  unref(thr, pc,
        reinterpret_cast<FdSync *>(
            atomic_load(&d->aux_sync, memory_order_relaxed)));
  atomic_store(&d->aux_sync, 0, memory_order_relaxed);
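  // flags()->io_sync selects how much synchronization IO operations imply:
  // 0 - none, 1 - synchronize via the fd's own FdSync, 2 - synchronize all
  // fds through the single global FdSync.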
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  // This prevents false positives on the fd_close_norace3.cpp test.
  // The mechanics of the false positive are not completely clear,
  // but it happens only if global reset is enabled (flush_memory_ms=1)
  // and may be related to lost writes during asynchronous MADV_DONTNEED.
  SlotLocker locker(thr);
  if (write) {
    // To catch races between fd usage and open.
    MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
  } else {
    // See the dup-related comment in FdClose.
    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead | kAccessSlotLocked);
  }
}

void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fd's, because the child is going to
  // close all of them, and that will cause races between the previous
  // read/write and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}

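// Maps an address inside the fd descriptor table back to the fd it
// describes, so race reports can name the fd and its creation stack.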
bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL2 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}

void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

void FdRelease(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
  if (s)
    Release(thr, pc, (uptr)s);
  if (uptr aux_sync = atomic_load(&d->aux_sync, memory_order_acquire))
    Release(thr, pc, aux_sync);
}

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
}

void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  {
    // Need to lock the slot to make MemoryAccess and MemoryResetRange atomic
    // with respect to global reset. See the comment in MemoryRangeFreed.
    SlotLocker locker(thr);
    if (!MustIgnoreInterceptor(thr)) {
      if (write) {
        // To catch races between fd usage and close.
        MemoryAccess(thr, pc, (uptr)d, 8,
                     kAccessWrite | kAccessCheckOnly | kAccessSlotLocked);
      } else {
        // This path is used only by dup2/dup3 calls.
        // We do a read instead of a write because there are a number of
        // legitimate cases where a write would lead to false positives:
        // 1. Some software dups a closed pipe in place of a socket before
        //    closing the socket (to prevent races, actually).
        // 2. Some daemons dup /dev/null in place of stdin/stdout.
        // On the other hand, we have not seen cases where a write here
        // catches real bugs.
        MemoryAccess(thr, pc, (uptr)d, 8,
                     kAccessRead | kAccessCheckOnly | kAccessSlotLocked);
      }
    }
    // We need to clear it, because if we do not intercept some call out there
    // that creates an fd, we will hit false positives.
    MemoryResetRange(thr, pc, (uptr)d, 8);
  }
  unref(thr, pc, d->sync);
  d->sync = 0;
  unref(thr, pc,
        reinterpret_cast<FdSync *>(
            atomic_load(&d->aux_sync, memory_order_relaxed)));
  atomic_store(&d->aux_sync, 0, memory_order_relaxed);
  d->creation_tid = kInvalidTid;
  d->creation_stack = kInvalidStackID;
}

void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  if (bogusfd(oldfd) || bogusfd(newfd))
    return;
  // Ignore the case when the user dups a not-yet-connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
  FdClose(thr, pc, newfd, write);
  init(thr, pc, newfd, ref(od->sync), write);
}

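// Both pipe ends share a single FdSync, so a release on the write end (e.g.
// in the write() interceptor) pairs with an acquire on the read end (e.g. in
// the read() interceptor), modeling the happens-before the pipe provides.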
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdPipeCreate(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync(thr, pc);
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdPollAdd(ThreadState *thr, uptr pc, int epfd, int fd) {
  DPrintf("#%d: FdPollAdd(%d, %d)\n", thr->tid, epfd, fd);
  if (bogusfd(epfd) || bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  // Associate fd with epoll fd only once.
  // While an fd can be associated with multiple epolls at the same time,
  // or with different epolls during different phases of its lifetime,
  // the synchronization semantics (and examples) of this are unclear,
  // so we don't support it for now.
  // Changing the association would also create a lifetime management
  // problem for FdRelease, which accesses the aux_sync.
  if (atomic_load(&d->aux_sync, memory_order_relaxed))
    return;
  FdDesc *epd = fddesc(thr, pc, epfd);
  FdSync *s = epd->sync;
  if (!s)
    return;
  uptr cmp = 0;
  if (atomic_compare_exchange_strong(
          &d->aux_sync, &cmp, reinterpret_cast<uptr>(s), memory_order_release))
    ref(s);
}
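// Illustrative flow of the aux_sync mechanism (a sketch, not code from this
// file): after FdPollAdd(epfd, fd), a write(fd) in thread A triggers
// FdRelease(fd), which releases on both d->sync and d->aux_sync (the epoll
// fd's FdSync); an epoll_wait(epfd) in thread B triggers FdAcquire(epfd) on
// that same FdSync, so A's write happens-before B's observed wakeup.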

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.socksync);
}

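// File2addr/Dir2addr collapse all file paths (and all directory paths,
// respectively) to one static address each, used as a synchronization
// object; path-based synchronization is thus deliberately coarse-grained.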
uptr File2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

}  //  namespace __tsan