#include "SentryHook.h"
+ #include "SentryCrashMemory.h"
+ #include "SentryCrashStackCursor.h"
#include "fishhook.h"
#include <dispatch/dispatch.h>
#include <execinfo.h>
#include <mach/mach.h>
#include <pthread.h>

- // NOTE on accessing thread-locals across threads:
- // We save the async stacktrace as a thread local when dispatching async calls,
- // but the various crash handlers need to access these thread-locals across threads
- // sometimes, which we do here:
- // While `pthread_t` is an opaque type, the offset of `thread specific data` (tsd)
- // is fixed due to backwards compatibility.
- // See:
- // https://github.com/apple/darwin-libpthread/blob/c60d249cc84dfd6097a7e71c68a36b47cbe076d1/src/types_internal.h#L409-L432
-
- #if __LP64__
- #    define TSD_OFFSET 224
- #else
- #    define TSD_OFFSET 176
- #endif
-
- static pthread_key_t async_caller_key = 0;
-
- sentrycrash_async_backtrace_t *
- sentrycrash_get_async_caller_for_thread(SentryCrashThread thread)
- {
-     return NULL;
-
-     // TODO: Disabled because still experimental.
-     // const pthread_t pthread = pthread_from_mach_thread_np((thread_t)thread);
-     // void **tsd_slots = (void *)((uint8_t *)pthread + TSD_OFFSET);
-     //
-     // return (sentrycrash_async_backtrace_t *)__atomic_load_n(
-     //     &tsd_slots[async_caller_key], __ATOMIC_SEQ_CST);
- }
+ static bool hooks_active = true;

void
sentrycrash__async_backtrace_incref(sentrycrash_async_backtrace_t *bt)
@@ -45,17 +19,112 @@ sentrycrash__async_backtrace_incref(sentrycrash_async_backtrace_t *bt)
}

void
- sentrycrash__async_backtrace_decref(sentrycrash_async_backtrace_t *bt)
+ sentrycrash_async_backtrace_decref(sentrycrash_async_backtrace_t *bt)
{
    if (!bt) {
        return;
    }
    if (__atomic_fetch_add(&bt->refcount, -1, __ATOMIC_SEQ_CST) == 1) {
-         sentrycrash__async_backtrace_decref(bt->async_caller);
+         sentrycrash_async_backtrace_decref(bt->async_caller);
        free(bt);
    }
}

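A note on the decrement above: `__atomic_fetch_add` returns the value the counter held *before* the add, so comparing the result against `1` detects exactly the release of the last reference. A minimal sketch of the resulting ownership contract (the consumer code is hypothetical; only the two function names come from this file):

    // Hypothetical consumer: every backtrace pointer handed out carries a +1
    // reference and must be balanced by exactly one decref.
    sentrycrash_async_backtrace_t *bt = sentrycrash__async_backtrace_capture(); // +1
    // ... read bt->backtrace / bt->len ...
    sentrycrash_async_backtrace_decref(bt); // -1; frees the whole caller chain at zero
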
+ /**
+  * This is a poor man's concurrent hashtable.
+  * We have N slots, using a simple FNV-like hashing function on the mach thread id.
+  *
+  * *Writes* to a slot will only ever happen on the *current* thread. See
+  * the `set`/`unset` functions for SAFETY descriptions.
+  * *Reads* will mostly happen on the *current* thread as well, but might happen
+  * across threads through `sentrycrashsc_initWithMachineContext`. See the `get`
+  * function for possible UNSAFETY.
+  *
+  * We use a fixed number of slots and do not account for collisions, so a high
+  * number of threads might lead to loss of async caller information.
+  */
+
+ #define SENTRY_MAX_ASYNC_THREADS (128 - 1)
+
+ typedef struct {
+     SentryCrashThread thread;
+     sentrycrash_async_backtrace_t *backtrace;
+ } sentrycrash_async_caller_slot_t;
+
+ static sentrycrash_async_caller_slot_t sentry_async_callers[SENTRY_MAX_ASYNC_THREADS] = { 0 };
+
+ static size_t
+ sentrycrash__thread_idx(SentryCrashThread thread)
+ {
+     // This uses the same magic numbers as FNV, but rather follows the simpler
+     // `(x * b + c) mod n` hashing scheme. Also note that `SentryCrashThread`
+     // is a typedef for a mach thread id, which is different from a `pthread_t`
+     // and should have a better distribution by itself.
+     return (thread * 0x100000001b3 + 0xcbf29ce484222325) % SENTRY_MAX_ASYNC_THREADS;
+ }
+
+ sentrycrash_async_backtrace_t *
+ sentrycrash_get_async_caller_for_thread(SentryCrashThread thread)
+ {
+     if (!__atomic_load_n(&hooks_active, __ATOMIC_RELAXED)) {
+         return NULL;
+     }
+
+     size_t idx = sentrycrash__thread_idx(thread);
+     sentrycrash_async_caller_slot_t *slot = &sentry_async_callers[idx];
+     if (__atomic_load_n(&slot->thread, __ATOMIC_ACQUIRE) == thread) {
+         sentrycrash_async_backtrace_t *backtrace
+             = __atomic_load_n(&slot->backtrace, __ATOMIC_RELAXED);
+         // UNSAFETY WARNING: There is a tiny chance of use-after-free here.
+         // This call can happen across threads, and the thread that "owns" the
+         // slot can decref and free the backtrace before *this* thread gets a
+         // chance to incref.
+         // The only codepath where this happens across threads is as part of
+         // `sentrycrashsc_initWithMachineContext`, which is always done after a
+         // `sentrycrashmc_suspendEnvironment`, or as part of
+         // `sentrycrashreport_writeStandardReport`, which lacks such a guard.
+         sentrycrash__async_backtrace_incref(backtrace);
+         return backtrace;
+     }
+     return NULL;
+ }
+
+ static void
+ sentrycrash__set_async_caller(sentrycrash_async_backtrace_t *backtrace)
+ {
+     SentryCrashThread thread = sentrycrashthread_self();
+
+     size_t idx = sentrycrash__thread_idx(thread);
+     sentrycrash_async_caller_slot_t *slot = &sentry_async_callers[idx];
+
+     SentryCrashThread expected = (SentryCrashThread)NULL;
+     bool success = __atomic_compare_exchange_n(
+         &slot->thread, &expected, thread, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+
+     // SAFETY: While multiple threads can race on a "set" call to the same slot,
+     // the cmpxchg makes sure that only one thread succeeds.
+     if (success) {
+         __atomic_store_n(&slot->backtrace, backtrace, __ATOMIC_RELEASE);
+     }
+ }
+
+ static void
+ sentrycrash__unset_async_caller(sentrycrash_async_backtrace_t *backtrace)
+ {
+     SentryCrashThread thread = sentrycrashthread_self();
+
+     size_t idx = sentrycrash__thread_idx(thread);
+     sentrycrash_async_caller_slot_t *slot = &sentry_async_callers[idx];
+
+     // SAFETY: The condition makes sure that the current thread *owns* this slot.
+     if (__atomic_load_n(&slot->thread, __ATOMIC_ACQUIRE) == thread) {
+         __atomic_store_n(&slot->backtrace, NULL, __ATOMIC_RELAXED);
+         __atomic_store_n(&slot->thread, (SentryCrashThread)NULL, __ATOMIC_RELEASE);
+     }
+
+     sentrycrash_async_backtrace_decref(backtrace);
+ }
+

sentrycrash_async_backtrace_t *
sentrycrash__async_backtrace_capture(void)
{
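To make the slot addressing concrete, here is a minimal, self-contained sketch of the indexing scheme introduced above, assuming `SentryCrashThread` is an integral mach thread id (modelled as `uint64_t` here); the constants are the 64-bit FNV prime and offset basis:

    #include <stdint.h>
    #include <stdio.h>

    #define SLOTS (128 - 1) // 127 is prime, which helps the modulo spread keys

    static size_t
    thread_idx(uint64_t thread)
    {
        // `(x * b + c) mod n` with the FNV-64 prime and offset basis.
        return (thread * 0x100000001b3 + 0xcbf29ce484222325) % SLOTS;
    }

    int
    main(void)
    {
        // Distinct thread ids can hash to the same slot; these two collide.
        // The cmpxchg in `sentrycrash__set_async_caller` then leaves the second
        // thread without a slot, losing its async caller info but nothing else.
        printf("%zu\n", thread_idx(0x1234));
        printf("%zu\n", thread_idx(0x1234 + SLOTS));
        return 0;
    }
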
@@ -64,15 +133,12 @@ sentrycrash__async_backtrace_capture(void)

    bt->len = backtrace(bt->backtrace, MAX_BACKTRACE_FRAMES);

-     sentrycrash_async_backtrace_t *caller = pthread_getspecific(async_caller_key);
-     sentrycrash__async_backtrace_incref(caller);
-     bt->async_caller = caller;
+     SentryCrashThread thread = sentrycrashthread_self();
+     bt->async_caller = sentrycrash_get_async_caller_for_thread(thread);

    return bt;
}

- static bool hooks_active = true;
-
static void (*real_dispatch_async)(dispatch_queue_t queue, dispatch_block_t block);

void
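The capture above links each backtrace to the one active in the dispatching thread's slot, forming a singly linked chain across dispatch hops. A hedged sketch of how a report writer might walk such a chain (`print_async_chain` is hypothetical and not part of this commit; it assumes `len` holds the frame count assigned from `backtrace()` above, and uses the real `backtrace_symbols` from `<execinfo.h>`):

    #include "SentryHook.h"
    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Walks the chain from the most recent dispatch hop outwards.
    // Assumes the caller owns a +1 reference on `bt`.
    static void
    print_async_chain(sentrycrash_async_backtrace_t *bt)
    {
        for (sentrycrash_async_backtrace_t *cur = bt; cur; cur = cur->async_caller) {
            char **symbols = backtrace_symbols(cur->backtrace, (int)cur->len);
            if (symbols) {
                for (int i = 0; i < (int)cur->len; i++) {
                    printf("  %s\n", symbols[i]);
                }
                free(symbols);
            }
            if (cur->async_caller) {
                printf("-- dispatched from: --\n");
            }
        }
    }
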
@@ -87,14 +153,13 @@ sentrycrash__hook_dispatch_async(dispatch_queue_t queue, dispatch_block_t block)

    return real_dispatch_async(queue, ^{
        // inside the async context, save the backtrace in a thread local for later consumption
-         pthread_setspecific(async_caller_key, bt);
+         sentrycrash__set_async_caller(bt);

        // call through to the original block
        block();

        // and decref our current backtrace
-         pthread_setspecific(async_caller_key, NULL);
-         sentrycrash__async_backtrace_decref(bt);
+         sentrycrash__unset_async_caller(bt);
    });
}

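The set/call/unset bracket above is also what makes chains nest: if `block()` itself calls the hooked `dispatch_async`, the inner `sentrycrash__async_backtrace_capture` runs while this thread's slot still holds `bt`, so the inner backtrace picks it up as its `async_caller`. A hedged app-side illustration (`queue`, `work1`, and `work2` are hypothetical):

    // Each hop captures a backtrace that links to the one in the dispatching
    // thread's slot, so a crash in work2 can be reported roughly as:
    //   work2 <- dispatched from work1 <- dispatched from the original caller
    dispatch_async(queue, ^{ // hop 1: its capture has no async caller yet
        work1();
        dispatch_async(queue, ^{ // hop 2: its capture links to hop 1's bt
            work2();
        });
    });
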
@@ -127,14 +192,13 @@ sentrycrash__hook_dispatch_after(

    return real_dispatch_after(when, queue, ^{
        // inside the async context, save the backtrace in a thread local for later consumption
-         pthread_setspecific(async_caller_key, bt);
+         sentrycrash__set_async_caller(bt);

        // call through to the original block
        block();

        // and decref our current backtrace
-         pthread_setspecific(async_caller_key, NULL);
-         sentrycrash__async_backtrace_decref(bt);
+         sentrycrash__unset_async_caller(bt);
    });
}

@@ -165,14 +229,13 @@ sentrycrash__hook_dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_

    return real_dispatch_barrier_async(queue, ^{
        // inside the async context, save the backtrace in a thread local for later consumption
-         pthread_setspecific(async_caller_key, bt);
+         sentrycrash__set_async_caller(bt);

        // call through to the original block
        block();

        // and decref our current backtrace
-         pthread_setspecific(async_caller_key, NULL);
-         sentrycrash__async_backtrace_decref(bt);
+         sentrycrash__unset_async_caller(bt);
    });
}

@@ -199,9 +262,6 @@ sentrycrash_install_async_hooks(void)
    if (__atomic_exchange_n(&hooks_installed, true, __ATOMIC_SEQ_CST)) {
        return;
    }
-     if (pthread_key_create(&async_caller_key, NULL) != 0) {
-         return;
-     }

    sentrycrash__hook_rebind_symbols(
        (struct rebinding[6]) {
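The diff is cut off inside the rebinding table, so the six actual entries are not shown here. For orientation only, a fishhook rebinding generally looks like the following single-entry sketch (this is NOT the table from this commit; fishhook's real API is `struct rebinding` and `rebind_symbols`, which the diff reaches through its `sentrycrash__hook_rebind_symbols` wrapper):

    #include "fishhook.h"

    // One illustrative entry: route dispatch_async through the hook and stash
    // the original implementation so the hook can call through to it.
    static struct rebinding example_rebinding[1] = {
        { "dispatch_async", (void *)sentrycrash__hook_dispatch_async,
            (void **)&real_dispatch_async },
    };

    // rebind_symbols(example_rebinding, 1);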