Commit 34ecec9

Atomically delete object queue only when empty
Once the thread's object queue is empty, other threads may explicitly merge reference counts immediately, and it is then no longer safe for the original thread to do refcount operations because of races with accesses to ob_ref_local. This change ensures that the queue is empty when it is removed from the hashtable mapping thread id to queue. There are still other refcount operations that can happen during thread destruction, so the call to _Py_queue_destroy will probably need to be moved. See #50
1 parent 605f207 commit 34ecec9
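
The shape of the fix, reduced to its essentials: producers push onto a per-thread queue while holding a read lock on the registry, the owning thread drains its queue by detaching the whole list with a single atomic exchange, and at shutdown the queue is unregistered only after it has been observed empty while the write lock is held, retrying otherwise. The sketch below illustrates that pattern only; it is not the code from this commit. The names (node, queue, registry_lock, registered, merge_node) are invented, C11 atomics stand in for the _Py_atomic_* helpers, and the thread-id-to-queue hashtable is reduced to a single pointer.

/*
 * Illustrative sketch only -- not pyrefcnt.c.  All names are invented;
 * C11 atomics stand in for _Py_atomic_*; the tid -> queue hashtable is
 * reduced to one "registered" pointer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

typedef struct node {
    struct node *next;              /* plays the role of ob_tid reused as a link */
} node;

typedef struct {
    _Atomic(node *) head;           /* lock-free LIFO of queued nodes */
} queue;

static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;
static queue *registered;           /* stand-in for the tid -> queue hashtable */

/* Stand-in for the real per-node work (_Py_ExplicitMergeRefcount). */
static void
merge_node(node *n)
{
    (void)n;
}

/* Producer: look up the queue and push while still holding the read lock,
 * so the queue cannot be unregistered between the lookup and the push
 * (mirrors the reordering in _Py_queue_object). */
static int
queue_push(node *n)
{
    int pushed = 0;
    pthread_rwlock_rdlock(&registry_lock);
    queue *q = registered;
    if (q != NULL) {
        node *old = atomic_load_explicit(&q->head, memory_order_relaxed);
        do {
            n->next = old;
        } while (!atomic_compare_exchange_weak(&q->head, &old, n));
        pushed = 1;
    }
    pthread_rwlock_unlock(&registry_lock);
    return pushed;                  /* 0: no queue; caller must merge the node itself */
}

/* Owner: detach the whole list with one exchange and walk it privately
 * (mirrors the new _Py_queue_process loop). */
static void
queue_process(queue *q)
{
    node *head = atomic_exchange(&q->head, NULL);
    while (head != NULL) {
        node *next = head->next;
        merge_node(head);
        head = next;
    }
}

/* Shutdown: unregister the queue only once it is observed empty while the
 * write lock is held; a push that raced in forces another drain pass
 * (mirrors the retry loop added to _Py_queue_destroy). */
static void
queue_destroy(queue *q)
{
    for (;;) {
        queue_process(q);
        pthread_rwlock_wrlock(&registry_lock);
        if (atomic_load(&q->head) != NULL) {
            pthread_rwlock_unlock(&registry_lock);
            continue;               /* a producer slipped in; drain again */
        }
        registered = NULL;          /* empty and unreachable: safe to tear down */
        pthread_rwlock_unlock(&registry_lock);
        return;
    }
}

In this sketch a thread would register its queue at startup by setting registered; queue_push returning 0 corresponds to the _Py_ExplicitMergeRefcount fallback taken in _Py_queue_object when no queue is found for the owning thread.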

1 file changed

Python/pyrefcnt.c

Lines changed: 20 additions & 31 deletions
@@ -22,23 +22,6 @@ _Py_RefcntQueue_Push(PyThreadState *tstate, PyObject *ob)
     }
 }
 
-// NOTE: ABA problem
-static PyObject *
-_Py_RefcntQueue_Pop(PyThreadState *tstate)
-{
-    PyObject **object_queue = &tstate->object_queue;
-    for (;;) {
-        PyObject *head = _Py_atomic_load_ptr_relaxed(object_queue);
-        if (!head) {
-            return NULL;
-        }
-        PyObject *next = _Py_atomic_load_ptr_relaxed(&head->ob_tid);
-        if (_Py_atomic_compare_exchange_ptr(object_queue, head, next)) {
-            return head;
-        }
-    }
-}
-
 void
 _Py_queue_object(PyObject *ob)
 {
@@ -68,26 +51,25 @@ _Py_queue_object(PyObject *ob)
         return;
     }
 
-    PyThreadState *target_tstate = NULL;
-
     int err;
     if ((err = pthread_rwlock_rdlock(&interp->object_queues_lk)) != 0) {
         Py_FatalError("_Py_queue_object: unable to lock");
         return;
     }
 
+    PyThreadState *target_tstate = NULL;
     _Py_hashtable_entry_t *entry = _Py_HASHTABLE_GET_ENTRY(ht, ob_tid);
     if (entry) {
         _Py_HASHTABLE_ENTRY_READ_DATA(ht, entry, target_tstate);
     }
 
-    pthread_rwlock_unlock(&interp->object_queues_lk);
-
     if (target_tstate) {
         _Py_RefcntQueue_Push(target_tstate, ob);
     }
-    else {
-        // printf("NO queue for %ld\n", ob_tid);
+
+    pthread_rwlock_unlock(&interp->object_queues_lk);
+
+    if (!target_tstate) {
         _Py_ExplicitMergeRefcount(ob);
     }
 
@@ -98,9 +80,11 @@ _Py_queue_process(PyThreadState *tstate)
 {
     assert(tstate);
 
-    PyObject *ob;
-    while ((ob = _Py_RefcntQueue_Pop(tstate)) != NULL) {
-        _Py_ExplicitMergeRefcount(ob);
+    PyObject *head = _Py_atomic_exchange_ptr(&tstate->object_queue, NULL);
+    while (head) {
+        PyObject *next = (PyObject *)head->ob_tid;
+        _Py_ExplicitMergeRefcount(head);
+        head = next;
     }
 }
 
@@ -131,14 +115,21 @@ _Py_queue_destroy(PyThreadState *tstate)
     PyInterpreterState *interp = tstate->interp;
     assert(interp);
 
+    _Py_hashtable_t *ht = interp->object_queues;
+    uint64_t tid = tstate->fast_thread_id;
+
+retry:
+    _Py_queue_process(tstate);
+
     if (pthread_rwlock_wrlock(&interp->object_queues_lk) != 0) {
         Py_FatalError("_Py_queue_destroy: unable to lock");
         return;
     }
 
-    _Py_hashtable_t *ht = interp->object_queues;
-    uint64_t tid = tstate->fast_thread_id;
-    // printf("destroying queue for %ld\n", tid);
+    if (tstate->object_queue) {
+        pthread_rwlock_unlock(&interp->object_queues_lk);
+        goto retry;
+    }
 
     PyThreadState *value = NULL;
     if (!_Py_HASHTABLE_POP(ht, tid, value)) {
@@ -147,6 +138,4 @@ _Py_queue_destroy(PyThreadState *tstate)
     assert(value == tstate);
 
     pthread_rwlock_unlock(&interp->object_queues_lk);
-
-    _Py_queue_process(tstate);
 }
