diff --git a/Include/internal/pycore_object.h b/Include/internal/pycore_object.h
index 81548f819198e3..4bef71053896c6 100644
--- a/Include/internal/pycore_object.h
+++ b/Include/internal/pycore_object.h
@@ -19,7 +19,7 @@ PyAPI_FUNC(int) _PyDict_CheckConsistency(PyObject *mp, int check_content);
  * NB: While the object is tracked by the collector, it must be safe to call the
  * ob_traverse method.
  *
- * Internal note: _PyRuntime.gc.generation0->_gc_prev doesn't have any bit flags
+ * Internal note: PyInterpreterState.gc.generation0->_gc_prev doesn't have any bit flags
  * because it's not object header. So we don't use _PyGCHead_PREV() and
  * _PyGCHead_SET_PREV() for it to avoid unnecessary bitwise operations.
  *
@@ -38,11 +38,12 @@ static inline void _PyObject_GC_TRACK_impl(const char *filename, int lineno,
                           "object is in generation which is garbage collected",
                           filename, lineno, "_PyObject_GC_TRACK");
 
-    PyGC_Head *last = (PyGC_Head*)(_PyRuntime.gc.generation0->_gc_prev);
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    PyGC_Head *last = (PyGC_Head*)(interp->gc.generation0->_gc_prev);
     _PyGCHead_SET_NEXT(last, gc);
     _PyGCHead_SET_PREV(gc, last);
-    _PyGCHead_SET_NEXT(gc, _PyRuntime.gc.generation0);
-    _PyRuntime.gc.generation0->_gc_prev = (uintptr_t)gc;
+    _PyGCHead_SET_NEXT(gc, interp->gc.generation0);
+    interp->gc.generation0->_gc_prev = (uintptr_t)gc;
 }
 
 #define _PyObject_GC_TRACK(op) \
diff --git a/Include/internal/pycore_pylifecycle.h b/Include/internal/pycore_pylifecycle.h
index 7144bbcda7cb72..a0ce641fd0f488 100644
--- a/Include/internal/pycore_pylifecycle.h
+++ b/Include/internal/pycore_pylifecycle.h
@@ -73,7 +73,7 @@ extern void PyAsyncGen_Fini(void);
 extern void _PyExc_Fini(void);
 extern void _PyImport_Fini(void);
 extern void _PyImport_Fini2(void);
-extern void _PyGC_Fini(_PyRuntimeState *runtime);
+extern void _PyGC_Fini(PyInterpreterState *interp);
 extern void _PyType_Fini(void);
 extern void _Py_HashRandomization_Fini(void);
 extern void _PyUnicode_Fini(void);
@@ -89,7 +89,7 @@ extern void _PyGILState_Init(
     PyThreadState *tstate);
 extern void _PyGILState_Fini(_PyRuntimeState *runtime);
 
-PyAPI_FUNC(void) _PyGC_DumpShutdownStats(_PyRuntimeState *runtime);
+PyAPI_FUNC(void) _PyGC_DumpShutdownStats(PyInterpreterState *interp);
 
 PyAPI_FUNC(_PyInitError) _Py_PreInitializeFromPyArgv(
     const _PyPreConfig *src_config,
diff --git a/Include/internal/pycore_pystate.h b/Include/internal/pycore_pystate.h
index 69ceecba40dac5..a0bcd196d7972e 100644
--- a/Include/internal/pycore_pystate.h
+++ b/Include/internal/pycore_pystate.h
@@ -92,6 +92,7 @@ struct _is {
     uint64_t tstate_next_unique_id;
 
     struct _warnings_runtime_state warnings;
+    struct _gc_runtime_state gc;
 };
 
 PyAPI_FUNC(struct _is*) _PyInterpreterState_LookUpID(PY_INT64_T);
@@ -180,7 +181,6 @@ typedef struct pyruntimestate {
     void (*exitfuncs[NEXITFUNCS])(void);
     int nexitfuncs;
 
-    struct _gc_runtime_state gc;
     struct _ceval_runtime_state ceval;
     struct _gilstate_runtime_state gilstate;
 
diff --git a/Misc/NEWS.d/next/Core and Builtins/2019-05-09-11-37-28.bpo-36854.dpBVTw.rst b/Misc/NEWS.d/next/Core and Builtins/2019-05-09-11-37-28.bpo-36854.dpBVTw.rst
new file mode 100644
index 00000000000000..6e32cb12b2228b
--- /dev/null
+++ b/Misc/NEWS.d/next/Core and Builtins/2019-05-09-11-37-28.bpo-36854.dpBVTw.rst
@@ -0,0 +1,8 @@
+Move GC runtime state from _PyRuntimeState to PyInterpreterState. As part
+of this, Python threads (created by the threading module) are marked as
+"deleted" earlier in finalization. That marker is how joining threads get
+unblocked. It was happening in `PyThreadState_Delete()` but now it will
+happen in `PyThreadState_Clear()`. This is necessary to ensure that the
+callback gets called *before* much interpreter/runtime finalization happens.
+(This change could impact daemon threads, but we already can't guarantee
+behavior for those once finalization starts.)
diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c
index be9b73a8446073..ed6d9c669a6ec6 100644
--- a/Modules/gcmodule.c
+++ b/Modules/gcmodule.c
@@ -1257,7 +1257,8 @@ static PyObject *
 gc_enable_impl(PyObject *module)
 /*[clinic end generated code: output=45a427e9dce9155c input=81ac4940ca579707]*/
 {
-    _PyRuntime.gc.enabled = 1;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    interp->gc.enabled = 1;
     Py_RETURN_NONE;
 }
 
@@ -1271,7 +1272,8 @@ static PyObject *
 gc_disable_impl(PyObject *module)
 /*[clinic end generated code: output=97d1030f7aa9d279 input=8c2e5a14e800d83b]*/
 {
-    _PyRuntime.gc.enabled = 0;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    interp->gc.enabled = 0;
     Py_RETURN_NONE;
 }
 
@@ -1285,7 +1287,8 @@ static int
 gc_isenabled_impl(PyObject *module)
 /*[clinic end generated code: output=1874298331c49130 input=30005e0422373b31]*/
 {
-    return _PyRuntime.gc.enabled;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    return interp->gc.enabled;
 }
 
 /*[clinic input]
@@ -1312,7 +1315,8 @@ gc_collect_impl(PyObject *module, int generation)
         return -1;
     }
 
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     Py_ssize_t n;
     if (state->collecting) {
         /* already collecting, don't do anything */
@@ -1348,7 +1352,8 @@ static PyObject *
 gc_set_debug_impl(PyObject *module, int flags)
 /*[clinic end generated code: output=7c8366575486b228 input=5e5ce15e84fbed15]*/
 {
-    _PyRuntime.gc.debug = flags;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    interp->gc.debug = flags;
     Py_RETURN_NONE;
 }
 
@@ -1363,7 +1368,8 @@ static int
 gc_get_debug_impl(PyObject *module)
 /*[clinic end generated code: output=91242f3506cd1e50 input=91a101e1c3b98366]*/
 {
-    return _PyRuntime.gc.debug;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    return interp->gc.debug;
 }
 
 PyDoc_STRVAR(gc_set_thresh__doc__,
@@ -1375,7 +1381,8 @@ PyDoc_STRVAR(gc_set_thresh__doc__,
 static PyObject *
 gc_set_threshold(PyObject *self, PyObject *args)
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     if (!PyArg_ParseTuple(args, "i|ii:set_threshold",
                           &state->generations[0].threshold,
                           &state->generations[1].threshold,
@@ -1398,7 +1405,8 @@ static PyObject *
 gc_get_threshold_impl(PyObject *module)
 /*[clinic end generated code: output=7902bc9f41ecbbd8 input=286d79918034d6e6]*/
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     return Py_BuildValue("(iii)",
                          state->generations[0].threshold,
                          state->generations[1].threshold,
@@ -1415,7 +1423,8 @@ static PyObject *
 gc_get_count_impl(PyObject *module)
 /*[clinic end generated code: output=354012e67b16398f input=a392794a08251751]*/
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     return Py_BuildValue("(iii)",
                          state->generations[0].count,
                          state->generations[1].count,
@@ -1462,7 +1471,8 @@ gc_get_referrers(PyObject *self, PyObject *args)
     PyObject *result = PyList_New(0);
     if (!result) return NULL;
 
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     for (i = 0; i < NUM_GENERATIONS; i++) {
         if (!(gc_referrers_for(args, GEN_HEAD(state, i), result))) {
             Py_DECREF(result);
@@ -1526,7 +1536,8 @@ gc_get_objects_impl(PyObject *module, Py_ssize_t generation)
 {
     int i;
     PyObject* result;
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
 
     result = PyList_New(0);
     if (result == NULL) {
@@ -1584,7 +1595,8 @@ gc_get_stats_impl(PyObject *module)
 
     /* To get consistent values despite allocations while constructing
        the result list, we use a snapshot of the running stats. */
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     for (i = 0; i < NUM_GENERATIONS; i++) {
         stats[i] = state->generation_stats[i];
     }
@@ -1656,7 +1668,8 @@ static PyObject *
 gc_freeze_impl(PyObject *module)
 /*[clinic end generated code: output=502159d9cdc4c139 input=b602b16ac5febbe5]*/
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     for (int i = 0; i < NUM_GENERATIONS; ++i) {
         gc_list_merge(GEN_HEAD(state, i), &state->permanent_generation.head);
         state->generations[i].count = 0;
@@ -1676,7 +1689,8 @@ static PyObject *
 gc_unfreeze_impl(PyObject *module)
 /*[clinic end generated code: output=1c15f2043b25e169 input=2dd52b170f4cef6c]*/
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     gc_list_merge(&state->permanent_generation.head, GEN_HEAD(state, NUM_GENERATIONS-1));
     Py_RETURN_NONE;
 }
@@ -1691,7 +1705,8 @@ static Py_ssize_t
 gc_get_freeze_count_impl(PyObject *module)
 /*[clinic end generated code: output=61cbd9f43aa032e1 input=45ffbc65cfe2a6ed]*/
 {
-    return gc_list_size(&_PyRuntime.gc.permanent_generation.head);
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    return gc_list_size(&interp->gc.permanent_generation.head);
 }
 
 
@@ -1762,7 +1777,8 @@ PyInit_gc(void)
         return NULL;
     }
 
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     if (state->garbage == NULL) {
         state->garbage = PyList_New(0);
         if (state->garbage == NULL)
@@ -1795,7 +1811,8 @@ PyInit_gc(void)
 Py_ssize_t
 PyGC_Collect(void)
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     if (!state->enabled) {
         return 0;
     }
@@ -1828,7 +1845,8 @@ _PyGC_CollectNoFail(void)
 {
     assert(!PyErr_Occurred());
 
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     Py_ssize_t n;
 
     /* Ideally, this function is only called on interpreter shutdown,
@@ -1849,9 +1867,9 @@ _PyGC_CollectNoFail(void)
 }
 
 void
-_PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
+_PyGC_DumpShutdownStats(PyInterpreterState *interp)
 {
-    struct _gc_runtime_state *state = &runtime->gc;
+    struct _gc_runtime_state *state = &interp->gc;
     if (!(state->debug & DEBUG_SAVEALL)
         && state->garbage != NULL && PyList_GET_SIZE(state->garbage) > 0) {
         const char *message;
@@ -1886,9 +1904,9 @@ _PyGC_DumpShutdownStats(_PyRuntimeState *runtime)
 }
 
 void
-_PyGC_Fini(_PyRuntimeState *runtime)
+_PyGC_Fini(PyInterpreterState *interp)
 {
-    struct _gc_runtime_state *state = &runtime->gc;
+    struct _gc_runtime_state *state = &interp->gc;
     Py_CLEAR(state->garbage);
     Py_CLEAR(state->callbacks);
 }
@@ -1930,7 +1948,8 @@ PyObject_GC_UnTrack(void *op_raw)
 static PyObject *
 _PyObject_GC_Alloc(int use_calloc, size_t basicsize)
 {
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     PyObject *op;
     PyGC_Head *g;
     size_t size;
@@ -2023,7 +2042,8 @@ PyObject_GC_Del(void *op)
     if (_PyObject_GC_IS_TRACKED(op)) {
         gc_list_remove(g);
     }
-    struct _gc_runtime_state *state = &_PyRuntime.gc;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    struct _gc_runtime_state *state = &interp->gc;
     if (state->generations[0].count > 0) {
         state->generations[0].count--;
     }
diff --git a/Objects/object.c b/Objects/object.c
index cb727943cb342e..c66aa16be53c31 100644
--- a/Objects/object.c
+++ b/Objects/object.c
@@ -2052,8 +2052,9 @@ _PyTrash_deposit_object(PyObject *op)
     _PyObject_ASSERT(op, PyObject_IS_GC(op));
     _PyObject_ASSERT(op, !_PyObject_GC_IS_TRACKED(op));
     _PyObject_ASSERT(op, op->ob_refcnt == 0);
-    _PyGCHead_SET_PREV(_Py_AS_GC(op), _PyRuntime.gc.trash_delete_later);
-    _PyRuntime.gc.trash_delete_later = op;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    _PyGCHead_SET_PREV(_Py_AS_GC(op), interp->gc.trash_delete_later);
+    interp->gc.trash_delete_later = op;
 }
 
 /* The equivalent API, using per-thread state recursion info */
@@ -2074,11 +2075,12 @@ _PyTrash_thread_deposit_object(PyObject *op)
 void
 _PyTrash_destroy_chain(void)
 {
-    while (_PyRuntime.gc.trash_delete_later) {
-        PyObject *op = _PyRuntime.gc.trash_delete_later;
+    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
+    while (interp->gc.trash_delete_later) {
+        PyObject *op = interp->gc.trash_delete_later;
         destructor dealloc = Py_TYPE(op)->tp_dealloc;
 
-        _PyRuntime.gc.trash_delete_later =
+        interp->gc.trash_delete_later =
             (PyObject*) _PyGCHead_PREV(_Py_AS_GC(op));
 
         /* Call the deallocator directly. This used to try to
@@ -2088,9 +2090,9 @@ _PyTrash_destroy_chain(void)
          * up distorting allocation statistics.
          */
         _PyObject_ASSERT(op, op->ob_refcnt == 0);
-        ++_PyRuntime.gc.trash_delete_nesting;
+        ++interp->gc.trash_delete_nesting;
         (*dealloc)(op);
-        --_PyRuntime.gc.trash_delete_nesting;
+        --interp->gc.trash_delete_nesting;
     }
 }
 
diff --git a/Python/import.c b/Python/import.c
index 9290f39c0ae284..39ed87b874c2b0 100644
--- a/Python/import.c
+++ b/Python/import.c
@@ -535,7 +535,7 @@ PyImport_Cleanup(void)
     _PyGC_CollectNoFail();
     /* Dump GC stats before it's too late, since it uses the warnings
       machinery. */
-    _PyGC_DumpShutdownStats(&_PyRuntime);
+    _PyGC_DumpShutdownStats(interp);
 
     /* Now, if there are any modules left alive, clear their globals to
       minimize potential leaks. All C extension modules actually end
diff --git a/Python/pylifecycle.c b/Python/pylifecycle.c
index 32902aa0d597d0..b314906fb685f9 100644
--- a/Python/pylifecycle.c
+++ b/Python/pylifecycle.c
@@ -511,6 +511,8 @@ pycore_create_interpreter(_PyRuntimeState *runtime,
     }
     *interp_p = interp;
+    _PyGC_Initialize(&interp->gc);
+
     _PyInitError err = _PyCoreConfig_Copy(&interp->core_config, core_config);
     if (_Py_INIT_FAILED(err)) {
         return err;
     }
@@ -1131,16 +1133,16 @@ Py_FinalizeEx(void)
         return status;
     }
 
+    /* Get current thread state and interpreter pointer */
+    PyThreadState *tstate = _PyThreadState_GET();
+    PyInterpreterState *interp = tstate->interp;
+
     // Wrap up existing "threading"-module-created, non-daemon threads.
     wait_for_thread_shutdown();
 
     // Make any remaining pending calls.
     _Py_FinishPendingCalls();
 
-    /* Get current thread state and interpreter pointer */
-    PyThreadState *tstate = _PyThreadState_GET();
-    PyInterpreterState *interp = tstate->interp;
-
     /* The interpreter is still entirely intact at this point, and the
      * exit funcs may be relying on that. In particular, if some thread
      * or exit func is still waiting to do an import, the import machinery
@@ -1287,7 +1289,7 @@ Py_FinalizeEx(void)
     PyFloat_Fini();
     PyDict_Fini();
     PySlice_Fini();
-    _PyGC_Fini(runtime);
+    _PyGC_Fini(interp);
     _PyWarnings_Fini(interp);
     _Py_HashRandomization_Fini();
     _PyArg_Fini();
@@ -1380,6 +1382,8 @@ new_interpreter(PyThreadState **tstate_p)
         return _Py_INIT_OK();
     }
 
+    _PyGC_Initialize(&interp->gc);
+
     PyThreadState *tstate = PyThreadState_New(interp);
     if (tstate == NULL) {
         PyInterpreterState_Delete(interp);
@@ -1558,6 +1562,9 @@ Py_EndInterpreter(PyThreadState *tstate)
     PyImport_Cleanup();
     PyInterpreterState_Clear(interp);
+
+    _PyGC_Fini(interp);
+
     PyThreadState_Swap(NULL);
     PyInterpreterState_Delete(interp);
 }
 
@@ -2187,6 +2194,9 @@ wait_for_thread_shutdown(void)
         Py_DECREF(result);
     }
     Py_DECREF(threading);
+
+    // All threading module threads are marked as "done" later
+    // in PyThreadState_Clear().
 }
 
 #define NEXITFUNCS 32
diff --git a/Python/pystate.c b/Python/pystate.c
index 44acfed6b98355..8e96174ecdd5ca 100644
--- a/Python/pystate.c
+++ b/Python/pystate.c
@@ -47,7 +47,6 @@ _PyRuntimeState_Init_impl(_PyRuntimeState *runtime)
 {
     memset(runtime, 0, sizeof(*runtime));
 
-    _PyGC_Initialize(&runtime->gc);
     _PyEval_Initialize(&runtime->ceval);
 
     runtime->preconfig = _PyPreConfig_INIT;
@@ -782,6 +781,13 @@ PyThreadState_Clear(PyThreadState *tstate)
     Py_CLEAR(tstate->async_gen_finalizer);
 
     Py_CLEAR(tstate->context);
+
+    if (tstate->on_delete != NULL) {
+        // This will unblock any joining threads.
+        tstate->on_delete(tstate->on_delete_data);
+        tstate->on_delete = NULL;
+        tstate->on_delete_data = NULL;
+    }
 }
 
 
@@ -805,6 +811,8 @@ tstate_delete_common(_PyRuntimeState *runtime, PyThreadState *tstate)
         tstate->next->prev = tstate->prev;
     HEAD_UNLOCK(runtime);
     if (tstate->on_delete != NULL) {
+        // This will unblock any joining threads.
+        // We also do this in PyThreadState_Clear(), but do it here to be sure.
        tstate->on_delete(tstate->on_delete_data);
     }
     PyMem_RawFree(tstate);
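
Note on the pattern (not part of the patch): every call site above that used to reach into the process-global _PyRuntime.gc now looks up the running interpreter and uses that interpreter's own GC state. Below is a minimal sketch of that access pattern, assuming the post-patch internal headers where PyInterpreterState has a gc member; the helper name get_gc_state() is hypothetical, the patch simply inlines the two lines at each call site.

/* Illustrative sketch only, not part of the patch. Assumes the post-patch
 * layout (PyInterpreterState has a gc member, initialized via
 * _PyGC_Initialize()) and that a current thread state exists, since
 * _PyInterpreterState_GET_UNSAFE() performs no error checking. */
#include "Python.h"
#include "pycore_pystate.h"   /* PyInterpreterState, _PyInterpreterState_GET_UNSAFE() */

/* Hypothetical helper; the patch inlines these two lines at each call site. */
static struct _gc_runtime_state *
get_gc_state(void)
{
    /* Before the patch: struct _gc_runtime_state *state = &_PyRuntime.gc; */
    PyInterpreterState *interp = _PyInterpreterState_GET_UNSAFE();
    return &interp->gc;
}

The design point is that the GC state is no longer shared across interpreters: each PyInterpreterState carries its own generations, thresholds, and garbage list, which is why _PyGC_Initialize() and _PyGC_Fini() move from runtime init/fini into interpreter creation (pycore_create_interpreter(), new_interpreter()) and teardown (Py_EndInterpreter(), Py_FinalizeEx()).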