@@ -135,14 +135,39 @@ static inline void _Py_RefcntAdd(PyObject* op, Py_ssize_t n)
         _Py_INCREF_IMMORTAL_STAT_INC();
         return;
     }
+#ifndef Py_GIL_DISABLED
     Py_ssize_t refcnt = _Py_REFCNT(op);
     Py_ssize_t new_refcnt = refcnt + n;
     if (new_refcnt >= (Py_ssize_t)_Py_IMMORTAL_MINIMUM_REFCNT) {
         new_refcnt = _Py_IMMORTAL_INITIAL_REFCNT;
     }
-    Py_SET_REFCNT(op, new_refcnt);
-#ifdef Py_REF_DEBUG
+# if SIZEOF_VOID_P > 4
+    op->ob_refcnt = (PY_UINT32_T)new_refcnt;
+# else
+    op->ob_refcnt = new_refcnt;
+# endif
+# ifdef Py_REF_DEBUG
     _Py_AddRefTotal(_PyThreadState_GET(), new_refcnt - refcnt);
+# endif
+#else
+    if (_Py_IsOwnedByCurrentThread(op)) {
+        uint32_t local = op->ob_ref_local;
+        Py_ssize_t refcnt = (Py_ssize_t)local + n;
+# if PY_SSIZE_T_MAX > UINT32_MAX
+        if (refcnt > (Py_ssize_t)UINT32_MAX) {
+            // Make the object immortal if the 32-bit local reference count
+            // would overflow.
+            refcnt = _Py_IMMORTAL_REFCNT_LOCAL;
+        }
+# endif
+        _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, (uint32_t)refcnt);
+    }
+    else {
+        _Py_atomic_add_ssize(&op->ob_ref_shared, (n << _Py_REF_SHARED_SHIFT));
+    }
+# ifdef Py_REF_DEBUG
+    _Py_AddRefTotal(_PyThreadState_GET(), n);
+# endif
 #endif
     // Although the ref count was increased by `n` (which may be greater than 1)
     // it is only a single increment (i.e. addition) operation, so only 1 refcnt
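For readers unfamiliar with the free-threaded build, the Py_GIL_DISABLED branch added above relies on CPython's split reference count: a 32-bit ob_ref_local field that only the owning thread touches, plus an ob_ref_shared field that other threads update atomically, with the addend shifted left by _Py_REF_SHARED_SHIFT so the low bits stay free for state flags. The C11 sketch below is a simplified, self-contained model of that idea, assuming a toolchain with <threads.h>; the names obj_t, REF_SHARED_SHIFT, and refcnt_add are hypothetical stand-ins, and it omits the immortalization and overflow handling that the real code performs.

/* Simplified model of a split ("biased") reference count.  Hypothetical
 * names: obj_t, REF_SHARED_SHIFT, refcnt_add -- not the CPython API. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

#define REF_SHARED_SHIFT 2          /* low bits reserved for state flags */

typedef struct {
    thrd_t owner;                   /* thread that owns the object */
    uint32_t ref_local;             /* owner-only count: no atomics needed */
    _Atomic intptr_t ref_shared;    /* contributions from other threads */
} obj_t;

static void refcnt_add(obj_t *op, intptr_t n)
{
    if (thrd_equal(op->owner, thrd_current())) {
        /* Owning thread: a plain update of the local field suffices. */
        op->ref_local += (uint32_t)n;
    }
    else {
        /* Any other thread: atomic add to the shared field, scaled so the
         * flag bits below REF_SHARED_SHIFT are left untouched. */
        atomic_fetch_add_explicit(&op->ref_shared,
                                  (intptr_t)(n << REF_SHARED_SHIFT),
                                  memory_order_relaxed);
    }
}

int main(void)
{
    obj_t o = { .owner = thrd_current(), .ref_local = 1, .ref_shared = 0 };
    refcnt_add(&o, 3);              /* owner path: ref_local becomes 4 */
    printf("local=%u shared=%lld\n", (unsigned)o.ref_local,
           (long long)atomic_load(&o.ref_shared));
    return 0;
}

The point of the split is that the common case, the owning thread adjusting its own count, pays no atomic read-modify-write cost; only cross-thread references go through atomics, and the runtime can later combine the two fields whenever it needs the true total.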