1
2 /* Thread and interpreter state structures and their interfaces */
3
4 #include "Python.h"
5 #include "pycore_ceval.h"
6 #include "pycore_code.h" // stats
7 #include "pycore_frame.h"
8 #include "pycore_initconfig.h"
9 #include "pycore_object.h" // _PyType_InitCache()
10 #include "pycore_pyerrors.h"
11 #include "pycore_pylifecycle.h"
12 #include "pycore_pymem.h" // _PyMem_SetDefaultAllocator()
13 #include "pycore_pystate.h" // _PyThreadState_GET()
14 #include "pycore_runtime_init.h" // _PyRuntimeState_INIT
15 #include "pycore_sysmodule.h"
16
17 /* --------------------------------------------------------------------------
18 CAUTION
19
20 Always use PyMem_RawMalloc() and PyMem_RawFree() directly in this file. A
21 number of these functions are advertised as safe to call when the GIL isn't
22 held, and in a debug build Python redirects (e.g.) PyMem_NEW (etc) to Python's
23 debugging obmalloc functions. Those aren't thread-safe (they rely on the GIL
24 to avoid the expense of doing their own locking).
25 -------------------------------------------------------------------------- */
26
27 #ifdef HAVE_DLOPEN
28 #ifdef HAVE_DLFCN_H
29 #include <dlfcn.h>
30 #endif
31 #if !HAVE_DECL_RTLD_LAZY
32 #define RTLD_LAZY 1
33 #endif
34 #endif
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 #define _PyRuntimeGILState_GetThreadState(gilstate) \
41 ((PyThreadState*)_Py_atomic_load_relaxed(&(gilstate)->tstate_current))
42 #define _PyRuntimeGILState_SetThreadState(gilstate, value) \
43 _Py_atomic_store_relaxed(&(gilstate)->tstate_current, \
44 (uintptr_t)(value))
45
46 /* Forward declarations */
47 static PyThreadState *_PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate);
48 static void _PyThreadState_Delete(PyThreadState *tstate, int check_current);
49
50 /* Suppress deprecation warning for PyBytesObject.ob_shash */
51 _Py_COMP_DIAG_PUSH
52 _Py_COMP_DIAG_IGNORE_DEPR_DECLS
53 /* We use "initial" if the runtime gets re-used
54 (e.g. Py_Finalize() followed by Py_Initialize(). */
55 static const _PyRuntimeState initial = _PyRuntimeState_INIT;
56 _Py_COMP_DIAG_POP
57
58 static int
alloc_for_runtime(PyThread_type_lock * plock1,PyThread_type_lock * plock2,PyThread_type_lock * plock3)59 alloc_for_runtime(PyThread_type_lock *plock1, PyThread_type_lock *plock2,
60 PyThread_type_lock *plock3)
61 {
62 /* Force default allocator, since _PyRuntimeState_Fini() must
63 use the same allocator than this function. */
64 PyMemAllocatorEx old_alloc;
65 _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
66
67 PyThread_type_lock lock1 = PyThread_allocate_lock();
68 if (lock1 == NULL) {
69 return -1;
70 }
71
72 PyThread_type_lock lock2 = PyThread_allocate_lock();
73 if (lock2 == NULL) {
74 PyThread_free_lock(lock1);
75 return -1;
76 }
77
78 PyThread_type_lock lock3 = PyThread_allocate_lock();
79 if (lock3 == NULL) {
80 PyThread_free_lock(lock1);
81 PyThread_free_lock(lock2);
82 return -1;
83 }
84
85 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
86
87 *plock1 = lock1;
88 *plock2 = lock2;
89 *plock3 = lock3;
90 return 0;
91 }
92
93 static void
init_runtime(_PyRuntimeState * runtime,void * open_code_hook,void * open_code_userdata,_Py_AuditHookEntry * audit_hook_head,Py_ssize_t unicode_next_index,PyThread_type_lock unicode_ids_mutex,PyThread_type_lock interpreters_mutex,PyThread_type_lock xidregistry_mutex)94 init_runtime(_PyRuntimeState *runtime,
95 void *open_code_hook, void *open_code_userdata,
96 _Py_AuditHookEntry *audit_hook_head,
97 Py_ssize_t unicode_next_index,
98 PyThread_type_lock unicode_ids_mutex,
99 PyThread_type_lock interpreters_mutex,
100 PyThread_type_lock xidregistry_mutex)
101 {
102 if (runtime->_initialized) {
103 Py_FatalError("runtime already initialized");
104 }
105 assert(!runtime->preinitializing &&
106 !runtime->preinitialized &&
107 !runtime->core_initialized &&
108 !runtime->initialized);
109
110 runtime->open_code_hook = open_code_hook;
111 runtime->open_code_userdata = open_code_userdata;
112 runtime->audit_hook_head = audit_hook_head;
113
114 _PyEval_InitRuntimeState(&runtime->ceval);
115
116 PyPreConfig_InitPythonConfig(&runtime->preconfig);
117
118 runtime->interpreters.mutex = interpreters_mutex;
119
120 runtime->xidregistry.mutex = xidregistry_mutex;
121
122 // Set it to the ID of the main thread of the main interpreter.
123 runtime->main_thread = PyThread_get_thread_ident();
124
125 runtime->unicode_ids.next_index = unicode_next_index;
126 runtime->unicode_ids.lock = unicode_ids_mutex;
127
128 runtime->_initialized = 1;
129 }
130
131 PyStatus
_PyRuntimeState_Init(_PyRuntimeState * runtime)132 _PyRuntimeState_Init(_PyRuntimeState *runtime)
133 {
134 /* We preserve the hook across init, because there is
135 currently no public API to set it between runtime
136 initialization and interpreter initialization. */
137 void *open_code_hook = runtime->open_code_hook;
138 void *open_code_userdata = runtime->open_code_userdata;
139 _Py_AuditHookEntry *audit_hook_head = runtime->audit_hook_head;
140 // bpo-42882: Preserve next_index value if Py_Initialize()/Py_Finalize()
141 // is called multiple times.
142 Py_ssize_t unicode_next_index = runtime->unicode_ids.next_index;
143
144 PyThread_type_lock lock1, lock2, lock3;
145 if (alloc_for_runtime(&lock1, &lock2, &lock3) != 0) {
146 return _PyStatus_NO_MEMORY();
147 }
148
149 if (runtime->_initialized) {
150 // Py_Initialize() must be running again.
151 // Reset to _PyRuntimeState_INIT.
152 memcpy(runtime, &initial, sizeof(*runtime));
153 }
154 init_runtime(runtime, open_code_hook, open_code_userdata, audit_hook_head,
155 unicode_next_index, lock1, lock2, lock3);
156
157 return _PyStatus_OK();
158 }
159
160 void
_PyRuntimeState_Fini(_PyRuntimeState * runtime)161 _PyRuntimeState_Fini(_PyRuntimeState *runtime)
162 {
163 /* Force the allocator used by _PyRuntimeState_Init(). */
164 PyMemAllocatorEx old_alloc;
165 _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
166 #define FREE_LOCK(LOCK) \
167 if (LOCK != NULL) { \
168 PyThread_free_lock(LOCK); \
169 LOCK = NULL; \
170 }
171
172 FREE_LOCK(runtime->interpreters.mutex);
173 FREE_LOCK(runtime->xidregistry.mutex);
174 FREE_LOCK(runtime->unicode_ids.lock);
175
176 #undef FREE_LOCK
177 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
178 }
179
180 #ifdef HAVE_FORK
181 /* This function is called from PyOS_AfterFork_Child to ensure that
182 newly created child processes do not share locks with the parent. */
183 PyStatus
_PyRuntimeState_ReInitThreads(_PyRuntimeState * runtime)184 _PyRuntimeState_ReInitThreads(_PyRuntimeState *runtime)
185 {
186 // This was initially set in _PyRuntimeState_Init().
187 runtime->main_thread = PyThread_get_thread_ident();
188
189 /* Force default allocator, since _PyRuntimeState_Fini() must
190 use the same allocator than this function. */
191 PyMemAllocatorEx old_alloc;
192 _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
193
194 int reinit_interp = _PyThread_at_fork_reinit(&runtime->interpreters.mutex);
195 int reinit_xidregistry = _PyThread_at_fork_reinit(&runtime->xidregistry.mutex);
196 int reinit_unicode_ids = _PyThread_at_fork_reinit(&runtime->unicode_ids.lock);
197
198 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
199
200 /* bpo-42540: id_mutex is freed by _PyInterpreterState_Delete, which does
201 * not force the default allocator. */
202 int reinit_main_id = _PyThread_at_fork_reinit(&runtime->interpreters.main->id_mutex);
203
204 if (reinit_interp < 0
205 || reinit_main_id < 0
206 || reinit_xidregistry < 0
207 || reinit_unicode_ids < 0)
208 {
209 return _PyStatus_ERR("Failed to reinitialize runtime locks");
210
211 }
212 return _PyStatus_OK();
213 }
214 #endif
215
216 #define HEAD_LOCK(runtime) \
217 PyThread_acquire_lock((runtime)->interpreters.mutex, WAIT_LOCK)
218 #define HEAD_UNLOCK(runtime) \
219 PyThread_release_lock((runtime)->interpreters.mutex)
220
221 /* Forward declaration */
222 static void _PyGILState_NoteThreadState(
223 struct _gilstate_runtime_state *gilstate, PyThreadState* tstate);
224
225 PyStatus
_PyInterpreterState_Enable(_PyRuntimeState * runtime)226 _PyInterpreterState_Enable(_PyRuntimeState *runtime)
227 {
228 struct pyinterpreters *interpreters = &runtime->interpreters;
229 interpreters->next_id = 0;
230
231 /* Py_Finalize() calls _PyRuntimeState_Fini() which clears the mutex.
232 Create a new mutex if needed. */
233 if (interpreters->mutex == NULL) {
234 /* Force default allocator, since _PyRuntimeState_Fini() must
235 use the same allocator than this function. */
236 PyMemAllocatorEx old_alloc;
237 _PyMem_SetDefaultAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
238
239 interpreters->mutex = PyThread_allocate_lock();
240
241 PyMem_SetAllocator(PYMEM_DOMAIN_RAW, &old_alloc);
242
243 if (interpreters->mutex == NULL) {
244 return _PyStatus_ERR("Can't initialize threads for interpreter");
245 }
246 }
247
248 return _PyStatus_OK();
249 }
250
251 static PyInterpreterState *
alloc_interpreter(void)252 alloc_interpreter(void)
253 {
254 return PyMem_RawCalloc(1, sizeof(PyInterpreterState));
255 }
256
257 static void
free_interpreter(PyInterpreterState * interp)258 free_interpreter(PyInterpreterState *interp)
259 {
260 if (!interp->_static) {
261 PyMem_RawFree(interp);
262 }
263 }
264
265 /* Get the interpreter state to a minimal consistent state.
266 Further init happens in pylifecycle.c before it can be used.
267 All fields not initialized here are expected to be zeroed out,
268 e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
269 The runtime state is not manipulated. Instead it is assumed that
270 the interpreter is getting added to the runtime.
271 */
272
273 static void
init_interpreter(PyInterpreterState * interp,_PyRuntimeState * runtime,int64_t id,PyInterpreterState * next,PyThread_type_lock pending_lock)274 init_interpreter(PyInterpreterState *interp,
275 _PyRuntimeState *runtime, int64_t id,
276 PyInterpreterState *next,
277 PyThread_type_lock pending_lock)
278 {
279 if (interp->_initialized) {
280 Py_FatalError("interpreter already initialized");
281 }
282
283 assert(runtime != NULL);
284 interp->runtime = runtime;
285
286 assert(id > 0 || (id == 0 && interp == runtime->interpreters.main));
287 interp->id = id;
288
289 assert(runtime->interpreters.head == interp);
290 assert(next != NULL || (interp == runtime->interpreters.main));
291 interp->next = next;
292
293 _PyEval_InitState(&interp->ceval, pending_lock);
294 _PyGC_InitState(&interp->gc);
295 PyConfig_InitPythonConfig(&interp->config);
296 _PyType_InitCache(interp);
297
298 interp->_initialized = 1;
299 }
300
301 PyInterpreterState *
PyInterpreterState_New(void)302 PyInterpreterState_New(void)
303 {
304 PyInterpreterState *interp;
305 PyThreadState *tstate = _PyThreadState_GET();
306
307 /* tstate is NULL when Py_InitializeFromConfig() calls
308 PyInterpreterState_New() to create the main interpreter. */
309 if (_PySys_Audit(tstate, "cpython.PyInterpreterState_New", NULL) < 0) {
310 return NULL;
311 }
312
313 PyThread_type_lock pending_lock = PyThread_allocate_lock();
314 if (pending_lock == NULL) {
315 if (tstate != NULL) {
316 _PyErr_NoMemory(tstate);
317 }
318 return NULL;
319 }
320
321 /* Don't get runtime from tstate since tstate can be NULL. */
322 _PyRuntimeState *runtime = &_PyRuntime;
323 struct pyinterpreters *interpreters = &runtime->interpreters;
324
325 /* We completely serialize creation of multiple interpreters, since
326 it simplifies things here and blocking concurrent calls isn't a problem.
327 Regardless, we must fully block subinterpreter creation until
328 after the main interpreter is created. */
329 HEAD_LOCK(runtime);
330
331 int64_t id = interpreters->next_id;
332 interpreters->next_id += 1;
333
334 // Allocate the interpreter and add it to the runtime state.
335 PyInterpreterState *old_head = interpreters->head;
336 if (old_head == NULL) {
337 // We are creating the main interpreter.
338 assert(interpreters->main == NULL);
339 assert(id == 0);
340
341 interp = &runtime->_main_interpreter;
342 assert(interp->id == 0);
343 assert(interp->next == NULL);
344 assert(interp->_static);
345
346 interpreters->main = interp;
347 }
348 else {
349 assert(interpreters->main != NULL);
350 assert(id != 0);
351
352 interp = alloc_interpreter();
353 if (interp == NULL) {
354 goto error;
355 }
356 // Set to _PyInterpreterState_INIT.
357 memcpy(interp, &initial._main_interpreter,
358 sizeof(*interp));
359 // We need to adjust any fields that are different from the initial
360 // interpreter (as defined in _PyInterpreterState_INIT):
361 interp->_static = false;
362
363 if (id < 0) {
364 /* overflow or Py_Initialize() not called yet! */
365 if (tstate != NULL) {
366 _PyErr_SetString(tstate, PyExc_RuntimeError,
367 "failed to get an interpreter ID");
368 }
369 goto error;
370 }
371 }
372 interpreters->head = interp;
373
374 init_interpreter(interp, runtime, id, old_head, pending_lock);
375
376 HEAD_UNLOCK(runtime);
377 return interp;
378
379 error:
380 HEAD_UNLOCK(runtime);
381
382 PyThread_free_lock(pending_lock);
383 if (interp != NULL) {
384 free_interpreter(interp);
385 }
386 return NULL;
387 }
388
389
390 static void
interpreter_clear(PyInterpreterState * interp,PyThreadState * tstate)391 interpreter_clear(PyInterpreterState *interp, PyThreadState *tstate)
392 {
393 _PyRuntimeState *runtime = interp->runtime;
394
395 if (_PySys_Audit(tstate, "cpython.PyInterpreterState_Clear", NULL) < 0) {
396 _PyErr_Clear(tstate);
397 }
398
399 // Clear the current/main thread state last.
400 HEAD_LOCK(runtime);
401 PyThreadState *p = interp->threads.head;
402 HEAD_UNLOCK(runtime);
403 while (p != NULL) {
404 // See https://github.com/python/cpython/issues/102126
405 // Must be called without HEAD_LOCK held as it can deadlock
406 // if any finalizer tries to acquire that lock.
407 PyThreadState_Clear(p);
408 HEAD_LOCK(runtime);
409 p = p->next;
410 HEAD_UNLOCK(runtime);
411 }
412
413 Py_CLEAR(interp->audit_hooks);
414
415 PyConfig_Clear(&interp->config);
416 Py_CLEAR(interp->codec_search_path);
417 Py_CLEAR(interp->codec_search_cache);
418 Py_CLEAR(interp->codec_error_registry);
419 Py_CLEAR(interp->modules);
420 Py_CLEAR(interp->modules_by_index);
421 Py_CLEAR(interp->builtins_copy);
422 Py_CLEAR(interp->importlib);
423 Py_CLEAR(interp->import_func);
424 Py_CLEAR(interp->dict);
425 #ifdef HAVE_FORK
426 Py_CLEAR(interp->before_forkers);
427 Py_CLEAR(interp->after_forkers_parent);
428 Py_CLEAR(interp->after_forkers_child);
429 #endif
430
431 _PyAST_Fini(interp);
432 _PyWarnings_Fini(interp);
433 _PyAtExit_Fini(interp);
434
435 // All Python types must be destroyed before the last GC collection. Python
436 // types create a reference cycle to themselves in their in their
437 // PyTypeObject.tp_mro member (the tuple contains the type).
438
439 /* Last garbage collection on this interpreter */
440 _PyGC_CollectNoFail(tstate);
441 _PyGC_Fini(interp);
442
443 /* We don't clear sysdict and builtins until the end of this function.
444 Because clearing other attributes can execute arbitrary Python code
445 which requires sysdict and builtins. */
446 PyDict_Clear(interp->sysdict);
447 PyDict_Clear(interp->builtins);
448 Py_CLEAR(interp->sysdict);
449 Py_CLEAR(interp->builtins);
450
451 // XXX Once we have one allocator per interpreter (i.e.
452 // per-interpreter GC) we must ensure that all of the interpreter's
453 // objects have been cleaned up at the point.
454 }
455
456
457 void
PyInterpreterState_Clear(PyInterpreterState * interp)458 PyInterpreterState_Clear(PyInterpreterState *interp)
459 {
460 // Use the current Python thread state to call audit hooks and to collect
461 // garbage. It can be different than the current Python thread state
462 // of 'interp'.
463 PyThreadState *current_tstate = _PyThreadState_GET();
464
465 interpreter_clear(interp, current_tstate);
466 }
467
468
469 void
_PyInterpreterState_Clear(PyThreadState * tstate)470 _PyInterpreterState_Clear(PyThreadState *tstate)
471 {
472 interpreter_clear(tstate->interp, tstate);
473 }
474
475
476 static void
zapthreads(PyInterpreterState * interp,int check_current)477 zapthreads(PyInterpreterState *interp, int check_current)
478 {
479 PyThreadState *tstate;
480 /* No need to lock the mutex here because this should only happen
481 when the threads are all really dead (XXX famous last words). */
482 while ((tstate = interp->threads.head) != NULL) {
483 _PyThreadState_Delete(tstate, check_current);
484 }
485 }
486
487
488 void
PyInterpreterState_Delete(PyInterpreterState * interp)489 PyInterpreterState_Delete(PyInterpreterState *interp)
490 {
491 _PyRuntimeState *runtime = interp->runtime;
492 struct pyinterpreters *interpreters = &runtime->interpreters;
493 zapthreads(interp, 0);
494
495 _PyEval_FiniState(&interp->ceval);
496
497 /* Delete current thread. After this, many C API calls become crashy. */
498 _PyThreadState_Swap(&runtime->gilstate, NULL);
499
500 HEAD_LOCK(runtime);
501 PyInterpreterState **p;
502 for (p = &interpreters->head; ; p = &(*p)->next) {
503 if (*p == NULL) {
504 Py_FatalError("NULL interpreter");
505 }
506 if (*p == interp) {
507 break;
508 }
509 }
510 if (interp->threads.head != NULL) {
511 Py_FatalError("remaining threads");
512 }
513 *p = interp->next;
514
515 if (interpreters->main == interp) {
516 interpreters->main = NULL;
517 if (interpreters->head != NULL) {
518 Py_FatalError("remaining subinterpreters");
519 }
520 }
521 HEAD_UNLOCK(runtime);
522
523 if (interp->id_mutex != NULL) {
524 PyThread_free_lock(interp->id_mutex);
525 }
526 free_interpreter(interp);
527 }
528
529
530 #ifdef HAVE_FORK
531 /*
532 * Delete all interpreter states except the main interpreter. If there
533 * is a current interpreter state, it *must* be the main interpreter.
534 */
535 PyStatus
_PyInterpreterState_DeleteExceptMain(_PyRuntimeState * runtime)536 _PyInterpreterState_DeleteExceptMain(_PyRuntimeState *runtime)
537 {
538 struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
539 struct pyinterpreters *interpreters = &runtime->interpreters;
540
541 PyThreadState *tstate = _PyThreadState_Swap(gilstate, NULL);
542 if (tstate != NULL && tstate->interp != interpreters->main) {
543 return _PyStatus_ERR("not main interpreter");
544 }
545
546 HEAD_LOCK(runtime);
547 PyInterpreterState *interp = interpreters->head;
548 interpreters->head = NULL;
549 while (interp != NULL) {
550 if (interp == interpreters->main) {
551 interpreters->main->next = NULL;
552 interpreters->head = interp;
553 interp = interp->next;
554 continue;
555 }
556
557 PyInterpreterState_Clear(interp); // XXX must activate?
558 zapthreads(interp, 1);
559 if (interp->id_mutex != NULL) {
560 PyThread_free_lock(interp->id_mutex);
561 }
562 PyInterpreterState *prev_interp = interp;
563 interp = interp->next;
564 free_interpreter(prev_interp);
565 }
566 HEAD_UNLOCK(runtime);
567
568 if (interpreters->head == NULL) {
569 return _PyStatus_ERR("missing main interpreter");
570 }
571 _PyThreadState_Swap(gilstate, tstate);
572 return _PyStatus_OK();
573 }
574 #endif
575
576
577 PyInterpreterState *
PyInterpreterState_Get(void)578 PyInterpreterState_Get(void)
579 {
580 PyThreadState *tstate = _PyThreadState_GET();
581 _Py_EnsureTstateNotNULL(tstate);
582 PyInterpreterState *interp = tstate->interp;
583 if (interp == NULL) {
584 Py_FatalError("no current interpreter");
585 }
586 return interp;
587 }
588
589
590 int64_t
PyInterpreterState_GetID(PyInterpreterState * interp)591 PyInterpreterState_GetID(PyInterpreterState *interp)
592 {
593 if (interp == NULL) {
594 PyErr_SetString(PyExc_RuntimeError, "no interpreter provided");
595 return -1;
596 }
597 return interp->id;
598 }
599
600
601 static PyInterpreterState *
interp_look_up_id(_PyRuntimeState * runtime,int64_t requested_id)602 interp_look_up_id(_PyRuntimeState *runtime, int64_t requested_id)
603 {
604 PyInterpreterState *interp = runtime->interpreters.head;
605 while (interp != NULL) {
606 int64_t id = PyInterpreterState_GetID(interp);
607 if (id < 0) {
608 return NULL;
609 }
610 if (requested_id == id) {
611 return interp;
612 }
613 interp = PyInterpreterState_Next(interp);
614 }
615 return NULL;
616 }
617
618 PyInterpreterState *
_PyInterpreterState_LookUpID(int64_t requested_id)619 _PyInterpreterState_LookUpID(int64_t requested_id)
620 {
621 PyInterpreterState *interp = NULL;
622 if (requested_id >= 0) {
623 _PyRuntimeState *runtime = &_PyRuntime;
624 HEAD_LOCK(runtime);
625 interp = interp_look_up_id(runtime, requested_id);
626 HEAD_UNLOCK(runtime);
627 }
628 if (interp == NULL && !PyErr_Occurred()) {
629 PyErr_Format(PyExc_RuntimeError,
630 "unrecognized interpreter ID %lld", requested_id);
631 }
632 return interp;
633 }
634
635
636 int
_PyInterpreterState_IDInitref(PyInterpreterState * interp)637 _PyInterpreterState_IDInitref(PyInterpreterState *interp)
638 {
639 if (interp->id_mutex != NULL) {
640 return 0;
641 }
642 interp->id_mutex = PyThread_allocate_lock();
643 if (interp->id_mutex == NULL) {
644 PyErr_SetString(PyExc_RuntimeError,
645 "failed to create init interpreter ID mutex");
646 return -1;
647 }
648 interp->id_refcount = 0;
649 return 0;
650 }
651
652
653 int
_PyInterpreterState_IDIncref(PyInterpreterState * interp)654 _PyInterpreterState_IDIncref(PyInterpreterState *interp)
655 {
656 if (_PyInterpreterState_IDInitref(interp) < 0) {
657 return -1;
658 }
659
660 PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK);
661 interp->id_refcount += 1;
662 PyThread_release_lock(interp->id_mutex);
663 return 0;
664 }
665
666
667 void
_PyInterpreterState_IDDecref(PyInterpreterState * interp)668 _PyInterpreterState_IDDecref(PyInterpreterState *interp)
669 {
670 assert(interp->id_mutex != NULL);
671
672 struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
673 PyThread_acquire_lock(interp->id_mutex, WAIT_LOCK);
674 assert(interp->id_refcount != 0);
675 interp->id_refcount -= 1;
676 int64_t refcount = interp->id_refcount;
677 PyThread_release_lock(interp->id_mutex);
678
679 if (refcount == 0 && interp->requires_idref) {
680 // XXX Using the "head" thread isn't strictly correct.
681 PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
682 // XXX Possible GILState issues?
683 PyThreadState *save_tstate = _PyThreadState_Swap(gilstate, tstate);
684 Py_EndInterpreter(tstate);
685 _PyThreadState_Swap(gilstate, save_tstate);
686 }
687 }
688
689 int
_PyInterpreterState_RequiresIDRef(PyInterpreterState * interp)690 _PyInterpreterState_RequiresIDRef(PyInterpreterState *interp)
691 {
692 return interp->requires_idref;
693 }
694
695 void
_PyInterpreterState_RequireIDRef(PyInterpreterState * interp,int required)696 _PyInterpreterState_RequireIDRef(PyInterpreterState *interp, int required)
697 {
698 interp->requires_idref = required ? 1 : 0;
699 }
700
701 PyObject *
_PyInterpreterState_GetMainModule(PyInterpreterState * interp)702 _PyInterpreterState_GetMainModule(PyInterpreterState *interp)
703 {
704 if (interp->modules == NULL) {
705 PyErr_SetString(PyExc_RuntimeError, "interpreter not initialized");
706 return NULL;
707 }
708 return PyMapping_GetItemString(interp->modules, "__main__");
709 }
710
711 PyObject *
PyInterpreterState_GetDict(PyInterpreterState * interp)712 PyInterpreterState_GetDict(PyInterpreterState *interp)
713 {
714 if (interp->dict == NULL) {
715 interp->dict = PyDict_New();
716 if (interp->dict == NULL) {
717 PyErr_Clear();
718 }
719 }
720 /* Returning NULL means no per-interpreter dict is available. */
721 return interp->dict;
722 }
723
724 /* Minimum size of data stack chunk */
725 #define DATA_STACK_CHUNK_SIZE (16*1024)
726
727 static _PyStackChunk*
allocate_chunk(int size_in_bytes,_PyStackChunk * previous)728 allocate_chunk(int size_in_bytes, _PyStackChunk* previous)
729 {
730 assert(size_in_bytes % sizeof(PyObject **) == 0);
731 _PyStackChunk *res = _PyObject_VirtualAlloc(size_in_bytes);
732 if (res == NULL) {
733 return NULL;
734 }
735 res->previous = previous;
736 res->size = size_in_bytes;
737 res->top = 0;
738 return res;
739 }
740
741 static PyThreadState *
alloc_threadstate(void)742 alloc_threadstate(void)
743 {
744 return PyMem_RawCalloc(1, sizeof(PyThreadState));
745 }
746
747 static void
free_threadstate(PyThreadState * tstate)748 free_threadstate(PyThreadState *tstate)
749 {
750 if (!tstate->_static) {
751 PyMem_RawFree(tstate);
752 }
753 }
754
755 /* Get the thread state to a minimal consistent state.
756 Further init happens in pylifecycle.c before it can be used.
757 All fields not initialized here are expected to be zeroed out,
758 e.g. by PyMem_RawCalloc() or memset(), or otherwise pre-initialized.
759 The interpreter state is not manipulated. Instead it is assumed that
760 the thread is getting added to the interpreter.
761 */
762
763 static void
init_threadstate(PyThreadState * tstate,PyInterpreterState * interp,uint64_t id,PyThreadState * next)764 init_threadstate(PyThreadState *tstate,
765 PyInterpreterState *interp, uint64_t id,
766 PyThreadState *next)
767 {
768 if (tstate->_initialized) {
769 Py_FatalError("thread state already initialized");
770 }
771
772 assert(interp != NULL);
773 tstate->interp = interp;
774
775 assert(id > 0);
776 tstate->id = id;
777
778 assert(interp->threads.head == tstate);
779 assert((next != NULL && id != 1) || (next == NULL && id == 1));
780 if (next != NULL) {
781 assert(next->prev == NULL || next->prev == tstate);
782 next->prev = tstate;
783 }
784 tstate->next = next;
785 assert(tstate->prev == NULL);
786
787 tstate->thread_id = PyThread_get_thread_ident();
788 #ifdef PY_HAVE_THREAD_NATIVE_ID
789 tstate->native_thread_id = PyThread_get_thread_native_id();
790 #endif
791
792 tstate->recursion_limit = interp->ceval.recursion_limit,
793 tstate->recursion_remaining = interp->ceval.recursion_limit,
794
795 tstate->exc_info = &tstate->exc_state;
796
797 tstate->cframe = &tstate->root_cframe;
798 tstate->datastack_chunk = NULL;
799 tstate->datastack_top = NULL;
800 tstate->datastack_limit = NULL;
801
802 tstate->_initialized = 1;
803 }
804
805 static PyThreadState *
new_threadstate(PyInterpreterState * interp)806 new_threadstate(PyInterpreterState *interp)
807 {
808 PyThreadState *tstate;
809 _PyRuntimeState *runtime = interp->runtime;
810 // We don't need to allocate a thread state for the main interpreter
811 // (the common case), but doing it later for the other case revealed a
812 // reentrancy problem (deadlock). So for now we always allocate before
813 // taking the interpreters lock. See GH-96071.
814 PyThreadState *new_tstate = alloc_threadstate();
815 int used_newtstate;
816 if (new_tstate == NULL) {
817 return NULL;
818 }
819 /* We serialize concurrent creation to protect global state. */
820 HEAD_LOCK(runtime);
821
822 interp->threads.next_unique_id += 1;
823 uint64_t id = interp->threads.next_unique_id;
824
825 // Allocate the thread state and add it to the interpreter.
826 PyThreadState *old_head = interp->threads.head;
827 if (old_head == NULL) {
828 // It's the interpreter's initial thread state.
829 assert(id == 1);
830 used_newtstate = 0;
831 tstate = &interp->_initial_thread;
832 assert(tstate->_static);
833 }
834 else {
835 // Every valid interpreter must have at least one thread.
836 assert(id > 1);
837 assert(old_head->prev == NULL);
838 used_newtstate = 1;
839 tstate = new_tstate;
840 // Set to _PyThreadState_INIT.
841 memcpy(tstate,
842 &initial._main_interpreter._initial_thread,
843 sizeof(*tstate));
844 // We need to adjust any fields that are different from the initial
845 // thread (as defined in _PyThreadState_INIT):
846 tstate->_static = false;
847 }
848 interp->threads.head = tstate;
849
850 init_threadstate(tstate, interp, id, old_head);
851
852 HEAD_UNLOCK(runtime);
853 if (!used_newtstate) {
854 // Must be called with lock unlocked to avoid re-entrancy deadlock.
855 PyMem_RawFree(new_tstate);
856 }
857 return tstate;
858 }
859
860 PyThreadState *
PyThreadState_New(PyInterpreterState * interp)861 PyThreadState_New(PyInterpreterState *interp)
862 {
863 PyThreadState *tstate = new_threadstate(interp);
864 _PyThreadState_SetCurrent(tstate);
865 return tstate;
866 }
867
868 PyThreadState *
_PyThreadState_Prealloc(PyInterpreterState * interp)869 _PyThreadState_Prealloc(PyInterpreterState *interp)
870 {
871 return new_threadstate(interp);
872 }
873
874 // We keep this around for (accidental) stable ABI compatibility.
875 // Realisically, no extensions are using it.
876 void
_PyThreadState_Init(PyThreadState * tstate)877 _PyThreadState_Init(PyThreadState *tstate)
878 {
879 Py_FatalError("_PyThreadState_Init() is for internal use only");
880 }
881
882 void
_PyThreadState_SetCurrent(PyThreadState * tstate)883 _PyThreadState_SetCurrent(PyThreadState *tstate)
884 {
885 _PyGILState_NoteThreadState(&tstate->interp->runtime->gilstate, tstate);
886 }
887
888 PyObject*
PyState_FindModule(PyModuleDef * module)889 PyState_FindModule(PyModuleDef* module)
890 {
891 Py_ssize_t index = module->m_base.m_index;
892 PyInterpreterState *state = _PyInterpreterState_GET();
893 PyObject *res;
894 if (module->m_slots) {
895 return NULL;
896 }
897 if (index == 0)
898 return NULL;
899 if (state->modules_by_index == NULL)
900 return NULL;
901 if (index >= PyList_GET_SIZE(state->modules_by_index))
902 return NULL;
903 res = PyList_GET_ITEM(state->modules_by_index, index);
904 return res==Py_None ? NULL : res;
905 }
906
907 int
_PyState_AddModule(PyThreadState * tstate,PyObject * module,PyModuleDef * def)908 _PyState_AddModule(PyThreadState *tstate, PyObject* module, PyModuleDef* def)
909 {
910 if (!def) {
911 assert(_PyErr_Occurred(tstate));
912 return -1;
913 }
914 if (def->m_slots) {
915 _PyErr_SetString(tstate,
916 PyExc_SystemError,
917 "PyState_AddModule called on module with slots");
918 return -1;
919 }
920
921 PyInterpreterState *interp = tstate->interp;
922 if (!interp->modules_by_index) {
923 interp->modules_by_index = PyList_New(0);
924 if (!interp->modules_by_index) {
925 return -1;
926 }
927 }
928
929 while (PyList_GET_SIZE(interp->modules_by_index) <= def->m_base.m_index) {
930 if (PyList_Append(interp->modules_by_index, Py_None) < 0) {
931 return -1;
932 }
933 }
934
935 Py_INCREF(module);
936 return PyList_SetItem(interp->modules_by_index,
937 def->m_base.m_index, module);
938 }
939
940 int
PyState_AddModule(PyObject * module,PyModuleDef * def)941 PyState_AddModule(PyObject* module, PyModuleDef* def)
942 {
943 if (!def) {
944 Py_FatalError("module definition is NULL");
945 return -1;
946 }
947
948 PyThreadState *tstate = _PyThreadState_GET();
949 PyInterpreterState *interp = tstate->interp;
950 Py_ssize_t index = def->m_base.m_index;
951 if (interp->modules_by_index &&
952 index < PyList_GET_SIZE(interp->modules_by_index) &&
953 module == PyList_GET_ITEM(interp->modules_by_index, index))
954 {
955 _Py_FatalErrorFormat(__func__, "module %p already added", module);
956 return -1;
957 }
958 return _PyState_AddModule(tstate, module, def);
959 }
960
961 int
PyState_RemoveModule(PyModuleDef * def)962 PyState_RemoveModule(PyModuleDef* def)
963 {
964 PyThreadState *tstate = _PyThreadState_GET();
965 PyInterpreterState *interp = tstate->interp;
966
967 if (def->m_slots) {
968 _PyErr_SetString(tstate,
969 PyExc_SystemError,
970 "PyState_RemoveModule called on module with slots");
971 return -1;
972 }
973
974 Py_ssize_t index = def->m_base.m_index;
975 if (index == 0) {
976 Py_FatalError("invalid module index");
977 }
978 if (interp->modules_by_index == NULL) {
979 Py_FatalError("Interpreters module-list not accessible.");
980 }
981 if (index > PyList_GET_SIZE(interp->modules_by_index)) {
982 Py_FatalError("Module index out of bounds.");
983 }
984
985 Py_INCREF(Py_None);
986 return PyList_SetItem(interp->modules_by_index, index, Py_None);
987 }
988
989 // Used by finalize_modules()
990 void
_PyInterpreterState_ClearModules(PyInterpreterState * interp)991 _PyInterpreterState_ClearModules(PyInterpreterState *interp)
992 {
993 if (!interp->modules_by_index) {
994 return;
995 }
996
997 Py_ssize_t i;
998 for (i = 0; i < PyList_GET_SIZE(interp->modules_by_index); i++) {
999 PyObject *m = PyList_GET_ITEM(interp->modules_by_index, i);
1000 if (PyModule_Check(m)) {
1001 /* cleanup the saved copy of module dicts */
1002 PyModuleDef *md = PyModule_GetDef(m);
1003 if (md) {
1004 Py_CLEAR(md->m_base.m_copy);
1005 }
1006 }
1007 }
1008
1009 /* Setting modules_by_index to NULL could be dangerous, so we
1010 clear the list instead. */
1011 if (PyList_SetSlice(interp->modules_by_index,
1012 0, PyList_GET_SIZE(interp->modules_by_index),
1013 NULL)) {
1014 PyErr_WriteUnraisable(interp->modules_by_index);
1015 }
1016 }
1017
1018 void
PyThreadState_Clear(PyThreadState * tstate)1019 PyThreadState_Clear(PyThreadState *tstate)
1020 {
1021 int verbose = _PyInterpreterState_GetConfig(tstate->interp)->verbose;
1022
1023 if (verbose && tstate->cframe->current_frame != NULL) {
1024 /* bpo-20526: After the main thread calls
1025 _PyRuntimeState_SetFinalizing() in Py_FinalizeEx(), threads must
1026 exit when trying to take the GIL. If a thread exit in the middle of
1027 _PyEval_EvalFrameDefault(), tstate->frame is not reset to its
1028 previous value. It is more likely with daemon threads, but it can
1029 happen with regular threads if threading._shutdown() fails
1030 (ex: interrupted by CTRL+C). */
1031 fprintf(stderr,
1032 "PyThreadState_Clear: warning: thread still has a frame\n");
1033 }
1034
1035 /* Don't clear tstate->pyframe: it is a borrowed reference */
1036
1037 Py_CLEAR(tstate->dict);
1038 Py_CLEAR(tstate->async_exc);
1039
1040 Py_CLEAR(tstate->curexc_type);
1041 Py_CLEAR(tstate->curexc_value);
1042 Py_CLEAR(tstate->curexc_traceback);
1043
1044 Py_CLEAR(tstate->exc_state.exc_value);
1045
1046 /* The stack of exception states should contain just this thread. */
1047 if (verbose && tstate->exc_info != &tstate->exc_state) {
1048 fprintf(stderr,
1049 "PyThreadState_Clear: warning: thread still has a generator\n");
1050 }
1051
1052 tstate->c_profilefunc = NULL;
1053 tstate->c_tracefunc = NULL;
1054 Py_CLEAR(tstate->c_profileobj);
1055 Py_CLEAR(tstate->c_traceobj);
1056
1057 Py_CLEAR(tstate->async_gen_firstiter);
1058 Py_CLEAR(tstate->async_gen_finalizer);
1059
1060 Py_CLEAR(tstate->context);
1061
1062 if (tstate->on_delete != NULL) {
1063 tstate->on_delete(tstate->on_delete_data);
1064 }
1065 }
1066
1067
1068 /* Common code for PyThreadState_Delete() and PyThreadState_DeleteCurrent() */
1069 static void
tstate_delete_common(PyThreadState * tstate,struct _gilstate_runtime_state * gilstate)1070 tstate_delete_common(PyThreadState *tstate,
1071 struct _gilstate_runtime_state *gilstate)
1072 {
1073 _Py_EnsureTstateNotNULL(tstate);
1074 PyInterpreterState *interp = tstate->interp;
1075 if (interp == NULL) {
1076 Py_FatalError("NULL interpreter");
1077 }
1078 _PyRuntimeState *runtime = interp->runtime;
1079
1080 HEAD_LOCK(runtime);
1081 if (tstate->prev) {
1082 tstate->prev->next = tstate->next;
1083 }
1084 else {
1085 interp->threads.head = tstate->next;
1086 }
1087 if (tstate->next) {
1088 tstate->next->prev = tstate->prev;
1089 }
1090 HEAD_UNLOCK(runtime);
1091
1092 if (gilstate->autoInterpreterState &&
1093 PyThread_tss_get(&gilstate->autoTSSkey) == tstate)
1094 {
1095 PyThread_tss_set(&gilstate->autoTSSkey, NULL);
1096 }
1097 _PyStackChunk *chunk = tstate->datastack_chunk;
1098 tstate->datastack_chunk = NULL;
1099 while (chunk != NULL) {
1100 _PyStackChunk *prev = chunk->previous;
1101 _PyObject_VirtualFree(chunk, chunk->size);
1102 chunk = prev;
1103 }
1104 }
1105
1106 static void
_PyThreadState_Delete(PyThreadState * tstate,int check_current)1107 _PyThreadState_Delete(PyThreadState *tstate, int check_current)
1108 {
1109 struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate;
1110 if (check_current) {
1111 if (tstate == _PyRuntimeGILState_GetThreadState(gilstate)) {
1112 _Py_FatalErrorFormat(__func__, "tstate %p is still current", tstate);
1113 }
1114 }
1115 tstate_delete_common(tstate, gilstate);
1116 free_threadstate(tstate);
1117 }
1118
1119
1120 void
PyThreadState_Delete(PyThreadState * tstate)1121 PyThreadState_Delete(PyThreadState *tstate)
1122 {
1123 _PyThreadState_Delete(tstate, 1);
1124 }
1125
1126
1127 void
_PyThreadState_DeleteCurrent(PyThreadState * tstate)1128 _PyThreadState_DeleteCurrent(PyThreadState *tstate)
1129 {
1130 _Py_EnsureTstateNotNULL(tstate);
1131 struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate;
1132 tstate_delete_common(tstate, gilstate);
1133 _PyRuntimeGILState_SetThreadState(gilstate, NULL);
1134 _PyEval_ReleaseLock(tstate);
1135 free_threadstate(tstate);
1136 }
1137
1138 void
PyThreadState_DeleteCurrent(void)1139 PyThreadState_DeleteCurrent(void)
1140 {
1141 struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
1142 PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate);
1143 _PyThreadState_DeleteCurrent(tstate);
1144 }
1145
1146
1147 /*
1148 * Delete all thread states except the one passed as argument.
1149 * Note that, if there is a current thread state, it *must* be the one
1150 * passed as argument. Also, this won't touch any other interpreters
1151 * than the current one, since we don't know which thread state should
1152 * be kept in those other interpreters.
1153 */
1154 void
_PyThreadState_DeleteExcept(_PyRuntimeState * runtime,PyThreadState * tstate)1155 _PyThreadState_DeleteExcept(_PyRuntimeState *runtime, PyThreadState *tstate)
1156 {
1157 PyInterpreterState *interp = tstate->interp;
1158
1159 HEAD_LOCK(runtime);
1160 /* Remove all thread states, except tstate, from the linked list of
1161 thread states. This will allow calling PyThreadState_Clear()
1162 without holding the lock. */
1163 PyThreadState *list = interp->threads.head;
1164 if (list == tstate) {
1165 list = tstate->next;
1166 }
1167 if (tstate->prev) {
1168 tstate->prev->next = tstate->next;
1169 }
1170 if (tstate->next) {
1171 tstate->next->prev = tstate->prev;
1172 }
1173 tstate->prev = tstate->next = NULL;
1174 interp->threads.head = tstate;
1175 HEAD_UNLOCK(runtime);
1176
1177 /* Clear and deallocate all stale thread states. Even if this
1178 executes Python code, we should be safe since it executes
1179 in the current thread, not one of the stale threads. */
1180 PyThreadState *p, *next;
1181 for (p = list; p; p = next) {
1182 next = p->next;
1183 PyThreadState_Clear(p);
1184 free_threadstate(p);
1185 }
1186 }
1187
1188
1189 PyThreadState *
_PyThreadState_UncheckedGet(void)1190 _PyThreadState_UncheckedGet(void)
1191 {
1192 return _PyThreadState_GET();
1193 }
1194
1195
1196 PyThreadState *
PyThreadState_Get(void)1197 PyThreadState_Get(void)
1198 {
1199 PyThreadState *tstate = _PyThreadState_GET();
1200 _Py_EnsureTstateNotNULL(tstate);
1201 return tstate;
1202 }
1203
1204
1205 PyThreadState *
_PyThreadState_Swap(struct _gilstate_runtime_state * gilstate,PyThreadState * newts)1206 _PyThreadState_Swap(struct _gilstate_runtime_state *gilstate, PyThreadState *newts)
1207 {
1208 PyThreadState *oldts = _PyRuntimeGILState_GetThreadState(gilstate);
1209
1210 _PyRuntimeGILState_SetThreadState(gilstate, newts);
1211 /* It should not be possible for more than one thread state
1212 to be used for a thread. Check this the best we can in debug
1213 builds.
1214 */
1215 #if defined(Py_DEBUG)
1216 if (newts) {
1217 /* This can be called from PyEval_RestoreThread(). Similar
1218 to it, we need to ensure errno doesn't change.
1219 */
1220 int err = errno;
1221 PyThreadState *check = _PyGILState_GetThisThreadState(gilstate);
1222 if (check && check->interp == newts->interp && check != newts)
1223 Py_FatalError("Invalid thread state for this thread");
1224 errno = err;
1225 }
1226 #endif
1227 return oldts;
1228 }
1229
1230 PyThreadState *
PyThreadState_Swap(PyThreadState * newts)1231 PyThreadState_Swap(PyThreadState *newts)
1232 {
1233 return _PyThreadState_Swap(&_PyRuntime.gilstate, newts);
1234 }
1235
1236 /* An extension mechanism to store arbitrary additional per-thread state.
1237 PyThreadState_GetDict() returns a dictionary that can be used to hold such
1238 state; the caller should pick a unique key and store its state there. If
1239 PyThreadState_GetDict() returns NULL, an exception has *not* been raised
1240 and the caller should assume no per-thread state is available. */
1241
1242 PyObject *
_PyThreadState_GetDict(PyThreadState * tstate)1243 _PyThreadState_GetDict(PyThreadState *tstate)
1244 {
1245 assert(tstate != NULL);
1246 if (tstate->dict == NULL) {
1247 tstate->dict = PyDict_New();
1248 if (tstate->dict == NULL) {
1249 _PyErr_Clear(tstate);
1250 }
1251 }
1252 return tstate->dict;
1253 }
1254
1255
1256 PyObject *
PyThreadState_GetDict(void)1257 PyThreadState_GetDict(void)
1258 {
1259 PyThreadState *tstate = _PyThreadState_GET();
1260 if (tstate == NULL) {
1261 return NULL;
1262 }
1263 return _PyThreadState_GetDict(tstate);
1264 }
1265
1266
1267 PyInterpreterState *
PyThreadState_GetInterpreter(PyThreadState * tstate)1268 PyThreadState_GetInterpreter(PyThreadState *tstate)
1269 {
1270 assert(tstate != NULL);
1271 return tstate->interp;
1272 }
1273
1274
1275 PyFrameObject*
PyThreadState_GetFrame(PyThreadState * tstate)1276 PyThreadState_GetFrame(PyThreadState *tstate)
1277 {
1278 assert(tstate != NULL);
1279 _PyInterpreterFrame *f = tstate->cframe->current_frame;
1280 while (f && _PyFrame_IsIncomplete(f)) {
1281 f = f->previous;
1282 }
1283 if (f == NULL) {
1284 return NULL;
1285 }
1286 PyFrameObject *frame = _PyFrame_GetFrameObject(f);
1287 if (frame == NULL) {
1288 PyErr_Clear();
1289 }
1290 Py_XINCREF(frame);
1291 return frame;
1292 }
1293
1294
1295 uint64_t
PyThreadState_GetID(PyThreadState * tstate)1296 PyThreadState_GetID(PyThreadState *tstate)
1297 {
1298 assert(tstate != NULL);
1299 return tstate->id;
1300 }
1301
1302
1303 /* Asynchronously raise an exception in a thread.
1304 Requested by Just van Rossum and Alex Martelli.
1305 To prevent naive misuse, you must write your own extension
1306 to call this, or use ctypes. Must be called with the GIL held.
1307 Returns the number of tstates modified (normally 1, but 0 if `id` didn't
1308 match any known thread id). Can be called with exc=NULL to clear an
1309 existing async exception. This raises no exceptions. */
1310
1311 int
PyThreadState_SetAsyncExc(unsigned long id,PyObject * exc)1312 PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
1313 {
1314 _PyRuntimeState *runtime = &_PyRuntime;
1315 PyInterpreterState *interp = _PyRuntimeState_GetThreadState(runtime)->interp;
1316
1317 /* Although the GIL is held, a few C API functions can be called
1318 * without the GIL held, and in particular some that create and
1319 * destroy thread and interpreter states. Those can mutate the
1320 * list of thread states we're traversing, so to prevent that we lock
1321 * head_mutex for the duration.
1322 */
1323 HEAD_LOCK(runtime);
1324 for (PyThreadState *tstate = interp->threads.head; tstate != NULL; tstate = tstate->next) {
1325 if (tstate->thread_id != id) {
1326 continue;
1327 }
1328
1329 /* Tricky: we need to decref the current value
1330 * (if any) in tstate->async_exc, but that can in turn
1331 * allow arbitrary Python code to run, including
1332 * perhaps calls to this function. To prevent
1333 * deadlock, we need to release head_mutex before
1334 * the decref.
1335 */
1336 PyObject *old_exc = tstate->async_exc;
1337 Py_XINCREF(exc);
1338 tstate->async_exc = exc;
1339 HEAD_UNLOCK(runtime);
1340
1341 Py_XDECREF(old_exc);
1342 _PyEval_SignalAsyncExc(tstate->interp);
1343 return 1;
1344 }
1345 HEAD_UNLOCK(runtime);
1346 return 0;
1347 }
1348
1349 /* Routines for advanced debuggers, requested by David Beazley.
1350 Don't use unless you know what you are doing! */
1351
1352 PyInterpreterState *
PyInterpreterState_Head(void)1353 PyInterpreterState_Head(void)
1354 {
1355 return _PyRuntime.interpreters.head;
1356 }
1357
1358 PyInterpreterState *
PyInterpreterState_Main(void)1359 PyInterpreterState_Main(void)
1360 {
1361 return _PyInterpreterState_Main();
1362 }
1363
1364 PyInterpreterState *
PyInterpreterState_Next(PyInterpreterState * interp)1365 PyInterpreterState_Next(PyInterpreterState *interp) {
1366 return interp->next;
1367 }
1368
1369 PyThreadState *
PyInterpreterState_ThreadHead(PyInterpreterState * interp)1370 PyInterpreterState_ThreadHead(PyInterpreterState *interp) {
1371 return interp->threads.head;
1372 }
1373
1374 PyThreadState *
PyThreadState_Next(PyThreadState * tstate)1375 PyThreadState_Next(PyThreadState *tstate) {
1376 return tstate->next;
1377 }
1378
1379 /* The implementation of sys._current_frames(). This is intended to be
1380 called with the GIL held, as it will be when called via
1381 sys._current_frames(). It's possible it would work fine even without
1382 the GIL held, but haven't thought enough about that.
1383 */
1384 PyObject *
_PyThread_CurrentFrames(void)1385 _PyThread_CurrentFrames(void)
1386 {
1387 PyThreadState *tstate = _PyThreadState_GET();
1388 if (_PySys_Audit(tstate, "sys._current_frames", NULL) < 0) {
1389 return NULL;
1390 }
1391
1392 PyObject *result = PyDict_New();
1393 if (result == NULL) {
1394 return NULL;
1395 }
1396
1397 /* for i in all interpreters:
1398 * for t in all of i's thread states:
1399 * if t's frame isn't NULL, map t's id to its frame
1400 * Because these lists can mutate even when the GIL is held, we
1401 * need to grab head_mutex for the duration.
1402 */
1403 _PyRuntimeState *runtime = tstate->interp->runtime;
1404 HEAD_LOCK(runtime);
1405 PyInterpreterState *i;
1406 for (i = runtime->interpreters.head; i != NULL; i = i->next) {
1407 PyThreadState *t;
1408 for (t = i->threads.head; t != NULL; t = t->next) {
1409 _PyInterpreterFrame *frame = t->cframe->current_frame;
1410 while (frame && _PyFrame_IsIncomplete(frame)) {
1411 frame = frame->previous;
1412 }
1413 if (frame == NULL) {
1414 continue;
1415 }
1416 PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
1417 if (id == NULL) {
1418 goto fail;
1419 }
1420 PyObject *frameobj = (PyObject *)_PyFrame_GetFrameObject(frame);
1421 if (frameobj == NULL) {
1422 Py_DECREF(id);
1423 goto fail;
1424 }
1425 int stat = PyDict_SetItem(result, id, frameobj);
1426 Py_DECREF(id);
1427 if (stat < 0) {
1428 goto fail;
1429 }
1430 }
1431 }
1432 goto done;
1433
1434 fail:
1435 Py_CLEAR(result);
1436
1437 done:
1438 HEAD_UNLOCK(runtime);
1439 return result;
1440 }
1441
1442 PyObject *
_PyThread_CurrentExceptions(void)1443 _PyThread_CurrentExceptions(void)
1444 {
1445 PyThreadState *tstate = _PyThreadState_GET();
1446
1447 _Py_EnsureTstateNotNULL(tstate);
1448
1449 if (_PySys_Audit(tstate, "sys._current_exceptions", NULL) < 0) {
1450 return NULL;
1451 }
1452
1453 PyObject *result = PyDict_New();
1454 if (result == NULL) {
1455 return NULL;
1456 }
1457
1458 /* for i in all interpreters:
1459 * for t in all of i's thread states:
1460 * if t's frame isn't NULL, map t's id to its frame
1461 * Because these lists can mutate even when the GIL is held, we
1462 * need to grab head_mutex for the duration.
1463 */
1464 _PyRuntimeState *runtime = tstate->interp->runtime;
1465 HEAD_LOCK(runtime);
1466 PyInterpreterState *i;
1467 for (i = runtime->interpreters.head; i != NULL; i = i->next) {
1468 PyThreadState *t;
1469 for (t = i->threads.head; t != NULL; t = t->next) {
1470 _PyErr_StackItem *err_info = _PyErr_GetTopmostException(t);
1471 if (err_info == NULL) {
1472 continue;
1473 }
1474 PyObject *id = PyLong_FromUnsignedLong(t->thread_id);
1475 if (id == NULL) {
1476 goto fail;
1477 }
1478 PyObject *exc_info = _PyErr_StackItemToExcInfoTuple(err_info);
1479 if (exc_info == NULL) {
1480 Py_DECREF(id);
1481 goto fail;
1482 }
1483 int stat = PyDict_SetItem(result, id, exc_info);
1484 Py_DECREF(id);
1485 Py_DECREF(exc_info);
1486 if (stat < 0) {
1487 goto fail;
1488 }
1489 }
1490 }
1491 goto done;
1492
1493 fail:
1494 Py_CLEAR(result);
1495
1496 done:
1497 HEAD_UNLOCK(runtime);
1498 return result;
1499 }
1500
1501 /* Python "auto thread state" API. */
1502
1503 /* Keep this as a static, as it is not reliable! It can only
1504 ever be compared to the state for the *current* thread.
1505 * If not equal, then it doesn't matter that the actual
1506 value may change immediately after comparison, as it can't
1507 possibly change to the current thread's state.
1508 * If equal, then the current thread holds the lock, so the value can't
1509 change until we yield the lock.
1510 */
1511 static int
PyThreadState_IsCurrent(PyThreadState * tstate)1512 PyThreadState_IsCurrent(PyThreadState *tstate)
1513 {
1514 /* Must be the tstate for this thread */
1515 struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
1516 assert(_PyGILState_GetThisThreadState(gilstate) == tstate);
1517 return tstate == _PyRuntimeGILState_GetThreadState(gilstate);
1518 }
1519
1520 /* Internal initialization/finalization functions called by
1521 Py_Initialize/Py_FinalizeEx
1522 */
1523 PyStatus
_PyGILState_Init(_PyRuntimeState * runtime)1524 _PyGILState_Init(_PyRuntimeState *runtime)
1525 {
1526 struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
1527 if (PyThread_tss_create(&gilstate->autoTSSkey) != 0) {
1528 return _PyStatus_NO_MEMORY();
1529 }
1530 // PyThreadState_New() calls _PyGILState_NoteThreadState() which does
1531 // nothing before autoInterpreterState is set.
1532 assert(gilstate->autoInterpreterState == NULL);
1533 return _PyStatus_OK();
1534 }
1535
1536
1537 PyStatus
_PyGILState_SetTstate(PyThreadState * tstate)1538 _PyGILState_SetTstate(PyThreadState *tstate)
1539 {
1540 if (!_Py_IsMainInterpreter(tstate->interp)) {
1541 /* Currently, PyGILState is shared by all interpreters. The main
1542 * interpreter is responsible to initialize it. */
1543 return _PyStatus_OK();
1544 }
1545
1546 /* must init with valid states */
1547 assert(tstate != NULL);
1548 assert(tstate->interp != NULL);
1549
1550 struct _gilstate_runtime_state *gilstate = &tstate->interp->runtime->gilstate;
1551
1552 gilstate->autoInterpreterState = tstate->interp;
1553 assert(PyThread_tss_get(&gilstate->autoTSSkey) == NULL);
1554 assert(tstate->gilstate_counter == 0);
1555
1556 _PyGILState_NoteThreadState(gilstate, tstate);
1557 return _PyStatus_OK();
1558 }
1559
1560 PyInterpreterState *
_PyGILState_GetInterpreterStateUnsafe(void)1561 _PyGILState_GetInterpreterStateUnsafe(void)
1562 {
1563 return _PyRuntime.gilstate.autoInterpreterState;
1564 }
1565
1566 void
_PyGILState_Fini(PyInterpreterState * interp)1567 _PyGILState_Fini(PyInterpreterState *interp)
1568 {
1569 struct _gilstate_runtime_state *gilstate = &interp->runtime->gilstate;
1570 PyThread_tss_delete(&gilstate->autoTSSkey);
1571 gilstate->autoInterpreterState = NULL;
1572 }
1573
1574 #ifdef HAVE_FORK
1575 /* Reset the TSS key - called by PyOS_AfterFork_Child().
1576 * This should not be necessary, but some - buggy - pthread implementations
1577 * don't reset TSS upon fork(), see issue #10517.
1578 */
1579 PyStatus
_PyGILState_Reinit(_PyRuntimeState * runtime)1580 _PyGILState_Reinit(_PyRuntimeState *runtime)
1581 {
1582 struct _gilstate_runtime_state *gilstate = &runtime->gilstate;
1583 PyThreadState *tstate = _PyGILState_GetThisThreadState(gilstate);
1584
1585 PyThread_tss_delete(&gilstate->autoTSSkey);
1586 if (PyThread_tss_create(&gilstate->autoTSSkey) != 0) {
1587 return _PyStatus_NO_MEMORY();
1588 }
1589
1590 /* If the thread had an associated auto thread state, reassociate it with
1591 * the new key. */
1592 if (tstate &&
1593 PyThread_tss_set(&gilstate->autoTSSkey, (void *)tstate) != 0)
1594 {
1595 return _PyStatus_ERR("failed to set autoTSSkey");
1596 }
1597 return _PyStatus_OK();
1598 }
1599 #endif
1600
1601 /* When a thread state is created for a thread by some mechanism other than
1602 PyGILState_Ensure, it's important that the GILState machinery knows about
1603 it so it doesn't try to create another thread state for the thread (this is
1604 a better fix for SF bug #1010677 than the first one attempted).
1605 */
1606 static void
_PyGILState_NoteThreadState(struct _gilstate_runtime_state * gilstate,PyThreadState * tstate)1607 _PyGILState_NoteThreadState(struct _gilstate_runtime_state *gilstate, PyThreadState* tstate)
1608 {
1609 /* If autoTSSkey isn't initialized, this must be the very first
1610 threadstate created in Py_Initialize(). Don't do anything for now
1611 (we'll be back here when _PyGILState_Init is called). */
1612 if (!gilstate->autoInterpreterState) {
1613 return;
1614 }
1615
1616 /* Stick the thread state for this thread in thread specific storage.
1617
1618 The only situation where you can legitimately have more than one
1619 thread state for an OS level thread is when there are multiple
1620 interpreters.
1621
1622 You shouldn't really be using the PyGILState_ APIs anyway (see issues
1623 #10915 and #15751).
1624
1625 The first thread state created for that given OS level thread will
1626 "win", which seems reasonable behaviour.
1627 */
1628 if (PyThread_tss_get(&gilstate->autoTSSkey) == NULL) {
1629 if ((PyThread_tss_set(&gilstate->autoTSSkey, (void *)tstate)) != 0) {
1630 Py_FatalError("Couldn't create autoTSSkey mapping");
1631 }
1632 }
1633
1634 /* PyGILState_Release must not try to delete this thread state. */
1635 tstate->gilstate_counter = 1;
1636 }
1637
1638 /* The public functions */
1639 static PyThreadState *
_PyGILState_GetThisThreadState(struct _gilstate_runtime_state * gilstate)1640 _PyGILState_GetThisThreadState(struct _gilstate_runtime_state *gilstate)
1641 {
    if (gilstate->autoInterpreterState == NULL) {
        return NULL;
    }
    return (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey);
}

PyThreadState *
PyGILState_GetThisThreadState(void)
{
    return _PyGILState_GetThisThreadState(&_PyRuntime.gilstate);
}

int
PyGILState_Check(void)
{
    struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
    if (!gilstate->check_enabled) {
        return 1;
    }

    if (!PyThread_tss_is_created(&gilstate->autoTSSkey)) {
        return 1;
    }

    PyThreadState *tstate = _PyRuntimeGILState_GetThreadState(gilstate);
    if (tstate == NULL) {
        return 0;
    }

    return (tstate == _PyGILState_GetThisThreadState(gilstate));
}

PyGILState_STATE
PyGILState_Ensure(void)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    struct _gilstate_runtime_state *gilstate = &runtime->gilstate;

    /* Note that we do not auto-init Python here - apart from
       potential races with 2 threads auto-initializing, pep-311
       spells out other issues.  Embedders are expected to have
       called Py_Initialize(). */

    /* Ensure that _PyEval_InitThreads() and _PyGILState_Init() have been
       called by Py_Initialize() */
    assert(_PyEval_ThreadsInitialized(runtime));
    assert(gilstate->autoInterpreterState);

    PyThreadState *tcur = (PyThreadState *)PyThread_tss_get(&gilstate->autoTSSkey);
    int current;
    if (tcur == NULL) {
        /* Create a new Python thread state for this thread */
        tcur = PyThreadState_New(gilstate->autoInterpreterState);
        if (tcur == NULL) {
            Py_FatalError("Couldn't create thread-state for new thread");
        }

        /* This is our thread state!  We'll need to delete it in the
           matching call to PyGILState_Release(). */
        tcur->gilstate_counter = 0;
        current = 0;  /* new thread state is never current */
    }
    else {
        current = PyThreadState_IsCurrent(tcur);
    }

    if (current == 0) {
        PyEval_RestoreThread(tcur);
    }

    /* Update our counter in the thread-state - no need for locks:
       - tcur will remain valid as we hold the GIL.
       - the counter is safe as we are the only thread "allowed"
         to modify this value
    */
    ++tcur->gilstate_counter;

    return current ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}

void
PyGILState_Release(PyGILState_STATE oldstate)
{
    _PyRuntimeState *runtime = &_PyRuntime;
    PyThreadState *tstate = PyThread_tss_get(&runtime->gilstate.autoTSSkey);
    if (tstate == NULL) {
        Py_FatalError("auto-releasing thread-state, "
                      "but no thread-state for this thread");
    }

    /* We must hold the GIL and have our thread state current */
    /* XXX - remove the check - the assert should be fine,
       but while this is very new (April 2003), the extra check
       by release-only users can't hurt.
    */
    if (!PyThreadState_IsCurrent(tstate)) {
        _Py_FatalErrorFormat(__func__,
                             "thread state %p must be current when releasing",
                             tstate);
    }
    assert(PyThreadState_IsCurrent(tstate));
    --tstate->gilstate_counter;
    assert(tstate->gilstate_counter >= 0);  /* illegal counter value */

    /* If we're going to destroy this thread-state, we must
     * clear it while the GIL is held, as destructors may run.
     */
    if (tstate->gilstate_counter == 0) {
        /* can't have been locked when we created it */
        assert(oldstate == PyGILState_UNLOCKED);
        PyThreadState_Clear(tstate);
        /* Delete the thread-state.  Note this releases the GIL too!
         * It's vital that the GIL be held here, to avoid shutdown
         * races; see bugs 225673 and 1061968 (that nasty bug has a
         * habit of coming back).
         */
        assert(_PyRuntimeGILState_GetThreadState(&runtime->gilstate) == tstate);
        _PyThreadState_DeleteCurrent(tstate);
    }
    /* Release the lock if necessary */
    else if (oldstate == PyGILState_UNLOCKED) {
        PyEval_SaveThread();
    }
}
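
/* Illustrative only: the canonical calling pattern for the two functions
 * above, as an extension or embedding application might use it from a
 * thread that was not created by Python.  This is a sketch, not code used
 * by this file; the callback name and its argument are hypothetical.
 *
 *     static void
 *     my_callback(PyObject *callable)   // runs in a foreign thread
 *     {
 *         PyGILState_STATE st = PyGILState_Ensure();   // GIL held from here
 *         PyObject *res = PyObject_CallNoArgs(callable);
 *         if (res == NULL) {
 *             PyErr_Print();
 *         }
 *         else {
 *             Py_DECREF(res);
 *         }
 *         PyGILState_Release(st);   // restores the previous GIL state
 *     }
 *
 * Every Ensure() must be matched by exactly one Release() on the same
 * thread; calls may nest, which is what gilstate_counter tracks.
 */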


/**************************/
/* cross-interpreter data */
/**************************/

crossinterpdatafunc _PyCrossInterpreterData_Lookup(PyObject *);

/* This is a separate func from _PyCrossInterpreterData_Lookup in order
   to keep the registry code separate. */
static crossinterpdatafunc
_lookup_getdata(PyObject *obj)
{
    crossinterpdatafunc getdata = _PyCrossInterpreterData_Lookup(obj);
    if (getdata == NULL && !PyErr_Occurred()) {
        PyErr_Format(PyExc_ValueError,
                     "%S does not support cross-interpreter data", obj);
    }
    return getdata;
}

int
_PyObject_CheckCrossInterpreterData(PyObject *obj)
{
    crossinterpdatafunc getdata = _lookup_getdata(obj);
    if (getdata == NULL) {
        return -1;
    }
    return 0;
}

static int
_check_xidata(PyThreadState *tstate, _PyCrossInterpreterData *data)
{
    // data->data can be anything, including NULL, so we don't check it.

    // data->obj may be NULL, so we don't check it.

    if (data->interp < 0) {
        _PyErr_SetString(tstate, PyExc_SystemError, "missing interp");
        return -1;
    }

    if (data->new_object == NULL) {
        _PyErr_SetString(tstate, PyExc_SystemError, "missing new_object func");
        return -1;
    }

    // data->free may be NULL, so we don't check it.

    return 0;
}

int
_PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
{
    PyThreadState *tstate = _PyThreadState_GET();
#ifdef Py_DEBUG
    // The caller must hold the GIL
    _Py_EnsureTstateNotNULL(tstate);
#endif
    PyInterpreterState *interp = tstate->interp;

    // Reset data before re-populating.
    *data = (_PyCrossInterpreterData){0};
    data->free = PyMem_RawFree;  // Set a default that may be overridden.

    // Call the "getdata" func for the object.
    Py_INCREF(obj);
    crossinterpdatafunc getdata = _lookup_getdata(obj);
    if (getdata == NULL) {
        Py_DECREF(obj);
        return -1;
    }
    int res = getdata(obj, data);
    Py_DECREF(obj);
    if (res != 0) {
        return -1;
    }

    // Fill in the blanks and validate the result.
    data->interp = interp->id;
    if (_check_xidata(tstate, data) != 0) {
        _PyCrossInterpreterData_Release(data);
        return -1;
    }

    return 0;
}

static void
_release_xidata(void *arg)
{
    _PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
    if (data->free != NULL) {
        data->free(data->data);
    }
    Py_XDECREF(data->obj);
}

static void
_call_in_interpreter(struct _gilstate_runtime_state *gilstate,
                     PyInterpreterState *interp,
                     void (*func)(void *), void *arg)
{
    /* We would use Py_AddPendingCall() if it weren't specific to the
     * main interpreter (see bpo-33608).  In the meantime we take a
     * naive approach.
     */
    PyThreadState *save_tstate = NULL;
    if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) {
        // XXX Using the "head" thread isn't strictly correct.
        PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
        // XXX Possible GILState issues?
        save_tstate = _PyThreadState_Swap(gilstate, tstate);
    }

    func(arg);

    // Switch back.
    if (save_tstate != NULL) {
        _PyThreadState_Swap(gilstate, save_tstate);
    }
}

void
_PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
{
    if (data->data == NULL && data->obj == NULL) {
        // Nothing to release!
        return;
    }

    // Switch to the original interpreter.
    PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
    if (interp == NULL) {
        // The interpreter was already destroyed.
        if (data->free != NULL) {
            // XXX Someone leaked some memory...
        }
        return;
    }

    // "Release" the data and/or the object.
    struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
    _call_in_interpreter(gilstate, interp, _release_xidata, data);
}

PyObject *
_PyCrossInterpreterData_NewObject(_PyCrossInterpreterData *data)
{
    return data->new_object(data);
}

/* registry of {type -> crossinterpdatafunc} */

/* For now we use a global registry of shareable classes.  An
   alternative would be to add a tp_* slot for a class's
   crossinterpdatafunc.  It would be simpler and more efficient. */

static int
_register_xidata(struct _xidregistry *xidregistry, PyTypeObject *cls,
                 crossinterpdatafunc getdata)
{
    // Note that we effectively replace already registered classes
    // rather than failing.
    struct _xidregitem *newhead = PyMem_RawMalloc(sizeof(struct _xidregitem));
    if (newhead == NULL) {
        return -1;
    }
    newhead->cls = cls;
    newhead->getdata = getdata;
    newhead->next = xidregistry->head;
    xidregistry->head = newhead;
    return 0;
}

static void _register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry);

int
_PyCrossInterpreterData_RegisterClass(PyTypeObject *cls,
                                      crossinterpdatafunc getdata)
{
    if (!PyType_Check(cls)) {
        PyErr_Format(PyExc_ValueError, "only classes may be registered");
        return -1;
    }
    if (getdata == NULL) {
        PyErr_Format(PyExc_ValueError, "missing 'getdata' func");
        return -1;
    }

    // Make sure the class isn't ever deallocated.
    Py_INCREF((PyObject *)cls);

    struct _xidregistry *xidregistry = &_PyRuntime.xidregistry;
    PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK);
    if (xidregistry->head == NULL) {
        _register_builtins_for_crossinterpreter_data(xidregistry);
    }
    int res = _register_xidata(xidregistry, cls, getdata);
    PyThread_release_lock(xidregistry->mutex);
    return res;
}
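
/* Illustrative only: how a (hypothetical) extension type could be made
 * shareable through the registry above.  MyType and _mytype_shared are
 * placeholder names; nothing in this file defines them.
 *
 *     static int
 *     _mytype_shared(PyObject *obj, _PyCrossInterpreterData *data)
 *     {
 *         // ... fill in data->data, data->obj, data->new_object, data->free
 *         return 0;
 *     }
 *
 *     if (_PyCrossInterpreterData_RegisterClass(&MyType, _mytype_shared) < 0) {
 *         return -1;   // registration failed; an exception is set
 *     }
 *
 * Registration takes a strong reference to the class, and a later
 * registration for the same class shadows an earlier one (lookup walks
 * the list from the head).
 */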

/* Cross-interpreter objects are looked up by exact match on the class.
   We can reassess this policy when we move from a global registry to a
   tp_* slot. */

crossinterpdatafunc
_PyCrossInterpreterData_Lookup(PyObject *obj)
{
    struct _xidregistry *xidregistry = &_PyRuntime.xidregistry;
    PyObject *cls = PyObject_Type(obj);
    crossinterpdatafunc getdata = NULL;
    PyThread_acquire_lock(xidregistry->mutex, WAIT_LOCK);
    struct _xidregitem *cur = xidregistry->head;
    if (cur == NULL) {
        _register_builtins_for_crossinterpreter_data(xidregistry);
        cur = xidregistry->head;
    }
    for (; cur != NULL; cur = cur->next) {
        if (cur->cls == (PyTypeObject *)cls) {
            getdata = cur->getdata;
            break;
        }
    }
    Py_DECREF(cls);
    PyThread_release_lock(xidregistry->mutex);
    return getdata;
}

/* cross-interpreter data for builtin types */

struct _shared_bytes_data {
    char *bytes;
    Py_ssize_t len;
};

static PyObject *
_new_bytes_object(_PyCrossInterpreterData *data)
{
    struct _shared_bytes_data *shared = (struct _shared_bytes_data *)(data->data);
    return PyBytes_FromStringAndSize(shared->bytes, shared->len);
}

static int
_bytes_shared(PyObject *obj, _PyCrossInterpreterData *data)
{
    struct _shared_bytes_data *shared = PyMem_NEW(struct _shared_bytes_data, 1);
    if (shared == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    if (PyBytes_AsStringAndSize(obj, &shared->bytes, &shared->len) < 0) {
        PyMem_Free(shared);
        return -1;
    }
    data->data = (void *)shared;
    Py_INCREF(obj);
    data->obj = obj;  // Will be "released" (decref'ed) when data released.
    data->new_object = _new_bytes_object;
    data->free = PyMem_Free;
    return 0;
}

struct _shared_str_data {
    int kind;
    const void *buffer;
    Py_ssize_t len;
};

static PyObject *
_new_str_object(_PyCrossInterpreterData *data)
{
    struct _shared_str_data *shared = (struct _shared_str_data *)(data->data);
    return PyUnicode_FromKindAndData(shared->kind, shared->buffer, shared->len);
}

static int
_str_shared(PyObject *obj, _PyCrossInterpreterData *data)
{
    struct _shared_str_data *shared = PyMem_NEW(struct _shared_str_data, 1);
    if (shared == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    shared->kind = PyUnicode_KIND(obj);
    shared->buffer = PyUnicode_DATA(obj);
    shared->len = PyUnicode_GET_LENGTH(obj);
    data->data = (void *)shared;
    Py_INCREF(obj);
    data->obj = obj;  // Will be "released" (decref'ed) when data released.
    data->new_object = _new_str_object;
    data->free = PyMem_Free;
    return 0;
}

static PyObject *
_new_long_object(_PyCrossInterpreterData *data)
{
    return PyLong_FromSsize_t((Py_ssize_t)(data->data));
}

static int
_long_shared(PyObject *obj, _PyCrossInterpreterData *data)
{
    /* Note that this means the size of shareable ints is bounded by
     * sys.maxsize.  Hence on 32-bit architectures that is half the
     * size of maximum shareable ints on 64-bit.
     */
    Py_ssize_t value = PyLong_AsSsize_t(obj);
    if (value == -1 && PyErr_Occurred()) {
        if (PyErr_ExceptionMatches(PyExc_OverflowError)) {
            PyErr_SetString(PyExc_OverflowError, "try sending as bytes");
        }
        return -1;
    }
    data->data = (void *)value;
    data->obj = NULL;
    data->new_object = _new_long_object;
    data->free = NULL;
    return 0;
}

static PyObject *
_new_none_object(_PyCrossInterpreterData *data)
{
    // XXX Singleton refcounts are problematic across interpreters...
    Py_INCREF(Py_None);
    return Py_None;
}

static int
_none_shared(PyObject *obj, _PyCrossInterpreterData *data)
{
    data->data = NULL;
    // data->obj remains NULL
    data->new_object = _new_none_object;
    data->free = NULL;  // There is nothing to free.
    return 0;
}

static void
_register_builtins_for_crossinterpreter_data(struct _xidregistry *xidregistry)
{
    // None
    if (_register_xidata(xidregistry, (PyTypeObject *)PyObject_Type(Py_None), _none_shared) != 0) {
        Py_FatalError("could not register None for cross-interpreter sharing");
    }

    // int
    if (_register_xidata(xidregistry, &PyLong_Type, _long_shared) != 0) {
        Py_FatalError("could not register int for cross-interpreter sharing");
    }

    // bytes
    if (_register_xidata(xidregistry, &PyBytes_Type, _bytes_shared) != 0) {
        Py_FatalError("could not register bytes for cross-interpreter sharing");
    }

    // str
    if (_register_xidata(xidregistry, &PyUnicode_Type, _str_shared) != 0) {
        Py_FatalError("could not register str for cross-interpreter sharing");
    }
}


_PyFrameEvalFunction
_PyInterpreterState_GetEvalFrameFunc(PyInterpreterState *interp)
{
    if (interp->eval_frame == NULL) {
        return _PyEval_EvalFrameDefault;
    }
    return interp->eval_frame;
}


void
_PyInterpreterState_SetEvalFrameFunc(PyInterpreterState *interp,
                                     _PyFrameEvalFunction eval_frame)
{
    if (eval_frame == _PyEval_EvalFrameDefault) {
        interp->eval_frame = NULL;
    }
    else {
        interp->eval_frame = eval_frame;
    }
}
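
/* Illustrative only: how a tool might install a custom frame evaluator
 * through the setter above (the PEP 523 hook).  my_eval_frame is a
 * hypothetical name; a real evaluator would normally delegate to
 * _PyEval_EvalFrameDefault() as shown.
 *
 *     static PyObject *
 *     my_eval_frame(PyThreadState *tstate, _PyInterpreterFrame *frame,
 *                   int throwflag)
 *     {
 *         // ... inspect or record the frame here ...
 *         return _PyEval_EvalFrameDefault(tstate, frame, throwflag);
 *     }
 *
 *     _PyInterpreterState_SetEvalFrameFunc(PyInterpreterState_Get(),
 *                                          my_eval_frame);
 *
 * Passing _PyEval_EvalFrameDefault itself resets the hook: the setter
 * stores NULL and the getter maps NULL back to the default evaluator.
 */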


const PyConfig*
_PyInterpreterState_GetConfig(PyInterpreterState *interp)
{
    return &interp->config;
}


int
_PyInterpreterState_GetConfigCopy(PyConfig *config)
{
    PyInterpreterState *interp = PyInterpreterState_Get();

    PyStatus status = _PyConfig_Copy(config, &interp->config);
    if (PyStatus_Exception(status)) {
        _PyErr_SetFromPyStatus(status);
        return -1;
    }
    return 0;
}


const PyConfig*
_Py_GetConfig(void)
{
    assert(PyGILState_Check());
    PyThreadState *tstate = _PyThreadState_GET();
    return _PyInterpreterState_GetConfig(tstate->interp);
}
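
/* Illustrative only: a minimal sketch of reading the per-interpreter
 * configuration via the helper above.  The GIL must be held (the assert
 * checks this in debug builds); 'verbose' is just one example field.
 *
 *     const PyConfig *config = _Py_GetConfig();
 *     if (config->verbose) {
 *         fprintf(stderr, "running verbosely\n");
 *     }
 */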

#define MINIMUM_OVERHEAD 1000

static PyObject **
push_chunk(PyThreadState *tstate, int size)
{
    int allocate_size = DATA_STACK_CHUNK_SIZE;
    while (allocate_size < (int)sizeof(PyObject*)*(size + MINIMUM_OVERHEAD)) {
        allocate_size *= 2;
    }
    _PyStackChunk *new = allocate_chunk(allocate_size, tstate->datastack_chunk);
    if (new == NULL) {
        return NULL;
    }
    if (tstate->datastack_chunk) {
        tstate->datastack_chunk->top = tstate->datastack_top -
                                       &tstate->datastack_chunk->data[0];
    }
    tstate->datastack_chunk = new;
    tstate->datastack_limit = (PyObject **)(((char *)new) + allocate_size);
    // When new is the "root" chunk (i.e. new->previous == NULL), we can keep
    // _PyThreadState_PopFrame from freeing it later by "skipping" over the
    // first element:
    PyObject **res = &new->data[new->previous == NULL];
    tstate->datastack_top = res + size;
    return res;
}
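
/* Worked example of the sizing loop above, assuming DATA_STACK_CHUNK_SIZE
 * is 16 KiB and pointers are 8 bytes (both are assumptions here, not
 * guarantees): a request for size == 3000 needs room for
 * 3000 + MINIMUM_OVERHEAD = 4000 pointers, i.e. 32000 bytes, so
 * allocate_size doubles once, from 16384 to 32768, before the chunk is
 * allocated.
 */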

_PyInterpreterFrame *
_PyThreadState_BumpFramePointerSlow(PyThreadState *tstate, size_t size)
{
    if (_PyThreadState_HasStackSpace(tstate, size)) {
        _PyInterpreterFrame *res = (_PyInterpreterFrame *)tstate->datastack_top;
        tstate->datastack_top += size;
        return res;
    }
    if (size > INT_MAX/2) {
        PyErr_NoMemory();
        return NULL;
    }
    return (_PyInterpreterFrame *)push_chunk(tstate, (int)size);
}

void
_PyThreadState_PopFrame(PyThreadState *tstate, _PyInterpreterFrame *frame)
{
    assert(tstate->datastack_chunk);
    PyObject **base = (PyObject **)frame;
    if (base == &tstate->datastack_chunk->data[0]) {
        _PyStackChunk *chunk = tstate->datastack_chunk;
        _PyStackChunk *previous = chunk->previous;
        // push_chunk ensures that the root chunk is never popped:
        assert(previous);
        tstate->datastack_top = &previous->data[previous->top];
        tstate->datastack_chunk = previous;
        _PyObject_VirtualFree(chunk, chunk->size);
        tstate->datastack_limit = (PyObject **)(((char *)previous) + previous->size);
    }
    else {
        assert(tstate->datastack_top);
        assert(tstate->datastack_top >= base);
        tstate->datastack_top = base;
    }
}


#ifdef __cplusplus
}
#endif