/* Stack cache management for NPTL.
   Copyright (C) 2002-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <nptl-stack.h>
#include <ldsodefs.h>
#include <pthreadP.h>

size_t __nptl_stack_cache_maxsize = 40 * 1024 * 1024;
int32_t __nptl_stack_hugetlb = 1;
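
/* Both list helpers below record the element being manipulated in
   GL (dl_in_flight_stack) before touching the list and clear it again
   afterwards; bit 0 distinguishes an addition from a deletion.  This
   lets code that inspects the stack lists asynchronously (e.g. the
   fork-time stack reclamation) detect and repair an operation that
   was interrupted half-way.  */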

void
__nptl_stack_list_del (list_t *elem)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem;

  atomic_write_barrier ();

  list_del (elem);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_del)

void
__nptl_stack_list_add (list_t *elem, list_t *list)
{
  GL (dl_in_flight_stack) = (uintptr_t) elem | 1;

  atomic_write_barrier ();

  list_add (elem, list);

  atomic_write_barrier ();

  GL (dl_in_flight_stack) = 0;
}
libc_hidden_def (__nptl_stack_list_add)

void
__nptl_free_stacks (size_t limit)
{
  /* We reduce the size of the cache.  Remove the last entries until
     the size is below the limit.  */
  list_t *entry;
  list_t *prev;

  /* Search from the end of the list.  */
  list_for_each_prev_safe (entry, prev, &GL (dl_stack_cache))
    {
      struct pthread *curr;

      curr = list_entry (entry, struct pthread, list);
      if (__nptl_stack_in_use (curr))
        {
          /* Unlink the block.  */
          __nptl_stack_list_del (entry);

          /* Account for the freed memory.  */
          GL (dl_stack_cache_actsize) -= curr->stackblock_size;

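          /* Evictions are traced when TLS debugging is enabled via
             the LD_DEBUG environment variable (LD_DEBUG=tls sets
             DL_DEBUG_TLS).  */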
          if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_TLS))
            GLRO (dl_debug_printf) (
              "TCB cache full, deallocating: TID=%ld, TCB=0x%lx\n",
              (long int) curr->tid, (unsigned long int) curr);

          /* Free the memory associated with the ELF TLS.  */
          _dl_deallocate_tls (TLS_TPADJ (curr), false);

          /* Remove this block.  This should never fail.  If it does,
             something is really wrong.  */
          if (__munmap (curr->stackblock, curr->stackblock_size) != 0)
            abort ();

          /* Maybe we have freed enough.  */
          if (GL (dl_stack_cache_actsize) <= limit)
            break;
        }
    }
}

/* Add a stack which is not used anymore to the cache.  Must be
   called with the cache lock held.  */
static inline void
__attribute ((always_inline))
queue_stack (struct pthread *stack)
{
  /* The 'stack' parameter is a pointer to the TCB (struct pthread),
     not just the stack.  */
  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_TLS))
    GLRO (dl_debug_printf) ("TCB deallocated into cache: TID=%ld, TCB=0x%lx\n",
                            (long int) stack->tid, (unsigned long int) stack);

  /* We unconditionally add the stack to the list.  The memory may
     still be in use but it will not be reused until the kernel marks
     the stack as not used anymore.  */
  __nptl_stack_list_add (&stack->list, &GL (dl_stack_cache));

  GL (dl_stack_cache_actsize) += stack->stackblock_size;
  if (__glibc_unlikely (GL (dl_stack_cache_actsize)
                        > __nptl_stack_cache_maxsize))
    __nptl_free_stacks (__nptl_stack_cache_maxsize);
}

void
__nptl_deallocate_stack (struct pthread *pd)
{
  lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);

  /* Remove the thread from the list of threads with user defined
     stacks.  */
  __nptl_stack_list_del (&pd->list);

  /* Not much to do.  Just free the mmap()ed memory.  Note that we do
     not reset the 'used' flag in the 'tid' field.  This is done by
     the kernel.  If no thread has been created yet this field is
     still zero.  */
  if (__glibc_likely (pd->stack_mode != ALLOCATE_GUARD_USER))
    (void) queue_stack (pd);
  else
    {
      /* User-provided stack.  We must not free it.  But we must free
         the TLS memory.  */
      if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_TLS))
        GLRO (dl_debug_printf) (
          "TCB for user-supplied stack deallocated: TID=%ld, TCB=0x%lx\n",
          (long int) pd->tid, (unsigned long int) pd);

      /* Free the memory associated with the ELF TLS.  */
      _dl_deallocate_tls (TLS_TPADJ (pd), false);
    }

  lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
}
libc_hidden_def (__nptl_deallocate_stack)

/* This function is internal (it has a GLIBC_PRIVATE version), but it
   is widely used (either via weak symbol or dlsym) to obtain the
   __static_tls_size value.  This value is then used to adjust the
   value of the stack size attribute, so that applications receive the
   full requested stack size, not diminished by the TCB and static TLS
   allocation on the stack.  Once the TCB is separately allocated,
   this function should be removed or renamed (if it is still
   necessary at that point).  */
size_t
__pthread_get_minstack (const pthread_attr_t *attr)
{
  return (GLRO(dl_pagesize) + __nptl_tls_static_size_for_stack ()
          + PTHREAD_STACK_MIN);
}
libc_hidden_def (__pthread_get_minstack)
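
/* Illustrative sketch only, kept under "#if 0" so it is not built as
   part of this file: a runtime outside glibc would typically resolve
   the GLIBC_PRIVATE symbol with dlsym, as the comment above describes,
   and add the per-thread overhead it reports on top of the stack size
   it actually wants.  The helper name request_full_stack is made up
   for the example.  */
#if 0
#define _GNU_SOURCE
#include <dlfcn.h>
#include <limits.h>
#include <pthread.h>
#include <stddef.h>

static int
request_full_stack (pthread_attr_t *attr, size_t wanted)
{
  /* The symbol may be absent (static linking, other libcs), so check
     the result before using it.  */
  size_t (*get_minstack) (const pthread_attr_t *)
    = (size_t (*) (const pthread_attr_t *))
      dlsym (RTLD_DEFAULT, "__pthread_get_minstack");

  size_t overhead = 0;
  if (get_minstack != NULL)
    /* __pthread_get_minstack returns PTHREAD_STACK_MIN plus the page
       size and the static TLS reservation; keep only the overhead.  */
    overhead = get_minstack (attr) - PTHREAD_STACK_MIN;

  return pthread_attr_setstacksize (attr, wanted + overhead);
}
#endif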