/* Completion of TCB initialization after TLS_INIT_TP.  NPTL version.
   Copyright (C) 2020-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <kernel-features.h>
#include <ldsodefs.h>
#include <list.h>
#include <pthreadP.h>
#include <tls.h>
#include <rseq-internal.h>
#include <thread_pointer.h>

#define TUNABLE_NAMESPACE pthread
#include <dl-tunables.h>

#ifndef __ASSUME_SET_ROBUST_LIST
/* Set to true in __tls_init_tp once the set_robust_list system call
   has succeeded; remains false if the running kernel lacks the call.
   Only needed when availability cannot be assumed at compile time.  */
bool __nptl_set_robust_list_avail;
rtld_hidden_data_def (__nptl_set_robust_list_avail)
#endif

/* Event-reporting flag for the initial thread.  Before the thread is
   visible on GL (dl_stack_user) the debugger cannot find its TCB, so
   it sets this variable instead; __tls_init_tp copies the value into
   the TCB's report_events field.  */
bool __nptl_initial_report_events;
rtld_hidden_def (__nptl_initial_report_events)

#ifdef SHARED
/* Placeholder lock/unlock routine installed by __tls_pre_init_tp;
   replaced later by the real implementation (see __rtld_mutex_init).
   Always reports success.  */
static int
rtld_mutex_dummy (pthread_mutex_t *mutex)
{
  /* Nothing to do before real locking is available.  */
  return 0;
}
#endif

/* Exported rseq registration flags.  Declared const and never
   assigned, so it is permanently zero.  */
const unsigned int __rseq_flags;

/* Alignment of the rseq area.  NOTE(review): not written in this
   file; presumably assigned during rseq setup elsewhere (see
   rseq-internal.h) -- confirm.  */
size_t _rseq_align attribute_hidden;
void
|
2021-05-10 08:31:41 +00:00
|
|
|
__tls_pre_init_tp (void)
|
2021-04-21 17:49:51 +00:00
|
|
|
{
|
2021-05-10 08:31:41 +00:00
|
|
|
/* The list data structures are not consistent until
|
|
|
|
initialized. */
|
2021-04-21 17:49:51 +00:00
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_used));
|
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_user));
|
2021-05-10 08:31:41 +00:00
|
|
|
INIT_LIST_HEAD (&GL (dl_stack_cache));
|
2021-05-10 08:31:41 +00:00
|
|
|
|
|
|
|
#ifdef SHARED
|
|
|
|
___rtld_mutex_lock = rtld_mutex_dummy;
|
|
|
|
___rtld_mutex_unlock = rtld_mutex_dummy;
|
|
|
|
#endif
|
|
|
|
}
/* Second phase of TCB setup for the initial thread, run after
   TLS_INIT_TP has installed the thread pointer: publish the thread on
   the user-stack list, hand the kernel our TID address and
   robust-mutex list head, attempt rseq registration, and record a
   stack-extent estimate for the unwinder.  */
void
__tls_init_tp (void)
{
  struct pthread *pd = THREAD_SELF;

  /* Set up thread stack list management.  The initial thread's stack
     was not allocated by libpthread, so it goes on dl_stack_user.  */
  list_add (&pd->list, &GL (dl_stack_user));

  /* Early initialization of the TCB.  The set_tid_address return
     value is the kernel's TID for this thread.  */
  pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
  THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
  /* NOTE(review): presumably records that the initial thread's stack
     and guard are user/kernel-provided rather than allocated by
     pthread_create -- confirm against the ALLOCATE_GUARD_USER
     definition.  */
  THREAD_SETMEM (pd, stack_mode, ALLOCATE_GUARD_USER);

  /* Before initializing GL (dl_stack_user), the debugger could not
     find us and had to set __nptl_initial_report_events.  Propagate
     its setting.  */
  THREAD_SETMEM (pd, report_events, __nptl_initial_report_events);

  /* Initialize the robust mutex data.  */
  {
#if __PTHREAD_MUTEX_HAVE_PREV
    pd->robust_prev = &pd->robust_head;
#endif
    pd->robust_head.list = &pd->robust_head;
    /* Offset from a list entry to the futex word of the mutex that
       contains it, as required by the set_robust_list protocol.  */
    pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
    int res = INTERNAL_SYSCALL_CALL (set_robust_list, &pd->robust_head,
                                     sizeof (struct robust_list_head));
    if (!INTERNAL_SYSCALL_ERROR_P (res))
      {
        /* On success there is nothing to record unless availability
           of the syscall must be tracked at run time.  */
#ifndef __ASSUME_SET_ROBUST_LIST
        __nptl_set_robust_list_avail = true;
#endif
      }
  }

  {
    /* If the registration fails or is disabled by tunable, the public
       '__rseq_size' will be set to '0' regardless of the feature size of the
       allocated rseq area.  An rseq area of at least 32 bytes is always
       allocated since application code is allowed to check the status of the
       rseq registration by reading the content of the 'cpu_id' field.  */
    bool do_rseq = TUNABLE_GET (rseq, int, NULL);
    if (!rseq_register_current_thread (pd, do_rseq))
      _rseq_size = 0;
  }

  /* Set initial thread's stack block from 0 up to __libc_stack_end.
     It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
     purposes this is good enough.  */
  THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
}
|