/* System specific fork hooks.  Linux version.
   Copyright (C) 2021-2025 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _FORK_H
#define _FORK_H

#include <assert.h>
#include <kernel-posix-timers.h>
#include <ldsodefs.h>
#include <list.h>
#include <mqueue.h>
#include <pthreadP.h>
#include <sysdep.h>
#include <getrandom-internal.h>
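
/* Advancing the fork generation lets a pthread_once whose initializer
   the fork interrupted be restarted in the child instead of blocking
   forever: the in-progress state of a once control embeds the
   generation (see __pthread_once).  */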
static inline void
fork_system_setup (void)
{
  /* See __pthread_once.  */
  __fork_generation += __PTHREAD_ONCE_FORK_GEN_INCR;
}
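
/* Bring the thread library back to a single-threaded state in the
   child: reset the thread count and the locks that other threads may
   have held at fork time, then let the mq_notify, POSIX timer, and
   getrandom subsystems repair their per-process state if they are
   linked in (call_function_static_weak is a no-op otherwise).  */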
static void
fork_system_setup_after_fork (void)
{
  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize thread library locks.  */
  GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;

  call_function_static_weak (__mq_notify_fork_subprocess);
  call_function_static_weak (__timer_fork_subprocess);
  call_function_static_weak (__getrandom_fork_subprocess);
}

/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
static void
reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */
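
  /* GL (dl_in_flight_stack) records a stack-list operation that the
     fork may have interrupted: bit 0 set means an add to the head of
     a list was in flight, clear means a delete; the remaining bits
     are the list_t element being moved.  */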
  if (GL (dl_in_flight_stack) != 0)
    {
      bool add_p = GL (dl_in_flight_stack) & 1;
      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case we
             only need to check the beginning of these lists to see if the
             pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
            l = &GL (dl_stack_used);
          else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
            l = &GL (dl_stack_cache);

          if (l != NULL)
            {
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }

      GL (dl_in_flight_stack) = 0;
    }
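
  /* GL (dl_stack_used) links the glibc-allocated stacks of running
     threads, GL (dl_stack_cache) the free ones; after the fork every
     thread on the used list other than self is gone.  */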

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &GL (dl_stack_used))
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          GL (dl_stack_cache_actsize) += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We have allocated the block which we do not
                       free here so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
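
          /* Give the dead thread's getrandom vDSO opaque state back:
             each state is either on the allocator's free list or owned
             by a running thread, and this thread no longer runs.  */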
          call_function_static_weak (__getrandom_reset_state, curp);
        }
    }

  /* Also reset stale getrandom states for user stack threads.  */
  list_for_each (runp, &GL (dl_stack_user))
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        call_function_static_weak (__getrandom_reset_state, curp);
    }
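
  /* User-provided stacks are not glibc's to recycle; only the
     glibc-allocated ones are spliced into the free-stack cache.  */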

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists is
     decided by the stack_mode flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&GL (dl_stack_used));
  INIT_LIST_HEAD (&GL (dl_stack_user));
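
  /* stack_mode subsumes the old user_stack flag; ALLOCATE_GUARD_USER
     marks a user-supplied stack, which must stay off the cache and go
     back on the user-stack list.  */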
  if (__glibc_unlikely (THREAD_GETMEM (self, stack_mode)
                        == ALLOCATE_GUARD_USER))
    list_add (&self->list, &GL (dl_stack_user));
  else
    list_add (&self->list, &GL (dl_stack_used));
}

#endif