nptl: Add support for setting up guard pages with MADV_GUARD_INSTALL

Linux 6.13 (662df3e5c3766) added a lightweight way to define guard areas
through the madvise syscall.  Instead of protecting the guard region with
PROT_NONE through mprotect, userland can madvise the same area with a
special flag, and the kernel ensures that accessing the area triggers a
SIGSEGV (as with a PROT_NONE mapping).
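
As an illustration (not part of this commit), a minimal sketch of both
approaches; it assumes the MADV_GUARD_INSTALL constant from the Linux 6.13
uapi headers and a single guard page at the start of the mapping:

#include <stdbool.h>
#include <sys/mman.h>
#include <unistd.h>

/* Reserve SIZE bytes and make the first page a guard area.  Returns the
   mapping, or MAP_FAILED on failure.  */
static void *
map_with_guard (size_t size, bool use_madvise)
{
  size_t pagesz = sysconf (_SC_PAGESIZE);
  void *mem = mmap (NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED)
    return MAP_FAILED;
  /* Either install a lightweight guard (Linux >= 6.13, keeps a single
     VMA) or split the mapping with a PROT_NONE region (one extra VMA).  */
  int r = use_madvise
          ? madvise (mem, pagesz, MADV_GUARD_INSTALL)
          : mprotect (mem, pagesz, PROT_NONE);
  if (r != 0)
    {
      munmap (mem, size);
      return MAP_FAILED;
    }
  /* Accessing the first page now raises SIGSEGV in both cases.  */
  return mem;
}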

The madvise approach has the advantage of lower kernel memory consumption
for the process page table (one less VMA per guard area) and slightly less
kernel contention (also due to fewer VMAs being tracked).
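
The VMA difference can be observed through /proc/self/maps; a hypothetical
helper (not from the patch) that counts one line per VMA:

#include <stdio.h>

/* Count the VMAs of the current process; /proc/self/maps has one line
   per VMA.  Returns -1 on error.  The count grows after an
   mprotect (PROT_NONE) guard, but stays flat after MADV_GUARD_INSTALL.  */
static int
count_vmas (void)
{
  FILE *f = fopen ("/proc/self/maps", "r");
  if (f == NULL)
    return -1;
  int vmas = 0;
  for (int c; (c = getc (f)) != EOF; )
    if (c == '\n')
      vmas++;
  fclose (f);
  return vmas;
}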

pthread_create allocates a new thread stack in one of two ways: if a guard
area is set (the default), it allocates the required memory range with
PROT_NONE and then mprotects the usable stack area.  Otherwise, if a
guard page is not set, it allocates the region with the required flags.

With MADV_GUARD_INSTALL support, the stack area is allocated with the
required flags and then the guard region is installed.  If the kernel
does not support it, the usual method is used instead (and
MADV_GUARD_INSTALL is disabled for future stack creations).
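
In condensed form, the detection works roughly like the sketch below
(illustrative names only; the actual implementation is in the
allocatestack.c hunks further down):

#include <stdatomic.h>
#include <sys/mman.h>

/* 1 while MADV_GUARD_INSTALL is assumed to work, 0 after the first
   failure (kernel without support).  */
static _Atomic int use_madv_guard = 1;

static int
install_guard (char *guard, size_t guardsize)
{
  if (atomic_load_explicit (&use_madv_guard, memory_order_relaxed))
    {
      if (madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0)
        return 0;
      /* Kernel without support: disable the madvise path for all
         future stack creations.  */
      atomic_store_explicit (&use_madv_guard, 0, memory_order_relaxed);
    }
  /* Fallback: make the guard area inaccessible with PROT_NONE.  */
  return mprotect (guard, guardsize, PROT_NONE);
}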

The stack allocation strategy is recorded in the pthread struct, and it
is used in case the guard region needs to be resized.  To avoid requiring
an extra field, 'user_stack' is repurposed and renamed to 'stack_mode'.

This patch also adds a proper test for the pthread guard.

I checked on x86_64, aarch64, powerpc64le, and hppa with kernel 6.13.0-rc7.

Reviewed-by: DJ Delorie <dj@redhat.com>
Adhemerval Zanella, 2025-01-08 15:16:48 -03:00
parent 8e86549d14
commit a6fbe36b7f
10 changed files with 561 additions and 95 deletions


@@ -289,6 +289,7 @@ tests = \
tst-dlsym1 \
tst-exec4 \
tst-exec5 \
tst-guard1 \
tst-initializers1 \
tst-initializers1-c11 \
tst-initializers1-c89 \


@@ -1,7 +1,3 @@
pthread_attr_setguardsize
test effectiveness
pthread_attr_[sg]etschedparam
what to test?


@@ -146,10 +146,37 @@ get_cached_stack (size_t *sizep, void **memp)
return result;
}
/* Assume support for MADV_GUARD_INSTALL; setup_stack_prot will disable it
and fall back to ALLOCATE_GUARD_PROT_NONE if the madvise call fails. */
static int allocate_stack_mode = ALLOCATE_GUARD_MADV_GUARD;
static inline int stack_prot (void)
{
return (PROT_READ | PROT_WRITE
| ((GL(dl_stack_flags) & PF_X) ? PROT_EXEC : 0));
}
static void *
allocate_thread_stack (size_t size, size_t guardsize)
{
/* MADV_GUARD_INSTALL does not require an additional PROT_NONE mapping. */
int prot = stack_prot ();
if (atomic_load_relaxed (&allocate_stack_mode) == ALLOCATE_GUARD_PROT_NONE)
/* If a guard page is required, avoid committing memory by first allocating
with PROT_NONE and then setting the required permissions, excluding the
guard page. */
prot = guardsize == 0 ? prot : PROT_NONE;
return __mmap (NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1,
0);
}
/* Return the guard page position on the allocated stack. */
static inline char *
__attribute ((always_inline))
guard_position (void *mem, size_t size, size_t guardsize, const struct pthread *pd,
size_t pagesize_m1)
{
#if _STACK_GROWS_DOWN
@@ -159,27 +186,131 @@ guard_position (void *mem, size_t size, size_t guardsize, struct pthread *pd,
#endif
}
/* Setup the MEM thread stack of SIZE bytes with the required protection
flags, along with a guard area of GUARDSIZE bytes. It first tries
MADV_GUARD_INSTALL, and then falls back to setting up the guard area with
an extra PROT_NONE mapping. Update PD with the type of guard area setup. */
static inline bool
setup_stack_prot (char *mem, size_t size, struct pthread *pd,
size_t guardsize, size_t pagesize_m1)
{
if (__glibc_unlikely (guardsize == 0))
return true;
char *guard = guard_position (mem, size, guardsize, pd, pagesize_m1);
if (atomic_load_relaxed (&allocate_stack_mode) == ALLOCATE_GUARD_MADV_GUARD)
{
if (__madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0)
{
pd->stack_mode = ALLOCATE_GUARD_MADV_GUARD;
return true;
}
/* If madvise fails it means the kernel does not support the guard
advice (we assume the syscall is available, the guard is page-aligned,
and the length is non-negative). The stack already has the expected
protection flags, so it just needs to PROT_NONE the guard area. */
atomic_store_relaxed (&allocate_stack_mode, ALLOCATE_GUARD_PROT_NONE);
if (__mprotect (guard, guardsize, PROT_NONE) != 0)
return false;
}
else
{
const int prot = stack_prot ();
char *guardend = guard + guardsize;
#if _STACK_GROWS_DOWN
/* As defined in guard_position, for architectures where the stack grows
downward the guard page is always at the start of the allocated area. */
if (__mprotect (guardend, size - guardsize, prot) != 0)
return false;
#else
size_t mprots1 = (uintptr_t) guard - (uintptr_t) mem;
if (__mprotect (mem, mprots1, prot) != 0)
return false;
size_t mprots2 = ((uintptr_t) mem + size) - (uintptr_t) guardend;
if (__mprotect (guardend, mprots2, prot) != 0)
return false;
#endif
}
pd->stack_mode = ALLOCATE_GUARD_PROT_NONE;
return true;
}
/* Update the guard area of the thread stack MEM of size SIZE with the new
GUARDSIZE. It uses the method recorded in PD's stack_mode. */
static inline bool
adjust_stack_prot (char *mem, size_t size, const struct pthread *pd,
size_t guardsize, size_t pagesize_m1)
{
/* The required guard area is larger than the current one. For
_STACK_GROWS_DOWN it means the guard grows as:
|guard|---------------------------------stack|
|new guard--|---------------------------stack|
while for _STACK_GROWS_UP:
|stack---------------------------|guard|-----|
|stack--------------------|new guard---|-----|
Both madvise and mprotect allow operating on the already guarded
region, so use the new guard placement with the new size. */
if (guardsize > pd->guardsize)
{
char *guard = guard_position (mem, size, guardsize, pd, pagesize_m1);
if (pd->stack_mode == ALLOCATE_GUARD_MADV_GUARD)
return __madvise (guard, guardsize, MADV_GUARD_INSTALL) == 0;
else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE)
return __mprotect (guard, guardsize, PROT_NONE) == 0;
}
/* The current guard area is larger than the required one. For
_STACK_GROWS_DOWN it means changing the guard as:
|guard-------|-------------------------stack|
|new guard|----------------------------stack|
And for _STACK_GROWS_UP:
|stack---------------------|guard-------|---|
|stack------------------------|new guard|---|
For ALLOCATE_GUARD_MADV_GUARD it means removing the slack area (the
disjoint region between the old and new guard), while for
ALLOCATE_GUARD_PROT_NONE it requires mprotecting it with the stack
protection flags. */
else if (pd->guardsize > guardsize)
{
size_t slacksize = pd->guardsize - guardsize;
if (pd->stack_mode == ALLOCATE_GUARD_MADV_GUARD)
{
void *slack =
#if _STACK_GROWS_DOWN
mem + guardsize;
#else
guard_position (mem, size, pd->guardsize, pd, pagesize_m1);
#endif
return __madvise (slack, slacksize, MADV_GUARD_REMOVE) == 0;
}
else if (pd->stack_mode == ALLOCATE_GUARD_PROT_NONE)
{
const int prot = stack_prot ();
#if _STACK_GROWS_DOWN
return __mprotect (mem + guardsize, slacksize, prot) == 0;
#else
char *new_guard = (char *)(((uintptr_t) pd - guardsize)
& ~pagesize_m1);
char *old_guard = (char *)(((uintptr_t) pd - pd->guardsize)
& ~pagesize_m1);
/* The guard size difference might be > 0, but once rounded
to the nearest page the size difference might be zero. */
if (new_guard > old_guard
&& __mprotect (old_guard, new_guard - old_guard, prot) != 0)
return false;
#endif
}
}
return true;
}
/* Mark the memory of the stack as usable to the kernel. It frees everything
@@ -291,7 +422,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* This is a user-provided stack. It will not be queued in the
stack cache nor will the memory (except the TLS memory) be freed. */
pd->stack_mode = ALLOCATE_GUARD_USER;
/* This is at least the second thread. */
pd->header.multiple_threads = 1;
@@ -325,10 +456,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
/* Allocate some anonymous memory. If possible use the cache. */
size_t guardsize;
size_t reported_guardsize;
void *mem;
/* Adjust the stack size for alignment. */
size &= ~tls_static_align_m1;
@@ -358,16 +486,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
return EINVAL;
/* Try to get a stack from the cache. */
pd = get_cached_stack (&size, &mem);
if (pd == NULL)
{
mem = allocate_thread_stack (size, guardsize);
if (__glibc_unlikely (mem == MAP_FAILED))
return errno;
@@ -394,15 +516,10 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
#endif
/* Now mprotect the required region excluding the guard area. */
if (!setup_stack_prot (mem, size, pd, guardsize, pagesize_m1))
{
__munmap (mem, size);
return errno;
}
/* Remember the stack-related values. */
@@ -456,59 +573,31 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
which will be read next. */
}
/* Create or resize the guard area if necessary on an already
allocated stack. */
if (!adjust_stack_prot (mem, size, pd, guardsize, pagesize_m1))
{
lll_lock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Remove the thread from the list. */
__nptl_stack_list_del (&pd->list);
lll_unlock (GL (dl_stack_cache_lock), LLL_PRIVATE);
/* Get rid of the TLS block we allocated. */
_dl_deallocate_tls (TLS_TPADJ (pd), false);
/* Free the stack memory regardless of whether the size
of the cache is over the limit or not. If this piece
of memory caused problems we better do not use it
anymore. Uh, and we ignore possible errors. There
is nothing we could do. */
(void) __munmap (mem, size);
return errno;
}
pd->guardsize = guardsize;
/* The pthread_getattr_np() calls need to get passed the size
requested in the attribute, regardless of how large the
actually used guardsize is. */
@@ -568,19 +657,21 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
static void
name_stack_maps (struct pthread *pd, bool set)
{
size_t adjust = pd->stack_mode == ALLOCATE_GUARD_PROT_NONE ?
pd->guardsize : 0;
#if _STACK_GROWS_DOWN
void *stack = pd->stackblock + adjust;
#else
void *stack = pd->stackblock;
#endif
size_t stacksize = pd->stackblock_size - adjust;
if (!set)
__set_vma_name (stack, stacksize, " glibc: unused stack");
else
{
unsigned int tid = pd->tid;
if (pd->stack_mode == ALLOCATE_GUARD_USER)
SET_STACK_NAME (" glibc: pthread user stack: ", stack, stacksize, tid);
else
SET_STACK_NAME (" glibc: pthread stack: ", stack, stacksize, tid);


@@ -125,6 +125,12 @@ struct priority_protection_data
unsigned int priomap[];
};
enum allocate_stack_mode_t
{
ALLOCATE_GUARD_MADV_GUARD = 0,
ALLOCATE_GUARD_PROT_NONE = 1,
ALLOCATE_GUARD_USER = 2,
};
/* Thread descriptor data structure. */
struct pthread
@@ -324,7 +330,7 @@ struct pthread
bool report_events;
enum allocate_stack_mode_t stack_mode;
/* True if thread must stop at startup time. */
bool stopped_start;


@@ -120,7 +120,7 @@ __nptl_deallocate_stack (struct pthread *pd)
not reset the 'used' flag in the 'tid' field. This is done by
the kernel. If no thread has been created yet this field is
still zero. */
if (__glibc_likely (pd->stack_mode != ALLOCATE_GUARD_USER))
(void) queue_stack (pd);
else
/* Free the memory associated with the ELF TLS. */


@@ -554,7 +554,7 @@ start_thread (void *arg)
to avoid creating a new free-state block during thread release. */
__getrandom_vdso_release (pd);
if (pd->stack_mode != ALLOCATE_GUARD_USER)
advise_stack_range (pd->stackblock, pd->stackblock_size, (uintptr_t) pd,
pd->guardsize);

nptl/tst-guard1.c (new file, 369 lines)

@@ -0,0 +1,369 @@
/* Basic tests for pthread guard area.
Copyright (C) 2025 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<https://www.gnu.org/licenses/>. */
#include <array_length.h>
#include <pthreaddef.h>
#include <setjmp.h>
#include <stackinfo.h>
#include <stdio.h>
#include <support/check.h>
#include <support/test-driver.h>
#include <support/xsignal.h>
#include <support/xthread.h>
#include <support/xunistd.h>
#include <sys/mman.h>
#include <stdlib.h>
static long int pagesz;
/* To check if the guard region is inaccessible, the thread tries to read
and write it and checks whether a SIGSEGV is generated. */
static volatile sig_atomic_t signal_jump_set;
static sigjmp_buf signal_jmp_buf;
static void
sigsegv_handler (int sig)
{
if (signal_jump_set == 0)
return;
siglongjmp (signal_jmp_buf, sig);
}
static bool
try_access_buf (char *ptr, bool write)
{
signal_jump_set = true;
bool failed = sigsetjmp (signal_jmp_buf, 0) != 0;
if (!failed)
{
if (write)
*(volatile char *)(ptr) = 'x';
else
*(volatile char *)(ptr);
}
signal_jump_set = false;
return !failed;
}
static bool
try_read_buf (char *ptr)
{
return try_access_buf (ptr, false);
}
static bool
try_write_buf (char *ptr)
{
return try_access_buf (ptr, true);
}
static bool
try_read_write_buf (char *ptr)
{
return try_read_buf (ptr) && try_write_buf(ptr);
}
/* Return the guard region of the current thread (it only makes sense on
a thread created by pthread_create). */
struct stack_t
{
char *stack;
size_t stacksize;
char *guard;
size_t guardsize;
};
static inline size_t
adjust_stacksize (size_t stacksize)
{
/* For some ABIs, the guard page position depends on the thread descriptor,
which in turn relies on the required static TLS. The only supported
_STACK_GROWS_UP ABI, hppa, defines TLS_DTV_AT_TP, and it is not
straightforward to calculate the guard region with the current pthread
APIs. So to get a correct stack size, assume an extra page after the
guard area. */
#if _STACK_GROWS_DOWN
return stacksize;
#elif _STACK_GROWS_UP
return stacksize - pagesz;
#endif
}
struct stack_t
get_current_stack_info (void)
{
pthread_attr_t attr;
TEST_VERIFY_EXIT (pthread_getattr_np (pthread_self (), &attr) == 0);
void *stack;
size_t stacksize;
TEST_VERIFY_EXIT (pthread_attr_getstack (&attr, &stack, &stacksize) == 0);
size_t guardsize;
TEST_VERIFY_EXIT (pthread_attr_getguardsize (&attr, &guardsize) == 0);
/* The guardsize is reported as the current page size, although it might
be adjusted to a larger value (aarch64 for instance). */
if (guardsize != 0 && guardsize < ARCH_MIN_GUARD_SIZE)
guardsize = ARCH_MIN_GUARD_SIZE;
#if _STACK_GROWS_DOWN
void *guard = guardsize ? stack - guardsize : 0;
#elif _STACK_GROWS_UP
stacksize = adjust_stacksize (stacksize);
void *guard = guardsize ? stack + stacksize : 0;
#endif
pthread_attr_destroy (&attr);
return (struct stack_t) { stack, stacksize, guard, guardsize };
}
struct thread_args_t
{
size_t stacksize;
size_t guardsize;
};
struct thread_args_t
get_thread_args (const pthread_attr_t *attr)
{
size_t stacksize;
size_t guardsize;
TEST_COMPARE (pthread_attr_getstacksize (attr, &stacksize), 0);
TEST_COMPARE (pthread_attr_getguardsize (attr, &guardsize), 0);
if (guardsize < ARCH_MIN_GUARD_SIZE)
guardsize = ARCH_MIN_GUARD_SIZE;
return (struct thread_args_t) { stacksize, guardsize };
}
static void
set_thread_args (pthread_attr_t *attr, const struct thread_args_t *args)
{
xpthread_attr_setstacksize (attr, args->stacksize);
xpthread_attr_setguardsize (attr, args->guardsize);
}
static void *
tf (void *closure)
{
struct thread_args_t *args = closure;
struct stack_t s = get_current_stack_info ();
if (test_verbose)
printf ("debug: [tid=%jd] stack = { .stack=%p, stacksize=%#zx, guard=%p, "
"guardsize=%#zx }\n",
(intmax_t) gettid (),
s.stack,
s.stacksize,
s.guard,
s.guardsize);
if (args != NULL)
{
TEST_COMPARE (adjust_stacksize (args->stacksize), s.stacksize);
TEST_COMPARE (args->guardsize, s.guardsize);
}
/* Ensure we can access the stack area. */
TEST_COMPARE (try_read_buf (s.stack), true);
TEST_COMPARE (try_read_buf (&s.stack[s.stacksize / 2]), true);
TEST_COMPARE (try_read_buf (&s.stack[s.stacksize - 1]), true);
/* Check if accessing the guard area results in SIGSEGV. */
if (s.guardsize > 0)
{
TEST_COMPARE (try_read_write_buf (s.guard), false);
TEST_COMPARE (try_read_write_buf (&s.guard[s.guardsize / 2]), false);
TEST_COMPARE (try_read_write_buf (&s.guard[s.guardsize] - 1), false);
}
return NULL;
}
/* Test 1: caller provided stack without guard. */
static void
do_test1 (void)
{
pthread_attr_t attr;
xpthread_attr_init (&attr);
size_t stacksize = support_small_thread_stack_size ();
void *stack = xmmap (0,
stacksize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK,
-1);
xpthread_attr_setstack (&attr, stack, stacksize);
xpthread_attr_setguardsize (&attr, 0);
struct thread_args_t args = { stacksize, 0 };
pthread_t t = xpthread_create (&attr, tf, &args);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
xpthread_attr_destroy (&attr);
xmunmap (stack, stacksize);
}
/* Test 2: same as 1., but with a guard area. */
static void
do_test2 (void)
{
pthread_attr_t attr;
xpthread_attr_init (&attr);
size_t stacksize = support_small_thread_stack_size ();
void *stack = xmmap (0,
stacksize,
PROT_READ | PROT_WRITE,
MAP_PRIVATE|MAP_ANONYMOUS|MAP_STACK,
-1);
xpthread_attr_setstack (&attr, stack, stacksize);
xpthread_attr_setguardsize (&attr, pagesz);
struct thread_args_t args = { stacksize, 0 };
pthread_t t = xpthread_create (&attr, tf, &args);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
xpthread_attr_destroy (&attr);
xmunmap (stack, stacksize);
}
/* Test 3: pthread_create with default values. */
static void
do_test3 (void)
{
pthread_t t = xpthread_create (NULL, tf, NULL);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
}
/* Test 4: pthread_create without a guard area. */
static void
do_test4 (void)
{
pthread_attr_t attr;
xpthread_attr_init (&attr);
struct thread_args_t args = get_thread_args (&attr);
args.stacksize += args.guardsize;
args.guardsize = 0;
set_thread_args (&attr, &args);
pthread_t t = xpthread_create (&attr, tf, &args);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
xpthread_attr_destroy (&attr);
}
/* Test 5: pthread_create with non default stack and guard size value. */
static void
do_test5 (void)
{
pthread_attr_t attr;
xpthread_attr_init (&attr);
struct thread_args_t args = get_thread_args (&attr);
args.guardsize += pagesz;
args.stacksize += pagesz;
set_thread_args (&attr, &args);
pthread_t t = xpthread_create (&attr, tf, &args);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
xpthread_attr_destroy (&attr);
}
/* Test 6: thread with the required size (stack + guard) that matches
test 3, but with a larger guard area. pthread_create will need to
increase the guard area. */
static void
do_test6 (void)
{
pthread_attr_t attr;
xpthread_attr_init (&attr);
struct thread_args_t args = get_thread_args (&attr);
args.guardsize += pagesz;
args.stacksize -= pagesz;
set_thread_args (&attr, &args);
pthread_t t = xpthread_create (&attr, tf, &args);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
xpthread_attr_destroy (&attr);
}
/* Test 7: pthread_create with default values; the required size matches
the one from tests 3 and 6 (but with a reduced guard area). The
pthread_create should use the cached stack from previous tests, but it
will require reducing the guard area. */
static void
do_test7 (void)
{
pthread_t t = xpthread_create (NULL, tf, NULL);
void *status = xpthread_join (t);
TEST_VERIFY (status == 0);
}
static int
do_test (void)
{
pagesz = sysconf (_SC_PAGESIZE);
{
struct sigaction sa = {
.sa_handler = sigsegv_handler,
.sa_flags = SA_NODEFER,
};
sigemptyset (&sa.sa_mask);
xsigaction (SIGSEGV, &sa, NULL);
/* Some systems generate SIGBUS instead of SIGSEGV when accessing the
guard area if it is set up with madvise. */
xsigaction (SIGBUS, &sa, NULL);
}
static const struct {
const char *descr;
void (*test)(void);
} tests[] = {
{ "user provided stack without guard", do_test1 },
{ "user provided stack with guard", do_test2 },
{ "default attribute", do_test3 },
{ "default attribute without guard", do_test4 },
{ "non default stack and guard sizes", do_test5 },
{ "reused stack with larger guard", do_test6 },
{ "reused stack with smaller guard", do_test7 },
};
for (int i = 0; i < array_length (tests); i++)
{
printf ("debug: test%01d: %s\n", i, tests[i].descr);
tests[i].test();
}
return 0;
}
#include <support/test-driver.c>


@@ -74,7 +74,7 @@ __tls_init_tp (void)
/* Early initialization of the TCB. */
pd->tid = INTERNAL_SYSCALL_CALL (set_tid_address, &pd->tid);
THREAD_SETMEM (pd, specific[0], &pd->specific_1stblock[0]);
THREAD_SETMEM (pd, stack_mode, ALLOCATE_GUARD_USER);
/* Before initializing GL (dl_stack_user), the debugger could not
find us and had to set __nptl_initial_report_events. Propagate


@@ -155,7 +155,8 @@ reclaim_stacks (void)
INIT_LIST_HEAD (&GL (dl_stack_used));
INIT_LIST_HEAD (&GL (dl_stack_user));
if (__glibc_unlikely (THREAD_GETMEM (self, stack_mode)
== ALLOCATE_GUARD_USER))
list_add (&self->list, &GL (dl_stack_user));
else
list_add (&self->list, &GL (dl_stack_used));


@@ -113,6 +113,8 @@
locked pages too. */
# define MADV_COLLAPSE 25 /* Synchronous hugepage collapse. */
# define MADV_HWPOISON 100 /* Poison a page for testing. */
# define MADV_GUARD_INSTALL 102 /* Fatal signal on access to range */
# define MADV_GUARD_REMOVE 103 /* Unguard range */
#endif
/* The POSIX people had to invent similar names for the same things. */