// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2005-2007, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * ---
 * Author: Markus Gutschke
 */

#include "base/linuxthreads.h"

#ifdef THREADS
#ifdef __cplusplus
extern "C" {
#endif

#include <sched.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <sys/prctl.h>
#include <semaphore.h>

#include "base/linux_syscall_support.h"
#include "base/thread_lister.h"

#ifndef CLONE_UNTRACED
#define CLONE_UNTRACED 0x00800000
#endif


/* Synchronous signals that should not be blocked while in the lister thread.
 */
static const int sync_signals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };

/* itoa() is not a standard function, and we cannot safely call printf()
 * after suspending threads. So, we just implement our own copy. A
 * recursive approach is the easiest here.
 */
static char *local_itoa(char *buf, int i) {
  if (i < 0) {
    *buf++ = '-';
    return local_itoa(buf, -i);
  } else {
    if (i >= 10)
      buf = local_itoa(buf, i/10);
    *buf++ = (i%10) + '0';
    *buf   = '\000';
    return buf;
  }
}
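
/* Example: local_itoa(buf, -42) writes the NUL-terminated string "-42" into
 * buf and returns a pointer to the terminating NUL, so that further text can
 * be appended directly after the digits.
 */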


/* Wrapper around clone() that runs "fn" on the same stack as the
 * caller! Unlike fork(), the cloned thread shares the same address space.
 * The caller must be careful to use only minimal amounts of stack until
 * the cloned thread has returned.
 * There is a good chance that the cloned thread and the caller will share
 * the same copy of errno!
 */
#ifdef __GNUC__
#if __GNUC__ == 3 && __GNUC_MINOR__ >= 1 || __GNUC__ > 3
/* Try to force this function into a separate stack frame, and make sure
 * that arguments are passed on the stack.
 */
static int local_clone (int (*fn)(void *), void *arg, ...)
  __attribute__ ((noinline));
#endif
#endif

/* To keep the gap from crossing page boundaries, increase it to the large
 * page size that PowerPC systems typically use.
 */
#ifdef __PPC64__
#define CLONE_STACK_SIZE 65536
#else
#define CLONE_STACK_SIZE 4096
#endif

static int local_clone (int (*fn)(void *), void *arg, ...) {
  /* Leave a gap of CLONE_STACK_SIZE bytes between the caller's stack and
   * the new clone. This should be more than sufficient for the caller to
   * call waitpid() until the cloned thread terminates.
   *
   * It is important that we set the CLONE_UNTRACED flag, because newer
   * versions of "gdb" otherwise attempt to attach to our thread, and will
   * attempt to reap its status codes. This subsequently results in the
   * caller hanging indefinitely in waitpid(), waiting for a change in
   * status that will never happen. By setting the CLONE_UNTRACED flag, we
   * prevent "gdb" from stealing events, but we still expect the thread
   * lister to fail, because it cannot PTRACE_ATTACH to the process that
   * is being debugged. This is OK and the error code will be reported
   * correctly.
   */
  return sys_clone(fn, (char *)&arg - CLONE_STACK_SIZE,
                   CLONE_VM|CLONE_FS|CLONE_FILES|CLONE_UNTRACED, arg, 0, 0, 0);
}
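
/* Note: the child's stack pointer is placed CLONE_STACK_SIZE bytes below
 * "&arg", so this relies on the stack growing downward, as it does on all
 * Linux targets this file supports. A sketch of typical usage, mirroring
 * what TCMalloc_ListAllProcessThreads() does below ("MyThreadFn" and
 * "my_args" are hypothetical placeholders):
 *
 *   pid_t pid = local_clone((int (*)(void *))MyThreadFn, &my_args);
 *   while (sys_waitpid(pid, &status, __WALL) < 0 && errno == EINTR) { }
 */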


/* Local substitute for the atoi() function, which is not necessarily safe
 * to call once threads are suspended (depending on whether libc looks up
 * locale information when executing atoi()).
 */
static int local_atoi(const char *s) {
  int n   = 0;
  int neg = *s == '-';
  if (neg)
    s++;
  while (*s >= '0' && *s <= '9')
    n = 10*n + (*s++ - '0');
  return neg ? -n : n;
}


/* Re-runs fn until it doesn't cause EINTR.
 */
#define NO_INTR(fn)   do {} while ((fn) < 0 && errno == EINTR)
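
/* Typical usage wraps an assignment, e.g.:
 *
 *   int fd;
 *   NO_INTR(fd = sys_open(fname, O_RDONLY, 0));
 *
 * The call is retried as long as it fails with EINTR; any other error is
 * left in errno for the caller to inspect (see c_open() below).
 */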


/* Wrap a class around system calls, in order to give us access to
 * a private copy of errno. This only works in C++, but it has the
 * advantage of not needing nested functions, which are a non-standard
 * language extension.
 */
#ifdef __cplusplus
namespace {
  class SysCalls {
   public:
    #define SYS_CPLUSPLUS
    #define SYS_ERRNO     my_errno
    #define SYS_INLINE    inline
    #define SYS_PREFIX    -1
    #undef  SYS_LINUX_SYSCALL_SUPPORT_H
    #include "linux_syscall_support.h"
    SysCalls() : my_errno(0) { }
    int my_errno;
  };
}
#define ERRNO sys.my_errno
#else
#define ERRNO my_errno
#endif


/* Wrapper for open() which is guaranteed to never return EINTR.
 */
static int c_open(const char *fname, int flags, int mode) {
  ssize_t rc;
  NO_INTR(rc = sys_open(fname, flags, mode));
  return rc;
}


/* abort() is not safely reentrant, and changes its behavior each time
 * it is called. This means that if the main application ever called
 * abort(), we cannot safely call it again. This would happen if we were
 * called from a SIGABRT signal handler in the main application. So, we
 * document that calling abort() from the thread lister makes it not
 * signal safe (and vice-versa).
 * Also, since we share address space with the main application, we
 * cannot call abort() from the callback and expect the main application
 * to behave correctly afterwards. In fact, the only thing we can do is
 * to terminate the main application with extreme prejudice (aka
 * PTRACE_KILL).
 * We set up our own SIGABRT handler to do this.
 * In order to find the main application from the signal handler, we
 * need to store information about it in global variables. This is
 * safe, because the main application should be suspended at this
 * time. If the callback ever called TCMalloc_ResumeAllProcessThreads(),
 * then we are running a higher risk, though. So, try to avoid calling
 * abort() after calling TCMalloc_ResumeAllProcessThreads.
 */
static volatile int *sig_pids, sig_num_threads, sig_proc, sig_marker;


/* Signal handler to help us recover from dying while we are attached to
 * other threads.
 */
static void SignalHandler(int signum, siginfo_t *si, void *data) {
  if (sig_pids != NULL) {
    if (signum == SIGABRT) {
      while (sig_num_threads > 0) {
        sig_num_threads = sig_num_threads - 1;
        /* Not sure if sched_yield is really necessary here, but it does not */
        /* hurt, and it might be necessary for the same reasons that we have */
        /* to do so in sys_ptrace_detach().                                  */
        sys_sched_yield();
        sys_ptrace(PTRACE_KILL, sig_pids[sig_num_threads], 0, 0);
      }
    } else if (sig_num_threads > 0) {
      TCMalloc_ResumeAllProcessThreads(sig_num_threads, (int *)sig_pids);
    }
  }
  sig_pids = NULL;
  if (sig_marker >= 0)
    NO_INTR(sys_close(sig_marker));
  sig_marker = -1;
  if (sig_proc >= 0)
    NO_INTR(sys_close(sig_proc));
  sig_proc = -1;

  sys__exit(signum == SIGABRT ? 1 : 2);
}


/* Try to dirty the stack, and hope that the compiler is not smart enough
 * to optimize this function away. Or worse, the compiler could inline the
 * function and permanently allocate the data on the stack.
 */
static void DirtyStack(size_t amount) {
  char buf[amount];
  memset(buf, 0, amount);
  sys_read(-1, buf, amount);
}


/* Data structure for passing arguments to the lister thread.
 */
#define ALT_STACKSIZE (MINSIGSTKSZ + 4096)

struct ListerParams {
  int           result, err;
  char          *altstack_mem;
  ListAllProcessThreadsCallBack callback;
  void          *parameter;
  va_list       ap;
  sem_t         *lock;
};


static void ListerThread(struct ListerParams *args) {
  int                found_parent = 0;
  pid_t              clone_pid  = sys_gettid(), ppid = sys_getppid();
  char               proc_self_task[80], marker_name[48], *marker_path;
  const char         *proc_paths[3];
  const char *const  *proc_path = proc_paths;
  int                proc = -1, marker = -1, num_threads = 0;
  int                max_threads = 0, sig;
  struct kernel_stat marker_sb, proc_sb;
  stack_t            altstack;

  /* Wait for parent thread to set appropriate permissions
   * to allow ptrace activity.
   */
  if (sem_wait(args->lock) < 0) {
    goto failure;
  }

  /* Create "marker" that we can use to detect threads sharing the same
   * address space and the same file handles. By setting the FD_CLOEXEC flag
   * we minimize the risk of misidentifying child processes as threads;
   * and since there is still a race condition, we will filter those out
   * later, anyway.
   */
  if ((marker = sys_socket(PF_LOCAL, SOCK_DGRAM, 0)) < 0 ||
      sys_fcntl(marker, F_SETFD, FD_CLOEXEC) < 0) {
  failure:
    args->result = -1;
    args->err    = errno;
    if (marker >= 0)
      NO_INTR(sys_close(marker));
    sig_marker = marker = -1;
    if (proc >= 0)
      NO_INTR(sys_close(proc));
    sig_proc = proc = -1;
    sys__exit(1);
  }

  /* Compute search paths for finding thread directories in /proc */
  local_itoa(strrchr(strcpy(proc_self_task, "/proc/"), '\000'), ppid);
  strcpy(marker_name, proc_self_task);
  marker_path = marker_name + strlen(marker_name);
  strcat(proc_self_task, "/task/");
  proc_paths[0] = proc_self_task; /* /proc/$$/task/ */
  proc_paths[1] = "/proc/";       /* /proc/         */
  proc_paths[2] = NULL;

  /* Compute path for marker socket in /proc */
  local_itoa(strcpy(marker_path, "/fd/") + 4, marker);
  if (sys_stat(marker_name, &marker_sb) < 0) {
    goto failure;
  }

  /* Catch signals on an alternate pre-allocated stack. This way, we can
   * safely execute the signal handler even if we ran out of memory.
   */
  memset(&altstack, 0, sizeof(altstack));
  altstack.ss_sp    = args->altstack_mem;
  altstack.ss_flags = 0;
  altstack.ss_size  = ALT_STACKSIZE;
  sys_sigaltstack(&altstack, (const stack_t *)NULL);

  /* Some kernels forget to wake up traced processes, when the
   * tracer dies. So, intercept synchronous signals and make sure
   * that we wake up our tracees before dying. It is the caller's
   * responsibility to ensure that asynchronous signals do not
   * interfere with this function.
   */
  sig_marker = marker;
  sig_proc   = -1;
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    struct kernel_sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_sigaction_ = SignalHandler;
    sys_sigfillset(&sa.sa_mask);
    sa.sa_flags      = SA_ONSTACK|SA_SIGINFO|SA_RESETHAND;
    sys_sigaction(sync_signals[sig], &sa, (struct kernel_sigaction *)NULL);
  }

  /* Read process directories in /proc/... */
  for (;;) {
    /* Some kernels know about threads, and hide them in "/proc"
     * (although they are still there, if you know the process
     * id). Threads are moved into a separate "task" directory. We
     * check there first, and then fall back on the older naming
     * convention if necessary.
     */
    sig_proc = proc = c_open(*proc_path, O_RDONLY|O_DIRECTORY, 0);
    if (sig_proc < 0) {
      if (*++proc_path != NULL)
        continue;
      goto failure;
    }
    if (sys_fstat(proc, &proc_sb) < 0)
      goto failure;

    /* Since we are suspending threads, we cannot call any libc
     * functions that might acquire locks. Most notably, we cannot
     * call malloc(). So, we have to allocate memory on the stack,
     * instead. Since we do not know how much memory we need, we
     * make a best guess. And if we guessed incorrectly we retry on
     * a second iteration (by jumping to "detach_threads").
     *
     * Unless the number of threads is increasing very rapidly, we
     * should never need to do so, though, as our guess is very
     * conservative.
     */
    if (max_threads < proc_sb.st_nlink + 100)
      max_threads = proc_sb.st_nlink + 100;

    /* scope */ {
      pid_t pids[max_threads];
      int   added_entries = 0;
      sig_num_threads     = num_threads;
      sig_pids            = pids;
      for (;;) {
        struct KERNEL_DIRENT *entry;
        char buf[4096];
        ssize_t nbytes = GETDENTS(proc, (struct KERNEL_DIRENT *)buf,
                                  sizeof(buf));
        if (nbytes < 0)
          goto failure;
        else if (nbytes == 0) {
          if (added_entries) {
            /* Need to keep iterating over "/proc" in multiple
             * passes until we no longer find any more threads. This
             * algorithm eventually completes, when all threads have
             * been suspended.
             */
            added_entries = 0;
            sys_lseek(proc, 0, SEEK_SET);
            continue;
          }
          break;
        }
        for (entry = (struct KERNEL_DIRENT *)buf;
             entry < (struct KERNEL_DIRENT *)&buf[nbytes];
             entry = (struct KERNEL_DIRENT *)((char *)entry+entry->d_reclen)) {
          if (entry->d_ino != 0) {
            const char *ptr = entry->d_name;
            pid_t pid;

            /* Some kernels hide threads by preceding the pid with a '.' */
            if (*ptr == '.')
              ptr++;

            /* If the directory is not numeric, it cannot be a
             * process/thread.
             */
            if (*ptr < '0' || *ptr > '9')
              continue;
            pid = local_atoi(ptr);

            /* Attach (and suspend) all threads */
            if (pid && pid != clone_pid) {
              struct kernel_stat tmp_sb;
              char fname[entry->d_reclen + 48];
              strcat(strcat(strcpy(fname, "/proc/"),
                            entry->d_name), marker_path);

              /* Check if the marker is identical to the one we created */
              if (sys_stat(fname, &tmp_sb) >= 0 &&
                  marker_sb.st_ino == tmp_sb.st_ino) {
                long i, j;

                /* Found one of our threads, make sure it is not a duplicate */
                for (i = 0; i < num_threads; i++) {
                  /* Linear search is slow, but should not matter much for
                   * the typically small number of threads.
                   */
                  if (pids[i] == pid) {
                    /* Found a duplicate; most likely on second pass */
                    goto next_entry;
                  }
                }

                /* Check whether data structure needs growing */
                if (num_threads >= max_threads) {
                  /* Back to square one, this time with more memory */
                  NO_INTR(sys_close(proc));
                  goto detach_threads;
                }

                /* Attaching to thread suspends it */
                pids[num_threads++] = pid;
                sig_num_threads     = num_threads;
                if (sys_ptrace(PTRACE_ATTACH, pid, (void *)0,
                               (void *)0) < 0) {
                  /* If operation failed, ignore thread. Maybe it
                   * just died? There might also be a race
                   * condition with a concurrent core dumper or
                   * with a debugger. In that case, we will just
                   * make a best effort, rather than failing
                   * entirely.
                   */
                  num_threads--;
                  sig_num_threads = num_threads;
                  goto next_entry;
                }
                while (sys_waitpid(pid, (int *)0, __WALL) < 0) {
                  if (errno != EINTR) {
                    sys_ptrace_detach(pid);
                    num_threads--;
                    sig_num_threads = num_threads;
                    goto next_entry;
                  }
                }

                if (sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i++ != j ||
                    sys_ptrace(PTRACE_PEEKDATA, pid, &i, &j) || i   != j) {
                  /* Address spaces are distinct, even though both
                   * processes show the "marker". This is probably
                   * a forked child process rather than a thread.
                   */
                  sys_ptrace_detach(pid);
                  num_threads--;
                  sig_num_threads = num_threads;
                } else {
                  found_parent |= pid == ppid;
                  added_entries++;
                }
              }
            }
          }
        next_entry:;
        }
      }
      NO_INTR(sys_close(proc));
      sig_proc = proc = -1;

      /* If we failed to find any threads, try looking somewhere else in
       * /proc. Maybe, threads are reported differently on this system.
       */
      if (num_threads > 1 || !*++proc_path) {
        NO_INTR(sys_close(marker));
        sig_marker = marker = -1;

        /* If we never found the parent process, something is very wrong.
         * Most likely, we are running in a debugger. Any attempt to operate
         * on the threads would be very incomplete. Let's just report an
         * error to the caller.
         */
        if (!found_parent) {
          TCMalloc_ResumeAllProcessThreads(num_threads, pids);
          sys__exit(3);
        }

        /* Now we are ready to call the callback,
         * which takes care of resuming the threads for us.
         */
        args->result = args->callback(args->parameter, num_threads,
                                      pids, args->ap);
        args->err = errno;

        /* Callback should have resumed threads, but better safe than sorry */
        if (TCMalloc_ResumeAllProcessThreads(num_threads, pids)) {
          /* Callback forgot to resume at least one thread, report error */
          args->err    = EINVAL;
          args->result = -1;
        }

        sys__exit(0);
      }
    detach_threads:
      /* Resume all threads prior to retrying the operation */
      TCMalloc_ResumeAllProcessThreads(num_threads, pids);
      sig_pids = NULL;
      num_threads = 0;
      sig_num_threads = num_threads;
      max_threads += 100;
    }
  }
}


/* This function gets the list of all linux threads of the current process
 * and passes them to the 'callback' along with the 'parameter' pointer; by
 * the time the callback is invoked, all of these threads have been paused
 * via PTRACE_ATTACH.
 * The callback is executed from a separate thread which shares only the
 * address space, the filesystem, and the filehandles with the caller. Most
 * notably, it does not share the same pid and ppid; and if it terminates,
 * the rest of the application is still there. 'callback' is supposed to do
 * or arrange for TCMalloc_ResumeAllProcessThreads. This happens
 * automatically, if the thread raises a synchronous signal (e.g. SIGSEGV);
 * asynchronous signals are blocked. If the 'callback' decides to unblock
 * them, it must ensure that they cannot terminate the application, or that
 * TCMalloc_ResumeAllProcessThreads will get called.
 * It is an error for the 'callback' to make any library calls that could
 * acquire locks. Most notably, this means that most system calls have to
 * avoid going through libc. Also, this means that it is not legal to call
 * exit() or abort().
 * We return -1 on error and the return value of 'callback' on success.
 */
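/* A minimal callback sketch (hypothetical, not part of this file): it
 * records the thread count and resumes all threads before returning.
 *
 *   static int CountThreads(void *param, int num_threads,
 *                           pid_t *pids, va_list ap) {
 *     *(int *)param = num_threads;
 *     return TCMalloc_ResumeAllProcessThreads(num_threads, pids);
 *   }
 *
 *   int n = 0;
 *   int rc = TCMalloc_ListAllProcessThreads(&n, CountThreads);
 *
 * rc is the callback's return value on success, or -1 with errno set.
 */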
int TCMalloc_ListAllProcessThreads(void *parameter,
                                   ListAllProcessThreadsCallBack callback, ...) {
  char                   altstack_mem[ALT_STACKSIZE];
  struct ListerParams   args;
  pid_t                  clone_pid;
  int                    dumpable = 1, sig;
  struct kernel_sigset_t sig_blocked, sig_old;
  sem_t                  lock;

  va_start(args.ap, callback);

  /* If we are short on virtual memory, initializing the alternate stack
   * might trigger a SIGSEGV. Let's do this early, before it could get us
   * into more trouble (i.e. before signal handlers try to use the alternate
   * stack, and before we attach to other threads).
   */
  memset(altstack_mem, 0, sizeof(altstack_mem));

  /* Some of our cleanup functions could conceivably use more stack space.
   * Try to touch the stack right now. This could be defeated by the compiler
   * being too smart for its own good, so try really hard.
   */
  DirtyStack(32768);

  /* Make this process "dumpable". This is necessary in order to ptrace()
   * after having called setuid().
   */
  dumpable = sys_prctl(PR_GET_DUMPABLE, 0);
  if (!dumpable)
    sys_prctl(PR_SET_DUMPABLE, 1);

  /* Fill in argument block for dumper thread */
  args.result       = -1;
  args.err          = 0;
  args.altstack_mem = altstack_mem;
  args.parameter    = parameter;
  args.callback     = callback;
  args.lock         = &lock;

  /* Before cloning the thread lister, block all asynchronous signals, as we
   * are not prepared to handle them.
   */
  sys_sigfillset(&sig_blocked);
  for (sig = 0; sig < sizeof(sync_signals)/sizeof(*sync_signals); sig++) {
    sys_sigdelset(&sig_blocked, sync_signals[sig]);
  }
  if (sys_sigprocmask(SIG_BLOCK, &sig_blocked, &sig_old)) {
    args.err = errno;
    args.result = -1;
    goto failed;
  }

  /* scope */ {
    /* After cloning, both the parent and the child share the same instance
     * of errno. We must make sure that at least one of these processes
     * (in our case, the parent) uses modified syscall macros that update
     * a local copy of errno, instead.
     */
    #ifdef __cplusplus
    #define sys0_sigprocmask sys.sigprocmask
    #define sys0_waitpid     sys.waitpid
    SysCalls sys;
    #else
    int my_errno;
    #define SYS_ERRNO        my_errno
    #define SYS_INLINE       inline
    #define SYS_PREFIX       0
    #undef  SYS_LINUX_SYSCALL_SUPPORT_H
    #include "linux_syscall_support.h"
    #endif

    /* Lock before clone so that parent can set
     * ptrace permissions (if necessary) prior
     * to ListerThread actually executing.
     */
    if (sem_init(&lock, 0, 0) == 0) {

      int clone_errno;
      clone_pid = local_clone((int (*)(void *))ListerThread, &args);
      clone_errno = errno;

      sys_sigprocmask(SIG_SETMASK, &sig_old, &sig_old);

      if (clone_pid >= 0) {
#ifdef PR_SET_PTRACER
        /* On newer kernels (with the Yama security module), permission must
         * explicitly be given to allow for ptrace.
         */
        prctl(PR_SET_PTRACER, clone_pid, 0, 0, 0);
#endif
        /* Releasing the lock here allows the
         * ListerThread to execute and ptrace us.
         */
        sem_post(&lock);
        int status, rc;
        while ((rc = sys0_waitpid(clone_pid, &status, __WALL)) < 0 &&
               ERRNO == EINTR) {
          /* Keep waiting */
        }
        if (rc < 0) {
          args.err = ERRNO;
          args.result = -1;
        } else if (WIFEXITED(status)) {
          switch (WEXITSTATUS(status)) {
            case 0: break;             /* Normal process termination */
            case 2: args.err = EFAULT; /* Some fault (e.g. SIGSEGV) detected */
                    args.result = -1;
                    break;
            case 3: args.err = EPERM;  /* Process is already being traced */
                    args.result = -1;
                    break;
            default:args.err = ECHILD; /* Child died unexpectedly */
                    args.result = -1;
                    break;
          }
        } else {
          args.err = EFAULT;           /* Terminated due to an unhandled signal */
          args.result = -1;
        }
        sem_destroy(&lock);
      } else {
        args.result = -1;
        args.err    = clone_errno;
      }
    } else {
      args.result = -1;
      args.err    = errno;
    }
  }

  /* Restore the "dumpable" state of the process */
failed:
  if (!dumpable)
    sys_prctl(PR_SET_DUMPABLE, dumpable);

  va_end(args.ap);

  errno = args.err;
  return args.result;
}

/* This function resumes the list of all linux threads that
 * TCMalloc_ListAllProcessThreads pauses before giving to its callback.
 * The function returns non-zero if at least one thread was
 * suspended and has now been resumed.
 */
int TCMalloc_ResumeAllProcessThreads(int num_threads, pid_t *thread_pids) {
  int detached_at_least_one = 0;
  while (num_threads-- > 0) {
    detached_at_least_one |= sys_ptrace_detach(thread_pids[num_threads]) >= 0;
  }
  return detached_at_least_one;
}

#ifdef __cplusplus
}
#endif
#endif  /* THREADS */