/*	$OpenBSD: atexit.c,v 1.14 2007/09/05 20:47:47 chl Exp $ */
/*
 * Copyright (c) 2002 Daniel Hartmeier
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *    - Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    - Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "atexit.h"
#include "thread_private.h"

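/*
 * __atexit points at the most recently mapped page of handler slots.
 * __atexit_invalid stays non-zero until the first page has been mapped
 * successfully, letting __cxa_finalize() bail out early.
 */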
int __atexit_invalid = 1;
struct atexit *__atexit;

/*
 * Function pointers are stored in a linked list of pages. The list
 * is initially empty, and pages are allocated on demand. The first
 * function pointer in the first allocated page (the last one in
 * the linked list) is reserved for the cleanup function.
 *
 * Outside the following functions, all pages are mprotect()'ed
 * to prevent unintentional/malicious corruption.
 */

/*
 * Register a function to be performed at exit or when a shared object
 * with the given dso handle is unloaded dynamically.  Also used as
 * the backend for atexit().  For more info on this API, see:
 *
 *	http://www.codesourcery.com/cxx-abi/abi.html#dso-dtor
 */
int
__cxa_atexit(void (*func)(void *), void *arg, void *dso)
{
	struct atexit *p = __atexit;
	struct atexit_fn *fnp;
	int pgsize = getpagesize();
	int ret = -1;

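	/* Sanity check: a whole struct atexit must fit in one page. */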
	if (pgsize < (int)sizeof(*p))
		return (-1);
	_ATEXIT_LOCK();
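	/*
	 * Re-read the list head under the lock; another thread may have
	 * changed it.  If the current page is full, treat it as missing
	 * so a fresh page is mapped below; otherwise make it writable.
	 */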
	p = __atexit;
	if (p != NULL) {
		if (p->ind + 1 >= p->max)
			p = NULL;
		else if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;
	}
	if (p == NULL) {
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		if (__atexit == NULL) {
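			/* Slot 0 of the very first page is reserved for the cleanup function. */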
			memset(&p->fns[0], 0, sizeof(p->fns[0]));
			p->ind = 1;
		} else
			p->ind = 0;
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = __atexit;
		__atexit = p;
		if (__atexit_invalid)
			__atexit_invalid = 0;
	}
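	/* Record the handler in the next free slot, then re-protect the page. */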
	fnp = &p->fns[p->ind++];
	fnp->fn_ptr.cxa_func = func;
	fnp->fn_arg = arg;
	fnp->fn_dso = dso;
	if (mprotect(p, pgsize, PROT_READ))
		goto unlock;
	ret = 0;
unlock:
	_ATEXIT_UNLOCK();
	return (ret);
}

/*
 * Call all handlers registered with __cxa_atexit() for the shared
 * object owning 'dso'.
 * Note: if 'dso' is NULL, then all remaining handlers are called.
 */
void
__cxa_finalize(void *dso)
{
	struct atexit *p, *q;
	struct atexit_fn fn;
	int n, pgsize = getpagesize();
	static int call_depth;

	if (__atexit_invalid)
		return;

	_ATEXIT_LOCK();
	call_depth++;

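	/*
	 * Walk the pages newest-first and the slots within each page
	 * backwards, so handlers run in reverse order of registration.
	 */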
	for (p = __atexit; p != NULL; p = p->next) {
		for (n = p->ind; --n >= 0;) {
			if (p->fns[n].fn_ptr.cxa_func == NULL)
				continue;	/* already called */
			if (dso != NULL && dso != p->fns[n].fn_dso)
				continue;	/* wrong DSO */

			/*
			 * Mark the handler as already called to avoid
			 * dupes and loops, then call the appropriate function.
			 */
			fn = p->fns[n];
			if (mprotect(p, pgsize, PROT_READ | PROT_WRITE) == 0) {
				p->fns[n].fn_ptr.cxa_func = NULL;
				mprotect(p, pgsize, PROT_READ);
			}
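			/*
			 * Drop the lock while the handler runs; it may
			 * call exit() or register further handlers.
			 */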
			_ATEXIT_UNLOCK();
#if ANDROID
			/*
			 * It looks like we should always call the function
			 * with its argument, even when 'dso' is NULL;
			 * otherwise static destructors are not called
			 * properly on ARM.
			 */
			(*fn.fn_ptr.cxa_func)(fn.fn_arg);
#else /* !ANDROID */
			if (dso != NULL)
				(*fn.fn_ptr.cxa_func)(fn.fn_arg);
			else
				(*fn.fn_ptr.std_func)();
#endif /* !ANDROID */
			_ATEXIT_LOCK();
		}
	}

	/*
	 * If called via exit(), unmap the pages since we have now run
	 * all the handlers.  We defer this until call_depth == 0 so that
	 * we don't unmap things prematurely if called recursively.
	 */
	if (dso == NULL && --call_depth == 0) {
		for (p = __atexit; p != NULL; ) {
			q = p;
			p = p->next;
			munmap(q, pgsize);
		}
		__atexit = NULL;
	}
	_ATEXIT_UNLOCK();
}

/*
 * Register the cleanup function
 */
void
__atexit_register_cleanup(void (*func)(void))
{
	struct atexit *p;
	int pgsize = getpagesize();

	if (pgsize < (int)sizeof(*p))
		return;
	_ATEXIT_LOCK();
	p = __atexit;
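	/*
	 * The cleanup function lives in slot 0 of the last page in the
	 * list (i.e. the first page ever mapped), so walk to the end.
	 */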
	while (p != NULL && p->next != NULL)
		p = p->next;
	if (p == NULL) {
		p = mmap(NULL, pgsize, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (p == MAP_FAILED)
			goto unlock;
		p->ind = 1;
		p->max = (pgsize - ((char *)&p->fns[0] - (char *)p)) /
		    sizeof(p->fns[0]);
		p->next = NULL;
		__atexit = p;
		if (__atexit_invalid)
			__atexit_invalid = 0;
	} else {
		if (mprotect(p, pgsize, PROT_READ | PROT_WRITE))
			goto unlock;
	}
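	/* Install the cleanup function in the reserved slot and re-protect the page. */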
	p->fns[0].fn_ptr.std_func = func;
	p->fns[0].fn_arg = NULL;
	p->fns[0].fn_dso = NULL;
	mprotect(p, pgsize, PROT_READ);
unlock:
	_ATEXIT_UNLOCK();
}