/*
 * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifndef EVENT__DISABLE_THREAD_SUPPORT

#include "event2/thread.h"

#include <stdlib.h>
#include <string.h>

#include "log-internal.h"
#include "mm-internal.h"
#include "util-internal.h"
#include "evthread-internal.h"

#ifdef EVTHREAD_EXPOSE_STRUCTS
#define GLOBAL
#else
#define GLOBAL static
#endif

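/* These flags are defined elsewhere in the library.  They record whether
 * debug mode is on and whether any lock or condition variable has already
 * been handed out while it was on, so that the setters below can reject
 * threading callbacks that are installed too late. */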
#ifndef EVENT__DISABLE_DEBUG_MODE
extern int event_debug_created_threadable_ctx_;
extern int event_debug_mode_on_;
#endif

/* globals */
GLOBAL int evthread_lock_debugging_enabled_ = 0;
GLOBAL struct evthread_lock_callbacks evthread_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
GLOBAL unsigned long (*evthread_id_fn_)(void) = NULL;
GLOBAL struct evthread_condition_callbacks evthread_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

/* Used for debugging: when lock debugging is enabled, the real callbacks
 * are saved here and debug wrappers are installed in the tables above. */
static struct evthread_lock_callbacks original_lock_fns_ = {
	0, 0, NULL, NULL, NULL, NULL
};
static struct evthread_condition_callbacks original_cond_fns_ = {
	0, NULL, NULL, NULL, NULL
};

void
evthread_set_id_callback(unsigned long (*id_fn)(void))
{
	evthread_id_fn_ = id_fn;
}

struct evthread_lock_callbacks *evthread_get_lock_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_lock_fns_ : &evthread_lock_fns_;
}
struct evthread_condition_callbacks *evthread_get_condition_callbacks(void)
{
	return evthread_lock_debugging_enabled_
	    ? &original_cond_fns_ : &evthread_cond_fns_;
}
void evthreadimpl_disable_lock_debugging_(void)
{
	evthread_lock_debugging_enabled_ = 0;
}

int
evthread_set_lock_callbacks(const struct evthread_lock_callbacks *cbs)
{
	struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
			event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc)
			event_warnx("Trying to disable lock functions after "
			    "they have been set up will probably not work.");
		memset(target, 0, sizeof(evthread_lock_fns_));
		return 0;
	}
	if (target->alloc) {
		/* Uh oh; we already had locking callbacks set up.*/
		if (target->lock_api_version == cbs->lock_api_version &&
			target->supported_locktypes == cbs->supported_locktypes &&
			target->alloc == cbs->alloc &&
			target->free == cbs->free &&
			target->lock == cbs->lock &&
			target->unlock == cbs->unlock) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change lock callbacks once they have been "
		    "initialized.");
		return -1;
	}
	if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
		memcpy(target, cbs, sizeof(evthread_lock_fns_));
		return event_global_setup_locks_(1);
	} else {
		return -1;
	}
}
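
/*
 * Illustrative sketch (not part of the original file): most applications do
 * not call evthread_set_lock_callbacks() directly; they call
 * evthread_use_pthreads() or evthread_use_windows_threads(), which install a
 * callback table much like the hand-rolled, pthread-based one below.  The
 * sketch_* names are hypothetical and assume <pthread.h> and <stdlib.h>:
 *
 *	static void *sketch_lock_alloc(unsigned locktype)
 *	{
 *		pthread_mutexattr_t attr;
 *		pthread_mutex_t *m = malloc(sizeof(*m));
 *		if (!m)
 *			return NULL;
 *		pthread_mutexattr_init(&attr);
 *		if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
 *			pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *		pthread_mutex_init(m, &attr);
 *		pthread_mutexattr_destroy(&attr);
 *		return m;
 *	}
 *	static void sketch_lock_free(void *lock, unsigned locktype)
 *	{
 *		pthread_mutex_destroy(lock);
 *		free(lock);
 *	}
 *	static int sketch_lock_lock(unsigned mode, void *lock)
 *	{
 *		return pthread_mutex_lock(lock);
 *	}
 *	static int sketch_lock_unlock(unsigned mode, void *lock)
 *	{
 *		return pthread_mutex_unlock(lock);
 *	}
 *
 *	struct evthread_lock_callbacks cbs = {
 *		EVTHREAD_LOCK_API_VERSION,
 *		EVTHREAD_LOCKTYPE_RECURSIVE,
 *		sketch_lock_alloc, sketch_lock_free,
 *		sketch_lock_lock, sketch_lock_unlock
 *	};
 *	evthread_set_lock_callbacks(&cbs);
 */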

int
evthread_set_condition_callbacks(const struct evthread_condition_callbacks *cbs)
{
	struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();

#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		if (event_debug_created_threadable_ctx_) {
			event_errx(1, "evthread initialization must be called BEFORE anything else!");
		}
	}
#endif

	if (!cbs) {
		if (target->alloc_condition)
			event_warnx("Trying to disable condition functions "
			    "after they have been set up will probably not "
			    "work.");
		memset(target, 0, sizeof(evthread_cond_fns_));
		return 0;
	}
	if (target->alloc_condition) {
		/* Uh oh; we already had condition callbacks set up.*/
		if (target->condition_api_version == cbs->condition_api_version &&
			target->alloc_condition == cbs->alloc_condition &&
			target->free_condition == cbs->free_condition &&
			target->signal_condition == cbs->signal_condition &&
			target->wait_condition == cbs->wait_condition) {
			/* no change -- allow this. */
			return 0;
		}
		event_warnx("Can't change condition callbacks once they "
		    "have been initialized.");
		return -1;
	}
	if (cbs->alloc_condition && cbs->free_condition &&
	    cbs->signal_condition && cbs->wait_condition) {
		memcpy(target, cbs, sizeof(evthread_cond_fns_));
	}
	if (evthread_lock_debugging_enabled_) {
		evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
		evthread_cond_fns_.free_condition = cbs->free_condition;
		evthread_cond_fns_.signal_condition = cbs->signal_condition;
	}
	return 0;
}
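
/*
 * Note on the wait_condition contract (see event2/thread.h): it is invoked
 * with 'lock' held, must release the lock while waiting and re-acquire it
 * before returning, and should return 0 when signalled, 1 on timeout, and
 * -1 on error; a NULL timeout means wait indefinitely.  A hypothetical
 * pthread-based wait, companion to the sketch above (names illustrative,
 * assumes <errno.h> and <sys/time.h>), might look roughly like this:
 *
 *	static int sketch_cond_wait(void *cond_, void *lock_,
 *	    const struct timeval *tv)
 *	{
 *		pthread_cond_t *cond = cond_;
 *		pthread_mutex_t *lock = lock_;
 *		if (!tv)
 *			return pthread_cond_wait(cond, lock) ? -1 : 0;
 *		else {
 *			struct timeval now, deadline;
 *			struct timespec ts;
 *			int r;
 *			gettimeofday(&now, NULL);
 *			evutil_timeradd(&now, tv, &deadline);
 *			ts.tv_sec = deadline.tv_sec;
 *			ts.tv_nsec = deadline.tv_usec * 1000;
 *			r = pthread_cond_timedwait(cond, lock, &ts);
 *			if (r == ETIMEDOUT)
 *				return 1;
 *			return r ? -1 : 0;
 *		}
 *	}
 */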

#define DEBUG_LOCK_SIG	0xdeb0b10c

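/* A debug lock wraps the "real" lock (when one is available) together with
 * the bookkeeping needed to catch common locking errors: a signature to
 * detect bogus or freed lock pointers, the lock type requested at
 * allocation, the id of the thread currently holding the lock, and a
 * recursion count. */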
struct debug_lock {
	unsigned signature;
	unsigned locktype;
	unsigned long held_by;
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;
	void *lock;
};

static void *
debug_lock_alloc(unsigned locktype)
{
	struct debug_lock *result = mm_malloc(sizeof(struct debug_lock));
	if (!result)
		return NULL;
	if (original_lock_fns_.alloc) {
		if (!(result->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE))) {
			mm_free(result);
			return NULL;
		}
	} else {
		result->lock = NULL;
	}
	result->signature = DEBUG_LOCK_SIG;
	result->locktype = locktype;
	result->count = 0;
	result->held_by = 0;
	return result;
}

static void
debug_lock_free(void *lock_, unsigned locktype)
{
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock->count == 0);
	EVUTIL_ASSERT(locktype == lock->locktype);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (original_lock_fns_.free) {
		original_lock_fns_.free(lock->lock,
		    lock->locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
	}
	lock->lock = NULL;
	lock->count = -100;
	lock->signature = 0x12300fda;
	mm_free(lock);
}

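/* The mark_locked/mark_unlocked helpers below update a debug_lock's
 * bookkeeping and assert the invariants we care about: a non-recursive lock
 * is never acquired twice, and a lock is only released (or re-acquired
 * recursively) by the thread that currently holds it. */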
static void
evthread_debug_lock_mark_locked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	++lock->count;
	if (!(lock->locktype & EVTHREAD_LOCKTYPE_RECURSIVE))
		EVUTIL_ASSERT(lock->count == 1);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		if (lock->count > 1)
			EVUTIL_ASSERT(lock->held_by == me);
		lock->held_by = me;
	}
}

static int
debug_lock_lock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (original_lock_fns_.lock)
		res = original_lock_fns_.lock(mode, lock->lock);
	if (!res) {
		evthread_debug_lock_mark_locked(mode, lock);
	}
	return res;
}

static void
evthread_debug_lock_mark_unlocked(unsigned mode, struct debug_lock *lock)
{
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	if (lock->locktype & EVTHREAD_LOCKTYPE_READWRITE)
		EVUTIL_ASSERT(mode & (EVTHREAD_READ|EVTHREAD_WRITE));
	else
		EVUTIL_ASSERT((mode & (EVTHREAD_READ|EVTHREAD_WRITE)) == 0);
	if (evthread_id_fn_) {
		unsigned long me;
		me = evthread_id_fn_();
		EVUTIL_ASSERT(lock->held_by == me);
		if (lock->count == 1)
			lock->held_by = 0;
	}
	--lock->count;
	EVUTIL_ASSERT(lock->count >= 0);
}

static int
debug_lock_unlock(unsigned mode, void *lock_)
{
	struct debug_lock *lock = lock_;
	int res = 0;
	evthread_debug_lock_mark_unlocked(mode, lock);
	if (original_lock_fns_.unlock)
		res = original_lock_fns_.unlock(mode, lock->lock);
	return res;
}

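/* Waiting on a condition releases and re-acquires the underlying lock, so
 * hand the real (wrapped) lock to the saved wait callback and update the
 * debug bookkeeping on either side of the wait. */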
static int
debug_cond_wait(void *cond_, void *lock_, const struct timeval *tv)
{
	int r;
	struct debug_lock *lock = lock_;
	EVUTIL_ASSERT(lock);
	EVUTIL_ASSERT(DEBUG_LOCK_SIG == lock->signature);
	EVLOCK_ASSERT_LOCKED(lock_);
	evthread_debug_lock_mark_unlocked(0, lock);
	r = original_cond_fns_.wait_condition(cond_, lock->lock, tv);
	evthread_debug_lock_mark_locked(0, lock);
	return r;
}

/* misspelled version for backward compatibility */
void
evthread_enable_lock_debuging(void)
{
	evthread_enable_lock_debugging();
}

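/*
 * Lock debugging wraps every lock in a debug_lock so that misuse (unlocking
 * a lock you do not hold, re-locking a non-recursive lock, and so on) trips
 * an assertion instead of silently corrupting state.  A hypothetical usage
 * sketch, assuming a pthreads build (order matters: install the threading
 * callbacks first, and do all of this before creating an event_base or any
 * other structure that allocates locks):
 *
 *	evthread_use_pthreads();
 *	evthread_enable_lock_debugging();
 *	base = event_base_new();
 */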
void
evthread_enable_lock_debugging(void)
{
	struct evthread_lock_callbacks cbs = {
		EVTHREAD_LOCK_API_VERSION,
		EVTHREAD_LOCKTYPE_RECURSIVE,
		debug_lock_alloc,
		debug_lock_free,
		debug_lock_lock,
		debug_lock_unlock
	};
	if (evthread_lock_debugging_enabled_)
		return;
	memcpy(&original_lock_fns_, &evthread_lock_fns_,
	    sizeof(struct evthread_lock_callbacks));
	memcpy(&evthread_lock_fns_, &cbs,
	    sizeof(struct evthread_lock_callbacks));

	memcpy(&original_cond_fns_, &evthread_cond_fns_,
	    sizeof(struct evthread_condition_callbacks));
	evthread_cond_fns_.wait_condition = debug_cond_wait;
	evthread_lock_debugging_enabled_ = 1;

	/* XXX return value should get checked. */
	event_global_setup_locks_(0);
}

int
evthread_is_debug_lock_held_(void *lock_)
{
	struct debug_lock *lock = lock_;
	if (! lock->count)
		return 0;
	if (evthread_id_fn_) {
		unsigned long me = evthread_id_fn_();
		if (lock->held_by != me)
			return 0;
	}
	return 1;
}

void *
evthread_debug_get_real_lock_(void *lock_)
{
	struct debug_lock *lock = lock_;
	return lock->lock;
}

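/* Helper behind the EVTHREAD_SETUP_GLOBAL_LOCK() machinery in
 * evthread-internal.h: it allocates or upgrades one of the library's global
 * locks when locking and/or lock debugging is turned on after the lock
 * variable may already exist. */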
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{
	/* there are four cases here:
	   1) we're turning on debugging; locking is not on.
	   2) we're turning on debugging; locking is on.
	   3) we're turning on locking; debugging is not on.
	   4) we're turning on locking; debugging is on. */

	if (!enable_locks && original_lock_fns_.alloc == NULL) {
		/* Case 1: allocate a debug lock. */
		EVUTIL_ASSERT(lock_ == NULL);
		return debug_lock_alloc(locktype);
	} else if (!enable_locks && original_lock_fns_.alloc != NULL) {
		/* Case 2: wrap the lock in a debug lock. */
		struct debug_lock *lock;
		EVUTIL_ASSERT(lock_ != NULL);

		if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) {
			/* We can't wrap it: We need a recursive lock */
			original_lock_fns_.free(lock_, locktype);
			return debug_lock_alloc(locktype);
		}
		lock = mm_malloc(sizeof(struct debug_lock));
		if (!lock) {
			original_lock_fns_.free(lock_, locktype);
			return NULL;
		}
		lock->lock = lock_;
		/* Set the signature too, or the debug asserts on this lock
		 * will fail later. */
		lock->signature = DEBUG_LOCK_SIG;
		lock->locktype = locktype;
		lock->count = 0;
		lock->held_by = 0;
		return lock;
	} else if (enable_locks && ! evthread_lock_debugging_enabled_) {
		/* Case 3: allocate a regular lock */
		EVUTIL_ASSERT(lock_ == NULL);
		return evthread_lock_fns_.alloc(locktype);
	} else {
		/* Case 4: Fill in a debug lock with a real lock */
		struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
		EVUTIL_ASSERT(enable_locks &&
		              evthread_lock_debugging_enabled_);
		if (!lock)
			return NULL;
		EVUTIL_ASSERT(lock->locktype == locktype);
		if (!lock->lock) {
			lock->lock = original_lock_fns_.alloc(
				locktype|EVTHREAD_LOCKTYPE_RECURSIVE);
			if (!lock->lock) {
				lock->count = -200;
				mm_free(lock);
				return NULL;
			}
		}
		return lock;
	}
}

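/* When EVTHREAD_EXPOSE_STRUCTS is not defined, the callback tables above are
 * static to this file, and the rest of the library reaches them through the
 * evthreadimpl_*() accessors below (via the EVTHREAD_* macros in
 * evthread-internal.h). */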
#ifndef EVTHREAD_EXPOSE_STRUCTS
unsigned long
evthreadimpl_get_id_(void)
{
	return evthread_id_fn_ ? evthread_id_fn_() : 1;
}
void *
evthreadimpl_lock_alloc_(unsigned locktype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_lock_fns_.alloc ?
	    evthread_lock_fns_.alloc(locktype) : NULL;
}
void
evthreadimpl_lock_free_(void *lock, unsigned locktype)
{
	if (evthread_lock_fns_.free)
		evthread_lock_fns_.free(lock, locktype);
}
int
evthreadimpl_lock_lock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.lock)
		return evthread_lock_fns_.lock(mode, lock);
	else
		return 0;
}
int
evthreadimpl_lock_unlock_(unsigned mode, void *lock)
{
	if (evthread_lock_fns_.unlock)
		return evthread_lock_fns_.unlock(mode, lock);
	else
		return 0;
}
void *
evthreadimpl_cond_alloc_(unsigned condtype)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_) {
		event_debug_created_threadable_ctx_ = 1;
	}
#endif

	return evthread_cond_fns_.alloc_condition ?
	    evthread_cond_fns_.alloc_condition(condtype) : NULL;
}
void
evthreadimpl_cond_free_(void *cond)
{
	if (evthread_cond_fns_.free_condition)
		evthread_cond_fns_.free_condition(cond);
}
int
evthreadimpl_cond_signal_(void *cond, int broadcast)
{
	if (evthread_cond_fns_.signal_condition)
		return evthread_cond_fns_.signal_condition(cond, broadcast);
	else
		return 0;
}
int
evthreadimpl_cond_wait_(void *cond, void *lock, const struct timeval *tv)
{
	if (evthread_cond_fns_.wait_condition)
		return evthread_cond_fns_.wait_condition(cond, lock, tv);
	else
		return 0;
}
int
evthreadimpl_is_lock_debugging_enabled_(void)
{
	return evthread_lock_debugging_enabled_;
}

int
evthreadimpl_locking_enabled_(void)
{
	return evthread_lock_fns_.lock != NULL;
}
#endif

#endif