Home | History | Annotate | Download | only in libevent
      1 /*
      2  * Copyright (c) 2002-2007 Niels Provos <provos (at) citi.umich.edu>
      3  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
      4  *
      5  * Redistribution and use in source and binary forms, with or without
      6  * modification, are permitted provided that the following conditions
      7  * are met:
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. The name of the author may not be used to endorse or promote products
     14  *    derived from this software without specific prior written permission.
     15  *
     16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     26  */
     27 
     28 #include "event2/event-config.h"
     29 
     30 #ifdef WIN32
     31 #include <winsock2.h>
     32 #include <windows.h>
     33 #include <io.h>
     34 #endif
     35 
     36 #ifdef _EVENT_HAVE_VASPRINTF
     37 /* If we have vasprintf, we need to define this before we include stdio.h. */
     38 #define _GNU_SOURCE
     39 #endif
     40 
     41 #include <sys/types.h>
     42 
     43 #ifdef _EVENT_HAVE_SYS_TIME_H
     44 #include <sys/time.h>
     45 #endif
     46 
     47 #ifdef _EVENT_HAVE_SYS_SOCKET_H
     48 #include <sys/socket.h>
     49 #endif
     50 
     51 #ifdef _EVENT_HAVE_SYS_UIO_H
     52 #include <sys/uio.h>
     53 #endif
     54 
     55 #ifdef _EVENT_HAVE_SYS_IOCTL_H
     56 #include <sys/ioctl.h>
     57 #endif
     58 
     59 #ifdef _EVENT_HAVE_SYS_MMAN_H
     60 #include <sys/mman.h>
     61 #endif
     62 
     63 #ifdef _EVENT_HAVE_SYS_SENDFILE_H
     64 #include <sys/sendfile.h>
     65 #endif
     66 
     67 #include <errno.h>
     68 #include <stdio.h>
     69 #include <stdlib.h>
     70 #include <string.h>
     71 #ifdef _EVENT_HAVE_STDARG_H
     72 #include <stdarg.h>
     73 #endif
     74 #ifdef _EVENT_HAVE_UNISTD_H
     75 #include <unistd.h>
     76 #endif
     77 #include <limits.h>
     78 
     79 #include "event2/event.h"
     80 #include "event2/buffer.h"
     81 #include "event2/buffer_compat.h"
     82 #include "event2/bufferevent.h"
     83 #include "event2/bufferevent_compat.h"
     84 #include "event2/bufferevent_struct.h"
     85 #include "event2/thread.h"
     86 #include "event2/event-config.h"
     87 #include "event-internal.h"
     88 #include "log-internal.h"
     89 #include "mm-internal.h"
     90 #include "util-internal.h"
     91 #include "evthread-internal.h"
     92 #include "evbuffer-internal.h"
     93 #include "bufferevent-internal.h"
     94 
     95 /* some systems do not have MAP_FAILED */
     96 #ifndef MAP_FAILED
     97 #define MAP_FAILED	((void *)-1)
     98 #endif
     99 
    100 /* send file support */
    101 #if defined(_EVENT_HAVE_SYS_SENDFILE_H) && defined(_EVENT_HAVE_SENDFILE) && defined(__linux__)
    102 #define USE_SENDFILE		1
    103 #define SENDFILE_IS_LINUX	1
    104 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__FreeBSD__)
    105 #define USE_SENDFILE		1
    106 #define SENDFILE_IS_FREEBSD	1
    107 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__APPLE__)
    108 #define USE_SENDFILE		1
    109 #define SENDFILE_IS_MACOSX	1
    110 #elif defined(_EVENT_HAVE_SENDFILE) && defined(__sun__) && defined(__svr4__)
    111 #define USE_SENDFILE		1
    112 #define SENDFILE_IS_SOLARIS	1
    113 #endif
    114 
    115 #ifdef USE_SENDFILE
    116 static int use_sendfile = 1;
    117 #endif
    118 #ifdef _EVENT_HAVE_MMAP
    119 static int use_mmap = 1;
    120 #endif
    121 
    122 
    123 /* Mask of user-selectable callback flags. */
    124 #define EVBUFFER_CB_USER_FLAGS	    0xffff
    125 /* Mask of all internal-use-only flags. */
    126 #define EVBUFFER_CB_INTERNAL_FLAGS  0xffff0000
    127 
    128 /* Flag set if the callback is using the cb_obsolete function pointer  */
    129 #define EVBUFFER_CB_OBSOLETE	       0x00040000
    130 
    131 /* evbuffer_chain support */
    132 #define CHAIN_SPACE_PTR(ch) ((ch)->buffer + (ch)->misalign + (ch)->off)
    133 #define CHAIN_SPACE_LEN(ch) ((ch)->flags & EVBUFFER_IMMUTABLE ? \
    134 	    0 : (ch)->buffer_len - ((ch)->misalign + (ch)->off))
    135 
    136 #define CHAIN_PINNED(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_ANY) != 0)
    137 #define CHAIN_PINNED_R(ch)  (((ch)->flags & EVBUFFER_MEM_PINNED_R) != 0)
    138 
    139 static void evbuffer_chain_align(struct evbuffer_chain *chain);
    140 static int evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    141     size_t datalen);
    142 static void evbuffer_deferred_callback(struct deferred_cb *cb, void *arg);
    143 static int evbuffer_ptr_memcmp(const struct evbuffer *buf,
    144     const struct evbuffer_ptr *pos, const char *mem, size_t len);
    145 static struct evbuffer_chain *evbuffer_expand_singlechain(struct evbuffer *buf,
    146     size_t datlen);
    147 
    148 #ifdef WIN32
    149 static int evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd,
    150     ev_ssize_t howmuch);
    151 #else
    152 #define evbuffer_readfile evbuffer_read
    153 #endif
    154 
    155 static struct evbuffer_chain *
    156 evbuffer_chain_new(size_t size)
    157 {
    158 	struct evbuffer_chain *chain;
    159 	size_t to_alloc;
    160 
    161 	if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE)
    162 		return (NULL);
    163 
    164 	size += EVBUFFER_CHAIN_SIZE;
    165 
    166 	/* get the next largest memory that can hold the buffer */
    167 	if (size < EVBUFFER_CHAIN_MAX / 2) {
    168 		to_alloc = MIN_BUFFER_SIZE;
    169 		while (to_alloc < size) {
    170 			to_alloc <<= 1;
    171 		}
    172 	} else {
    173 		to_alloc = size;
    174 	}
    175 
    176 	/* we get everything in one chunk */
    177 	if ((chain = mm_malloc(to_alloc)) == NULL)
    178 		return (NULL);
    179 
    180 	memset(chain, 0, EVBUFFER_CHAIN_SIZE);
    181 
    182 	chain->buffer_len = to_alloc - EVBUFFER_CHAIN_SIZE;
    183 
    184 	/* this way we can manipulate the buffer to different addresses,
    185 	 * which is required for mmap for example.
    186 	 */
    187 	chain->buffer = EVBUFFER_CHAIN_EXTRA(u_char, chain);
    188 
    189 	return (chain);
    190 }
    191 
/* Release a single chain.  If the chain is pinned, only mark it
 * EVBUFFER_DANGLING so that the final unpin frees it.  Chains backed by
 * external resources (mmap regions, sendfile fds, user-supplied
 * reference memory) release those resources before the chain itself is
 * freed. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	if (CHAIN_PINNED(chain)) {
		/* Defer the free until _evbuffer_chain_unpin drops the
		 * last pin flag. */
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}
	if (chain->flags & (EVBUFFER_MMAP|EVBUFFER_SENDFILE|
		EVBUFFER_REFERENCE)) {
		if (chain->flags & EVBUFFER_REFERENCE) {
			struct evbuffer_chain_reference *info =
			    EVBUFFER_CHAIN_EXTRA(
				    struct evbuffer_chain_reference,
				    chain);
			/* Tell the owner of the referenced memory that we
			 * are done with it. */
			if (info->cleanupfn)
				(*info->cleanupfn)(chain->buffer,
				    chain->buffer_len,
				    info->extra);
		}
#ifdef _EVENT_HAVE_MMAP
		if (chain->flags & EVBUFFER_MMAP) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			/* Unmap first, then close the fd that backed it;
			 * failures are logged but not fatal. */
			if (munmap(chain->buffer, chain->buffer_len) == -1)
				event_warn("%s: munmap failed", __func__);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
#ifdef USE_SENDFILE
		if (chain->flags & EVBUFFER_SENDFILE) {
			struct evbuffer_chain_fd *info =
			    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd,
				chain);
			if (close(info->fd) == -1)
				event_warn("%s: close(%d) failed",
				    __func__, info->fd);
		}
#endif
	}

	mm_free(chain);
}
    237 
    238 static void
    239 evbuffer_free_all_chains(struct evbuffer_chain *chain)
    240 {
    241 	struct evbuffer_chain *next;
    242 	for (; chain; chain = next) {
    243 		next = chain->next;
    244 		evbuffer_chain_free(chain);
    245 	}
    246 }
    247 
#ifndef NDEBUG
/* Debug helper: return 1 iff no chain in the list starting at 'chain'
 * holds any data (all offs are zero). */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
"unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
    265 
    266 /* Free all trailing chains in 'buf' that are neither pinned nor empty, prior
    267  * to replacing them all with a new chain.  Return a pointer to the place
    268  * where the new chain will go.
    269  *
    270  * Internal; requires lock.  The caller must fix up buf->last and buf->first
    271  * as needed; they might have been freed.
    272  */
static struct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{
	struct evbuffer_chain **ch = buf->last_with_datap;
	/* Find the first victim chain.  It might be *last_with_datap */
	while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
		ch = &(*ch)->next;
	if (*ch) {
		/* Everything from *ch onward is empty and unpinned, so
		 * the whole tail can be freed. */
		EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
		evbuffer_free_all_chains(*ch);
		*ch = NULL;
	}
	return ch;
}
    287 
    288 /* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
    289  * chains as necessary.  Requires lock.  Does not schedule callbacks.
    290  */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **ch = buf->last_with_datap;
		/* Find the first victim chain.  It might be *last_with_datap */
		while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
			ch = &(*ch)->next;
		if (*ch == NULL) {
			/* There is no victim; just append this new chain. */
			buf->last->next = chain;
			if (chain->off)
				buf->last_with_datap = &buf->last->next;
		} else {
			/* Replace all victim chains with this chain. */
			EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
			evbuffer_free_all_chains(*ch);
			*ch = chain;
		}
		buf->last = chain;
	}
	/* The chain may arrive already holding data (e.g. a reference or
	 * file-backed chain), so account for it. */
	buf->total_len += chain->off;
}
    321 
    322 static inline struct evbuffer_chain *
    323 evbuffer_chain_insert_new(struct evbuffer *buf, size_t datlen)
    324 {
    325 	struct evbuffer_chain *chain;
    326 	if ((chain = evbuffer_chain_new(datlen)) == NULL)
    327 		return NULL;
    328 	evbuffer_chain_insert(buf, chain);
    329 	return chain;
    330 }
    331 
    332 void
    333 _evbuffer_chain_pin(struct evbuffer_chain *chain, unsigned flag)
    334 {
    335 	EVUTIL_ASSERT((chain->flags & flag) == 0);
    336 	chain->flags |= flag;
    337 }
    338 
    339 void
    340 _evbuffer_chain_unpin(struct evbuffer_chain *chain, unsigned flag)
    341 {
    342 	EVUTIL_ASSERT((chain->flags & flag) != 0);
    343 	chain->flags &= ~flag;
    344 	if (chain->flags & EVBUFFER_DANGLING)
    345 		evbuffer_chain_free(chain);
    346 }
    347 
    348 struct evbuffer *
    349 evbuffer_new(void)
    350 {
    351 	struct evbuffer *buffer;
    352 
    353 	buffer = mm_calloc(1, sizeof(struct evbuffer));
    354 	if (buffer == NULL)
    355 		return (NULL);
    356 
    357 	TAILQ_INIT(&buffer->callbacks);
    358 	buffer->refcnt = 1;
    359 	buffer->last_with_datap = &buffer->first;
    360 
    361 	return (buffer);
    362 }
    363 
    364 int
    365 evbuffer_set_flags(struct evbuffer *buf, ev_uint64_t flags)
    366 {
    367 	EVBUFFER_LOCK(buf);
    368 	buf->flags |= (ev_uint32_t)flags;
    369 	EVBUFFER_UNLOCK(buf);
    370 	return 0;
    371 }
    372 
    373 int
    374 evbuffer_clear_flags(struct evbuffer *buf, ev_uint64_t flags)
    375 {
    376 	EVBUFFER_LOCK(buf);
    377 	buf->flags &= ~(ev_uint32_t)flags;
    378 	EVBUFFER_UNLOCK(buf);
    379 	return 0;
    380 }
    381 
    382 void
    383 _evbuffer_incref(struct evbuffer *buf)
    384 {
    385 	EVBUFFER_LOCK(buf);
    386 	++buf->refcnt;
    387 	EVBUFFER_UNLOCK(buf);
    388 }
    389 
/* Acquire the lock on 'buf' and take one additional reference.  The
 * caller owns the lock afterward and must release it (typically via
 * _evbuffer_decref_and_unlock). */
void
_evbuffer_incref_and_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
	++buf->refcnt;
}
    396 
/* Arrange for callbacks on 'buffer' to run via the deferred-callback
 * queue of 'base' rather than being invoked inline.  Always returns 0. */
int
evbuffer_defer_callbacks(struct evbuffer *buffer, struct event_base *base)
{
	EVBUFFER_LOCK(buffer);
	buffer->cb_queue = event_base_get_deferred_cb_queue(base);
	buffer->deferred_cbs = 1;
	event_deferred_cb_init(&buffer->deferred,
	    evbuffer_deferred_callback, buffer);
	EVBUFFER_UNLOCK(buffer);
	return 0;
}
    408 
    409 int
    410 evbuffer_enable_locking(struct evbuffer *buf, void *lock)
    411 {
    412 #ifdef _EVENT_DISABLE_THREAD_SUPPORT
    413 	return -1;
    414 #else
    415 	if (buf->lock)
    416 		return -1;
    417 
    418 	if (!lock) {
    419 		EVTHREAD_ALLOC_LOCK(lock, EVTHREAD_LOCKTYPE_RECURSIVE);
    420 		if (!lock)
    421 			return -1;
    422 		buf->lock = lock;
    423 		buf->own_lock = 1;
    424 	} else {
    425 		buf->lock = lock;
    426 		buf->own_lock = 0;
    427 	}
    428 
    429 	return 0;
    430 #endif
    431 }
    432 
    433 void
    434 evbuffer_set_parent(struct evbuffer *buf, struct bufferevent *bev)
    435 {
    436 	EVBUFFER_LOCK(buf);
    437 	buf->parent = bev;
    438 	EVBUFFER_UNLOCK(buf);
    439 }
    440 
/* Invoke the registered callbacks on 'buffer'.  Which callbacks run
 * depends on how we were called:
 *   - running_deferred: we are inside the deferred-cb queue, so run
 *     enabled callbacks that did NOT ask to skip deferral;
 *   - buffer->deferred_cbs set (but not yet deferred): run only the
 *     enabled callbacks flagged EVBUFFER_CB_NODEFER now, and keep the
 *     add/del counters intact for the later deferred run;
 *   - otherwise: run every enabled callback.
 * Requires the buffer lock. */
static void
evbuffer_run_callbacks(struct evbuffer *buffer, int running_deferred)
{
	struct evbuffer_cb_entry *cbent, *next;
	struct evbuffer_cb_info info;
	size_t new_size;
	ev_uint32_t mask, masked_val;
	int clear = 1;

	if (running_deferred) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	} else if (buffer->deferred_cbs) {
		mask = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_NODEFER|EVBUFFER_CB_ENABLED;
		/* Don't zero-out n_add/n_del, since the deferred callbacks
		   will want to see them. */
		clear = 0;
	} else {
		mask = EVBUFFER_CB_ENABLED;
		masked_val = EVBUFFER_CB_ENABLED;
	}

	ASSERT_EVBUFFER_LOCKED(buffer);

	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}
	/* Nothing changed since the last run; nothing to report. */
	if (buffer->n_add_for_cb == 0 && buffer->n_del_for_cb == 0)
		return;

	/* Reconstruct the length the buffer had before the changes. */
	new_size = buffer->total_len;
	info.orig_size = new_size + buffer->n_del_for_cb - buffer->n_add_for_cb;
	info.n_added = buffer->n_add_for_cb;
	info.n_deleted = buffer->n_del_for_cb;
	if (clear) {
		buffer->n_add_for_cb = 0;
		buffer->n_del_for_cb = 0;
	}
	for (cbent = TAILQ_FIRST(&buffer->callbacks);
	     cbent != TAILQ_END(&buffer->callbacks);
	     cbent = next) {
		/* Get the 'next' pointer now in case this callback decides
		 * to remove itself or something. */
		next = TAILQ_NEXT(cbent, next);

		if ((cbent->flags & mask) != masked_val)
			continue;

		if ((cbent->flags & EVBUFFER_CB_OBSOLETE))
			cbent->cb.cb_obsolete(buffer,
			    info.orig_size, new_size, cbent->cbarg);
		else
			cbent->cb.cb_func(buffer, &info, cbent->cbarg);
	}
}
    498 
/* Report pending size changes on 'buffer' to its callbacks.  If deferred
 * callbacks are configured, schedule the deferred run (taking references
 * on the buffer and its parent bufferevent so they outlive the queue)
 * and then run only the no-defer callbacks inline.  Requires the buffer
 * lock. */
void
evbuffer_invoke_callbacks(struct evbuffer *buffer)
{
	if (TAILQ_EMPTY(&buffer->callbacks)) {
		buffer->n_add_for_cb = buffer->n_del_for_cb = 0;
		return;
	}

	if (buffer->deferred_cbs) {
		/* Already queued: the pending deferred run will pick up
		 * the accumulated counters. */
		if (buffer->deferred.queued)
			return;
		_evbuffer_incref_and_lock(buffer);
		if (buffer->parent)
			bufferevent_incref(buffer->parent);
		EVBUFFER_UNLOCK(buffer);
		event_deferred_cb_schedule(buffer->cb_queue, &buffer->deferred);
	}

	evbuffer_run_callbacks(buffer, 0);
}
    519 
/* Deferred-queue trampoline: runs the deferrable callbacks on the
 * evbuffer in 'arg', then drops the references taken when the deferral
 * was scheduled in evbuffer_invoke_callbacks. */
static void
evbuffer_deferred_callback(struct deferred_cb *cb, void *arg)
{
	struct bufferevent *parent = NULL;
	struct evbuffer *buffer = arg;

	/* XXXX It would be better to run these callbacks without holding the
	 * lock */
	EVBUFFER_LOCK(buffer);
	/* Grab the parent before decref may free the buffer. */
	parent = buffer->parent;
	evbuffer_run_callbacks(buffer, 1);
	_evbuffer_decref_and_unlock(buffer);
	if (parent)
		bufferevent_decref(parent);
}
    535 
    536 static void
    537 evbuffer_remove_all_callbacks(struct evbuffer *buffer)
    538 {
    539 	struct evbuffer_cb_entry *cbent;
    540 
    541 	while ((cbent = TAILQ_FIRST(&buffer->callbacks))) {
    542 	    TAILQ_REMOVE(&buffer->callbacks, cbent, next);
    543 	    mm_free(cbent);
    544 	}
    545 }
    546 
/* Drop one reference to 'buffer' (which the caller has locked) and
 * release the lock.  When the count hits zero, free all chains, remove
 * the callbacks, cancel any pending deferred callback, and free the
 * buffer itself. */
void
_evbuffer_decref_and_unlock(struct evbuffer *buffer)
{
	struct evbuffer_chain *chain, *next;
	ASSERT_EVBUFFER_LOCKED(buffer);

	EVUTIL_ASSERT(buffer->refcnt > 0);

	if (--buffer->refcnt > 0) {
		EVBUFFER_UNLOCK(buffer);
		return;
	}

	for (chain = buffer->first; chain != NULL; chain = next) {
		next = chain->next;
		evbuffer_chain_free(chain);
	}
	evbuffer_remove_all_callbacks(buffer);
	if (buffer->deferred_cbs)
		event_deferred_cb_cancel(buffer->cb_queue, &buffer->deferred);

	/* Unlock before freeing the lock itself; refcnt==0 means no one
	 * else can be holding a reference at this point. */
	EVBUFFER_UNLOCK(buffer);
	if (buffer->own_lock)
		EVTHREAD_FREE_LOCK(buffer->lock, EVTHREAD_LOCKTYPE_RECURSIVE);
	mm_free(buffer);
}
    573 
/* Public destructor: release one reference to 'buffer'; the buffer is
 * freed once the last reference is gone. */
void
evbuffer_free(struct evbuffer *buffer)
{
	EVBUFFER_LOCK(buffer);
	_evbuffer_decref_and_unlock(buffer);
}
    580 
/* Acquire the buffer's lock (no-op if locking was never enabled). */
void
evbuffer_lock(struct evbuffer *buf)
{
	EVBUFFER_LOCK(buf);
}
    586 
/* Release the buffer's lock (no-op if locking was never enabled). */
void
evbuffer_unlock(struct evbuffer *buf)
{
	EVBUFFER_UNLOCK(buf);
}
    592 
    593 size_t
    594 evbuffer_get_length(const struct evbuffer *buffer)
    595 {
    596 	size_t result;
    597 
    598 	EVBUFFER_LOCK(buffer);
    599 
    600 	result = (buffer->total_len);
    601 
    602 	EVBUFFER_UNLOCK(buffer);
    603 
    604 	return result;
    605 }
    606 
    607 size_t
    608 evbuffer_get_contiguous_space(const struct evbuffer *buf)
    609 {
    610 	struct evbuffer_chain *chain;
    611 	size_t result;
    612 
    613 	EVBUFFER_LOCK(buf);
    614 	chain = buf->first;
    615 	result = (chain != NULL ? chain->off : 0);
    616 	EVBUFFER_UNLOCK(buf);
    617 
    618 	return result;
    619 }
    620 
/* Fill 'vec' with up to 'n_vecs' extents of writable space in 'buf'
 * totalling at least 'size' bytes, expanding the buffer as needed.
 * Returns the number of vectors used, or -1 on failure (frozen end,
 * bad n_vecs, or allocation failure).  The space is not counted as data
 * until evbuffer_commit_space is called. */
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **chainp;
	int n = -1;

	EVBUFFER_LOCK(buf);
	if (buf->freeze_end)
		goto done;
	if (n_vecs < 1)
		goto done;
	if (n_vecs == 1) {
		/* Single-vector case: guarantee one contiguous extent of
		 * at least 'size' bytes. */
		if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL)
			goto done;

		vec[0].iov_base = CHAIN_SPACE_PTR(chain);
		vec[0].iov_len = (size_t) CHAIN_SPACE_LEN(chain);
		EVUTIL_ASSERT(size<0 || (size_t)vec[0].iov_len >= (size_t)size);
		n = 1;
	} else {
		/* Multi-vector case: the space may be spread across
		 * several chains. */
		if (_evbuffer_expand_fast(buf, size, n_vecs)<0)
			goto done;
		n = _evbuffer_read_setup_vecs(buf, size, vec, n_vecs,
				&chainp, 0);
	}

done:
	EVBUFFER_UNLOCK(buf);
	return n;

}
    653 
    654 static int
    655 advance_last_with_data(struct evbuffer *buf)
    656 {
    657 	int n = 0;
    658 	ASSERT_EVBUFFER_LOCKED(buf);
    659 
    660 	if (!*buf->last_with_datap)
    661 		return 0;
    662 
    663 	while ((*buf->last_with_datap)->next && (*buf->last_with_datap)->next->off) {
    664 		buf->last_with_datap = &(*buf->last_with_datap)->next;
    665 		++n;
    666 	}
    667 	return n;
    668 }
    669 
/* Commit space previously handed out by evbuffer_reserve_space: mark the
 * first vec[i].iov_len bytes of each reserved extent as live data.  The
 * vectors must match the reserved extents in order, address, and (at
 * most) length.  Returns 0 on success, -1 on mismatch or a frozen end.
 * Runs callbacks on success. */
int
evbuffer_commit_space(struct evbuffer *buf,
    struct evbuffer_iovec *vec, int n_vecs)
{
	struct evbuffer_chain *chain, **firstchainp, **chainp;
	int result = -1;
	size_t added = 0;
	int i;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end)
		goto done;
	if (n_vecs == 0) {
		result = 0;
		goto done;
	} else if (n_vecs == 1 &&
	    (buf->last && vec[0].iov_base == (void*)CHAIN_SPACE_PTR(buf->last))) {
		/* The user only got or used one chain; it might not
		 * be the first one with space in it. */
		if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last))
			goto done;
		buf->last->off += vec[0].iov_len;
		added = vec[0].iov_len;
		if (added)
			advance_last_with_data(buf);
		goto okay;
	}

	/* Advance 'firstchain' to the first chain with space in it. */
	firstchainp = buf->last_with_datap;
	if (!*firstchainp)
		goto done;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* pass 1: make sure that the pointers and lengths of vecs[] are in
	 * bounds before we try to commit anything. */
	for (i=0; i<n_vecs; ++i) {
		if (!chain)
			goto done;
		if (vec[i].iov_base != (void*)CHAIN_SPACE_PTR(chain) ||
		    (size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain))
			goto done;
		chain = chain->next;
	}
	/* pass 2: actually adjust all the chains. */
	chainp = firstchainp;
	for (i=0; i<n_vecs; ++i) {
		(*chainp)->off += vec[i].iov_len;
		added += vec[i].iov_len;
		if (vec[i].iov_len) {
			/* Track the last chain that actually received
			 * committed bytes. */
			buf->last_with_datap = chainp;
		}
		chainp = &(*chainp)->next;
	}

okay:
	buf->total_len += added;
	buf->n_add_for_cb += added;
	result = 0;
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
    739 
    740 static inline int
    741 HAS_PINNED_R(struct evbuffer *buf)
    742 {
    743 	return (buf->last && CHAIN_PINNED_R(buf->last));
    744 }
    745 
    746 static inline void
    747 ZERO_CHAIN(struct evbuffer *dst)
    748 {
    749 	ASSERT_EVBUFFER_LOCKED(dst);
    750 	dst->first = NULL;
    751 	dst->last = NULL;
    752 	dst->last_with_datap = &(dst)->first;
    753 	dst->total_len = 0;
    754 }
    755 
    756 /* Prepares the contents of src to be moved to another buffer by removing
    757  * read-pinned chains. The first pinned chain is saved in first, and the
    758  * last in last. If src has no read-pinned chains, first and last are set
    759  * to NULL. */
static int
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first,
		struct evbuffer_chain **last)
{
	struct evbuffer_chain *chain, **pinned;

	ASSERT_EVBUFFER_LOCKED(src);

	if (!HAS_PINNED_R(src)) {
		*first = *last = NULL;
		return 0;
	}

	/* Locate the first read-pinned chain: either *last_with_datap
	 * itself, or the chain right after it. */
	pinned = src->last_with_datap;
	if (!CHAIN_PINNED_R(*pinned))
		pinned = &(*pinned)->next;
	EVUTIL_ASSERT(CHAIN_PINNED_R(*pinned));
	chain = *first = *pinned;
	*last = src->last;

	/* If there's data in the first pinned chain, we need to allocate
	 * a new chain and copy the data over. */
	if (chain->off) {
		struct evbuffer_chain *tmp;

		EVUTIL_ASSERT(pinned == src->last_with_datap);
		tmp = evbuffer_chain_new(chain->off);
		if (!tmp)
			return -1;
		memcpy(tmp->buffer, chain->buffer + chain->misalign,
			chain->off);
		tmp->off = chain->off;
		/* The copy replaces the pinned chain in src; the pinned
		 * chain keeps its storage but is marked as drained. */
		*src->last_with_datap = tmp;
		src->last = tmp;
		chain->misalign += chain->off;
		chain->off = 0;
	} else {
		/* Pinned chain is empty: just detach the pinned tail. */
		src->last = *src->last_with_datap;
		*pinned = NULL;
	}

	return 0;
}
    803 
    804 static inline void
    805 RESTORE_PINNED(struct evbuffer *src, struct evbuffer_chain *pinned,
    806 		struct evbuffer_chain *last)
    807 {
    808 	ASSERT_EVBUFFER_LOCKED(src);
    809 
    810 	if (!pinned) {
    811 		ZERO_CHAIN(src);
    812 		return;
    813 	}
    814 
    815 	src->first = pinned;
    816 	src->last = last;
    817 	src->last_with_datap = &src->first;
    818 	src->total_len = 0;
    819 }
    820 
    821 static inline void
    822 COPY_CHAIN(struct evbuffer *dst, struct evbuffer *src)
    823 {
    824 	ASSERT_EVBUFFER_LOCKED(dst);
    825 	ASSERT_EVBUFFER_LOCKED(src);
    826 	dst->first = src->first;
    827 	if (src->last_with_datap == &src->first)
    828 		dst->last_with_datap = &dst->first;
    829 	else
    830 		dst->last_with_datap = src->last_with_datap;
    831 	dst->last = src->last;
    832 	dst->total_len = src->total_len;
    833 }
    834 
/* Link src's chain list onto the end of dst's.  Both buffers must be
 * locked.  NOTE(review): dereferences dst->last, so callers must ensure
 * dst is non-empty (the call sites only use this when out_total_len is
 * nonzero).  src's pointers are left for the caller to reset. */
static void
APPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	dst->last->next = src->first;
	/* Re-root the last-with-data marker if it pointed at src's head
	 * slot; otherwise it already points into the appended chains. */
	if (src->last_with_datap == &src->first)
		dst->last_with_datap = &dst->last->next;
	else
		dst->last_with_datap = src->last_with_datap;
	dst->last = src->last;
	dst->total_len += src->total_len;
}
    848 
/* Link src's chain list in front of dst's.  Both buffers must be locked.
 * src's pointers are left for the caller to reset. */
static void
PREPEND_CHAIN(struct evbuffer *dst, struct evbuffer *src)
{
	ASSERT_EVBUFFER_LOCKED(dst);
	ASSERT_EVBUFFER_LOCKED(src);
	src->last->next = dst->first;
	dst->first = src->first;
	dst->total_len += src->total_len;
	if (*dst->last_with_datap == NULL) {
		/* dst had no data; adopt src's last-with-data marker,
		 * re-rooting it if it pointed at src's head slot. */
		if (src->last_with_datap == &(src)->first)
			dst->last_with_datap = &dst->first;
		else
			dst->last_with_datap = src->last_with_datap;
	} else if (dst->last_with_datap == &dst->first) {
		/* dst's first chain held its last data; that chain now
		 * hangs off the end of src's old last chain. */
		dst->last_with_datap = &src->last->next;
	}
}
    866 
/* Move all data from 'inbuf' onto the end of 'outbuf' without copying
 * payload bytes (chains are relinked).  Read-pinned chains stay behind
 * in 'inbuf'.  Returns 0 on success (including the no-op cases), -1 if a
 * relevant edge is frozen or pinned data could not be preserved. */
int
evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);
	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Nothing to move, or moving onto itself: succeed trivially. */
	if (in_total_len == 0 || outbuf == inbuf)
		goto done;

	if (outbuf->freeze_end || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned tail so it can be restored after
	 * the move. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		APPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);

done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
    912 
/* Move all data from 'inbuf' to the front of 'outbuf' without copying
 * payload bytes.  Read-pinned chains stay behind in 'inbuf'.  Returns 0
 * on success (including the no-op cases), -1 if either buffer's start is
 * frozen or pinned data could not be preserved. */
int
evbuffer_prepend_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
{
	struct evbuffer_chain *pinned, *last;
	size_t in_total_len, out_total_len;
	int result = 0;

	EVBUFFER_LOCK2(inbuf, outbuf);

	in_total_len = inbuf->total_len;
	out_total_len = outbuf->total_len;

	/* Nothing to move, or moving onto itself: succeed trivially. */
	if (!in_total_len || inbuf == outbuf)
		goto done;

	if (outbuf->freeze_start || inbuf->freeze_start) {
		result = -1;
		goto done;
	}

	/* Detach inbuf's read-pinned tail so it can be restored after
	 * the move. */
	if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
		result = -1;
		goto done;
	}

	if (out_total_len == 0) {
		/* There might be an empty chain at the start of outbuf; free
		 * it. */
		evbuffer_free_all_chains(outbuf->first);
		COPY_CHAIN(outbuf, inbuf);
	} else {
		PREPEND_CHAIN(outbuf, inbuf);
	}

	RESTORE_PINNED(inbuf, pinned, last);

	inbuf->n_del_for_cb += in_total_len;
	outbuf->n_add_for_cb += in_total_len;

	evbuffer_invoke_callbacks(inbuf);
	evbuffer_invoke_callbacks(outbuf);
done:
	EVBUFFER_UNLOCK2(inbuf, outbuf);
	return result;
}
    958 
/* Remove the first 'len' bytes from 'buf' (all bytes if len exceeds the
 * buffer length), freeing chains that become empty.  Read-pinned chains
 * are emptied but kept.  Returns 0 on success, -1 if the buffer's start
 * is frozen.  Runs callbacks when data was removed. */
int
evbuffer_drain(struct evbuffer *buf, size_t len)
{
	struct evbuffer_chain *chain, *next;
	size_t remaining, old_len;
	int result = 0;

	EVBUFFER_LOCK(buf);
	old_len = buf->total_len;

	if (old_len == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	if (len >= old_len && !HAS_PINNED_R(buf)) {
		/* Fast path: drop everything; no pinned chains to keep. */
		len = old_len;
		for (chain = buf->first; chain != NULL; chain = next) {
			next = chain->next;
			evbuffer_chain_free(chain);
		}

		ZERO_CHAIN(buf);
	} else {
		if (len >= old_len)
			len = old_len;

		buf->total_len -= len;
		remaining = len;
		/* Walk chains that are fully consumed by the drain. */
		for (chain = buf->first;
		     remaining >= chain->off;
		     chain = next) {
			next = chain->next;
			remaining -= chain->off;

			/* Don't leave last_with_datap pointing at a chain
			 * we are about to free. */
			if (chain == *buf->last_with_datap) {
				buf->last_with_datap = &buf->first;
			}
			if (&chain->next == buf->last_with_datap)
				buf->last_with_datap = &buf->first;

			if (CHAIN_PINNED_R(chain)) {
				/* Pinned chains are emptied in place and
				 * kept; they must be the trailing ones. */
				EVUTIL_ASSERT(remaining == 0);
				chain->misalign += chain->off;
				chain->off = 0;
				break;
			} else
				evbuffer_chain_free(chain);
		}

		buf->first = chain;
		if (chain) {
			/* Partially drained chain: advance its read
			 * offset. */
			EVUTIL_ASSERT(remaining <= chain->off);
			chain->misalign += remaining;
			chain->off -= remaining;
		}
	}

	buf->n_del_for_cb += len;
	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   1028 
   1029 /* Reads data from an event buffer and drains the bytes read */
   1030 int
   1031 evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
   1032 {
   1033 	ev_ssize_t n;
   1034 	EVBUFFER_LOCK(buf);
   1035 	n = evbuffer_copyout(buf, data_out, datlen);
   1036 	if (n > 0) {
   1037 		if (evbuffer_drain(buf, n)<0)
   1038 			n = -1;
   1039 	}
   1040 	EVBUFFER_UNLOCK(buf);
   1041 	return (int)n;
   1042 }
   1043 
/* Copy up to 'datlen' bytes from the front of 'buf' into 'data_out'
 * without draining them.  Returns the number of bytes copied, or -1
 * if the buffer is frozen at its start. */
ev_ssize_t
evbuffer_copyout(struct evbuffer *buf, void *data_out, size_t datlen)
{
	/*XXX fails badly on sendfile case. */
	struct evbuffer_chain *chain;
	char *data = data_out;
	size_t nread;
	ev_ssize_t result = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	/* Clamp the request to the data actually available. */
	if (datlen >= buf->total_len)
		datlen = buf->total_len;

	if (datlen == 0)
		goto done;

	if (buf->freeze_start) {
		result = -1;
		goto done;
	}

	nread = datlen;

	/* Copy out every chain we consume entirely. */
	while (datlen && datlen >= chain->off) {
		memcpy(data, chain->buffer + chain->misalign, chain->off);
		data += chain->off;
		datlen -= chain->off;

		chain = chain->next;
		/* We clamped datlen above, so we can only run out of
		 * chains once datlen has reached zero. */
		EVUTIL_ASSERT(chain || datlen==0);
	}

	/* Copy the remaining partial chunk from the next chain. */
	if (datlen) {
		EVUTIL_ASSERT(chain);
		EVUTIL_ASSERT(datlen <= chain->off);
		memcpy(data, chain->buffer + chain->misalign, datlen);
	}

	result = nread;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   1090 
/* reads data from the src buffer to the dst buffer, avoids memcpy as
 * possible. */
/*  XXXX should return ev_ssize_t */
/* Move up to 'datlen' bytes from the front of 'src' to the end of 'dst'.
 * Whole chains are relinked instead of copied where possible; only the
 * final, partially-consumed chain is copied byte-wise.  Returns the
 * number of bytes moved, or -1 if either buffer is frozen on the
 * relevant side. */
int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
    size_t datlen)
{
	/*XXX We should have an option to force this to be zero-copy.*/

	/*XXX can fail badly on sendfile case. */
	struct evbuffer_chain *chain, *previous;
	size_t nread = 0;
	int result;

	EVBUFFER_LOCK2(src, dst);

	chain = previous = src->first;

	/* Moving a buffer onto itself is a no-op. */
	if (datlen == 0 || dst == src) {
		result = 0;
		goto done;
	}

	if (dst->freeze_end || src->freeze_start) {
		result = -1;
		goto done;
	}

	/* short-cut if there is no more data buffered */
	if (datlen >= src->total_len) {
		datlen = src->total_len;
		evbuffer_add_buffer(dst, src);
		result = (int)datlen; /*XXXX should return ev_ssize_t*/
		goto done;
	}

	/* removes chains if possible */
	while (chain->off <= datlen) {
		/* We can't remove the last with data from src unless we
		 * remove all chains, in which case we would have done the if
		 * block above */
		EVUTIL_ASSERT(chain != *src->last_with_datap);
		nread += chain->off;
		datlen -= chain->off;
		previous = chain;
		/* Don't let src->last_with_datap point into a chain
		 * we're about to hand to dst. */
		if (src->last_with_datap == &chain->next)
			src->last_with_datap = &src->first;
		chain = chain->next;
	}

	if (nread) {
		/* we can remove the chain */
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(dst);

		/* Splice the run [src->first .. previous] onto dst. */
		if (dst->first == NULL) {
			dst->first = src->first;
		} else {
			*chp = src->first;
		}
		dst->last = previous;
		previous->next = NULL;
		src->first = chain;
		advance_last_with_data(dst);

		dst->total_len += nread;
		dst->n_add_for_cb += nread;
	}

	/* we know that there is more data in the src buffer than
	 * we want to read, so we manually drain the chain */
	evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
	chain->misalign += datlen;
	chain->off -= datlen;
	nread += datlen;

	/* You might think we would want to increment dst->n_add_for_cb
	 * here too.  But evbuffer_add above already took care of that.
	 */
	src->total_len -= nread;
	src->n_del_for_cb += nread;

	if (nread) {
		evbuffer_invoke_callbacks(dst);
		evbuffer_invoke_callbacks(src);
	}
	result = (int)nread;/*XXXX should change return type */

done:
	EVBUFFER_UNLOCK2(src, dst);
	return result;
}
   1183 
/* Linearize the first 'size' bytes of 'buf' into a single contiguous
 * region and return a pointer to it.  size < 0 means the whole buffer.
 * Returns NULL if size exceeds the buffered data, if a chain we would
 * need to copy from is pinned, or on allocation failure. */
unsigned char *
evbuffer_pullup(struct evbuffer *buf, ev_ssize_t size)
{
	struct evbuffer_chain *chain, *next, *tmp, *last_with_data;
	unsigned char *buffer, *result = NULL;
	ev_ssize_t remaining;
	int removed_last_with_data = 0;
	int removed_last_with_datap = 0;

	EVBUFFER_LOCK(buf);

	chain = buf->first;

	if (size < 0)
		size = buf->total_len;
	/* if size > buf->total_len, we cannot guarantee to the user that she
	 * is going to have a long enough buffer afterwards; so we return
	 * NULL */
	if (size == 0 || (size_t)size > buf->total_len)
		goto done;

	/* No need to pull up anything; the first size bytes are
	 * already here. */
	if (chain->off >= (size_t)size) {
		result = chain->buffer + chain->misalign;
		goto done;
	}

	/* Make sure that none of the chains we need to copy from is pinned. */
	remaining = size - chain->off;
	EVUTIL_ASSERT(remaining >= 0);
	for (tmp=chain->next; tmp; tmp=tmp->next) {
		if (CHAIN_PINNED(tmp))
			goto done;
		if (tmp->off >= (size_t)remaining)
			break;
		remaining -= tmp->off;
	}

	/* Decide where the linearized bytes will live: */
	if (CHAIN_PINNED(chain)) {
		/* First chain is pinned: we may not move its data, so we
		 * can only append into its free tail space. */
		size_t old_off = chain->off;
		if (CHAIN_SPACE_LEN(chain) < size - chain->off) {
			/* not enough room at end of chunk. */
			goto done;
		}
		buffer = CHAIN_SPACE_PTR(chain);
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else if (chain->buffer_len - chain->misalign >= (size_t)size) {
		/* already have enough space in the first chain */
		size_t old_off = chain->off;
		buffer = chain->buffer + chain->misalign + chain->off;
		tmp = chain;
		tmp->off = size;
		size -= old_off;
		chain = chain->next;
	} else {
		/* Neither works: allocate a fresh chain big enough for
		 * all 'size' bytes and make it the new first chain. */
		if ((tmp = evbuffer_chain_new(size)) == NULL) {
			event_warn("%s: out of memory", __func__);
			goto done;
		}
		buffer = tmp->buffer;
		tmp->off = size;
		buf->first = tmp;
	}

	/* TODO(niels): deal with buffers that point to NULL like sendfile */

	/* Copy and free every chunk that will be entirely pulled into tmp */
	last_with_data = *buf->last_with_datap;
	for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
		next = chain->next;

		memcpy(buffer, chain->buffer + chain->misalign, chain->off);
		size -= chain->off;
		buffer += chain->off;
		/* Remember whether we invalidated last_with_datap so we
		 * can repair it below. */
		if (chain == last_with_data)
			removed_last_with_data = 1;
		if (&chain->next == buf->last_with_datap)
			removed_last_with_datap = 1;

		evbuffer_chain_free(chain);
	}

	if (chain != NULL) {
		/* Partially consume the last overlapping chain. */
		memcpy(buffer, chain->buffer + chain->misalign, size);
		chain->misalign += size;
		chain->off -= size;
	} else {
		/* Everything was pulled into tmp; it is now the last chain. */
		buf->last = tmp;
	}

	tmp->next = chain;

	/* Repair last_with_datap if it referred to a freed chain. */
	if (removed_last_with_data) {
		buf->last_with_datap = &buf->first;
	} else if (removed_last_with_datap) {
		if (buf->first->next && buf->first->next->off)
			buf->last_with_datap = &buf->first->next;
		else
			buf->last_with_datap = &buf->first;
	}

	result = (tmp->buffer + tmp->misalign);

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   1295 
   1296 /*
   1297  * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
   1298  * The returned buffer needs to be freed by the called.
   1299  */
   1300 char *
   1301 evbuffer_readline(struct evbuffer *buffer)
   1302 {
   1303 	return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
   1304 }
   1305 
   1306 static inline ev_ssize_t
   1307 evbuffer_strchr(struct evbuffer_ptr *it, const char chr)
   1308 {
   1309 	struct evbuffer_chain *chain = it->_internal.chain;
   1310 	size_t i = it->_internal.pos_in_chain;
   1311 	while (chain != NULL) {
   1312 		char *buffer = (char *)chain->buffer + chain->misalign;
   1313 		char *cp = memchr(buffer+i, chr, chain->off-i);
   1314 		if (cp) {
   1315 			it->_internal.chain = chain;
   1316 			it->_internal.pos_in_chain = cp - buffer;
   1317 			it->pos += (cp - buffer - i);
   1318 			return it->pos;
   1319 		}
   1320 		it->pos += chain->off - i;
   1321 		i = 0;
   1322 		chain = chain->next;
   1323 	}
   1324 
   1325 	return (-1);
   1326 }
   1327 
   1328 static inline char *
   1329 find_eol_char(char *s, size_t len)
   1330 {
   1331 #define CHUNK_SZ 128
   1332 	/* Lots of benchmarking found this approach to be faster in practice
   1333 	 * than doing two memchrs over the whole buffer, doin a memchr on each
   1334 	 * char of the buffer, or trying to emulate memchr by hand. */
   1335 	char *s_end, *cr, *lf;
   1336 	s_end = s+len;
   1337 	while (s < s_end) {
   1338 		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
   1339 		cr = memchr(s, '\r', chunk);
   1340 		lf = memchr(s, '\n', chunk);
   1341 		if (cr) {
   1342 			if (lf && lf < cr)
   1343 				return lf;
   1344 			return cr;
   1345 		} else if (lf) {
   1346 			return lf;
   1347 		}
   1348 		s += CHUNK_SZ;
   1349 	}
   1350 
   1351 	return NULL;
   1352 #undef CHUNK_SZ
   1353 }
   1354 
   1355 static ev_ssize_t
   1356 evbuffer_find_eol_char(struct evbuffer_ptr *it)
   1357 {
   1358 	struct evbuffer_chain *chain = it->_internal.chain;
   1359 	size_t i = it->_internal.pos_in_chain;
   1360 	while (chain != NULL) {
   1361 		char *buffer = (char *)chain->buffer + chain->misalign;
   1362 		char *cp = find_eol_char(buffer+i, chain->off-i);
   1363 		if (cp) {
   1364 			it->_internal.chain = chain;
   1365 			it->_internal.pos_in_chain = cp - buffer;
   1366 			it->pos += (cp - buffer) - i;
   1367 			return it->pos;
   1368 		}
   1369 		it->pos += chain->off - i;
   1370 		i = 0;
   1371 		chain = chain->next;
   1372 	}
   1373 
   1374 	return (-1);
   1375 }
   1376 
   1377 static inline int
   1378 evbuffer_strspn(
   1379 	struct evbuffer_ptr *ptr, const char *chrset)
   1380 {
   1381 	int count = 0;
   1382 	struct evbuffer_chain *chain = ptr->_internal.chain;
   1383 	size_t i = ptr->_internal.pos_in_chain;
   1384 
   1385 	if (!chain)
   1386 		return -1;
   1387 
   1388 	while (1) {
   1389 		char *buffer = (char *)chain->buffer + chain->misalign;
   1390 		for (; i < chain->off; ++i) {
   1391 			const char *p = chrset;
   1392 			while (*p) {
   1393 				if (buffer[i] == *p++)
   1394 					goto next;
   1395 			}
   1396 			ptr->_internal.chain = chain;
   1397 			ptr->_internal.pos_in_chain = i;
   1398 			ptr->pos += count;
   1399 			return count;
   1400 		next:
   1401 			++count;
   1402 		}
   1403 		i = 0;
   1404 
   1405 		if (! chain->next) {
   1406 			ptr->_internal.chain = chain;
   1407 			ptr->_internal.pos_in_chain = i;
   1408 			ptr->pos += count;
   1409 			return count;
   1410 		}
   1411 
   1412 		chain = chain->next;
   1413 	}
   1414 }
   1415 
   1416 
   1417 static inline char
   1418 evbuffer_getchr(struct evbuffer_ptr *it)
   1419 {
   1420 	struct evbuffer_chain *chain = it->_internal.chain;
   1421 	size_t off = it->_internal.pos_in_chain;
   1422 
   1423 	return chain->buffer[chain->misalign + off];
   1424 }
   1425 
/* Find the next end-of-line marker at or after 'start' (or the buffer
 * start when 'start' is NULL), using the rules of 'eol_style'.  On
 * success the returned ptr's pos is the offset of the EOL and
 * *eol_len_out is the number of EOL bytes to drain; on failure pos is
 * -1. */
struct evbuffer_ptr
evbuffer_search_eol(struct evbuffer *buffer,
    struct evbuffer_ptr *start, size_t *eol_len_out,
    enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it, it2;
	size_t extra_drain = 0;
	int ok = 0;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&it, start, sizeof(it));
	} else {
		/* No start given: begin at the front of the buffer. */
		it.pos = 0;
		it._internal.chain = buffer->first;
		it._internal.pos_in_chain = 0;
	}

	/* the eol_style determines our first stop character and how many
	 * characters we are going to drain afterwards. */
	switch (eol_style) {
	case EVBUFFER_EOL_ANY:
		/* EOL is any run of '\r' and '\n' characters; drain the
		 * whole run. */
		if (evbuffer_find_eol_char(&it) < 0)
			goto done;
		memcpy(&it2, &it, sizeof(it));
		extra_drain = evbuffer_strspn(&it2, "\r\n");
		break;
	case EVBUFFER_EOL_CRLF_STRICT: {
		/* EOL is exactly "\r\n". */
		it = evbuffer_search(buffer, "\r\n", 2, &it);
		if (it.pos < 0)
			goto done;
		extra_drain = 2;
		break;
	}
	case EVBUFFER_EOL_CRLF:
		/* EOL is "\r\n" or a bare "\n"; a bare '\r' is skipped. */
		while (1) {
			if (evbuffer_find_eol_char(&it) < 0)
				goto done;
			if (evbuffer_getchr(&it) == '\n') {
				extra_drain = 1;
				break;
			} else if (!evbuffer_ptr_memcmp(
				    buffer, &it, "\r\n", 2)) {
				extra_drain = 2;
				break;
			} else {
				/* Lone '\r': advance past it and keep
				 * searching. */
				if (evbuffer_ptr_set(buffer, &it, 1,
					EVBUFFER_PTR_ADD)<0)
					goto done;
			}
		}
		break;
	case EVBUFFER_EOL_LF:
		/* EOL is a bare "\n". */
		if (evbuffer_strchr(&it, '\n') < 0)
			goto done;
		extra_drain = 1;
		break;
	default:
		goto done;
	}

	ok = 1;
done:
	EVBUFFER_UNLOCK(buffer);

	if (!ok) {
		it.pos = -1;
	}
	if (eol_len_out)
		*eol_len_out = extra_drain;

	return it;
}
   1500 
/* Read and drain a single line (per 'eol_style') from 'buffer' into a
 * newly allocated NUL-terminated string.  The EOL bytes are drained but
 * not included in the result.  Returns NULL if no complete line is
 * available, the buffer is frozen at the front, or allocation fails.
 * On return, *n_read_out (if non-NULL) holds the line length, excluding
 * the terminator. */
char *
evbuffer_readln(struct evbuffer *buffer, size_t *n_read_out,
		enum evbuffer_eol_style eol_style)
{
	struct evbuffer_ptr it;
	char *line;
	size_t n_to_copy=0, extra_drain=0;
	char *result = NULL;

	EVBUFFER_LOCK(buffer);

	if (buffer->freeze_start) {
		goto done;
	}

	/* Locate the EOL; it.pos is the line length, extra_drain the
	 * number of EOL bytes to discard afterwards. */
	it = evbuffer_search_eol(buffer, NULL, &extra_drain, eol_style);
	if (it.pos < 0)
		goto done;
	n_to_copy = it.pos;

	if ((line = mm_malloc(n_to_copy+1)) == NULL) {
		event_warn("%s: out of memory", __func__);
		goto done;
	}

	evbuffer_remove(buffer, line, n_to_copy);
	line[n_to_copy] = '\0';

	/* Discard the EOL marker itself. */
	evbuffer_drain(buffer, extra_drain);
	result = line;
done:
	EVBUFFER_UNLOCK(buffer);

	if (n_read_out)
		*n_read_out = result ? n_to_copy : 0;

	return result;
}
   1539 
   1540 #define EVBUFFER_CHAIN_MAX_AUTO_SIZE 4096
   1541 
   1542 /* Adds data to an event buffer */
   1543 
/* Append 'datlen' bytes from 'data_in' to the end of 'buf'.
 * Returns 0 on success, -1 if the buffer is frozen at its end, the
 * total length would overflow, or allocation fails. */
int
evbuffer_add(struct evbuffer *buf, const void *data_in, size_t datlen)
{
	struct evbuffer_chain *chain, *tmp;
	const unsigned char *data = data_in;
	size_t remain, to_alloc;
	int result = -1;

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}
	/* Prevent buf->total_len overflow */
	if (datlen > EV_SIZE_MAX - buf->total_len) {
		goto done;
	}

	chain = buf->last;

	/* If there are no chains allocated for this buffer, allocate one
	 * big enough to hold all the data. */
	if (chain == NULL) {
		chain = evbuffer_chain_new(datlen);
		if (!chain)
			goto done;
		evbuffer_chain_insert(buf, chain);
	}

	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
		/* Always true for mutable buffers */
		EVUTIL_ASSERT(chain->misalign >= 0 &&
		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
		/* Free space left at the tail of the last chain. */
		remain = chain->buffer_len - (size_t)chain->misalign - chain->off;
		if (remain >= datlen) {
			/* there's enough space to hold all the data in the
			 * current last chain */
			memcpy(chain->buffer + chain->misalign + chain->off,
			    data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		} else if (!CHAIN_PINNED(chain) &&
		    evbuffer_chain_should_realign(chain, datlen)) {
			/* we can fit the data into the misalignment */
			evbuffer_chain_align(chain);

			memcpy(chain->buffer + chain->off, data, datlen);
			chain->off += datlen;
			buf->total_len += datlen;
			buf->n_add_for_cb += datlen;
			goto out;
		}
	} else {
		/* we cannot write any data to the last chain */
		remain = 0;
	}

	/* we need to add another chain */
	/* Grow geometrically (double the last chain) up to the auto-size
	 * cap, but never allocate less than the remaining data needs. */
	to_alloc = chain->buffer_len;
	if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
		to_alloc <<= 1;
	if (datlen > to_alloc)
		to_alloc = datlen;
	tmp = evbuffer_chain_new(to_alloc);
	if (tmp == NULL)
		goto done;

	/* First fill whatever tail space the old last chain still has... */
	if (remain) {
		memcpy(chain->buffer + chain->misalign + chain->off,
		    data, remain);
		chain->off += remain;
		buf->total_len += remain;
		buf->n_add_for_cb += remain;
	}

	data += remain;
	datlen -= remain;

	/* ...then put the rest in the new chain. */
	memcpy(tmp->buffer, data, datlen);
	tmp->off = datlen;
	evbuffer_chain_insert(buf, tmp);
	buf->n_add_for_cb += datlen;

out:
	evbuffer_invoke_callbacks(buf);
	result = 0;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   1636 
   1637 int
   1638 evbuffer_prepend(struct evbuffer *buf, const void *data, size_t datlen)
   1639 {
   1640 	struct evbuffer_chain *chain, *tmp;
   1641 	int result = -1;
   1642 
   1643 	EVBUFFER_LOCK(buf);
   1644 
   1645 	if (buf->freeze_start) {
   1646 		goto done;
   1647 	}
   1648 	if (datlen > EV_SIZE_MAX - buf->total_len) {
   1649 		goto done;
   1650 	}
   1651 
   1652 	chain = buf->first;
   1653 
   1654 	if (chain == NULL) {
   1655 		chain = evbuffer_chain_new(datlen);
   1656 		if (!chain)
   1657 			goto done;
   1658 		evbuffer_chain_insert(buf, chain);
   1659 	}
   1660 
   1661 	/* we cannot touch immutable buffers */
   1662 	if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) {
   1663 		/* Always true for mutable buffers */
   1664 		EVUTIL_ASSERT(chain->misalign >= 0 &&
   1665 		    (ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
   1666 
   1667 		/* If this chain is empty, we can treat it as
   1668 		 * 'empty at the beginning' rather than 'empty at the end' */
   1669 		if (chain->off == 0)
   1670 			chain->misalign = chain->buffer_len;
   1671 
   1672 		if ((size_t)chain->misalign >= datlen) {
   1673 			/* we have enough space to fit everything */
   1674 			memcpy(chain->buffer + chain->misalign - datlen,
   1675 			    data, datlen);
   1676 			chain->off += datlen;
   1677 			chain->misalign -= datlen;
   1678 			buf->total_len += datlen;
   1679 			buf->n_add_for_cb += datlen;
   1680 			goto out;
   1681 		} else if (chain->misalign) {
   1682 			/* we can only fit some of the data. */
   1683 			memcpy(chain->buffer,
   1684 			    (char*)data + datlen - chain->misalign,
   1685 			    (size_t)chain->misalign);
   1686 			chain->off += (size_t)chain->misalign;
   1687 			buf->total_len += (size_t)chain->misalign;
   1688 			buf->n_add_for_cb += (size_t)chain->misalign;
   1689 			datlen -= (size_t)chain->misalign;
   1690 			chain->misalign = 0;
   1691 		}
   1692 	}
   1693 
   1694 	/* we need to add another chain */
   1695 	if ((tmp = evbuffer_chain_new(datlen)) == NULL)
   1696 		goto done;
   1697 	buf->first = tmp;
   1698 	if (buf->last_with_datap == &buf->first)
   1699 		buf->last_with_datap = &tmp->next;
   1700 
   1701 	tmp->next = chain;
   1702 
   1703 	tmp->off = datlen;
   1704 	EVUTIL_ASSERT(datlen <= tmp->buffer_len);
   1705 	tmp->misalign = tmp->buffer_len - datlen;
   1706 
   1707 	memcpy(tmp->buffer + tmp->misalign, data, datlen);
   1708 	buf->total_len += datlen;
   1709 	buf->n_add_for_cb += (size_t)chain->misalign;
   1710 
   1711 out:
   1712 	evbuffer_invoke_callbacks(buf);
   1713 	result = 0;
   1714 done:
   1715 	EVBUFFER_UNLOCK(buf);
   1716 	return result;
   1717 }
   1718 
/** Helper: realigns the memory in chain->buffer so that misalign is 0. */
static void
evbuffer_chain_align(struct evbuffer_chain *chain)
{
	/* Moving data is only legal when nothing else refers to it. */
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_IMMUTABLE));
	EVUTIL_ASSERT(!(chain->flags & EVBUFFER_MEM_PINNED_ANY));
	/* memmove: source and destination regions may overlap. */
	memmove(chain->buffer, chain->buffer + chain->misalign, chain->off);
	chain->misalign = 0;
}
   1728 
   1729 #define MAX_TO_COPY_IN_EXPAND 4096
   1730 #define MAX_TO_REALIGN_IN_EXPAND 2048
   1731 
   1732 /** Helper: return true iff we should realign chain to fit datalen bytes of
   1733     data in it. */
   1734 static int
   1735 evbuffer_chain_should_realign(struct evbuffer_chain *chain,
   1736     size_t datlen)
   1737 {
   1738 	return chain->buffer_len - chain->off >= datlen &&
   1739 	    (chain->off < chain->buffer_len / 2) &&
   1740 	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
   1741 }
   1742 
   1743 /* Expands the available space in the event buffer to at least datlen, all in
   1744  * a single chunk.  Return that chunk. */
   1745 static struct evbuffer_chain *
   1746 evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
   1747 {
   1748 	struct evbuffer_chain *chain, **chainp;
   1749 	struct evbuffer_chain *result = NULL;
   1750 	ASSERT_EVBUFFER_LOCKED(buf);
   1751 
   1752 	chainp = buf->last_with_datap;
   1753 
   1754 	/* XXX If *chainp is no longer writeable, but has enough space in its
   1755 	 * misalign, this might be a bad idea: we could still use *chainp, not
   1756 	 * (*chainp)->next. */
   1757 	if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
   1758 		chainp = &(*chainp)->next;
   1759 
   1760 	/* 'chain' now points to the first chain with writable space (if any)
   1761 	 * We will either use it, realign it, replace it, or resize it. */
   1762 	chain = *chainp;
   1763 
   1764 	if (chain == NULL ||
   1765 	    (chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) {
   1766 		/* We can't use the last_with_data chain at all.  Just add a
   1767 		 * new one that's big enough. */
   1768 		goto insert_new;
   1769 	}
   1770 
   1771 	/* If we can fit all the data, then we don't have to do anything */
   1772 	if (CHAIN_SPACE_LEN(chain) >= datlen) {
   1773 		result = chain;
   1774 		goto ok;
   1775 	}
   1776 
   1777 	/* If the chain is completely empty, just replace it by adding a new
   1778 	 * empty chain. */
   1779 	if (chain->off == 0) {
   1780 		goto insert_new;
   1781 	}
   1782 
   1783 	/* If the misalignment plus the remaining space fulfills our data
   1784 	 * needs, we could just force an alignment to happen.  Afterwards, we
   1785 	 * have enough space.  But only do this if we're saving a lot of space
   1786 	 * and not moving too much data.  Otherwise the space savings are
   1787 	 * probably offset by the time lost in copying.
   1788 	 */
   1789 	if (evbuffer_chain_should_realign(chain, datlen)) {
   1790 		evbuffer_chain_align(chain);
   1791 		result = chain;
   1792 		goto ok;
   1793 	}
   1794 
   1795 	/* At this point, we can either resize the last chunk with space in
   1796 	 * it, use the next chunk after it, or   If we add a new chunk, we waste
   1797 	 * CHAIN_SPACE_LEN(chain) bytes in the former last chunk.  If we
   1798 	 * resize, we have to copy chain->off bytes.
   1799 	 */
   1800 
   1801 	/* Would expanding this chunk be affordable and worthwhile? */
   1802 	if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
   1803 	    chain->off > MAX_TO_COPY_IN_EXPAND ||
   1804 	    (datlen < EVBUFFER_CHAIN_MAX &&
   1805 		EVBUFFER_CHAIN_MAX - datlen >= chain->off)) {
   1806 		/* It's not worth resizing this chain. Can the next one be
   1807 		 * used? */
   1808 		if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) {
   1809 			/* Yes, we can just use the next chain (which should
   1810 			 * be empty. */
   1811 			result = chain->next;
   1812 			goto ok;
   1813 		} else {
   1814 			/* No; append a new chain (which will free all
   1815 			 * terminal empty chains.) */
   1816 			goto insert_new;
   1817 		}
   1818 	} else {
   1819 		/* Okay, we're going to try to resize this chain: Not doing so
   1820 		 * would waste at least 1/8 of its current allocation, and we
   1821 		 * can do so without having to copy more than
   1822 		 * MAX_TO_COPY_IN_EXPAND bytes. */
   1823 		/* figure out how much space we need */
   1824 		size_t length = chain->off + datlen;
   1825 		struct evbuffer_chain *tmp = evbuffer_chain_new(length);
   1826 		if (tmp == NULL)
   1827 			goto err;
   1828 
   1829 		/* copy the data over that we had so far */
   1830 		tmp->off = chain->off;
   1831 		memcpy(tmp->buffer, chain->buffer + chain->misalign,
   1832 		    chain->off);
   1833 		/* fix up the list */
   1834 		EVUTIL_ASSERT(*chainp == chain);
   1835 		result = *chainp = tmp;
   1836 
   1837 		if (buf->last == chain)
   1838 			buf->last = tmp;
   1839 
   1840 		tmp->next = chain->next;
   1841 		evbuffer_chain_free(chain);
   1842 		goto ok;
   1843 	}
   1844 
   1845 insert_new:
   1846 	result = evbuffer_chain_insert_new(buf, datlen);
   1847 	if (!result)
   1848 		goto err;
   1849 ok:
   1850 	EVUTIL_ASSERT(result);
   1851 	EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
   1852 err:
   1853 	return result;
   1854 }
   1855 
/* Make sure that datlen bytes are available for writing in the last n
 * chains.  Never copies or moves data.  Returns 0 on success, -1 on
 * allocation failure. */
int
_evbuffer_expand_fast(struct evbuffer *buf, size_t datlen, int n)
{
	struct evbuffer_chain *chain = buf->last, *tmp, *next;
	size_t avail;
	int used;

	ASSERT_EVBUFFER_LOCKED(buf);
	EVUTIL_ASSERT(n >= 2);

	if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) {
		/* There is no last chunk, or we can't touch the last chunk.
		 * Just add a new chunk. */
		chain = evbuffer_chain_new(datlen);
		if (chain == NULL)
			return (-1);

		evbuffer_chain_insert(buf, chain);
		return (0);
	}

	used = 0; /* number of chains we're using space in. */
	avail = 0; /* how much space they have. */
	/* How many bytes can we stick at the end of buffer as it is?  Iterate
	 * over the chains at the end of the buffer, tring to see how much
	 * space we have in the first n. */
	for (chain = *buf->last_with_datap; chain; chain = chain->next) {
		if (chain->off) {
			size_t space = (size_t) CHAIN_SPACE_LEN(chain);
			/* Only the last chain with data can have both data
			 * and free space. */
			EVUTIL_ASSERT(chain == *buf->last_with_datap);
			if (space) {
				avail += space;
				++used;
			}
		} else {
			/* No data in chain; realign it. */
			chain->misalign = 0;
			avail += chain->buffer_len;
			++used;
		}
		if (avail >= datlen) {
			/* There is already enough space.  Just return */
			return (0);
		}
		if (used == n)
			break;
	}

	/* There wasn't enough space in the first n chains with space in
	 * them. Either add a new chain with enough space, or replace all
	 * empty chains with one that has enough space, depending on n. */
	if (used < n) {
		/* The loop ran off the end of the chains before it hit n
		 * chains; we can add another. */
		EVUTIL_ASSERT(chain == NULL);

		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL)
			return (-1);

		buf->last->next = tmp;
		buf->last = tmp;
		/* (we would only set last_with_data if we added the first
		 * chain. But if the buffer had no chains, we would have
		 * just allocated a new chain earlier) */
		return (0);
	} else {
		/* Nuke _all_ the empty chains. */
		int rmv_all = 0; /* True iff we removed last_with_data. */
		chain = *buf->last_with_datap;
		if (!chain->off) {
			/* Even the last_with_data chain is empty: the whole
			 * buffer is empty chains. */
			EVUTIL_ASSERT(chain == buf->first);
			rmv_all = 1;
			avail = 0;
		} else {
			/* can't overflow, since only mutable chains have
			 * huge misaligns. */
			avail = (size_t) CHAIN_SPACE_LEN(chain);
			chain = chain->next;
		}


		/* Free every trailing empty chain. */
		for (; chain; chain = next) {
			next = chain->next;
			EVUTIL_ASSERT(chain->off == 0);
			evbuffer_chain_free(chain);
		}
		EVUTIL_ASSERT(datlen >= avail);
		tmp = evbuffer_chain_new(datlen - avail);
		if (tmp == NULL) {
			/* Allocation failed: restore a consistent chain
			 * list before reporting the error. */
			if (rmv_all) {
				ZERO_CHAIN(buf);
			} else {
				buf->last = *buf->last_with_datap;
				(*buf->last_with_datap)->next = NULL;
			}
			return (-1);
		}

		if (rmv_all) {
			buf->first = buf->last = tmp;
			buf->last_with_datap = &buf->first;
		} else {
			(*buf->last_with_datap)->next = tmp;
			buf->last = tmp;
		}
		return (0);
	}
}
   1967 
   1968 int
   1969 evbuffer_expand(struct evbuffer *buf, size_t datlen)
   1970 {
   1971 	struct evbuffer_chain *chain;
   1972 
   1973 	EVBUFFER_LOCK(buf);
   1974 	chain = evbuffer_expand_singlechain(buf, datlen);
   1975 	EVBUFFER_UNLOCK(buf);
   1976 	return chain ? 0 : -1;
   1977 }
   1978 
   1979 /*
   1980  * Reads data from a file descriptor into a buffer.
   1981  */
   1982 
   1983 #if defined(_EVENT_HAVE_SYS_UIO_H) || defined(WIN32)
   1984 #define USE_IOVEC_IMPL
   1985 #endif
   1986 
   1987 #ifdef USE_IOVEC_IMPL
   1988 
   1989 #ifdef _EVENT_HAVE_SYS_UIO_H
   1990 /* number of iovec we use for writev, fragmentation is going to determine
   1991  * how much we end up writing */
   1992 
   1993 #define DEFAULT_WRITE_IOVEC 128
   1994 
   1995 #if defined(UIO_MAXIOV) && UIO_MAXIOV < DEFAULT_WRITE_IOVEC
   1996 #define NUM_WRITE_IOVEC UIO_MAXIOV
   1997 #elif defined(IOV_MAX) && IOV_MAX < DEFAULT_WRITE_IOVEC
   1998 #define NUM_WRITE_IOVEC IOV_MAX
   1999 #else
   2000 #define NUM_WRITE_IOVEC DEFAULT_WRITE_IOVEC
   2001 #endif
   2002 
   2003 #define IOV_TYPE struct iovec
   2004 #define IOV_PTR_FIELD iov_base
   2005 #define IOV_LEN_FIELD iov_len
   2006 #define IOV_LEN_TYPE size_t
   2007 #else
   2008 #define NUM_WRITE_IOVEC 16
   2009 #define IOV_TYPE WSABUF
   2010 #define IOV_PTR_FIELD buf
   2011 #define IOV_LEN_FIELD len
   2012 #define IOV_LEN_TYPE unsigned long
   2013 #endif
   2014 #endif
   2015 #define NUM_READ_IOVEC 4
   2016 
   2017 #define EVBUFFER_MAX_READ	4096
   2018 
   2019 /** Helper function to figure out which space to use for reading data into
   2020     an evbuffer.  Internal use only.
   2021 
   2022     @param buf The buffer to read into
   2023     @param howmuch How much we want to read.
   2024     @param vecs An array of two or more iovecs or WSABUFs.
   2025     @param n_vecs_avail The length of vecs
   2026     @param chainp A pointer to a variable to hold the first chain we're
   2027       reading into.
   2028     @param exact Boolean: if true, we do not provide more than 'howmuch'
   2029       space in the vectors, even if more space is available.
   2030     @return The number of buffers we're using.
   2031  */
int
_evbuffer_read_setup_vecs(struct evbuffer *buf, ev_ssize_t howmuch,
    struct evbuffer_iovec *vecs, int n_vecs_avail,
    struct evbuffer_chain ***chainp, int exact)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain **firstchainp;
	size_t so_far;	/* bytes of space gathered into vecs so far */
	int i;
	ASSERT_EVBUFFER_LOCKED(buf);

	if (howmuch < 0)
		return -1;

	so_far = 0;
	/* Let firstchain be the first chain with any space on it */
	firstchainp = buf->last_with_datap;
	if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
		firstchainp = &(*firstchainp)->next;
	}

	chain = *firstchainp;
	/* NOTE(review): the loop body dereferences "chain" with no NULL
	 * check; callers appear to guarantee enough free chains exist
	 * beforehand (e.g. via _evbuffer_expand_fast) -- confirm before
	 * reusing this helper elsewhere. */
	for (i = 0; i < n_vecs_avail && so_far < (size_t)howmuch; ++i) {
		size_t avail = (size_t) CHAIN_SPACE_LEN(chain);
		/* In "exact" mode, never offer more than the caller asked. */
		if (avail > (howmuch - so_far) && exact)
			avail = howmuch - so_far;
		vecs[i].iov_base = CHAIN_SPACE_PTR(chain);
		vecs[i].iov_len = avail;
		so_far += avail;
		chain = chain->next;
	}

	*chainp = firstchainp;
	return i;
}
   2067 
/* Best-effort estimate of the number of bytes waiting to be read on
 * socket "fd".  Falls back to EVBUFFER_MAX_READ when no FIONREAD
 * mechanism exists; returns -1 when the ioctl itself fails. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	/* Can overflow, but mostly harmlessly. XXXX */
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	/* No FIONREAD available: just report the per-call read cap. */
	return EVBUFFER_MAX_READ;
#endif
}
   2086 
   2087 /* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
   2088  * as howmuch? */
/* Read up to "howmuch" bytes (capped at EVBUFFER_MAX_READ) from socket
 * "fd" and append them to "buf".  Returns the number of bytes read,
 * 0 on EOF, or -1 on error or when the buffer's end is frozen. */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	/* Appending is forbidden while the end of the buffer is frozen. */
	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	/* Clamp the request to what the kernel says is pending, and to the
	 * global per-call maximum. */
	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (_evbuffer_expand_fast(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef _EVBUFFER_IOVEC_IS_NATIVE
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		/* NOTE(review): passes 2 rather than NUM_READ_IOVEC as the
		 * vector count here -- looks historical; confirm intent. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = _evbuffer_read_setup_vecs(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Walk the chains we read into, bumping each chain's "off" by the
	 * portion of the n bytes that landed in it. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		/* can't overflow, since only mutable chains have
		 * huge misaligns. */
		size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
		/* XXXX This is a kludge that can waste space in perverse
		 * situations. */
		if (space > EVBUFFER_CHAIN_MAX)
			space = EVBUFFER_CHAIN_MAX;
		if ((ev_ssize_t)space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			/* Last chain that received data becomes the new
			 * last_with_data. */
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   2219 
   2220 #ifdef WIN32
   2221 static int
   2222 evbuffer_readfile(struct evbuffer *buf, evutil_socket_t fd, ev_ssize_t howmuch)
   2223 {
   2224 	int result;
   2225 	int nchains, n;
   2226 	struct evbuffer_iovec v[2];
   2227 
   2228 	EVBUFFER_LOCK(buf);
   2229 
   2230 	if (buf->freeze_end) {
   2231 		result = -1;
   2232 		goto done;
   2233 	}
   2234 
   2235 	if (howmuch < 0)
   2236 		howmuch = 16384;
   2237 
   2238 
   2239 	/* XXX we _will_ waste some space here if there is any space left
   2240 	 * over on buf->last. */
   2241 	nchains = evbuffer_reserve_space(buf, howmuch, v, 2);
   2242 	if (nchains < 1 || nchains > 2) {
   2243 		result = -1;
   2244 		goto done;
   2245 	}
   2246 	n = read((int)fd, v[0].iov_base, (unsigned int)v[0].iov_len);
   2247 	if (n <= 0) {
   2248 		result = n;
   2249 		goto done;
   2250 	}
   2251 	v[0].iov_len = (IOV_LEN_TYPE) n; /* XXXX another problem with big n.*/
   2252 	if (nchains > 1) {
   2253 		n = read((int)fd, v[1].iov_base, (unsigned int)v[1].iov_len);
   2254 		if (n <= 0) {
   2255 			result = (unsigned long) v[0].iov_len;
   2256 			evbuffer_commit_space(buf, v, 1);
   2257 			goto done;
   2258 		}
   2259 		v[1].iov_len = n;
   2260 	}
   2261 	evbuffer_commit_space(buf, v, nchains);
   2262 
   2263 	result = n;
   2264 done:
   2265 	EVBUFFER_UNLOCK(buf);
   2266 	return result;
   2267 }
   2268 #endif
   2269 
#ifdef USE_IOVEC_IMPL
/* Gather up to "howmuch" bytes from the leading chains of "buffer" into
 * an iovec array and write them to "fd" with one writev()/WSASend() call.
 * Returns the number of bytes written, 0 if there was nothing writable,
 * or -1 on error. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length?  if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* The whole chain fits within the limit. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* Partial chain: take only the remaining budget. */
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;
#ifdef WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
   2320 
#ifdef USE_SENDFILE
/* Transmit the leading sendfile-backed chain of "buffer" to socket "fd"
 * using the platform's zero-copy primitive.  Returns the number of bytes
 * sent, 0 on a retriable condition (EAGAIN/EINTR with no progress), or -1
 * on a hard error.
 * NOTE(review): the "howmuch" parameter is currently unused; the whole
 * first chain is offered to the kernel. */
static inline int
evbuffer_write_sendfile(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	struct evbuffer_chain *chain = buffer->first;
	struct evbuffer_chain_fd *info =
	    EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
#if defined(SENDFILE_IS_MACOSX) || defined(SENDFILE_IS_FREEBSD)
	int res;
	off_t len = chain->off;
#elif defined(SENDFILE_IS_LINUX) || defined(SENDFILE_IS_SOLARIS)
	ev_ssize_t res;
	off_t offset = chain->misalign;
#endif

	ASSERT_EVBUFFER_LOCKED(buffer);

#if defined(SENDFILE_IS_MACOSX)
	/* Darwin reports the number of bytes sent by updating "len". */
	res = sendfile(info->fd, fd, chain->misalign, &len, NULL, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_FREEBSD)
	/* FreeBSD reports bytes sent through the "sbytes" out-parameter. */
	res = sendfile(info->fd, fd, chain->misalign, chain->off, NULL, &len, 0);
	if (res == -1 && !EVUTIL_ERR_RW_RETRIABLE(errno))
		return (-1);

	return (len);
#elif defined(SENDFILE_IS_LINUX)
	/* TODO(niels): implement splice */
	res = sendfile(fd, info->fd, &offset, chain->off);
	if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
		/* if this is EAGAIN or EINTR return 0; otherwise, -1 */
		return (0);
	}
	return (res);
#elif defined(SENDFILE_IS_SOLARIS)
	{
		const off_t offset_orig = offset;
		res = sendfile(fd, info->fd, &offset, chain->off);
		if (res == -1 && EVUTIL_ERR_RW_RETRIABLE(errno)) {
			/* Partial progress before EAGAIN/EINTR: report it. */
			if (offset - offset_orig)
				return offset - offset_orig;
			/* if this is EAGAIN or EINTR and no bytes were
			 * written, return 0 */
			return (0);
		}
		return (res);
	}
#endif
}
#endif
   2375 
/* Write at most "howmuch" bytes (or everything, when howmuch is negative)
 * from the front of "buffer" to "fd", then drain whatever was written.
 * Returns the number of bytes written, or -1 on error or when the front
 * of the buffer is frozen. */
int
evbuffer_write_atmost(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	int n = -1;

	EVBUFFER_LOCK(buffer);

	/* Draining is forbidden while the front is frozen. */
	if (buffer->freeze_start) {
		goto done;
	}

	/* Clamp the request to the data actually available. */
	if (howmuch < 0 || (size_t)howmuch > buffer->total_len)
		howmuch = buffer->total_len;

	if (howmuch > 0) {
#ifdef USE_SENDFILE
		struct evbuffer_chain *chain = buffer->first;
		if (chain != NULL && (chain->flags & EVBUFFER_SENDFILE))
			n = evbuffer_write_sendfile(buffer, fd, howmuch);
		else {
#endif
#ifdef USE_IOVEC_IMPL
		n = evbuffer_write_iovec(buffer, fd, howmuch);
#elif defined(WIN32)
		/* XXX(nickm) Don't disable this code until we know if
		 * the WSARecv code above works. */
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = send(fd, p, howmuch, 0);
#else
		void *p = evbuffer_pullup(buffer, howmuch);
		EVUTIL_ASSERT(p || !howmuch);
		n = write(fd, p, howmuch);
#endif
#ifdef USE_SENDFILE
		}
#endif
	}

	/* Remove from the buffer only what the kernel accepted. */
	if (n > 0)
		evbuffer_drain(buffer, n);

done:
	EVBUFFER_UNLOCK(buffer);
	return (n);
}
   2423 
   2424 int
   2425 evbuffer_write(struct evbuffer *buffer, evutil_socket_t fd)
   2426 {
   2427 	return evbuffer_write_atmost(buffer, fd, -1);
   2428 }
   2429 
   2430 unsigned char *
   2431 evbuffer_find(struct evbuffer *buffer, const unsigned char *what, size_t len)
   2432 {
   2433 	unsigned char *search;
   2434 	struct evbuffer_ptr ptr;
   2435 
   2436 	EVBUFFER_LOCK(buffer);
   2437 
   2438 	ptr = evbuffer_search(buffer, (const char *)what, len, NULL);
   2439 	if (ptr.pos < 0) {
   2440 		search = NULL;
   2441 	} else {
   2442 		search = evbuffer_pullup(buffer, ptr.pos + len);
   2443 		if (search)
   2444 			search += ptr.pos;
   2445 	}
   2446 	EVBUFFER_UNLOCK(buffer);
   2447 	return search;
   2448 }
   2449 
/* Position "pos" within "buf".  EVBUFFER_PTR_SET seeks to the absolute
 * offset "position"; EVBUFFER_PTR_ADD advances the pointer forward by
 * "position" bytes.  Returns 0 on success, or -1 when the result lies
 * past the end of the buffer (pos is then invalidated: pos->pos == -1). */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
			/* Invalid start, or the addition would overflow. */
			EVBUFFER_UNLOCK(buf);
			return -1;
		}
		chain = pos->_internal.chain;
		pos->pos += position;
		position = pos->_internal.pos_in_chain;
		break;
	}

	/* Walk forward until the target offset falls inside "chain". */
	EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->_internal.chain = chain;
		pos->_internal.pos_in_chain = position + left;
	} else {
		/* Ran off the end: mark the pointer unusable. */
		pos->_internal.chain = NULL;
		pos->pos = -1;
	}

	EVBUFFER_UNLOCK(buf);

	return chain != NULL ? 0 : -1;
}
   2496 
   2497 /**
   2498    Compare the bytes in buf at position pos to the len bytes in mem.  Return
   2499    less than 0, 0, or greater than 0 as memcmp.
   2500  */
   2501 static int
   2502 evbuffer_ptr_memcmp(const struct evbuffer *buf, const struct evbuffer_ptr *pos,
   2503     const char *mem, size_t len)
   2504 {
   2505 	struct evbuffer_chain *chain;
   2506 	size_t position;
   2507 	int r;
   2508 
   2509 	ASSERT_EVBUFFER_LOCKED(buf);
   2510 
   2511 	if (pos->pos < 0 ||
   2512 	    EV_SIZE_MAX - len < (size_t)pos->pos ||
   2513 	    pos->pos + len > buf->total_len)
   2514 		return -1;
   2515 
   2516 	chain = pos->_internal.chain;
   2517 	position = pos->_internal.pos_in_chain;
   2518 	while (len && chain) {
   2519 		size_t n_comparable;
   2520 		if (len + position > chain->off)
   2521 			n_comparable = chain->off - position;
   2522 		else
   2523 			n_comparable = len;
   2524 		r = memcmp(chain->buffer + chain->misalign + position, mem,
   2525 		    n_comparable);
   2526 		if (r)
   2527 			return r;
   2528 		mem += n_comparable;
   2529 		len -= n_comparable;
   2530 		position = 0;
   2531 		chain = chain->next;
   2532 	}
   2533 
   2534 	return 0;
   2535 }
   2536 
/* Find the first occurrence of the "len"-byte string "what" in "buffer",
 * beginning at "start" (or at the buffer head when start is NULL).  Thin
 * wrapper around evbuffer_search_range() with no end bound. */
struct evbuffer_ptr
evbuffer_search(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start)
{
	return evbuffer_search_range(buffer, what, len, start, NULL);
}
   2542 
/* Search "buffer" for the first occurrence of the "len"-byte string
 * "what" between "start" and "end" (either may be NULL, meaning the
 * buffer's own bounds).  Returns an evbuffer_ptr whose pos field is the
 * offset of the match, or -1 when there is no match. */
struct evbuffer_ptr
evbuffer_search_range(struct evbuffer *buffer, const char *what, size_t len, const struct evbuffer_ptr *start, const struct evbuffer_ptr *end)
{
	struct evbuffer_ptr pos;
	struct evbuffer_chain *chain, *last_chain = NULL;
	const unsigned char *p;
	char first;

	EVBUFFER_LOCK(buffer);

	if (start) {
		memcpy(&pos, start, sizeof(pos));
		chain = pos._internal.chain;
	} else {
		pos.pos = 0;
		chain = pos._internal.chain = buffer->first;
		pos._internal.pos_in_chain = 0;
	}

	if (end)
		last_chain = end->_internal.chain;

	/* An empty needle, or one too large to index, never matches. */
	if (!len || len > EV_SSIZE_MAX)
		goto done;

	first = what[0];

	while (chain) {
		const unsigned char *start_at =
		    chain->buffer + chain->misalign +
		    pos._internal.pos_in_chain;
		/* Scan this chain for the needle's first byte; a full
		 * (possibly chain-spanning) comparison is done only at
		 * candidate positions. */
		p = memchr(start_at, first,
		    chain->off - pos._internal.pos_in_chain);
		if (p) {
			pos.pos += p - start_at;
			pos._internal.pos_in_chain += p - start_at;
			if (!evbuffer_ptr_memcmp(buffer, &pos, what, len)) {
				/* Reject matches extending past "end". */
				if (end && pos.pos + (ev_ssize_t)len > end->pos)
					goto not_found;
				else
					goto done;
			}
			/* False candidate: resume one byte further on. */
			++pos.pos;
			++pos._internal.pos_in_chain;
			if (pos._internal.pos_in_chain == chain->off) {
				chain = pos._internal.chain = chain->next;
				pos._internal.pos_in_chain = 0;
			}
		} else {
			if (chain == last_chain)
				goto not_found;
			pos.pos += chain->off - pos._internal.pos_in_chain;
			chain = pos._internal.chain = chain->next;
			pos._internal.pos_in_chain = 0;
		}
	}

not_found:
	pos.pos = -1;
	pos._internal.chain = NULL;
done:
	EVBUFFER_UNLOCK(buffer);
	return pos;
}
   2607 
/* Fill up to "n_vec" entries of "vec" with pointers into "buffer",
 * covering at least "len" bytes (or all available data when len is
 * negative), starting at "start_at" (or the buffer head when NULL).
 * Returns the number of vectors NEEDED, which may exceed n_vec. */
int
evbuffer_peek(struct evbuffer *buffer, ev_ssize_t len,
    struct evbuffer_ptr *start_at,
    struct evbuffer_iovec *vec, int n_vec)
{
	struct evbuffer_chain *chain;
	int idx = 0;
	ev_ssize_t len_so_far = 0;

	EVBUFFER_LOCK(buffer);

	if (start_at) {
		/* NOTE(review): assumes start_at references a live chain;
		 * a pointer invalidated by evbuffer_ptr_set (NULL chain)
		 * would be dereferenced here -- confirm callers. */
		chain = start_at->_internal.chain;
		len_so_far = chain->off
		    - start_at->_internal.pos_in_chain;
		idx = 1;
		if (n_vec > 0) {
			vec[0].iov_base = chain->buffer + chain->misalign
			    + start_at->_internal.pos_in_chain;
			vec[0].iov_len = len_so_far;
		}
		chain = chain->next;
	} else {
		chain = buffer->first;
	}

	if (n_vec == 0 && len < 0) {
		/* If no vectors are provided and they asked for "everything",
		 * pretend they asked for the actual available amount. */
		len = buffer->total_len;
		if (start_at) {
			len -= start_at->pos;
		}
	}

	while (chain) {
		if (len >= 0 && len_so_far >= len)
			break;
		if (idx<n_vec) {
			vec[idx].iov_base = chain->buffer + chain->misalign;
			vec[idx].iov_len = chain->off;
		} else if (len<0) {
			/* Out of caller vectors and counting "everything":
			 * nothing more to report. */
			break;
		}
		++idx;
		len_so_far += chain->off;
		chain = chain->next;
	}

	EVBUFFER_UNLOCK(buffer);

	return idx;
}
   2661 
   2662 
/* Append printf-style formatted output to "buf".  Returns the number of
 * bytes appended, or -1 on error (frozen end, OOM, or formatting
 * failure).  If the first attempt does not fit in the free space of the
 * current chain, a chain of the exact required size is allocated and the
 * formatting is redone. */
int
evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
{
	char *buffer;
	size_t space;
	int sz, result = -1;
	va_list aq;
	struct evbuffer_chain *chain;


	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		goto done;
	}

	/* make sure that at least some space is available */
	if ((chain = evbuffer_expand_singlechain(buf, 64)) == NULL)
		goto done;

	for (;;) {
#if 0
		size_t used = chain->misalign + chain->off;
		buffer = (char *)chain->buffer + chain->misalign + chain->off;
		EVUTIL_ASSERT(chain->buffer_len >= used);
		space = chain->buffer_len - used;
#endif
		buffer = (char*) CHAIN_SPACE_PTR(chain);
		space = (size_t) CHAIN_SPACE_LEN(chain);

		/* Copy the va_list: vsnprintf consumes it, and we may need
		 * to format again on the retry path below. */
#ifndef va_copy
#define	va_copy(dst, src)	memcpy(&(dst), &(src), sizeof(va_list))
#endif
		va_copy(aq, ap);

		sz = evutil_vsnprintf(buffer, space, fmt, aq);

		va_end(aq);

		if (sz < 0)
			goto done;
		/* Refuse output that would exceed the per-chain maximum. */
		if (INT_MAX >= EVBUFFER_CHAIN_MAX &&
		    (size_t)sz >= EVBUFFER_CHAIN_MAX)
			goto done;
		if ((size_t)sz < space) {
			/* It fit (sz excludes the terminating NUL). */
			chain->off += sz;
			buf->total_len += sz;
			buf->n_add_for_cb += sz;

			advance_last_with_data(buf);
			evbuffer_invoke_callbacks(buf);
			result = sz;
			goto done;
		}
		/* Truncated: grow to the exact required size and retry. */
		if ((chain = evbuffer_expand_singlechain(buf, sz + 1)) == NULL)
			goto done;
	}
	/* NOTREACHED */

done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
   2726 
   2727 int
   2728 evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
   2729 {
   2730 	int res = -1;
   2731 	va_list ap;
   2732 
   2733 	va_start(ap, fmt);
   2734 	res = evbuffer_add_vprintf(buf, fmt, ap);
   2735 	va_end(ap);
   2736 
   2737 	return (res);
   2738 }
   2739 
/* Append "datlen" bytes at "data" to "outbuf" by reference, without
 * copying.  "cleanupfn" (if non-NULL) will be invoked with "extra" once
 * the buffer no longer needs the memory.  Returns 0 on success, -1 on
 * failure; on a frozen end the cleanup function is deliberately NOT run
 * (ownership stays with the caller). */
int
evbuffer_add_reference(struct evbuffer *outbuf,
    const void *data, size_t datlen,
    evbuffer_ref_cleanup_cb cleanupfn, void *extra)
{
	struct evbuffer_chain *chain;
	struct evbuffer_chain_reference *info;
	int result = -1;

	chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_reference));
	if (!chain)
		return (-1);
	/* The chain borrows the caller's memory and must never modify it. */
	chain->flags |= EVBUFFER_REFERENCE | EVBUFFER_IMMUTABLE;
	chain->buffer = (u_char *)data;
	chain->buffer_len = datlen;
	chain->off = datlen;

	info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_reference, chain);
	info->cleanupfn = cleanupfn;
	info->extra = extra;

	EVBUFFER_LOCK(outbuf);
	if (outbuf->freeze_end) {
		/* don't call chain_free; we do not want to actually invoke
		 * the cleanup function */
		mm_free(chain);
		goto done;
	}
	evbuffer_chain_insert(outbuf, chain);
	outbuf->n_add_for_cb += datlen;

	evbuffer_invoke_callbacks(outbuf);

	result = 0;
done:
	EVBUFFER_UNLOCK(outbuf);

	return result;
}
   2779 
   2780 /* TODO(niels): maybe we don't want to own the fd, however, in that
   2781  * case, we should dup it - dup is cheap.  Perhaps, we should use a
   2782  * callback instead?
   2783  */
   2784 /* TODO(niels): we may want to add to automagically convert to mmap, in
   2785  * case evbuffer_remove() or evbuffer_pullup() are being used.
   2786  */
   2787 int
   2788 evbuffer_add_file(struct evbuffer *outbuf, int fd,
   2789     ev_off_t offset, ev_off_t length)
   2790 {
   2791 #if defined(USE_SENDFILE) || defined(_EVENT_HAVE_MMAP)
   2792 	struct evbuffer_chain *chain;
   2793 	struct evbuffer_chain_fd *info;
   2794 #endif
   2795 #if defined(USE_SENDFILE)
   2796 	int sendfile_okay = 1;
   2797 #endif
   2798 	int ok = 1;
   2799 
   2800 	if (offset < 0 || length < 0 ||
   2801 	    ((ev_uint64_t)length > EVBUFFER_CHAIN_MAX) ||
   2802 	    (ev_uint64_t)offset > (ev_uint64_t)(EVBUFFER_CHAIN_MAX - length))
   2803 		return (-1);
   2804 
   2805 #if defined(USE_SENDFILE)
   2806 	if (use_sendfile) {
   2807 		EVBUFFER_LOCK(outbuf);
   2808 		sendfile_okay = outbuf->flags & EVBUFFER_FLAG_DRAINS_TO_FD;
   2809 		EVBUFFER_UNLOCK(outbuf);
   2810 	}
   2811 
   2812 	if (use_sendfile && sendfile_okay) {
   2813 		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
   2814 		if (chain == NULL) {
   2815 			event_warn("%s: out of memory", __func__);
   2816 			return (-1);
   2817 		}
   2818 
   2819 		chain->flags |= EVBUFFER_SENDFILE | EVBUFFER_IMMUTABLE;
   2820 		chain->buffer = NULL;	/* no reading possible */
   2821 		chain->buffer_len = length + offset;
   2822 		chain->off = length;
   2823 		chain->misalign = offset;
   2824 
   2825 		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
   2826 		info->fd = fd;
   2827 
   2828 		EVBUFFER_LOCK(outbuf);
   2829 		if (outbuf->freeze_end) {
   2830 			mm_free(chain);
   2831 			ok = 0;
   2832 		} else {
   2833 			outbuf->n_add_for_cb += length;
   2834 			evbuffer_chain_insert(outbuf, chain);
   2835 		}
   2836 	} else
   2837 #endif
   2838 #if defined(_EVENT_HAVE_MMAP)
   2839 	if (use_mmap) {
   2840 		void *mapped = mmap(NULL, length + offset, PROT_READ,
   2841 #ifdef MAP_NOCACHE
   2842 		    MAP_NOCACHE |
   2843 #endif
   2844 #ifdef MAP_FILE
   2845 		    MAP_FILE |
   2846 #endif
   2847 		    MAP_PRIVATE,
   2848 		    fd, 0);
   2849 		/* some mmap implementations require offset to be a multiple of
   2850 		 * the page size.  most users of this api, are likely to use 0
   2851 		 * so mapping everything is not likely to be a problem.
   2852 		 * TODO(niels): determine page size and round offset to that
   2853 		 * page size to avoid mapping too much memory.
   2854 		 */
   2855 		if (mapped == MAP_FAILED) {
   2856 			event_warn("%s: mmap(%d, %d, %zu) failed",
   2857 			    __func__, fd, 0, (size_t)(offset + length));
   2858 			return (-1);
   2859 		}
   2860 		chain = evbuffer_chain_new(sizeof(struct evbuffer_chain_fd));
   2861 		if (chain == NULL) {
   2862 			event_warn("%s: out of memory", __func__);
   2863 			munmap(mapped, length);
   2864 			return (-1);
   2865 		}
   2866 
   2867 		chain->flags |= EVBUFFER_MMAP | EVBUFFER_IMMUTABLE;
   2868 		chain->buffer = mapped;
   2869 		chain->buffer_len = length + offset;
   2870 		chain->off = length + offset;
   2871 
   2872 		info = EVBUFFER_CHAIN_EXTRA(struct evbuffer_chain_fd, chain);
   2873 		info->fd = fd;
   2874 
   2875 		EVBUFFER_LOCK(outbuf);
   2876 		if (outbuf->freeze_end) {
   2877 			info->fd = -1;
   2878 			evbuffer_chain_free(chain);
   2879 			ok = 0;
   2880 		} else {
   2881 			outbuf->n_add_for_cb += length;
   2882 
   2883 			evbuffer_chain_insert(outbuf, chain);
   2884 
   2885 			/* we need to subtract whatever we don't need */
   2886 			evbuffer_drain(outbuf, offset);
   2887 		}
   2888 	} else
   2889 #endif
   2890 	{
   2891 		/* the default implementation */
   2892 		struct evbuffer *tmp = evbuffer_new();
   2893 		ev_ssize_t read;
   2894 
   2895 		if (tmp == NULL)
   2896 			return (-1);
   2897 
   2898 #ifdef WIN32
   2899 #define lseek _lseeki64
   2900 #endif
   2901 		if (lseek(fd, offset, SEEK_SET) == -1) {
   2902 			evbuffer_free(tmp);
   2903 			return (-1);
   2904 		}
   2905 
   2906 		/* we add everything to a temporary buffer, so that we
   2907 		 * can abort without side effects if the read fails.
   2908 		 */
   2909 		while (length) {
   2910 			ev_ssize_t to_read = length > EV_SSIZE_MAX ? EV_SSIZE_MAX : (ev_ssize_t)length;
   2911 			read = evbuffer_readfile(tmp, fd, to_read);
   2912 			if (read == -1) {
   2913 				evbuffer_free(tmp);
   2914 				return (-1);
   2915 			}
   2916 
   2917 			length -= read;
   2918 		}
   2919 
   2920 		EVBUFFER_LOCK(outbuf);
   2921 		if (outbuf->freeze_end) {
   2922 			evbuffer_free(tmp);
   2923 			ok = 0;
   2924 		} else {
   2925 			evbuffer_add_buffer(outbuf, tmp);
   2926 			evbuffer_free(tmp);
   2927 
   2928 #ifdef WIN32
   2929 #define close _close
   2930 #endif
   2931 			close(fd);
   2932 		}
   2933 	}
   2934 
   2935 	if (ok)
   2936 		evbuffer_invoke_callbacks(outbuf);
   2937 	EVBUFFER_UNLOCK(outbuf);
   2938 
   2939 	return ok ? 0 : -1;
   2940 }
   2941 
   2942 
   2943 void
   2944 evbuffer_setcb(struct evbuffer *buffer, evbuffer_cb cb, void *cbarg)
   2945 {
   2946 	EVBUFFER_LOCK(buffer);
   2947 
   2948 	if (!TAILQ_EMPTY(&buffer->callbacks))
   2949 		evbuffer_remove_all_callbacks(buffer);
   2950 
   2951 	if (cb) {
   2952 		struct evbuffer_cb_entry *ent =
   2953 		    evbuffer_add_cb(buffer, NULL, cbarg);
   2954 		ent->cb.cb_obsolete = cb;
   2955 		ent->flags |= EVBUFFER_CB_OBSOLETE;
   2956 	}
   2957 	EVBUFFER_UNLOCK(buffer);
   2958 }
   2959 
   2960 struct evbuffer_cb_entry *
   2961 evbuffer_add_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
   2962 {
   2963 	struct evbuffer_cb_entry *e;
   2964 	if (! (e = mm_calloc(1, sizeof(struct evbuffer_cb_entry))))
   2965 		return NULL;
   2966 	EVBUFFER_LOCK(buffer);
   2967 	e->cb.cb_func = cb;
   2968 	e->cbarg = cbarg;
   2969 	e->flags = EVBUFFER_CB_ENABLED;
   2970 	TAILQ_INSERT_HEAD(&buffer->callbacks, e, next);
   2971 	EVBUFFER_UNLOCK(buffer);
   2972 	return e;
   2973 }
   2974 
/* Unregister and free the callback entry "ent" from "buffer".
 * Always returns 0. */
int
evbuffer_remove_cb_entry(struct evbuffer *buffer,
			 struct evbuffer_cb_entry *ent)
{
	/* Unlink under the lock; free outside it. */
	EVBUFFER_LOCK(buffer);
	TAILQ_REMOVE(&buffer->callbacks, ent, next);
	EVBUFFER_UNLOCK(buffer);
	mm_free(ent);
	return 0;
}
   2985 
   2986 int
   2987 evbuffer_remove_cb(struct evbuffer *buffer, evbuffer_cb_func cb, void *cbarg)
   2988 {
   2989 	struct evbuffer_cb_entry *cbent;
   2990 	int result = -1;
   2991 	EVBUFFER_LOCK(buffer);
   2992 	TAILQ_FOREACH(cbent, &buffer->callbacks, next) {
   2993 		if (cb == cbent->cb.cb_func && cbarg == cbent->cbarg) {
   2994 			result = evbuffer_remove_cb_entry(buffer, cbent);
   2995 			goto done;
   2996 		}
   2997 	}
   2998 done:
   2999 	EVBUFFER_UNLOCK(buffer);
   3000 	return result;
   3001 }
   3002 
   3003 int
   3004 evbuffer_cb_set_flags(struct evbuffer *buffer,
   3005 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
   3006 {
   3007 	/* the user isn't allowed to mess with these. */
   3008 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
   3009 	EVBUFFER_LOCK(buffer);
   3010 	cb->flags |= flags;
   3011 	EVBUFFER_UNLOCK(buffer);
   3012 	return 0;
   3013 }
   3014 
   3015 int
   3016 evbuffer_cb_clear_flags(struct evbuffer *buffer,
   3017 		      struct evbuffer_cb_entry *cb, ev_uint32_t flags)
   3018 {
   3019 	/* the user isn't allowed to mess with these. */
   3020 	flags &= ~EVBUFFER_CB_INTERNAL_FLAGS;
   3021 	EVBUFFER_LOCK(buffer);
   3022 	cb->flags &= ~flags;
   3023 	EVBUFFER_UNLOCK(buffer);
   3024 	return 0;
   3025 }
   3026 
   3027 int
   3028 evbuffer_freeze(struct evbuffer *buffer, int start)
   3029 {
   3030 	EVBUFFER_LOCK(buffer);
   3031 	if (start)
   3032 		buffer->freeze_start = 1;
   3033 	else
   3034 		buffer->freeze_end = 1;
   3035 	EVBUFFER_UNLOCK(buffer);
   3036 	return 0;
   3037 }
   3038 
   3039 int
   3040 evbuffer_unfreeze(struct evbuffer *buffer, int start)
   3041 {
   3042 	EVBUFFER_LOCK(buffer);
   3043 	if (start)
   3044 		buffer->freeze_start = 0;
   3045 	else
   3046 		buffer->freeze_end = 0;
   3047 	EVBUFFER_UNLOCK(buffer);
   3048 	return 0;
   3049 }
   3050 
/* NOTE(review): dead code, compiled out with #if 0.  It also appears stale:
 * it invokes the callback as cb->cb(...), whereas the live code accesses the
 * callback through the union as cb->cb.cb_func / cb->cb.cb_obsolete, so this
 * would not compile as-is if re-enabled.  Kept for reference only. */
#if 0
void
evbuffer_cb_suspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if (!(cb->flags & EVBUFFER_CB_SUSPENDED)) {
		cb->size_before_suspend = evbuffer_get_length(buffer);
		cb->flags |= EVBUFFER_CB_SUSPENDED;
	}
}

void
evbuffer_cb_unsuspend(struct evbuffer *buffer, struct evbuffer_cb_entry *cb)
{
	if ((cb->flags & EVBUFFER_CB_SUSPENDED)) {
		unsigned call = (cb->flags & EVBUFFER_CB_CALL_ON_UNSUSPEND);
		size_t sz = cb->size_before_suspend;
		cb->flags &= ~(EVBUFFER_CB_SUSPENDED|
			       EVBUFFER_CB_CALL_ON_UNSUSPEND);
		cb->size_before_suspend = 0;
		if (call && (cb->flags & EVBUFFER_CB_ENABLED)) {
			cb->cb(buffer, sz, evbuffer_get_length(buffer), cb->cbarg);
		}
	}
}
#endif
   3076 
   3077 /* These hooks are exposed so that the unit tests can temporarily disable
   3078  * sendfile support in order to test mmap, or both to test linear
   3079  * access. Don't use it; if we need to add a way to disable sendfile support
   3080  * in the future, it will probably be via an alternate version of
   3081  * evbuffer_add_file() with a 'flags' argument.
   3082  */
   3083 int _evbuffer_testing_use_sendfile(void);
   3084 int _evbuffer_testing_use_mmap(void);
   3085 int _evbuffer_testing_use_linear_file_access(void);
   3086 
   3087 int
   3088 _evbuffer_testing_use_sendfile(void)
   3089 {
   3090 	int ok = 0;
   3091 #ifdef USE_SENDFILE
   3092 	use_sendfile = 1;
   3093 	ok = 1;
   3094 #endif
   3095 #ifdef _EVENT_HAVE_MMAP
   3096 	use_mmap = 0;
   3097 #endif
   3098 	return ok;
   3099 }
   3100 int
   3101 _evbuffer_testing_use_mmap(void)
   3102 {
   3103 	int ok = 0;
   3104 #ifdef USE_SENDFILE
   3105 	use_sendfile = 0;
   3106 #endif
   3107 #ifdef _EVENT_HAVE_MMAP
   3108 	use_mmap = 1;
   3109 	ok = 1;
   3110 #endif
   3111 	return ok;
   3112 }
int
_evbuffer_testing_use_linear_file_access(void)
{
	/* Unit-test hook: disable both sendfile and mmap so file data is
	 * read linearly (plain read()).  Always available, so always
	 * returns 1. */
#ifdef USE_SENDFILE
	use_sendfile = 0;
#endif
#ifdef _EVENT_HAVE_MMAP
	use_mmap = 0;
#endif
	return 1;
}
   3124