/*
 *    Stack-less Just-In-Time compiler
 *
 *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification, are
 * permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice, this list of
 *      conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright notice, this list
 *      of conditions and the following disclaimer in the documentation and/or other materials
 *      provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
 * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
   This file contains a simple executable memory allocator.

   It is assumed that executable code blocks are usually medium (or sometimes
   large) memory blocks, and that the allocator is called relatively rarely,
   so it is less optimized than general-purpose allocators. Using it as a
   generic allocator is therefore not recommended.

   How does it work:
     Memory is allocated in contiguous memory areas called chunks by alloc_chunk().
     Chunk format:
     [ block ][ block ] ... [ block ][ block terminator ]

   Every block, including the block terminator, starts with a block_header. The
   block header contains the size of the previous block and the size of the
   block it starts. These sizes can also hold special values.
     Block size:
       0 - The block is a free_block, with a separate size member.
       1 - The block is a block terminator.
       n - The block is currently in use, and the value is its size.
     Previous block size:
       0 - This is the first block of the memory chunk.
       n - The size of the previous block.

   Using these size values we can walk forward or backward along the block
   chain. The unused blocks are stored in a linked list pointed to by
   free_blocks. This list is used to find a suitable memory area when the
   allocator is called.

   When a block is freed, the new free block is merged with its adjacent free
   blocks if possible. For example, given

     [ free block ][ used block ][ free block ]

   when "used block" is freed, the three blocks are joined into one:

     [           one big free block           ]
*/
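
/*
   A minimal usage sketch (illustration only; `code_size` is a placeholder,
   everything else is defined in this file):

     void *code = sljit_malloc_exec(code_size);
     sljit_sw offset = sljit_exec_offset(code);
     void *exec = (void*)((sljit_u8*)code + offset);

   Machine code is written through `code` (the writable mapping) and executed
   through `exec` (the executable mapping); the block is later released with
   sljit_free_exec(exec), which maps the executable address back to the
   writable one internally.
*/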

/* --------------------------------------------------------------------- */
/*  System (OS) functions                                                */
/* --------------------------------------------------------------------- */

/* 64 KByte. */
#define CHUNK_SIZE	0x10000

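/* Each chunk starts with a chunk_header placed at the beginning of the
   writable (PROT_READ | PROT_WRITE) mapping. `executable` is the address of
   the second, executable (PROT_READ | PROT_EXEC) mapping of the same
   underlying file, and `fd` is the backing temporary file descriptor. */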
struct chunk_header {
	void *executable;
	int fd;
};

/*
   alloc_chunk / free_chunk :
     * allocate executable system memory chunks
     * the size is always divisible by CHUNK_SIZE
   allocator_grab_lock / allocator_release_lock :
     * make the allocator thread safe
     * can be empty if the OS (or the application) does not support threading
     * only the allocator requires this lock, sljit is fully thread safe
       as it only uses local variables
*/

#include <fcntl.h>

#ifndef O_NOATIME
#define O_NOATIME 0
#endif

#ifdef __O_TMPFILE
#ifndef O_TMPFILE
#define O_TMPFILE	(__O_TMPFILE | O_DIRECTORY)
#endif
#endif

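/* mkostemp() and secure_getenv() are glibc extensions; they may not be
   declared by the system headers unless _GNU_SOURCE is defined, so local
   prototypes are provided here. */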
int mkostemp(char *template, int flags);
char *secure_getenv(const char *name);

static SLJIT_INLINE int create_tempfile(void)
{
	int fd;

	char tmp_name[256];
	size_t tmp_name_len;
	char *dir;
	size_t len;

#ifdef P_tmpdir
	len = (P_tmpdir != NULL) ? strlen(P_tmpdir) : 0;

	if (len > 0 && len < sizeof(tmp_name)) {
		strcpy(tmp_name, P_tmpdir);
		tmp_name_len = len;
	}
	else {
		strcpy(tmp_name, "/tmp");
		tmp_name_len = 4;
	}
#else
	strcpy(tmp_name, "/tmp");
	tmp_name_len = 4;
#endif

	dir = secure_getenv("TMPDIR");
	if (dir) {
		len = strlen(dir);
		if (len > 0 && len < sizeof(tmp_name)) {
			strcpy(tmp_name, dir);
			tmp_name_len = len;
		}
	}

	SLJIT_ASSERT(tmp_name_len > 0 && tmp_name_len < sizeof(tmp_name));

	while (tmp_name_len > 0 && tmp_name[tmp_name_len - 1] == '/') {
		tmp_name_len--;
		tmp_name[tmp_name_len] = '\0';
	}

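	/* Preferred path: O_TMPFILE creates an unnamed file in the chosen
	   directory, so there is nothing to unlink. If it is unsupported or
	   fails, fall back to mkostemp() and unlink the file immediately after
	   creation. */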
#ifdef O_TMPFILE
	fd = open(tmp_name, O_TMPFILE | O_EXCL | O_RDWR | O_NOATIME | O_CLOEXEC, S_IRUSR | S_IWUSR);
	if (fd != -1)
		return fd;
#endif

	if (tmp_name_len + 7 >= sizeof(tmp_name))
		return -1;

	strcpy(tmp_name + tmp_name_len, "/XXXXXX");
	fd = mkostemp(tmp_name, O_CLOEXEC | O_NOATIME);

	if (fd == -1)
		return fd;

	if (unlink(tmp_name)) {
		close(fd);
		return -1;
	}

	return fd;
}

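/* Allocates one chunk: a temporary file of `size` bytes is created and mapped
   twice, once writable (this mapping starts with the chunk_header) and once
   executable. Code is written through the first view and executed through the
   second, so no mapping is ever writable and executable at the same time. */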
static SLJIT_INLINE struct chunk_header* alloc_chunk(sljit_uw size)
{
	struct chunk_header *retval;
	int fd;

	fd = create_tempfile();
	if (fd == -1)
		return NULL;

	if (ftruncate(fd, size)) {
		close(fd);
		return NULL;
	}

	retval = (struct chunk_header *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (retval == MAP_FAILED) {
		close(fd);
		return NULL;
	}

	retval->executable = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);

	if (retval->executable == MAP_FAILED) {
		munmap(retval, size);
		close(fd);
		return NULL;
	}

	retval->fd = fd;
	return retval;
}

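/* Releases a chunk: unmaps both views and closes the backing file. `chunk`
   points just after the chunk_header, i.e. to the first block_header. */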
static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
	struct chunk_header *header = ((struct chunk_header *)chunk) - 1;

	int fd = header->fd;
	munmap(header->executable, size);
	munmap(header, size);
	close(fd);
}

/* --------------------------------------------------------------------- */
/*  Common functions                                                     */
/* --------------------------------------------------------------------- */

#define CHUNK_MASK	(~(CHUNK_SIZE - 1))

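/* Block metadata. `size` is the size of this block (with the special values 0
   and 1 described above), `prev_size` is the size of the previous block in
   the chunk, and `executable_offset` is the distance between the writable and
   the executable mapping of the chunk. */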
struct block_header {
	sljit_uw size;
	sljit_uw prev_size;
	sljit_sw executable_offset;
};

struct free_block {
	struct block_header header;
	struct free_block *next;
	struct free_block *prev;
	sljit_uw size;
};

#define AS_BLOCK_HEADER(base, offset) \
	((struct block_header*)(((sljit_u8*)base) + offset))
#define AS_FREE_BLOCK(base, offset) \
	((struct free_block*)(((sljit_u8*)base) + offset))
#define MEM_START(base)		((void*)((base) + 1))
#define ALIGN_SIZE(size)	(((size) + sizeof(struct block_header) + 7) & ~7)

static struct free_block* free_blocks;
static sljit_uw allocated_size;
static sljit_uw total_size;

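/* free_blocks is a doubly linked list of free blocks; insertion always
   happens at the head, and a header.size of 0 marks a block as free. */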
static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size)
{
	free_block->header.size = 0;
	free_block->size = size;

	free_block->next = free_blocks;
	free_block->prev = NULL;
	if (free_blocks)
		free_blocks->prev = free_block;
	free_blocks = free_block;
}

static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block)
{
	if (free_block->next)
		free_block->next->prev = free_block->prev;

	if (free_block->prev)
		free_block->prev->next = free_block->next;
	else {
		SLJIT_ASSERT(free_blocks == free_block);
		free_blocks = free_block->next;
	}
}

SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size)
{
	struct chunk_header *chunk_header;
	struct block_header *header;
	struct block_header *next_header;
	struct free_block *free_block;
	sljit_uw chunk_size;
	sljit_sw executable_offset;

	allocator_grab_lock();
	if (size < (64 - sizeof(struct block_header)))
		size = (64 - sizeof(struct block_header));
	size = ALIGN_SIZE(size);

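	/* First fit: walk the free list and use the first block that is large
	   enough. If the block is more than 64 bytes larger than the request,
	   only its tail is cut off; otherwise the whole block is used. */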
	free_block = free_blocks;
	while (free_block) {
		if (free_block->size >= size) {
			chunk_size = free_block->size;
			if (chunk_size > size + 64) {
				/* We just cut a block from the end of the free block. */
				chunk_size -= size;
				free_block->size = chunk_size;
				header = AS_BLOCK_HEADER(free_block, chunk_size);
				header->prev_size = chunk_size;
				header->executable_offset = free_block->header.executable_offset;
				AS_BLOCK_HEADER(header, size)->prev_size = size;
			}
			else {
				sljit_remove_free_block(free_block);
				header = (struct block_header*)free_block;
				size = chunk_size;
			}
			allocated_size += size;
			header->size = size;
			allocator_release_lock();
			return MEM_START(header);
		}
		free_block = free_block->next;
	}

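	/* No suitable free block was found: allocate a new chunk whose size is
	   rounded up to a multiple of CHUNK_SIZE and is large enough for the
	   chunk header, the block header and the requested block. */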
	chunk_size = sizeof(struct chunk_header) + sizeof(struct block_header);
	chunk_size = (chunk_size + size + CHUNK_SIZE - 1) & CHUNK_MASK;

	chunk_header = alloc_chunk(chunk_size);
	if (!chunk_header) {
		allocator_release_lock();
		return NULL;
	}

	executable_offset = (sljit_sw)((sljit_u8*)chunk_header->executable - (sljit_u8*)chunk_header);

	chunk_size -= sizeof(struct chunk_header) + sizeof(struct block_header);
	total_size += chunk_size;

	header = (struct block_header *)(chunk_header + 1);

	header->prev_size = 0;
	header->executable_offset = executable_offset;
	if (chunk_size > size + 64) {
		/* Cut the allocated space into a free and a used block. */
		allocated_size += size;
		header->size = size;
		chunk_size -= size;

		free_block = AS_FREE_BLOCK(header, size);
		free_block->header.prev_size = size;
		free_block->header.executable_offset = executable_offset;
		sljit_insert_free_block(free_block, chunk_size);
		next_header = AS_BLOCK_HEADER(free_block, chunk_size);
	}
	else {
		/* All space belongs to this allocation. */
		allocated_size += chunk_size;
		header->size = chunk_size;
		next_header = AS_BLOCK_HEADER(header, chunk_size);
	}
	next_header->size = 1;
	next_header->prev_size = chunk_size;
	next_header->executable_offset = executable_offset;
	allocator_release_lock();
	return MEM_START(header);
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr)
{
	struct block_header *header;
	struct free_block* free_block;

	allocator_grab_lock();
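	/* The block header is located just before ptr; subtracting
	   executable_offset converts the header address from the executable
	   mapping back to the writable mapping, where the metadata is updated. */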
	header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header));
	header = AS_BLOCK_HEADER(header, -header->executable_offset);
	allocated_size -= header->size;

	/* Merge adjacent free blocks if possible. */

	/* If header->prev_size == 0, free_block will be equal to header.
	   In this case, free_block->header.size will be > 0. */
	free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size);
	if (SLJIT_UNLIKELY(!free_block->header.size)) {
		free_block->size += header->size;
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}
	else {
		free_block = (struct free_block*)header;
		sljit_insert_free_block(free_block, header->size);
	}

	header = AS_BLOCK_HEADER(free_block, free_block->size);
	if (SLJIT_UNLIKELY(!header->size)) {
		free_block->size += ((struct free_block*)header)->size;
		sljit_remove_free_block((struct free_block*)header);
		header = AS_BLOCK_HEADER(free_block, free_block->size);
		header->prev_size = free_block->size;
	}

	/* The whole chunk is free. */
	if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) {
		/* Release the chunk only if more than (allocated_size / 2) bytes of
		   free space would still remain afterwards; otherwise keep it cached. */
		if (total_size - free_block->size > (allocated_size * 3 / 2)) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
	}

	allocator_release_lock();
}

SLJIT_API_FUNC_ATTRIBUTE void sljit_free_unused_memory_exec(void)
{
	struct free_block* free_block;
	struct free_block* next_free_block;

	allocator_grab_lock();

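	/* Walk the free list and release every chunk that consists of a single
	   free block covering the whole chunk (prev_size == 0 and the following
	   block is the terminator). */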
	free_block = free_blocks;
	while (free_block) {
		next_free_block = free_block->next;
		if (!free_block->header.prev_size &&
				AS_BLOCK_HEADER(free_block, free_block->size)->size == 1) {
			total_size -= free_block->size;
			sljit_remove_free_block(free_block);
			free_chunk(free_block, free_block->size + sizeof(struct block_header));
		}
		free_block = next_free_block;
	}

	SLJIT_ASSERT((total_size && free_blocks) || (!total_size && !free_blocks));
	allocator_release_lock();
}

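/* Returns the offset that, added to a block address in the writable mapping,
   gives the corresponding address in the executable mapping. */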
SLJIT_API_FUNC_ATTRIBUTE sljit_sw sljit_exec_offset(void* ptr)
{
	return ((struct block_header *)(ptr))[-1].executable_offset;
}