/*
 * simple memory allocator, backed by mmap() so that it hands out memory
 * that can be shared across processes and threads
 */
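
/*
 * Editor's note: an illustrative usage sketch (not part of the allocator).
 * Callers initialize once, allocate and free from any process or thread
 * sharing the mapping, and tear down at exit:
 *
 *	sinit();
 *	void *p = smalloc(128);
 *	sfree(p);
 *	scleanup();
 */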
#include <sys/mman.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <limits.h>
#include <fcntl.h>

#include "mutex.h"
#include "arch/arch.h"
#include "os/os.h"
#include "smalloc.h"
#include "log.h"

#define SMALLOC_REDZONE		/* define to detect memory corruption */

#define SMALLOC_BPB	32	/* block size, bytes-per-bit in bitmap */
#define SMALLOC_BPI	(sizeof(unsigned int) * 8)
#define SMALLOC_BPL	(SMALLOC_BPB * SMALLOC_BPI)
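
/*
 * Editor's note: with 32-bit ints the geometry works out as follows: one
 * bitmap bit covers SMALLOC_BPB (32) bytes and one bitmap word covers
 * SMALLOC_BPL (32 * 32 = 1024) bytes, so a 16MB pool needs 16384 bitmap
 * words, i.e. 64KB of bitmap.
 */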

#define INITIAL_SIZE	16*1024*1024	/* new pool size */
#define MAX_POOLS	8		/* maximum number of pools to setup */

#define SMALLOC_PRE_RED		0xdeadbeefU
#define SMALLOC_POST_RED	0x5aa55aa5U

unsigned int smalloc_pool_size = INITIAL_SIZE;
static const int int_mask = sizeof(int) - 1;

struct pool {
	struct fio_mutex *lock;			/* protects this pool */
	void *map;				/* map of blocks */
	unsigned int *bitmap;			/* blocks free/busy map */
	size_t free_blocks;			/* free blocks */
	size_t nr_blocks;			/* total blocks */
	size_t next_non_full;
	size_t mmap_size;
};

struct block_hdr {
	size_t size;
#ifdef SMALLOC_REDZONE
	unsigned int prered;
#endif
};

static struct pool mp[MAX_POOLS];
static unsigned int nr_pools;
static unsigned int last_pool;
static struct fio_rwlock *lock;

static inline void pool_lock(struct pool *pool)
{
	fio_mutex_down(pool->lock);
}

static inline void pool_unlock(struct pool *pool)
{
	fio_mutex_up(pool->lock);
}

static inline void global_read_lock(void)
{
	fio_rwlock_read(lock);
}

static inline void global_read_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline void global_write_lock(void)
{
	fio_rwlock_write(lock);
}

static inline void global_write_unlock(void)
{
	fio_rwlock_unlock(lock);
}

static inline int ptr_valid(struct pool *pool, void *ptr)
{
	unsigned int pool_size = pool->nr_blocks * SMALLOC_BPL;

	return (ptr >= pool->map) && (ptr < pool->map + pool_size);
}

static inline size_t size_to_blocks(size_t size)
{
	return (size + SMALLOC_BPB - 1) / SMALLOC_BPB;
}
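
/*
 * Editor's note: size_to_blocks() rounds up, e.g. sizes 1..32 map to one
 * block and 33..64 map to two.
 */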

static int blocks_iter(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks,
		       int (*func)(unsigned int *map, unsigned int mask))
{
	while (nr_blocks) {
		unsigned int this_blocks, mask;
		unsigned int *map;

		if (pool_idx >= pool->nr_blocks)
			return 0;

		map = &pool->bitmap[pool_idx];

		/* clamp to the bits remaining in this bitmap word */
		this_blocks = nr_blocks;
		if (this_blocks + idx > SMALLOC_BPI)
			this_blocks = SMALLOC_BPI - idx;

		if (this_blocks == SMALLOC_BPI)
			mask = -1U;
		else
			mask = ((1U << this_blocks) - 1) << idx;

		if (!func(map, mask))
			return 0;

		/* continue in the next word, starting at bit 0 */
		nr_blocks -= this_blocks;
		idx = 0;
		pool_idx++;
	}

	return 1;
}
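
/*
 * Editor's note: for example, nr_blocks = 3 at idx = 2 yields
 * mask = ((1U << 3) - 1) << 2 = 0x1c, i.e. bits 2..4 of the word.
 */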

/* returns true if all blocks covered by mask are free */
static int mask_cmp(unsigned int *map, unsigned int mask)
{
	return !(*map & mask);
}

/* marks the blocks covered by mask as free */
static int mask_clear(unsigned int *map, unsigned int mask)
{
	assert((*map & mask) == mask);
	*map &= ~mask;
	return 1;
}

/* marks the blocks covered by mask as busy */
static int mask_set(unsigned int *map, unsigned int mask)
{
	assert(!(*map & mask));
	*map |= mask;
	return 1;
}

static int blocks_free(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	return blocks_iter(pool, pool_idx, idx, nr_blocks, mask_cmp);
}

static void set_blocks(struct pool *pool, unsigned int pool_idx,
		       unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_set);
}

static void clear_blocks(struct pool *pool, unsigned int pool_idx,
			 unsigned int idx, size_t nr_blocks)
{
	blocks_iter(pool, pool_idx, idx, nr_blocks, mask_clear);
}

/* returns the index of the next zero bit at or above 'start' */
static int find_next_zero(int word, int start)
{
	assert(word != -1U);
	word >>= start;
	return ffz(word) + start;
}
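
/*
 * Editor's note: e.g. find_next_zero(0xf, 2) shifts the word down to 0x3,
 * ffz() finds bit 2, and the result is 2 + 2 = 4, the first clear bit at
 * or above the starting position.
 */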

static int add_pool(struct pool *pool, unsigned int alloc_size)
{
	int bitmap_blocks;
	int mmap_flags;
	void *ptr;

#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
#endif
	alloc_size += sizeof(struct block_hdr);
	if (alloc_size < INITIAL_SIZE)
		alloc_size = INITIAL_SIZE;

	/* round up to nearest full number of blocks */
	alloc_size = (alloc_size + SMALLOC_BPL - 1) & ~(SMALLOC_BPL - 1);
	bitmap_blocks = alloc_size / SMALLOC_BPL;
	alloc_size += bitmap_blocks * sizeof(unsigned int);
	pool->mmap_size = alloc_size;

	pool->nr_blocks = bitmap_blocks;
	/* each bitmap word tracks SMALLOC_BPI blocks */
	pool->free_blocks = bitmap_blocks * SMALLOC_BPI;

	mmap_flags = OS_MAP_ANON;
#ifdef CONFIG_ESX
	mmap_flags |= MAP_PRIVATE;
#else
	mmap_flags |= MAP_SHARED;
#endif
	ptr = mmap(NULL, alloc_size, PROT_READ|PROT_WRITE, mmap_flags, -1, 0);

	if (ptr == MAP_FAILED)
		goto out_fail;

	memset(ptr, 0, alloc_size);
	pool->map = ptr;
	pool->bitmap = (void *) ptr + (pool->nr_blocks * SMALLOC_BPL);

	pool->lock = fio_mutex_init(FIO_MUTEX_UNLOCKED);
	if (!pool->lock)
		goto out_fail;

	nr_pools++;
	return 0;
out_fail:
	log_err("smalloc: failed adding pool\n");
	if (pool->map)
		munmap(pool->map, pool->mmap_size);
	return 1;
}
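
/*
 * Editor's note: the resulting mapping is laid out as one region of
 * nr_blocks * SMALLOC_BPL bytes handed out to callers, followed by the
 * nr_blocks bitmap words that track it:
 *
 *	pool->map                              pool->bitmap
 *	|<--------- block storage --------->|<- unsigned int[] ->|
 */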

void sinit(void)
{
	int i, ret;

	lock = fio_rwlock_init();

	for (i = 0; i < MAX_POOLS; i++) {
		ret = add_pool(&mp[i], INITIAL_SIZE);
		if (ret)
			break;
	}

	/*
	 * If we added at least one pool, we should be OK for most
	 * cases.
	 */
	assert(i);
}

static void cleanup_pool(struct pool *pool)
{
	/*
	 * The pool is backed by an anonymous mapping, so unmapping it
	 * is all the cleanup the memory itself needs.
	 */
	munmap(pool->map, pool->mmap_size);

	if (pool->lock)
		fio_mutex_remove(pool->lock);
}

void scleanup(void)
{
	unsigned int i;

	for (i = 0; i < nr_pools; i++)
		cleanup_pool(&mp[i]);

	if (lock)
		fio_rwlock_remove(lock);
}

#ifdef SMALLOC_REDZONE
static void *postred_ptr(struct block_hdr *hdr)
{
	uintptr_t ptr;

	ptr = (uintptr_t) hdr + hdr->size - sizeof(unsigned int);
	ptr = (ptr + int_mask) & ~int_mask;

	return (void *) ptr;
}
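
/*
 * Editor's note: each allocation is laid out as
 *
 *	[block_hdr][user data][post redzone word]
 *
 * The pre redzone lives in the header itself (hdr->prered); postred_ptr()
 * rounds up so the trailing word is int-aligned.
 */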

static void fill_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	hdr->prered = SMALLOC_PRE_RED;
	*postred = SMALLOC_POST_RED;
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
	unsigned int *postred = postred_ptr(hdr);

	if (hdr->prered != SMALLOC_PRE_RED) {
		log_err("smalloc pre redzone destroyed!\n"
			"  ptr=%p, prered=%x, expected %x\n",
				hdr, hdr->prered, SMALLOC_PRE_RED);
		assert(0);
	}
	if (*postred != SMALLOC_POST_RED) {
		log_err("smalloc post redzone destroyed!\n"
			"  ptr=%p, postred=%x, expected %x\n",
				hdr, *postred, SMALLOC_POST_RED);
		assert(0);
	}
}
#else
static void fill_redzone(struct block_hdr *hdr)
{
}

static void sfree_check_redzone(struct block_hdr *hdr)
{
}
#endif

static void sfree_pool(struct pool *pool, void *ptr)
{
	struct block_hdr *hdr;
	unsigned int i, idx;
	unsigned long offset;

	if (!ptr)
		return;

	ptr -= sizeof(*hdr);
	hdr = ptr;

	assert(ptr_valid(pool, ptr));

	sfree_check_redzone(hdr);

	offset = ptr - pool->map;
	i = offset / SMALLOC_BPL;
	idx = (offset % SMALLOC_BPL) / SMALLOC_BPB;

	pool_lock(pool);
	clear_blocks(pool, i, idx, size_to_blocks(hdr->size));
	if (i < pool->next_non_full)
		pool->next_non_full = i;
	pool->free_blocks += size_to_blocks(hdr->size);
	pool_unlock(pool);
}

void sfree(void *ptr)
{
	struct pool *pool = NULL;
	unsigned int i;

	if (!ptr)
		return;

	global_read_lock();

	for (i = 0; i < nr_pools; i++) {
		if (ptr_valid(&mp[i], ptr)) {
			pool = &mp[i];
			break;
		}
	}

	global_read_unlock();

	assert(pool);
	sfree_pool(pool, ptr);
}

static void *__smalloc_pool(struct pool *pool, size_t size)
{
	size_t nr_blocks;
	unsigned int i;
	unsigned int offset;
	unsigned int last_idx;
	void *ret = NULL;

	pool_lock(pool);

	nr_blocks = size_to_blocks(size);
	if (nr_blocks > pool->free_blocks)
		goto fail;

	/*
	 * First-fit scan of the bitmap, starting at the first word that
	 * is known to have free bits.
	 */
	i = pool->next_non_full;
	last_idx = 0;
	offset = -1U;
	while (i < pool->nr_blocks) {
		unsigned int idx;

		if (pool->bitmap[i] == -1U) {
			i++;
			pool->next_non_full = i;
			last_idx = 0;
			continue;
		}

		idx = find_next_zero(pool->bitmap[i], last_idx);
		if (!blocks_free(pool, i, idx, nr_blocks)) {
			/* skip past the range we just rejected */
			idx += nr_blocks;
			if (idx < SMALLOC_BPI)
				last_idx = idx;
			else {
				last_idx = 0;
				while (idx >= SMALLOC_BPI) {
					i++;
					idx -= SMALLOC_BPI;
				}
			}
			continue;
		}
		set_blocks(pool, i, idx, nr_blocks);
		offset = i * SMALLOC_BPL + idx * SMALLOC_BPB;
		break;
	}

	if (i < pool->nr_blocks) {
		pool->free_blocks -= nr_blocks;
		ret = pool->map + offset;
	}
fail:
	pool_unlock(pool);
	return ret;
}
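
/*
 * Editor's note: the byte offset of a hit follows directly from the
 * geometry, e.g. word i = 2, bit idx = 5 maps to
 * 2 * SMALLOC_BPL + 5 * SMALLOC_BPB = 2048 + 160 = 2208 bytes into the
 * pool (with the 32-bit constants above).
 */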

static void *smalloc_pool(struct pool *pool, size_t size)
{
	size_t alloc_size = size + sizeof(struct block_hdr);
	void *ptr;

	/*
	 * Round to int alignment, so that the postred pointer will
	 * be naturally aligned as well.
	 */
#ifdef SMALLOC_REDZONE
	alloc_size += sizeof(unsigned int);
	alloc_size = (alloc_size + int_mask) & ~int_mask;
#endif

	ptr = __smalloc_pool(pool, alloc_size);
	if (ptr) {
		struct block_hdr *hdr = ptr;

		hdr->size = alloc_size;
		fill_redzone(hdr);

		ptr += sizeof(*hdr);
		memset(ptr, 0, size);
	}

	return ptr;
}

void *smalloc(size_t size)
{
	unsigned int i, end_pool;

	if (size != (unsigned int) size)
		return NULL;

	global_write_lock();
	i = last_pool;
	end_pool = nr_pools;

	/*
	 * Start at the pool that last satisfied an allocation, then
	 * wrap around to the pools before it if that fails.
	 */
	do {
		for (; i < end_pool; i++) {
			void *ptr = smalloc_pool(&mp[i], size);

			if (ptr) {
				last_pool = i;
				global_write_unlock();
				return ptr;
			}
		}
		if (last_pool) {
			end_pool = last_pool;
			last_pool = i = 0;
			continue;
		}

		break;
	} while (1);

	global_write_unlock();
	return NULL;
}
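
/*
 * Editor's note: an illustrative (hypothetical) cross-process sketch.
 * Because the pools are mmap()'ed MAP_SHARED (outside of CONFIG_ESX),
 * memory handed out before a fork() stays shared with the child:
 *
 *	unsigned int *val = smalloc(sizeof(*val));
 *
 *	if (fork() == 0) {
 *		*val = 1;	// store is visible to the parent
 *		_exit(0);
 *	}
 */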

void *scalloc(size_t nmemb, size_t size)
{
	void *ret;

	/* guard against overflow in the nmemb * size multiplication */
	if (size && nmemb > SIZE_MAX / size)
		return NULL;

	ret = smalloc(nmemb * size);
	if (ret)
		memset(ret, 0, nmemb * size);

	return ret;
}

char *smalloc_strdup(const char *str)
{
	char *ptr = NULL;

	ptr = smalloc(strlen(str) + 1);
	if (ptr)
		strcpy(ptr, str);
	return ptr;
}
    507