/*
 * Memory helpers
 */
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#include "fio.h"
#ifndef FIO_NO_HAVE_SHM_H
#include <sys/shm.h>
#endif

void fio_unpin_memory(struct thread_data *td)
{
	if (td->pinned_mem) {
		dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem);
		if (munlock(td->pinned_mem, td->o.lockmem) < 0)
			perror("munlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
	}
}

int fio_pin_memory(struct thread_data *td)
{
	unsigned long long phys_mem;

	if (!td->o.lockmem)
		return 0;

	dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem);

	/*
	 * Don't allow mlock of more than real_mem - 128MiB
	 */
	phys_mem = os_phys_mem();
	if (phys_mem) {
		if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) {
			td->o.lockmem = phys_mem - 128 * 1024 * 1024;
			log_info("fio: limiting mlocked memory to %lluMiB\n",
							td->o.lockmem >> 20);
		}
	}

	td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (td->pinned_mem == MAP_FAILED) {
		perror("malloc locked mem");
		td->pinned_mem = NULL;
		return 1;
	}
	if (mlock(td->pinned_mem, td->o.lockmem) < 0) {
		perror("mlock");
		munmap(td->pinned_mem, td->o.lockmem);
		td->pinned_mem = NULL;
		return 1;
	}

	return 0;
}
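
/*
 * Illustrative example (not part of the build): with 4GiB of physical
 * memory, a request to lock 4GiB gets clamped so that 128MiB of headroom
 * remains for the rest of the system:
 *
 *	phys_mem = 4096MiB, lockmem = 4096MiB
 *	lockmem  = 4096MiB - 128MiB = 3968MiB
 *
 * and fio_pin_memory() logs "limiting mlocked memory to 3968MiB".
 */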

static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem)
{
#ifndef CONFIG_NO_SHM
	int flags = IPC_CREAT | S_IRUSR | S_IWUSR;

	if (td->o.mem_type == MEM_SHMHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		flags |= SHM_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	td->shm_id = shmget(IPC_PRIVATE, total_mem, flags);
	dprint(FD_MEM, "shmget %u, %d\n", total_mem, td->shm_id);
	if (td->shm_id < 0) {
		td_verror(td, errno, "shmget");
		if (geteuid() != 0 && (errno == ENOMEM || errno == EPERM))
			log_err("fio: you may need to run this job as root\n");
		if (td->o.mem_type == MEM_SHMHUGE) {
			if (errno == EINVAL) {
				log_err("fio: check that you have free huge"
					" pages and that hugepage-size is"
					" correct.\n");
			} else if (errno == ENOSYS) {
				log_err("fio: your system does not appear to"
					" support huge pages.\n");
			} else if (errno == ENOMEM) {
				log_err("fio: no huge pages available, do you"
					" need to allocate some? See HOWTO.\n");
			}
		}

		return 1;
	}

	td->orig_buffer = shmat(td->shm_id, NULL, 0);
	dprint(FD_MEM, "shmat %d, %p\n", td->shm_id, td->orig_buffer);
	if (td->orig_buffer == (void *) -1) {
		td_verror(td, errno, "shmat");
		td->orig_buffer = NULL;
		return 1;
	}

	return 0;
#else
	log_err("fio: shm not supported\n");
	return 1;
#endif
}
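
/*
 * Illustrative example (not part of the build): the hugepage round-up
 * used above in alloc_mem_shm(), and below in alloc_mem_mmap(), is plain
 * mask arithmetic. With hugepage_size = 2MiB, mask = 0x1fffff, so a 3MiB
 * request is rounded up to 4MiB:
 *
 *	(0x300000 + 0x1fffff) & ~0x1fffff == 0x400000
 */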

static void free_mem_shm(struct thread_data *td)
{
#ifndef CONFIG_NO_SHM
	struct shmid_ds sbuf;

	dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer);
	shmdt(td->orig_buffer);
	shmctl(td->shm_id, IPC_RMID, &sbuf);
#endif
}
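
/*
 * Note on the ordering above: shmctl(IPC_RMID) only marks the segment
 * for destruction; the kernel frees it once the last attachment is gone,
 * so detaching with shmdt() first is safe.
 */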

static int alloc_mem_mmap(struct thread_data *td, size_t total_mem)
{
	int flags = 0;

	td->mmapfd = -1;

	if (td->o.mem_type == MEM_MMAPHUGE) {
		unsigned long mask = td->o.hugepage_size - 1;

		/* TODO: make sure the file is a real hugetlbfs file */
		if (!td->o.mmapfile)
			flags |= MAP_HUGETLB;
		total_mem = (total_mem + mask) & ~mask;
	}

	if (td->o.mmapfile) {
		td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644);

		if (td->mmapfd < 0) {
			td_verror(td, errno, "open mmap file");
			td->orig_buffer = NULL;
			return 1;
		}
		if (td->o.mem_type != MEM_MMAPHUGE &&
		    td->o.mem_type != MEM_MMAPSHARED &&
		    ftruncate(td->mmapfd, total_mem) < 0) {
			td_verror(td, errno, "truncate mmap file");
			/* don't leak the fd on the error path */
			close(td->mmapfd);
			td->mmapfd = -1;
			td->orig_buffer = NULL;
			return 1;
		}
		if (td->o.mem_type == MEM_MMAPHUGE ||
		    td->o.mem_type == MEM_MMAPSHARED)
			flags |= MAP_SHARED;
		else
			flags |= MAP_PRIVATE;
	} else
		flags |= OS_MAP_ANON | MAP_PRIVATE;

	td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags,
				td->mmapfd, 0);
	dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem,
						td->mmapfd, td->orig_buffer);
	if (td->orig_buffer == MAP_FAILED) {
		td_verror(td, errno, "mmap");
		td->orig_buffer = NULL;
		if (td->mmapfd != -1) {
			close(td->mmapfd);
			td->mmapfd = -1;
			if (td->o.mmapfile)
				unlink(td->o.mmapfile);
		}

		return 1;
	}

	return 0;
}
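
/*
 * Example job file snippet (hypothetical path) selecting this allocator:
 * "mem=mmaphuge:/mnt/hugetlbfs/fio-buf" takes the MEM_MMAPHUGE path with
 * a backing file, while a bare "mem=mmaphuge" uses an anonymous
 * MAP_HUGETLB mapping:
 *
 *	[global]
 *	mem=mmaphuge:/mnt/hugetlbfs/fio-buf
 */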

static void free_mem_mmap(struct thread_data *td, size_t total_mem)
{
	dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem,
						td->orig_buffer);
	munmap(td->orig_buffer, td->orig_buffer_size);
	if (td->o.mmapfile) {
		if (td->mmapfd != -1)
			close(td->mmapfd);
		unlink(td->o.mmapfile);
		free(td->o.mmapfile);
	}
}

static int alloc_mem_malloc(struct thread_data *td, size_t total_mem)
{
	td->orig_buffer = malloc(total_mem);
	dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem,
							td->orig_buffer);

	return td->orig_buffer == NULL;
}

static void free_mem_malloc(struct thread_data *td)
{
	dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer);
	free(td->orig_buffer);
}

static int alloc_mem_cudamalloc(struct thread_data *td, size_t total_mem)
{
#ifdef CONFIG_CUDA
	CUresult ret;
	char name[128];

	ret = cuInit(0);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: failed to initialize cuda driver api\n");
		return 1;
	}

	ret = cuDeviceGetCount(&td->gpu_dev_cnt);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: failed to get device count\n");
		return 1;
	}
	dprint(FD_MEM, "found %d GPU devices\n", td->gpu_dev_cnt);

	if (td->gpu_dev_cnt == 0) {
		log_err("fio: no GPU device found. "
			"Cannot perform GPUDirect RDMA.\n");
		return 1;
	}

	td->gpu_dev_id = td->o.gpu_dev_id;
	ret = cuDeviceGet(&td->cu_dev, td->gpu_dev_id);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: failed to get GPU device\n");
		return 1;
	}

	ret = cuDeviceGetName(name, sizeof(name), td->gpu_dev_id);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: failed to get device name\n");
		return 1;
	}
	dprint(FD_MEM, "dev_id = [%d], device name = [%s]\n",
	       td->gpu_dev_id, name);

	ret = cuCtxCreate(&td->cu_ctx, CU_CTX_MAP_HOST, td->cu_dev);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: failed to create cuda context: %d\n", ret);
		return 1;
	}

	ret = cuMemAlloc(&td->dev_mem_ptr, total_mem);
	if (ret != CUDA_SUCCESS) {
		log_err("fio: cuMemAlloc %zu bytes failed\n", total_mem);
		return 1;
	}
	td->orig_buffer = (void *) td->dev_mem_ptr;

	dprint(FD_MEM, "cudaMalloc %llu %p\n",
	       (unsigned long long) total_mem, td->orig_buffer);
	return 0;
#else
	return -EINVAL;
#endif
}
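
/*
 * Note: on the CONFIG_CUDA path above, orig_buffer holds a CUdeviceptr
 * cast to void *. It is a GPU address and must not be dereferenced on
 * the host; it is only useful to GPUDirect RDMA capable IO engines.
 */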

static void free_mem_cudamalloc(struct thread_data *td)
{
#ifdef CONFIG_CUDA
	if (td->dev_mem_ptr != NULL)
		cuMemFree(td->dev_mem_ptr);

	if (cuCtxDestroy(td->cu_ctx) != CUDA_SUCCESS)
		log_err("fio: failed to destroy cuda context\n");
#endif
}

/*
 * Set up the buffer area we need for io.
 */
int allocate_io_mem(struct thread_data *td)
{
	size_t total_mem;
	int ret = 0;

	if (td_ioengine_flagged(td, FIO_NOIO))
		return 0;

	total_mem = td->orig_buffer_size;

	if (td->o.odirect || td->o.mem_align || td->o.oatomic ||
	    td_ioengine_flagged(td, FIO_MEMALIGN)) {
		total_mem += page_mask;
		if (td->o.mem_align && td->o.mem_align > page_size)
			total_mem += td->o.mem_align - page_size;
	}

	dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem);

	/*
	 * If the IO engine has hooks to allocate/free memory, use those. But
	 * error out if the user explicitly asked for something else.
	 */
	if (td->io_ops->iomem_alloc) {
		if (fio_option_is_set(&td->o, mem_type)) {
			log_err("fio: option 'mem/iomem' conflicts with specified IO engine\n");
			ret = 1;
		} else
			ret = td->io_ops->iomem_alloc(td, total_mem);
	} else if (td->o.mem_type == MEM_MALLOC)
		ret = alloc_mem_malloc(td, total_mem);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		ret = alloc_mem_shm(td, total_mem);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE ||
		 td->o.mem_type == MEM_MMAPSHARED)
		ret = alloc_mem_mmap(td, total_mem);
	else if (td->o.mem_type == MEM_CUDA_MALLOC)
		ret = alloc_mem_cudamalloc(td, total_mem);
	else {
		log_err("fio: bad mem type: %d\n", td->o.mem_type);
		ret = 1;
	}

	if (ret)
		td_verror(td, ENOMEM, "iomem allocation");

	return ret;
}
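
/*
 * Illustrative example (not part of the build) of the padding above:
 * with odirect=1, page_size = 4096 (page_mask = 4095) and mem_align =
 * 8192, the allocation is grown so a suitably aligned buffer fits:
 *
 *	total_mem = orig_buffer_size + 4095 + (8192 - 4096)
 */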

void free_io_mem(struct thread_data *td)
{
	size_t total_mem;

	total_mem = td->orig_buffer_size;
	if (td->o.odirect || td->o.oatomic)
		total_mem += page_mask;

	if (td->io_ops->iomem_alloc) {
		if (td->io_ops->iomem_free)
			td->io_ops->iomem_free(td);
	} else if (td->o.mem_type == MEM_MALLOC)
		free_mem_malloc(td);
	else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE)
		free_mem_shm(td);
	else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE ||
		 td->o.mem_type == MEM_MMAPSHARED)
		free_mem_mmap(td, total_mem);
	else if (td->o.mem_type == MEM_CUDA_MALLOC)
		free_mem_cudamalloc(td);
	else
		log_err("Bad memory type %u\n", td->o.mem_type);

	td->orig_buffer = NULL;
	td->orig_buffer_size = 0;
}
    360