1 /* 2 * Memory helpers 3 */ 4 #include <sys/types.h> 5 #include <sys/stat.h> 6 #include <fcntl.h> 7 #include <unistd.h> 8 #include <sys/mman.h> 9 10 #include "fio.h" 11 #ifndef FIO_NO_HAVE_SHM_H 12 #include <sys/shm.h> 13 #endif 14 15 void fio_unpin_memory(struct thread_data *td) 16 { 17 if (td->pinned_mem) { 18 dprint(FD_MEM, "unpinning %llu bytes\n", td->o.lockmem); 19 if (munlock(td->pinned_mem, td->o.lockmem) < 0) 20 perror("munlock"); 21 munmap(td->pinned_mem, td->o.lockmem); 22 td->pinned_mem = NULL; 23 } 24 } 25 26 int fio_pin_memory(struct thread_data *td) 27 { 28 unsigned long long phys_mem; 29 30 if (!td->o.lockmem) 31 return 0; 32 33 dprint(FD_MEM, "pinning %llu bytes\n", td->o.lockmem); 34 35 /* 36 * Don't allow mlock of more than real_mem-128MB 37 */ 38 phys_mem = os_phys_mem(); 39 if (phys_mem) { 40 if ((td->o.lockmem + 128 * 1024 * 1024) > phys_mem) { 41 td->o.lockmem = phys_mem - 128 * 1024 * 1024; 42 log_info("fio: limiting mlocked memory to %lluMB\n", 43 td->o.lockmem >> 20); 44 } 45 } 46 47 td->pinned_mem = mmap(NULL, td->o.lockmem, PROT_READ | PROT_WRITE, 48 MAP_PRIVATE | OS_MAP_ANON, -1, 0); 49 if (td->pinned_mem == MAP_FAILED) { 50 perror("malloc locked mem"); 51 td->pinned_mem = NULL; 52 return 1; 53 } 54 if (mlock(td->pinned_mem, td->o.lockmem) < 0) { 55 perror("mlock"); 56 munmap(td->pinned_mem, td->o.lockmem); 57 td->pinned_mem = NULL; 58 return 1; 59 } 60 61 return 0; 62 } 63 64 static int alloc_mem_shm(struct thread_data *td, unsigned int total_mem) 65 { 66 int flags = IPC_CREAT | S_IRUSR | S_IWUSR; 67 68 if (td->o.mem_type == MEM_SHMHUGE) { 69 unsigned long mask = td->o.hugepage_size - 1; 70 71 flags |= SHM_HUGETLB; 72 total_mem = (total_mem + mask) & ~mask; 73 } 74 75 td->shm_id = shmget(IPC_PRIVATE, total_mem, flags); 76 dprint(FD_MEM, "shmget %u, %d\n", total_mem, td->shm_id); 77 if (td->shm_id < 0) { 78 td_verror(td, errno, "shmget"); 79 if (geteuid() != 0 && (errno == ENOMEM || errno == EPERM)) 80 log_err("fio: you may need to run this job as root\n"); 81 if (td->o.mem_type == MEM_SHMHUGE) { 82 if (errno == EINVAL) { 83 log_err("fio: check that you have free huge" 84 " pages and that hugepage-size is" 85 " correct.\n"); 86 } else if (errno == ENOSYS) { 87 log_err("fio: your system does not appear to" 88 " support huge pages.\n"); 89 } else if (errno == ENOMEM) { 90 log_err("fio: no huge pages available, do you" 91 " need to alocate some? See HOWTO.\n"); 92 } 93 } 94 95 return 1; 96 } 97 98 td->orig_buffer = shmat(td->shm_id, NULL, 0); 99 dprint(FD_MEM, "shmat %d, %p\n", td->shm_id, td->orig_buffer); 100 if (td->orig_buffer == (void *) -1) { 101 td_verror(td, errno, "shmat"); 102 td->orig_buffer = NULL; 103 return 1; 104 } 105 106 return 0; 107 } 108 109 static void free_mem_shm(struct thread_data *td) 110 { 111 struct shmid_ds sbuf; 112 113 dprint(FD_MEM, "shmdt/ctl %d %p\n", td->shm_id, td->orig_buffer); 114 shmdt(td->orig_buffer); 115 shmctl(td->shm_id, IPC_RMID, &sbuf); 116 } 117 118 static int alloc_mem_mmap(struct thread_data *td, size_t total_mem) 119 { 120 int flags = 0; 121 122 td->mmapfd = 1; 123 124 if (td->o.mem_type == MEM_MMAPHUGE) { 125 unsigned long mask = td->o.hugepage_size - 1; 126 127 /* TODO: make sure the file is a real hugetlbfs file */ 128 if (!td->o.mmapfile) 129 flags |= MAP_HUGETLB; 130 total_mem = (total_mem + mask) & ~mask; 131 } 132 133 if (td->o.mmapfile) { 134 td->mmapfd = open(td->o.mmapfile, O_RDWR|O_CREAT, 0644); 135 136 if (td->mmapfd < 0) { 137 td_verror(td, errno, "open mmap file"); 138 td->orig_buffer = NULL; 139 return 1; 140 } 141 if (td->o.mem_type != MEM_MMAPHUGE && 142 ftruncate(td->mmapfd, total_mem) < 0) { 143 td_verror(td, errno, "truncate mmap file"); 144 td->orig_buffer = NULL; 145 return 1; 146 } 147 if (td->o.mem_type == MEM_MMAPHUGE) 148 flags |= MAP_SHARED; 149 else 150 flags |= MAP_PRIVATE; 151 } else 152 flags |= OS_MAP_ANON | MAP_PRIVATE; 153 154 td->orig_buffer = mmap(NULL, total_mem, PROT_READ | PROT_WRITE, flags, 155 td->mmapfd, 0); 156 dprint(FD_MEM, "mmap %llu/%d %p\n", (unsigned long long) total_mem, 157 td->mmapfd, td->orig_buffer); 158 if (td->orig_buffer == MAP_FAILED) { 159 td_verror(td, errno, "mmap"); 160 td->orig_buffer = NULL; 161 if (td->mmapfd != 1) { 162 close(td->mmapfd); 163 if (td->o.mmapfile) 164 unlink(td->o.mmapfile); 165 } 166 167 return 1; 168 } 169 170 return 0; 171 } 172 173 static void free_mem_mmap(struct thread_data *td, size_t total_mem) 174 { 175 dprint(FD_MEM, "munmap %llu %p\n", (unsigned long long) total_mem, 176 td->orig_buffer); 177 munmap(td->orig_buffer, td->orig_buffer_size); 178 if (td->o.mmapfile) { 179 close(td->mmapfd); 180 unlink(td->o.mmapfile); 181 free(td->o.mmapfile); 182 } 183 } 184 185 static int alloc_mem_malloc(struct thread_data *td, size_t total_mem) 186 { 187 td->orig_buffer = malloc(total_mem); 188 dprint(FD_MEM, "malloc %llu %p\n", (unsigned long long) total_mem, 189 td->orig_buffer); 190 191 return td->orig_buffer == NULL; 192 } 193 194 static void free_mem_malloc(struct thread_data *td) 195 { 196 dprint(FD_MEM, "free malloc mem %p\n", td->orig_buffer); 197 free(td->orig_buffer); 198 } 199 200 /* 201 * Set up the buffer area we need for io. 202 */ 203 int allocate_io_mem(struct thread_data *td) 204 { 205 size_t total_mem; 206 int ret = 0; 207 208 if (td->io_ops->flags & FIO_NOIO) 209 return 0; 210 211 total_mem = td->orig_buffer_size; 212 213 if (td->o.odirect || td->o.mem_align || td->o.oatomic || 214 (td->io_ops->flags & FIO_MEMALIGN)) { 215 total_mem += page_mask; 216 if (td->o.mem_align && td->o.mem_align > page_size) 217 total_mem += td->o.mem_align - page_size; 218 } 219 220 dprint(FD_MEM, "Alloc %llu for buffers\n", (unsigned long long) total_mem); 221 222 if (td->o.mem_type == MEM_MALLOC) 223 ret = alloc_mem_malloc(td, total_mem); 224 else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) 225 ret = alloc_mem_shm(td, total_mem); 226 else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE) 227 ret = alloc_mem_mmap(td, total_mem); 228 else { 229 log_err("fio: bad mem type: %d\n", td->o.mem_type); 230 ret = 1; 231 } 232 233 if (ret) 234 td_verror(td, ENOMEM, "iomem allocation"); 235 236 return ret; 237 } 238 239 void free_io_mem(struct thread_data *td) 240 { 241 unsigned int total_mem; 242 243 total_mem = td->orig_buffer_size; 244 if (td->o.odirect || td->o.oatomic) 245 total_mem += page_mask; 246 247 if (td->o.mem_type == MEM_MALLOC) 248 free_mem_malloc(td); 249 else if (td->o.mem_type == MEM_SHM || td->o.mem_type == MEM_SHMHUGE) 250 free_mem_shm(td); 251 else if (td->o.mem_type == MEM_MMAP || td->o.mem_type == MEM_MMAPHUGE) 252 free_mem_mmap(td, total_mem); 253 else 254 log_err("Bad memory type %u\n", td->o.mem_type); 255 256 td->orig_buffer = NULL; 257 td->orig_buffer_size = 0; 258 } 259