/*
 * Copyright (c) 2016 Fujitsu Ltd.
 * Author: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
 * Ported: Guangwen Feng <fenggw-fnst@cn.fujitsu.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This is a regression test for the race condition between move_pages()
 * and freeing hugepages, where move_pages() calls follow_page(FOLL_GET)
 * for hugepages internally and tries to get its refcount without
 * preventing concurrent freeing.
 *
 * This test can crash a buggy kernel; the bug was fixed in:
 *
 *  commit e66f17ff71772b209eed39de35aaa99ba819c93d
 *  Author: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
 *  Date:   Wed Feb 11 15:25:22 2015 -0800
 *
 *      mm/hugetlb: take page table lock in follow_huge_pmd()
 */

#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

#include "tst_test.h"
#include "move_pages_support.h"
#include "lapi/mmap.h"

#ifdef HAVE_NUMA_V2

#define LOOPS	1000
#define PATH_MEMINFO	"/proc/meminfo"
#define PATH_NR_HUGEPAGES	"/proc/sys/vm/nr_hugepages"
#define PATH_HUGEPAGES	"/sys/kernel/mm/hugepages/"
#define TEST_PAGES	2
#define TEST_NODES	2

static int pgsz, hpsz;
static long orig_hugepages = -1;
static char path_hugepages_node1[PATH_MAX];
static char path_hugepages_node2[PATH_MAX];
static long orig_hugepages_node1 = -1;
static long orig_hugepages_node2 = -1;
static unsigned int node1, node2;
static void *addr;

/*
 * Child: endlessly migrate the parent's hugepage area between node1 and
 * node2 with move_pages(), racing against the parent's map/unmap loop.
 */
static void do_child(void)
{
	int test_pages = TEST_PAGES * hpsz / pgsz;
	int i, j;
	int *nodes, *status;
	void **pages;
	pid_t ppid = getppid();

	pages = SAFE_MALLOC(sizeof(char *) * test_pages);
	nodes = SAFE_MALLOC(sizeof(int) * test_pages);
	status = SAFE_MALLOC(sizeof(int) * test_pages);

	for (i = 0; i < test_pages; i++)
		pages[i] = addr + i * pgsz;

	for (i = 0; ; i++) {
		/* Alternate the target node on each iteration */
		for (j = 0; j < test_pages; j++) {
			if (i % 2 == 0)
				nodes[j] = node1;
			else
				nodes[j] = node2;
			status[j] = 0;
		}

		TEST(numa_move_pages(ppid, test_pages,
			pages, nodes, status, MPOL_MF_MOVE_ALL));
		if (TEST_RETURN) {
			tst_res(TFAIL | TTERRNO, "move_pages failed");
			break;
		}
	}

	exit(0);
}
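
/*
 * Parent: repeatedly map, touch and unmap hugepages at the same address
 * while the child migrates them. On a buggy kernel the child's
 * follow_page(FOLL_GET) can take a reference on a hugepage that is
 * concurrently being freed here, crashing the kernel.
 */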
static void do_test(void)
{
	int i;
	pid_t cpid = -1;
	int status;

	/* Map once to learn the address the kernel picks, then free it */
	addr = SAFE_MMAP(NULL, TEST_PAGES * hpsz, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	SAFE_MUNMAP(addr, TEST_PAGES * hpsz);

	cpid = SAFE_FORK();
	if (cpid == 0)
		do_child();

	for (i = 0; i < LOOPS; i++) {
		void *ptr;

		ptr = SAFE_MMAP(NULL, TEST_PAGES * hpsz,
			PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		if (ptr != addr)
			tst_brk(TBROK, "Failed to mmap at desired addr");

		memset(addr, 0, TEST_PAGES * hpsz);

		SAFE_MUNMAP(addr, TEST_PAGES * hpsz);
	}

	if (i == LOOPS) {
		SAFE_KILL(cpid, SIGKILL);
		SAFE_WAITPID(cpid, &status, 0);
		if (!WIFEXITED(status))
			tst_res(TPASS, "Bug not reproduced");
	}
}

/*
 * Sanity check that 'size' bytes worth of hugepages can be allocated,
 * bound and locked on the given node, then free them again.
 */
static void alloc_free_huge_on_node(unsigned int node, size_t size)
{
	char *mem;
	long ret;
	struct bitmask *bm;

	tst_res(TINFO, "Allocating and freeing %zu hugepages on node %u",
		size / hpsz, node);

	mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (mem == MAP_FAILED) {
		if (errno == ENOMEM)
			tst_brk(TCONF, "Cannot allocate huge pages");

		tst_brk(TBROK | TERRNO, "mmap(..., MAP_HUGETLB, ...) failed");
	}

	bm = numa_bitmask_alloc(numa_max_possible_node() + 1);
	if (!bm)
		tst_brk(TBROK | TERRNO, "numa_bitmask_alloc() failed");

	numa_bitmask_setbit(bm, node);

	ret = mbind(mem, size, MPOL_BIND, bm->maskp, bm->size + 1, 0);
	if (ret) {
		if (errno == ENOMEM)
			tst_brk(TCONF, "Cannot mbind huge pages");

		tst_brk(TBROK | TERRNO, "mbind() failed");
	}

	/* mlock() faults the pages in, forcing the actual allocation */
	TEST(mlock(mem, size));
	if (TEST_RETURN) {
		SAFE_MUNMAP(mem, size);
		if (TEST_ERRNO == ENOMEM || TEST_ERRNO == EAGAIN)
			tst_brk(TCONF, "Cannot lock huge pages");
		tst_brk(TBROK | TTERRNO, "mlock failed");
	}

	numa_bitmask_free(bm);

	SAFE_MUNMAP(mem, size);
}
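
/*
 * Reserve four hugepages on each test node (or eight in the global pool
 * when the per-node sysfs knobs are missing) and verify they are usable.
 */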
static void setup(void)
{
	int memfree, ret;

	check_config(TEST_NODES);

	if (access(PATH_HUGEPAGES, F_OK))
		tst_brk(TCONF, "Huge page not supported");

	ret = get_allowed_nodes(NH_MEMS, TEST_NODES, &node1, &node2);
	if (ret < 0)
		tst_brk(TBROK | TERRNO, "get_allowed_nodes: %d", ret);

	pgsz = (int)get_page_size();
	SAFE_FILE_LINES_SCANF(PATH_MEMINFO, "Hugepagesize: %d", &hpsz);

	SAFE_FILE_LINES_SCANF(PATH_MEMINFO, "MemFree: %d", &memfree);
	tst_res(TINFO, "Free RAM %d kB", memfree);

	if (4 * hpsz > memfree)
		tst_brk(TBROK, "Not enough free RAM");

	snprintf(path_hugepages_node1, sizeof(path_hugepages_node1),
		"/sys/devices/system/node/node%u/hugepages/hugepages-%dkB/nr_hugepages",
		node1, hpsz);

	snprintf(path_hugepages_node2, sizeof(path_hugepages_node2),
		"/sys/devices/system/node/node%u/hugepages/hugepages-%dkB/nr_hugepages",
		node2, hpsz);

	if (!access(path_hugepages_node1, F_OK)) {
		SAFE_FILE_SCANF(path_hugepages_node1,
			"%ld", &orig_hugepages_node1);
		tst_res(TINFO,
			"Increasing %dkB hugepages pool on node %u to %ld",
			hpsz, node1, orig_hugepages_node1 + 4);
		SAFE_FILE_PRINTF(path_hugepages_node1,
			"%ld", orig_hugepages_node1 + 4);
	}

	if (!access(path_hugepages_node2, F_OK)) {
		SAFE_FILE_SCANF(path_hugepages_node2,
			"%ld", &orig_hugepages_node2);
		tst_res(TINFO,
			"Increasing %dkB hugepages pool on node %u to %ld",
			hpsz, node2, orig_hugepages_node2 + 4);
		SAFE_FILE_PRINTF(path_hugepages_node2,
			"%ld", orig_hugepages_node2 + 4);
	}

	/* hpsz was read from meminfo in kB, convert it to bytes */
	hpsz *= 1024;

	if (orig_hugepages_node1 == -1 || orig_hugepages_node2 == -1) {
		SAFE_FILE_SCANF(PATH_NR_HUGEPAGES, "%ld", &orig_hugepages);
		tst_res(TINFO, "Increasing global hugepages pool to %ld",
			orig_hugepages + 8);
		SAFE_FILE_PRINTF(PATH_NR_HUGEPAGES, "%ld", orig_hugepages + 8);
	}

	alloc_free_huge_on_node(node1, 4L * hpsz);
	alloc_free_huge_on_node(node2, 4L * hpsz);
}

/* Restore the hugepage pools to their original sizes */
static void cleanup(void)
{
	if (orig_hugepages != -1)
		SAFE_FILE_PRINTF(PATH_NR_HUGEPAGES, "%ld", orig_hugepages);

	if (orig_hugepages_node1 != -1) {
		SAFE_FILE_PRINTF(path_hugepages_node1,
			"%ld", orig_hugepages_node1);
	}

	if (orig_hugepages_node2 != -1) {
		SAFE_FILE_PRINTF(path_hugepages_node2,
			"%ld", orig_hugepages_node2);
	}
}

static struct tst_test test = {
	.min_kver = "2.6.32",
	.needs_root = 1,
	.forks_child = 1,
	.setup = setup,
	.cleanup = cleanup,
	.test_all = do_test,
};

#else
TST_TEST_TCONF("test requires libnuma >= 2 and its development packages");
#endif