1 #define JEMALLOC_STATS_C_ 2 #include "jemalloc/internal/jemalloc_internal.h" 3 4 #define CTL_GET(n, v, t) do { \ 5 size_t sz = sizeof(t); \ 6 xmallctl(n, (void *)v, &sz, NULL, 0); \ 7 } while (0) 8 9 #define CTL_M2_GET(n, i, v, t) do { \ 10 size_t mib[6]; \ 11 size_t miblen = sizeof(mib) / sizeof(size_t); \ 12 size_t sz = sizeof(t); \ 13 xmallctlnametomib(n, mib, &miblen); \ 14 mib[2] = (i); \ 15 xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ 16 } while (0) 17 18 #define CTL_M2_M4_GET(n, i, j, v, t) do { \ 19 size_t mib[6]; \ 20 size_t miblen = sizeof(mib) / sizeof(size_t); \ 21 size_t sz = sizeof(t); \ 22 xmallctlnametomib(n, mib, &miblen); \ 23 mib[2] = (i); \ 24 mib[4] = (j); \ 25 xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ 26 } while (0) 27 28 /******************************************************************************/ 29 /* Data. */ 30 31 bool opt_stats_print = false; 32 33 size_t stats_cactive = 0; 34 35 /******************************************************************************/ 36 37 static void 38 stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, 39 bool json, bool large, bool huge, unsigned i) 40 { 41 size_t page; 42 bool config_tcache, in_gap, in_gap_prev; 43 unsigned nbins, j; 44 45 CTL_GET("arenas.page", &page, size_t); 46 47 CTL_GET("arenas.nbins", &nbins, unsigned); 48 if (json) { 49 malloc_cprintf(write_cb, cbopaque, 50 "\t\t\t\t\"bins\": [\n"); 51 } else { 52 CTL_GET("config.tcache", &config_tcache, bool); 53 if (config_tcache) { 54 malloc_cprintf(write_cb, cbopaque, 55 "bins: size ind allocated nmalloc" 56 " ndalloc nrequests curregs" 57 " curruns regs pgs util nfills" 58 " nflushes newruns reruns\n"); 59 } else { 60 malloc_cprintf(write_cb, cbopaque, 61 "bins: size ind allocated nmalloc" 62 " ndalloc nrequests curregs" 63 " curruns regs pgs util newruns" 64 " reruns\n"); 65 } 66 } 67 for (j = 0, in_gap = false; j < nbins; j++) { 68 uint64_t nruns; 69 size_t reg_size, run_size, curregs; 70 size_t curruns; 71 uint32_t nregs; 72 uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; 73 uint64_t nreruns; 74 75 CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns, 76 uint64_t); 77 in_gap_prev = in_gap; 78 in_gap = (nruns == 0); 79 80 if (!json && in_gap_prev && !in_gap) { 81 malloc_cprintf(write_cb, cbopaque, 82 " ---\n"); 83 } 84 85 CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); 86 CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); 87 CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t); 88 89 CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, 90 uint64_t); 91 CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, 92 uint64_t); 93 CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, 94 size_t); 95 CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, 96 &nrequests, uint64_t); 97 if (config_tcache) { 98 CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, 99 &nfills, uint64_t); 100 CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, 101 &nflushes, uint64_t); 102 } 103 CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns, 104 uint64_t); 105 CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns, 106 size_t); 107 108 if (json) { 109 malloc_cprintf(write_cb, cbopaque, 110 "\t\t\t\t\t{\n" 111 "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" 112 "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" 113 "\t\t\t\t\t\t\"curregs\": %zu,\n" 114 "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n", 115 nmalloc, 116 ndalloc, 117 curregs, 118 nrequests); 119 if (config_tcache) { 120 malloc_cprintf(write_cb, cbopaque, 121 "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" 122 "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n", 123 nfills, 124 nflushes); 125 } 126 malloc_cprintf(write_cb, cbopaque, 127 "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n" 128 "\t\t\t\t\t\t\"curruns\": %zu\n" 129 "\t\t\t\t\t}%s\n", 130 nreruns, 131 curruns, 132 (j + 1 < nbins) ? "," : ""); 133 } else if (!in_gap) { 134 size_t availregs, milli; 135 char util[6]; /* "x.yyy". */ 136 137 availregs = nregs * curruns; 138 milli = (availregs != 0) ? (1000 * curregs) / availregs 139 : 1000; 140 assert(milli <= 1000); 141 if (milli < 10) { 142 malloc_snprintf(util, sizeof(util), 143 "0.00%zu", milli); 144 } else if (milli < 100) { 145 malloc_snprintf(util, sizeof(util), "0.0%zu", 146 milli); 147 } else if (milli < 1000) { 148 malloc_snprintf(util, sizeof(util), "0.%zu", 149 milli); 150 } else 151 malloc_snprintf(util, sizeof(util), "1"); 152 153 if (config_tcache) { 154 malloc_cprintf(write_cb, cbopaque, 155 "%20zu %3u %12zu %12"FMTu64 156 " %12"FMTu64" %12"FMTu64" %12zu" 157 " %12zu %4u %3zu %-5s %12"FMTu64 158 " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n", 159 reg_size, j, curregs * reg_size, nmalloc, 160 ndalloc, nrequests, curregs, curruns, nregs, 161 run_size / page, util, nfills, nflushes, 162 nruns, nreruns); 163 } else { 164 malloc_cprintf(write_cb, cbopaque, 165 "%20zu %3u %12zu %12"FMTu64 166 " %12"FMTu64" %12"FMTu64" %12zu" 167 " %12zu %4u %3zu %-5s %12"FMTu64 168 " %12"FMTu64"\n", 169 reg_size, j, curregs * reg_size, nmalloc, 170 ndalloc, nrequests, curregs, curruns, nregs, 171 run_size / page, util, nruns, nreruns); 172 } 173 } 174 } 175 if (json) { 176 malloc_cprintf(write_cb, cbopaque, 177 "\t\t\t\t]%s\n", (large || huge) ? "," : ""); 178 } else { 179 if (in_gap) { 180 malloc_cprintf(write_cb, cbopaque, 181 " ---\n"); 182 } 183 } 184 } 185 186 static void 187 stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, 188 bool json, bool huge, unsigned i) 189 { 190 unsigned nbins, nlruns, j; 191 bool in_gap, in_gap_prev; 192 193 CTL_GET("arenas.nbins", &nbins, unsigned); 194 CTL_GET("arenas.nlruns", &nlruns, unsigned); 195 if (json) { 196 malloc_cprintf(write_cb, cbopaque, 197 "\t\t\t\t\"lruns\": [\n"); 198 } else { 199 malloc_cprintf(write_cb, cbopaque, 200 "large: size ind allocated nmalloc" 201 " ndalloc nrequests curruns\n"); 202 } 203 for (j = 0, in_gap = false; j < nlruns; j++) { 204 uint64_t nmalloc, ndalloc, nrequests; 205 size_t run_size, curruns; 206 207 CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc, 208 uint64_t); 209 CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc, 210 uint64_t); 211 CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j, 212 &nrequests, uint64_t); 213 in_gap_prev = in_gap; 214 in_gap = (nrequests == 0); 215 216 if (!json && in_gap_prev && !in_gap) { 217 malloc_cprintf(write_cb, cbopaque, 218 " ---\n"); 219 } 220 221 CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t); 222 CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns, 223 size_t); 224 if (json) { 225 malloc_cprintf(write_cb, cbopaque, 226 "\t\t\t\t\t{\n" 227 "\t\t\t\t\t\t\"curruns\": %zu\n" 228 "\t\t\t\t\t}%s\n", 229 curruns, 230 (j + 1 < nlruns) ? "," : ""); 231 } else if (!in_gap) { 232 malloc_cprintf(write_cb, cbopaque, 233 "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 234 " %12"FMTu64" %12zu\n", 235 run_size, nbins + j, curruns * run_size, nmalloc, 236 ndalloc, nrequests, curruns); 237 } 238 } 239 if (json) { 240 malloc_cprintf(write_cb, cbopaque, 241 "\t\t\t\t]%s\n", huge ? "," : ""); 242 } else { 243 if (in_gap) { 244 malloc_cprintf(write_cb, cbopaque, 245 " ---\n"); 246 } 247 } 248 } 249 250 static void 251 stats_arena_hchunks_print(void (*write_cb)(void *, const char *), 252 void *cbopaque, bool json, unsigned i) 253 { 254 unsigned nbins, nlruns, nhchunks, j; 255 bool in_gap, in_gap_prev; 256 257 CTL_GET("arenas.nbins", &nbins, unsigned); 258 CTL_GET("arenas.nlruns", &nlruns, unsigned); 259 CTL_GET("arenas.nhchunks", &nhchunks, unsigned); 260 if (json) { 261 malloc_cprintf(write_cb, cbopaque, 262 "\t\t\t\t\"hchunks\": [\n"); 263 } else { 264 malloc_cprintf(write_cb, cbopaque, 265 "huge: size ind allocated nmalloc" 266 " ndalloc nrequests curhchunks\n"); 267 } 268 for (j = 0, in_gap = false; j < nhchunks; j++) { 269 uint64_t nmalloc, ndalloc, nrequests; 270 size_t hchunk_size, curhchunks; 271 272 CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j, 273 &nmalloc, uint64_t); 274 CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j, 275 &ndalloc, uint64_t); 276 CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j, 277 &nrequests, uint64_t); 278 in_gap_prev = in_gap; 279 in_gap = (nrequests == 0); 280 281 if (!json && in_gap_prev && !in_gap) { 282 malloc_cprintf(write_cb, cbopaque, 283 " ---\n"); 284 } 285 286 CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t); 287 CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j, 288 &curhchunks, size_t); 289 if (json) { 290 malloc_cprintf(write_cb, cbopaque, 291 "\t\t\t\t\t{\n" 292 "\t\t\t\t\t\t\"curhchunks\": %zu\n" 293 "\t\t\t\t\t}%s\n", 294 curhchunks, 295 (j + 1 < nhchunks) ? "," : ""); 296 } else if (!in_gap) { 297 malloc_cprintf(write_cb, cbopaque, 298 "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 299 " %12"FMTu64" %12zu\n", 300 hchunk_size, nbins + nlruns + j, 301 curhchunks * hchunk_size, nmalloc, ndalloc, 302 nrequests, curhchunks); 303 } 304 } 305 if (json) { 306 malloc_cprintf(write_cb, cbopaque, 307 "\t\t\t\t]\n"); 308 } else { 309 if (in_gap) { 310 malloc_cprintf(write_cb, cbopaque, 311 " ---\n"); 312 } 313 } 314 } 315 316 static void 317 stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, 318 bool json, unsigned i, bool bins, bool large, bool huge) 319 { 320 unsigned nthreads; 321 const char *dss; 322 ssize_t lg_dirty_mult, decay_time; 323 size_t page, pactive, pdirty, mapped, retained; 324 size_t metadata_mapped, metadata_allocated; 325 uint64_t npurge, nmadvise, purged; 326 size_t small_allocated; 327 uint64_t small_nmalloc, small_ndalloc, small_nrequests; 328 size_t large_allocated; 329 uint64_t large_nmalloc, large_ndalloc, large_nrequests; 330 size_t huge_allocated; 331 uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests; 332 333 CTL_GET("arenas.page", &page, size_t); 334 335 CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); 336 if (json) { 337 malloc_cprintf(write_cb, cbopaque, 338 "\t\t\t\t\"nthreads\": %u,\n", nthreads); 339 } else { 340 malloc_cprintf(write_cb, cbopaque, 341 "assigned threads: %u\n", nthreads); 342 } 343 344 CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); 345 if (json) { 346 malloc_cprintf(write_cb, cbopaque, 347 "\t\t\t\t\"dss\": \"%s\",\n", dss); 348 } else { 349 malloc_cprintf(write_cb, cbopaque, 350 "dss allocation precedence: %s\n", dss); 351 } 352 353 CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t); 354 if (json) { 355 malloc_cprintf(write_cb, cbopaque, 356 "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult); 357 } else { 358 if (opt_purge == purge_mode_ratio) { 359 if (lg_dirty_mult >= 0) { 360 malloc_cprintf(write_cb, cbopaque, 361 "min active:dirty page ratio: %u:1\n", 362 (1U << lg_dirty_mult)); 363 } else { 364 malloc_cprintf(write_cb, cbopaque, 365 "min active:dirty page ratio: N/A\n"); 366 } 367 } 368 } 369 370 CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t); 371 if (json) { 372 malloc_cprintf(write_cb, cbopaque, 373 "\t\t\t\t\"decay_time\": %zd,\n", decay_time); 374 } else { 375 if (opt_purge == purge_mode_decay) { 376 if (decay_time >= 0) { 377 malloc_cprintf(write_cb, cbopaque, 378 "decay time: %zd\n", decay_time); 379 } else { 380 malloc_cprintf(write_cb, cbopaque, 381 "decay time: N/A\n"); 382 } 383 } 384 } 385 386 CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); 387 CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); 388 CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t); 389 CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t); 390 CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t); 391 if (json) { 392 malloc_cprintf(write_cb, cbopaque, 393 "\t\t\t\t\"pactive\": %zu,\n", pactive); 394 malloc_cprintf(write_cb, cbopaque, 395 "\t\t\t\t\"pdirty\": %zu,\n", pdirty); 396 malloc_cprintf(write_cb, cbopaque, 397 "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge); 398 malloc_cprintf(write_cb, cbopaque, 399 "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise); 400 malloc_cprintf(write_cb, cbopaque, 401 "\t\t\t\t\"purged\": %"FMTu64",\n", purged); 402 } else { 403 malloc_cprintf(write_cb, cbopaque, 404 "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64 405 ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged); 406 } 407 408 CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, 409 size_t); 410 CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); 411 CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); 412 CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, 413 uint64_t); 414 if (json) { 415 malloc_cprintf(write_cb, cbopaque, 416 "\t\t\t\t\"small\": {\n"); 417 418 malloc_cprintf(write_cb, cbopaque, 419 "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); 420 malloc_cprintf(write_cb, cbopaque, 421 "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); 422 malloc_cprintf(write_cb, cbopaque, 423 "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); 424 malloc_cprintf(write_cb, cbopaque, 425 "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); 426 427 malloc_cprintf(write_cb, cbopaque, 428 "\t\t\t\t},\n"); 429 } else { 430 malloc_cprintf(write_cb, cbopaque, 431 " allocated nmalloc" 432 " ndalloc nrequests\n"); 433 malloc_cprintf(write_cb, cbopaque, 434 "small: %12zu %12"FMTu64" %12"FMTu64 435 " %12"FMTu64"\n", 436 small_allocated, small_nmalloc, small_ndalloc, 437 small_nrequests); 438 } 439 440 CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, 441 size_t); 442 CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); 443 CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); 444 CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, 445 uint64_t); 446 if (json) { 447 malloc_cprintf(write_cb, cbopaque, 448 "\t\t\t\t\"large\": {\n"); 449 450 malloc_cprintf(write_cb, cbopaque, 451 "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); 452 malloc_cprintf(write_cb, cbopaque, 453 "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); 454 malloc_cprintf(write_cb, cbopaque, 455 "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); 456 malloc_cprintf(write_cb, cbopaque, 457 "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); 458 459 malloc_cprintf(write_cb, cbopaque, 460 "\t\t\t\t},\n"); 461 } else { 462 malloc_cprintf(write_cb, cbopaque, 463 "large: %12zu %12"FMTu64" %12"FMTu64 464 " %12"FMTu64"\n", 465 large_allocated, large_nmalloc, large_ndalloc, 466 large_nrequests); 467 } 468 469 CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t); 470 CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t); 471 CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t); 472 CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests, 473 uint64_t); 474 if (json) { 475 malloc_cprintf(write_cb, cbopaque, 476 "\t\t\t\t\"huge\": {\n"); 477 478 malloc_cprintf(write_cb, cbopaque, 479 "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated); 480 malloc_cprintf(write_cb, cbopaque, 481 "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc); 482 malloc_cprintf(write_cb, cbopaque, 483 "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc); 484 malloc_cprintf(write_cb, cbopaque, 485 "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests); 486 487 malloc_cprintf(write_cb, cbopaque, 488 "\t\t\t\t},\n"); 489 } else { 490 malloc_cprintf(write_cb, cbopaque, 491 "huge: %12zu %12"FMTu64" %12"FMTu64 492 " %12"FMTu64"\n", 493 huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests); 494 malloc_cprintf(write_cb, cbopaque, 495 "total: %12zu %12"FMTu64" %12"FMTu64 496 " %12"FMTu64"\n", 497 small_allocated + large_allocated + huge_allocated, 498 small_nmalloc + large_nmalloc + huge_nmalloc, 499 small_ndalloc + large_ndalloc + huge_ndalloc, 500 small_nrequests + large_nrequests + huge_nrequests); 501 } 502 if (!json) { 503 malloc_cprintf(write_cb, cbopaque, 504 "active: %12zu\n", pactive * page); 505 } 506 507 CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); 508 if (json) { 509 malloc_cprintf(write_cb, cbopaque, 510 "\t\t\t\t\"mapped\": %zu,\n", mapped); 511 } else { 512 malloc_cprintf(write_cb, cbopaque, 513 "mapped: %12zu\n", mapped); 514 } 515 516 CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); 517 if (json) { 518 malloc_cprintf(write_cb, cbopaque, 519 "\t\t\t\t\"retained\": %zu,\n", retained); 520 } else { 521 malloc_cprintf(write_cb, cbopaque, 522 "retained: %12zu\n", retained); 523 } 524 525 CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped, 526 size_t); 527 CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated, 528 size_t); 529 if (json) { 530 malloc_cprintf(write_cb, cbopaque, 531 "\t\t\t\t\"metadata\": {\n"); 532 533 malloc_cprintf(write_cb, cbopaque, 534 "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped); 535 malloc_cprintf(write_cb, cbopaque, 536 "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated); 537 538 malloc_cprintf(write_cb, cbopaque, 539 "\t\t\t\t},\n"); 540 } else { 541 malloc_cprintf(write_cb, cbopaque, 542 "metadata: mapped: %zu, allocated: %zu\n", 543 metadata_mapped, metadata_allocated); 544 } 545 546 if (bins) { 547 stats_arena_bins_print(write_cb, cbopaque, json, large, huge, 548 i); 549 } 550 if (large) 551 stats_arena_lruns_print(write_cb, cbopaque, json, huge, i); 552 if (huge) 553 stats_arena_hchunks_print(write_cb, cbopaque, json, i); 554 } 555 556 static void 557 stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, 558 bool json, bool merged, bool unmerged) 559 { 560 const char *cpv; 561 bool bv; 562 unsigned uv; 563 uint32_t u32v; 564 uint64_t u64v; 565 ssize_t ssv; 566 size_t sv, bsz, usz, ssz, sssz, cpsz; 567 568 bsz = sizeof(bool); 569 usz = sizeof(unsigned); 570 ssz = sizeof(size_t); 571 sssz = sizeof(ssize_t); 572 cpsz = sizeof(const char *); 573 574 CTL_GET("version", &cpv, const char *); 575 if (json) { 576 malloc_cprintf(write_cb, cbopaque, 577 "\t\t\"version\": \"%s\",\n", cpv); 578 } else 579 malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); 580 581 /* config. */ 582 #define CONFIG_WRITE_BOOL_JSON(n, c) \ 583 if (json) { \ 584 CTL_GET("config."#n, &bv, bool); \ 585 malloc_cprintf(write_cb, cbopaque, \ 586 "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ 587 (c)); \ 588 } 589 590 if (json) { 591 malloc_cprintf(write_cb, cbopaque, 592 "\t\t\"config\": {\n"); 593 } 594 595 CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") 596 597 CTL_GET("config.debug", &bv, bool); 598 if (json) { 599 malloc_cprintf(write_cb, cbopaque, 600 "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); 601 } else { 602 malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", 603 bv ? "enabled" : "disabled"); 604 } 605 606 CONFIG_WRITE_BOOL_JSON(fill, ",") 607 CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") 608 609 if (json) { 610 malloc_cprintf(write_cb, cbopaque, 611 "\t\t\t\"malloc_conf\": \"%s\",\n", 612 config_malloc_conf); 613 } else { 614 malloc_cprintf(write_cb, cbopaque, 615 "config.malloc_conf: \"%s\"\n", config_malloc_conf); 616 } 617 618 CONFIG_WRITE_BOOL_JSON(munmap, ",") 619 CONFIG_WRITE_BOOL_JSON(prof, ",") 620 CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") 621 CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") 622 CONFIG_WRITE_BOOL_JSON(stats, ",") 623 CONFIG_WRITE_BOOL_JSON(tcache, ",") 624 CONFIG_WRITE_BOOL_JSON(tls, ",") 625 CONFIG_WRITE_BOOL_JSON(utrace, ",") 626 CONFIG_WRITE_BOOL_JSON(valgrind, ",") 627 CONFIG_WRITE_BOOL_JSON(xmalloc, "") 628 629 if (json) { 630 malloc_cprintf(write_cb, cbopaque, 631 "\t\t},\n"); 632 } 633 #undef CONFIG_WRITE_BOOL_JSON 634 635 /* opt. */ 636 #define OPT_WRITE_BOOL(n, c) \ 637 if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ 638 if (json) { \ 639 malloc_cprintf(write_cb, cbopaque, \ 640 "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ 641 "false", (c)); \ 642 } else { \ 643 malloc_cprintf(write_cb, cbopaque, \ 644 " opt."#n": %s\n", bv ? "true" : "false"); \ 645 } \ 646 } 647 #define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ 648 bool bv2; \ 649 if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ 650 je_mallctl(#m, &bv2, (void *)&bsz, NULL, 0) == 0) { \ 651 if (json) { \ 652 malloc_cprintf(write_cb, cbopaque, \ 653 "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ 654 "false", (c)); \ 655 } else { \ 656 malloc_cprintf(write_cb, cbopaque, \ 657 " opt."#n": %s ("#m": %s)\n", bv ? "true" \ 658 : "false", bv2 ? "true" : "false"); \ 659 } \ 660 } \ 661 } 662 #define OPT_WRITE_UNSIGNED(n, c) \ 663 if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ 664 if (json) { \ 665 malloc_cprintf(write_cb, cbopaque, \ 666 "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ 667 } else { \ 668 malloc_cprintf(write_cb, cbopaque, \ 669 " opt."#n": %u\n", uv); \ 670 } \ 671 } 672 #define OPT_WRITE_SIZE_T(n, c) \ 673 if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \ 674 if (json) { \ 675 malloc_cprintf(write_cb, cbopaque, \ 676 "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \ 677 } else { \ 678 malloc_cprintf(write_cb, cbopaque, \ 679 " opt."#n": %zu\n", sv); \ 680 } \ 681 } 682 #define OPT_WRITE_SSIZE_T(n, c) \ 683 if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ 684 if (json) { \ 685 malloc_cprintf(write_cb, cbopaque, \ 686 "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ 687 } else { \ 688 malloc_cprintf(write_cb, cbopaque, \ 689 " opt."#n": %zd\n", ssv); \ 690 } \ 691 } 692 #define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ 693 ssize_t ssv2; \ 694 if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ 695 je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ 696 if (json) { \ 697 malloc_cprintf(write_cb, cbopaque, \ 698 "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ 699 } else { \ 700 malloc_cprintf(write_cb, cbopaque, \ 701 " opt."#n": %zd ("#m": %zd)\n", \ 702 ssv, ssv2); \ 703 } \ 704 } \ 705 } 706 #define OPT_WRITE_CHAR_P(n, c) \ 707 if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ 708 if (json) { \ 709 malloc_cprintf(write_cb, cbopaque, \ 710 "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ 711 } else { \ 712 malloc_cprintf(write_cb, cbopaque, \ 713 " opt."#n": \"%s\"\n", cpv); \ 714 } \ 715 } 716 717 if (json) { 718 malloc_cprintf(write_cb, cbopaque, 719 "\t\t\"opt\": {\n"); 720 } else { 721 malloc_cprintf(write_cb, cbopaque, 722 "Run-time option settings:\n"); 723 } 724 OPT_WRITE_BOOL(abort, ",") 725 OPT_WRITE_SIZE_T(lg_chunk, ",") 726 OPT_WRITE_CHAR_P(dss, ",") 727 OPT_WRITE_UNSIGNED(narenas, ",") 728 OPT_WRITE_CHAR_P(purge, ",") 729 if (json || opt_purge == purge_mode_ratio) { 730 OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult, 731 arenas.lg_dirty_mult, ",") 732 } 733 if (json || opt_purge == purge_mode_decay) { 734 OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",") 735 } 736 OPT_WRITE_CHAR_P(junk, ",") 737 OPT_WRITE_SIZE_T(quarantine, ",") 738 OPT_WRITE_BOOL(redzone, ",") 739 OPT_WRITE_BOOL(zero, ",") 740 OPT_WRITE_BOOL(utrace, ",") 741 OPT_WRITE_BOOL(xmalloc, ",") 742 OPT_WRITE_BOOL(tcache, ",") 743 OPT_WRITE_SSIZE_T(lg_tcache_max, ",") 744 OPT_WRITE_BOOL(prof, ",") 745 OPT_WRITE_CHAR_P(prof_prefix, ",") 746 OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") 747 OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, 748 ",") 749 OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") 750 OPT_WRITE_BOOL(prof_accum, ",") 751 OPT_WRITE_SSIZE_T(lg_prof_interval, ",") 752 OPT_WRITE_BOOL(prof_gdump, ",") 753 OPT_WRITE_BOOL(prof_final, ",") 754 OPT_WRITE_BOOL(prof_leak, ",") 755 /* 756 * stats_print is always emitted, so as long as stats_print comes last 757 * it's safe to unconditionally omit the comma here (rather than having 758 * to conditionally omit it elsewhere depending on configuration). 759 */ 760 OPT_WRITE_BOOL(stats_print, "") 761 if (json) { 762 malloc_cprintf(write_cb, cbopaque, 763 "\t\t},\n"); 764 } 765 766 #undef OPT_WRITE_BOOL 767 #undef OPT_WRITE_BOOL_MUTABLE 768 #undef OPT_WRITE_SIZE_T 769 #undef OPT_WRITE_SSIZE_T 770 #undef OPT_WRITE_CHAR_P 771 772 /* arenas. */ 773 if (json) { 774 malloc_cprintf(write_cb, cbopaque, 775 "\t\t\"arenas\": {\n"); 776 } 777 778 CTL_GET("arenas.narenas", &uv, unsigned); 779 if (json) { 780 malloc_cprintf(write_cb, cbopaque, 781 "\t\t\t\"narenas\": %u,\n", uv); 782 } else 783 malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); 784 785 CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t); 786 if (json) { 787 malloc_cprintf(write_cb, cbopaque, 788 "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv); 789 } else if (opt_purge == purge_mode_ratio) { 790 if (ssv >= 0) { 791 malloc_cprintf(write_cb, cbopaque, 792 "Min active:dirty page ratio per arena: " 793 "%u:1\n", (1U << ssv)); 794 } else { 795 malloc_cprintf(write_cb, cbopaque, 796 "Min active:dirty page ratio per arena: " 797 "N/A\n"); 798 } 799 } 800 CTL_GET("arenas.decay_time", &ssv, ssize_t); 801 if (json) { 802 malloc_cprintf(write_cb, cbopaque, 803 "\t\t\t\"decay_time\": %zd,\n", ssv); 804 } else if (opt_purge == purge_mode_decay) { 805 malloc_cprintf(write_cb, cbopaque, 806 "Unused dirty page decay time: %zd%s\n", 807 ssv, (ssv < 0) ? " (no decay)" : ""); 808 } 809 810 CTL_GET("arenas.quantum", &sv, size_t); 811 if (json) { 812 malloc_cprintf(write_cb, cbopaque, 813 "\t\t\t\"quantum\": %zu,\n", sv); 814 } else 815 malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); 816 817 CTL_GET("arenas.page", &sv, size_t); 818 if (json) { 819 malloc_cprintf(write_cb, cbopaque, 820 "\t\t\t\"page\": %zu,\n", sv); 821 } else 822 malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); 823 824 if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { 825 if (json) { 826 malloc_cprintf(write_cb, cbopaque, 827 "\t\t\t\"tcache_max\": %zu,\n", sv); 828 } else { 829 malloc_cprintf(write_cb, cbopaque, 830 "Maximum thread-cached size class: %zu\n", sv); 831 } 832 } 833 834 if (json) { 835 unsigned nbins, nlruns, nhchunks, i; 836 837 CTL_GET("arenas.nbins", &nbins, unsigned); 838 malloc_cprintf(write_cb, cbopaque, 839 "\t\t\t\"nbins\": %u,\n", nbins); 840 841 CTL_GET("arenas.nhbins", &uv, unsigned); 842 malloc_cprintf(write_cb, cbopaque, 843 "\t\t\t\"nhbins\": %u,\n", uv); 844 845 malloc_cprintf(write_cb, cbopaque, 846 "\t\t\t\"bin\": [\n"); 847 for (i = 0; i < nbins; i++) { 848 malloc_cprintf(write_cb, cbopaque, 849 "\t\t\t\t{\n"); 850 851 CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); 852 malloc_cprintf(write_cb, cbopaque, 853 "\t\t\t\t\t\"size\": %zu,\n", sv); 854 855 CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); 856 malloc_cprintf(write_cb, cbopaque, 857 "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); 858 859 CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t); 860 malloc_cprintf(write_cb, cbopaque, 861 "\t\t\t\t\t\"run_size\": %zu\n", sv); 862 863 malloc_cprintf(write_cb, cbopaque, 864 "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); 865 } 866 malloc_cprintf(write_cb, cbopaque, 867 "\t\t\t],\n"); 868 869 CTL_GET("arenas.nlruns", &nlruns, unsigned); 870 malloc_cprintf(write_cb, cbopaque, 871 "\t\t\t\"nlruns\": %u,\n", nlruns); 872 873 malloc_cprintf(write_cb, cbopaque, 874 "\t\t\t\"lrun\": [\n"); 875 for (i = 0; i < nlruns; i++) { 876 malloc_cprintf(write_cb, cbopaque, 877 "\t\t\t\t{\n"); 878 879 CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t); 880 malloc_cprintf(write_cb, cbopaque, 881 "\t\t\t\t\t\"size\": %zu\n", sv); 882 883 malloc_cprintf(write_cb, cbopaque, 884 "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : ""); 885 } 886 malloc_cprintf(write_cb, cbopaque, 887 "\t\t\t],\n"); 888 889 CTL_GET("arenas.nhchunks", &nhchunks, unsigned); 890 malloc_cprintf(write_cb, cbopaque, 891 "\t\t\t\"nhchunks\": %u,\n", nhchunks); 892 893 malloc_cprintf(write_cb, cbopaque, 894 "\t\t\t\"hchunk\": [\n"); 895 for (i = 0; i < nhchunks; i++) { 896 malloc_cprintf(write_cb, cbopaque, 897 "\t\t\t\t{\n"); 898 899 CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t); 900 malloc_cprintf(write_cb, cbopaque, 901 "\t\t\t\t\t\"size\": %zu\n", sv); 902 903 malloc_cprintf(write_cb, cbopaque, 904 "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : ""); 905 } 906 malloc_cprintf(write_cb, cbopaque, 907 "\t\t\t]\n"); 908 909 malloc_cprintf(write_cb, cbopaque, 910 "\t\t},\n"); 911 } 912 913 /* prof. */ 914 if (json) { 915 malloc_cprintf(write_cb, cbopaque, 916 "\t\t\"prof\": {\n"); 917 918 CTL_GET("prof.thread_active_init", &bv, bool); 919 malloc_cprintf(write_cb, cbopaque, 920 "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : 921 "false"); 922 923 CTL_GET("prof.active", &bv, bool); 924 malloc_cprintf(write_cb, cbopaque, 925 "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); 926 927 CTL_GET("prof.gdump", &bv, bool); 928 malloc_cprintf(write_cb, cbopaque, 929 "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); 930 931 CTL_GET("prof.interval", &u64v, uint64_t); 932 malloc_cprintf(write_cb, cbopaque, 933 "\t\t\t\"interval\": %"FMTu64",\n", u64v); 934 935 CTL_GET("prof.lg_sample", &ssv, ssize_t); 936 malloc_cprintf(write_cb, cbopaque, 937 "\t\t\t\"lg_sample\": %zd\n", ssv); 938 939 malloc_cprintf(write_cb, cbopaque, 940 "\t\t}%s\n", (config_stats || merged || unmerged) ? "," : 941 ""); 942 } 943 } 944 945 static void 946 stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, 947 bool json, bool merged, bool unmerged, bool bins, bool large, bool huge) 948 { 949 size_t *cactive; 950 size_t allocated, active, metadata, resident, mapped, retained; 951 952 CTL_GET("stats.cactive", &cactive, size_t *); 953 CTL_GET("stats.allocated", &allocated, size_t); 954 CTL_GET("stats.active", &active, size_t); 955 CTL_GET("stats.metadata", &metadata, size_t); 956 CTL_GET("stats.resident", &resident, size_t); 957 CTL_GET("stats.mapped", &mapped, size_t); 958 CTL_GET("stats.retained", &retained, size_t); 959 if (json) { 960 malloc_cprintf(write_cb, cbopaque, 961 "\t\t\"stats\": {\n"); 962 963 malloc_cprintf(write_cb, cbopaque, 964 "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive)); 965 malloc_cprintf(write_cb, cbopaque, 966 "\t\t\t\"allocated\": %zu,\n", allocated); 967 malloc_cprintf(write_cb, cbopaque, 968 "\t\t\t\"active\": %zu,\n", active); 969 malloc_cprintf(write_cb, cbopaque, 970 "\t\t\t\"metadata\": %zu,\n", metadata); 971 malloc_cprintf(write_cb, cbopaque, 972 "\t\t\t\"resident\": %zu,\n", resident); 973 malloc_cprintf(write_cb, cbopaque, 974 "\t\t\t\"mapped\": %zu,\n", mapped); 975 malloc_cprintf(write_cb, cbopaque, 976 "\t\t\t\"retained\": %zu\n", retained); 977 978 malloc_cprintf(write_cb, cbopaque, 979 "\t\t}%s\n", (merged || unmerged) ? "," : ""); 980 } else { 981 malloc_cprintf(write_cb, cbopaque, 982 "Allocated: %zu, active: %zu, metadata: %zu," 983 " resident: %zu, mapped: %zu, retained: %zu\n", 984 allocated, active, metadata, resident, mapped, retained); 985 malloc_cprintf(write_cb, cbopaque, 986 "Current active ceiling: %zu\n", 987 atomic_read_z(cactive)); 988 } 989 990 if (merged || unmerged) { 991 unsigned narenas; 992 993 if (json) { 994 malloc_cprintf(write_cb, cbopaque, 995 "\t\t\"stats.arenas\": {\n"); 996 } 997 998 CTL_GET("arenas.narenas", &narenas, unsigned); 999 { 1000 VARIABLE_ARRAY(bool, initialized, narenas); 1001 size_t isz; 1002 unsigned i, j, ninitialized; 1003 1004 isz = sizeof(bool) * narenas; 1005 xmallctl("arenas.initialized", (void *)initialized, 1006 &isz, NULL, 0); 1007 for (i = ninitialized = 0; i < narenas; i++) { 1008 if (initialized[i]) 1009 ninitialized++; 1010 } 1011 1012 /* Merged stats. */ 1013 if (merged && (ninitialized > 1 || !unmerged)) { 1014 /* Print merged arena stats. */ 1015 if (json) { 1016 malloc_cprintf(write_cb, cbopaque, 1017 "\t\t\t\"merged\": {\n"); 1018 } else { 1019 malloc_cprintf(write_cb, cbopaque, 1020 "\nMerged arenas stats:\n"); 1021 } 1022 stats_arena_print(write_cb, cbopaque, json, 1023 narenas, bins, large, huge); 1024 if (json) { 1025 malloc_cprintf(write_cb, cbopaque, 1026 "\t\t\t}%s\n", (ninitialized > 1) ? 1027 "," : ""); 1028 } 1029 } 1030 1031 /* Unmerged stats. */ 1032 for (i = j = 0; i < narenas; i++) { 1033 if (initialized[i]) { 1034 if (json) { 1035 j++; 1036 malloc_cprintf(write_cb, 1037 cbopaque, 1038 "\t\t\t\"%u\": {\n", i); 1039 } else { 1040 malloc_cprintf(write_cb, 1041 cbopaque, "\narenas[%u]:\n", 1042 i); 1043 } 1044 stats_arena_print(write_cb, cbopaque, 1045 json, i, bins, large, huge); 1046 if (json) { 1047 malloc_cprintf(write_cb, 1048 cbopaque, 1049 "\t\t\t}%s\n", (j < 1050 ninitialized) ? "," : ""); 1051 } 1052 } 1053 } 1054 } 1055 1056 if (json) { 1057 malloc_cprintf(write_cb, cbopaque, 1058 "\t\t}\n"); 1059 } 1060 } 1061 } 1062 1063 void 1064 stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 1065 const char *opts) 1066 { 1067 int err; 1068 uint64_t epoch; 1069 size_t u64sz; 1070 bool json = false; 1071 bool general = true; 1072 bool merged = true; 1073 bool unmerged = true; 1074 bool bins = true; 1075 bool large = true; 1076 bool huge = true; 1077 1078 /* 1079 * Refresh stats, in case mallctl() was called by the application. 1080 * 1081 * Check for OOM here, since refreshing the ctl cache can trigger 1082 * allocation. In practice, none of the subsequent mallctl()-related 1083 * calls in this function will cause OOM if this one succeeds. 1084 * */ 1085 epoch = 1; 1086 u64sz = sizeof(uint64_t); 1087 err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, 1088 sizeof(uint64_t)); 1089 if (err != 0) { 1090 if (err == EAGAIN) { 1091 malloc_write("<jemalloc>: Memory allocation failure in " 1092 "mallctl(\"epoch\", ...)\n"); 1093 return; 1094 } 1095 malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " 1096 "...)\n"); 1097 abort(); 1098 } 1099 1100 if (opts != NULL) { 1101 unsigned i; 1102 1103 for (i = 0; opts[i] != '\0'; i++) { 1104 switch (opts[i]) { 1105 case 'J': 1106 json = true; 1107 break; 1108 case 'g': 1109 general = false; 1110 break; 1111 case 'm': 1112 merged = false; 1113 break; 1114 case 'a': 1115 unmerged = false; 1116 break; 1117 case 'b': 1118 bins = false; 1119 break; 1120 case 'l': 1121 large = false; 1122 break; 1123 case 'h': 1124 huge = false; 1125 break; 1126 default:; 1127 } 1128 } 1129 } 1130 1131 if (json) { 1132 malloc_cprintf(write_cb, cbopaque, 1133 "{\n" 1134 "\t\"jemalloc\": {\n"); 1135 } else { 1136 malloc_cprintf(write_cb, cbopaque, 1137 "___ Begin jemalloc statistics ___\n"); 1138 } 1139 1140 if (general) 1141 stats_general_print(write_cb, cbopaque, json, merged, unmerged); 1142 if (config_stats) { 1143 stats_print_helper(write_cb, cbopaque, json, merged, unmerged, 1144 bins, large, huge); 1145 } 1146 if (json) { 1147 malloc_cprintf(write_cb, cbopaque, 1148 "\t}\n" 1149 "}\n"); 1150 } else { 1151 malloc_cprintf(write_cb, cbopaque, 1152 "--- End jemalloc statistics ---\n"); 1153 } 1154 } 1155