1 /** 2 * mount.c 3 * 4 * Copyright (c) 2013 Samsung Electronics Co., Ltd. 5 * http://www.samsung.com/ 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 #include "fsck.h" 12 #include <locale.h> 13 14 void print_inode_info(struct f2fs_inode *inode, int name) 15 { 16 unsigned int i = 0; 17 int namelen = le32_to_cpu(inode->i_namelen); 18 19 if (name && namelen) { 20 inode->i_name[namelen] = '\0'; 21 MSG(0, " - File name : %s\n", inode->i_name); 22 setlocale(LC_ALL, ""); 23 MSG(0, " - File size : %'llu (bytes)\n", 24 le64_to_cpu(inode->i_size)); 25 return; 26 } 27 28 DISP_u32(inode, i_mode); 29 DISP_u32(inode, i_uid); 30 DISP_u32(inode, i_gid); 31 DISP_u32(inode, i_links); 32 DISP_u64(inode, i_size); 33 DISP_u64(inode, i_blocks); 34 35 DISP_u64(inode, i_atime); 36 DISP_u32(inode, i_atime_nsec); 37 DISP_u64(inode, i_ctime); 38 DISP_u32(inode, i_ctime_nsec); 39 DISP_u64(inode, i_mtime); 40 DISP_u32(inode, i_mtime_nsec); 41 42 DISP_u32(inode, i_generation); 43 DISP_u32(inode, i_current_depth); 44 DISP_u32(inode, i_xattr_nid); 45 DISP_u32(inode, i_flags); 46 DISP_u32(inode, i_inline); 47 DISP_u32(inode, i_pino); 48 49 if (namelen) { 50 DISP_u32(inode, i_namelen); 51 inode->i_name[namelen] = '\0'; 52 DISP_utf(inode, i_name); 53 } 54 55 printf("i_ext: fofs:%x blkaddr:%x len:%x\n", 56 inode->i_ext.fofs, 57 inode->i_ext.blk_addr, 58 inode->i_ext.len); 59 60 DISP_u32(inode, i_addr[0]); /* Pointers to data blocks */ 61 DISP_u32(inode, i_addr[1]); /* Pointers to data blocks */ 62 DISP_u32(inode, i_addr[2]); /* Pointers to data blocks */ 63 DISP_u32(inode, i_addr[3]); /* Pointers to data blocks */ 64 65 for (i = 4; i < ADDRS_PER_INODE(inode); i++) { 66 if (inode->i_addr[i] != 0x0) { 67 printf("i_addr[0x%x] points data block\r\t\t[0x%4x]\n", 68 i, inode->i_addr[i]); 69 break; 70 } 71 } 72 73 DISP_u32(inode, i_nid[0]); /* direct */ 74 DISP_u32(inode, i_nid[1]); /* direct */ 75 DISP_u32(inode, i_nid[2]); /* indirect */ 76 DISP_u32(inode, i_nid[3]); /* indirect */ 77 DISP_u32(inode, i_nid[4]); /* double indirect */ 78 79 printf("\n"); 80 } 81 82 void print_node_info(struct f2fs_node *node_block) 83 { 84 nid_t ino = le32_to_cpu(node_block->footer.ino); 85 nid_t nid = le32_to_cpu(node_block->footer.nid); 86 /* Is this inode? */ 87 if (ino == nid) { 88 DBG(0, "Node ID [0x%x:%u] is inode\n", nid, nid); 89 print_inode_info(&node_block->i, 0); 90 } else { 91 int i; 92 u32 *dump_blk = (u32 *)node_block; 93 DBG(0, "Node ID [0x%x:%u] is direct node or indirect node.\n", 94 nid, nid); 95 for (i = 0; i <= 10; i++) 96 MSG(0, "[%d]\t\t\t[0x%8x : %d]\n", 97 i, dump_blk[i], dump_blk[i]); 98 } 99 } 100 101 void print_raw_sb_info(struct f2fs_sb_info *sbi) 102 { 103 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 104 105 if (!config.dbg_lv) 106 return; 107 108 printf("\n"); 109 printf("+--------------------------------------------------------+\n"); 110 printf("| Super block |\n"); 111 printf("+--------------------------------------------------------+\n"); 112 113 DISP_u32(sb, magic); 114 DISP_u32(sb, major_ver); 115 DISP_u32(sb, minor_ver); 116 DISP_u32(sb, log_sectorsize); 117 DISP_u32(sb, log_sectors_per_block); 118 119 DISP_u32(sb, log_blocksize); 120 DISP_u32(sb, log_blocks_per_seg); 121 DISP_u32(sb, segs_per_sec); 122 DISP_u32(sb, secs_per_zone); 123 DISP_u32(sb, checksum_offset); 124 DISP_u64(sb, block_count); 125 126 DISP_u32(sb, section_count); 127 DISP_u32(sb, segment_count); 128 DISP_u32(sb, segment_count_ckpt); 129 DISP_u32(sb, segment_count_sit); 130 DISP_u32(sb, segment_count_nat); 131 132 DISP_u32(sb, segment_count_ssa); 133 DISP_u32(sb, segment_count_main); 134 DISP_u32(sb, segment0_blkaddr); 135 136 DISP_u32(sb, cp_blkaddr); 137 DISP_u32(sb, sit_blkaddr); 138 DISP_u32(sb, nat_blkaddr); 139 DISP_u32(sb, ssa_blkaddr); 140 DISP_u32(sb, main_blkaddr); 141 142 DISP_u32(sb, root_ino); 143 DISP_u32(sb, node_ino); 144 DISP_u32(sb, meta_ino); 145 DISP_u32(sb, cp_payload); 146 DISP("%s", sb, version); 147 printf("\n"); 148 } 149 150 void print_ckpt_info(struct f2fs_sb_info *sbi) 151 { 152 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi); 153 154 if (!config.dbg_lv) 155 return; 156 157 printf("\n"); 158 printf("+--------------------------------------------------------+\n"); 159 printf("| Checkpoint |\n"); 160 printf("+--------------------------------------------------------+\n"); 161 162 DISP_u64(cp, checkpoint_ver); 163 DISP_u64(cp, user_block_count); 164 DISP_u64(cp, valid_block_count); 165 DISP_u32(cp, rsvd_segment_count); 166 DISP_u32(cp, overprov_segment_count); 167 DISP_u32(cp, free_segment_count); 168 169 DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]); 170 DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]); 171 DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]); 172 DISP_u32(cp, cur_node_segno[0]); 173 DISP_u32(cp, cur_node_segno[1]); 174 DISP_u32(cp, cur_node_segno[2]); 175 176 DISP_u32(cp, cur_node_blkoff[0]); 177 DISP_u32(cp, cur_node_blkoff[1]); 178 DISP_u32(cp, cur_node_blkoff[2]); 179 180 181 DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]); 182 DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]); 183 DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]); 184 DISP_u32(cp, cur_data_segno[0]); 185 DISP_u32(cp, cur_data_segno[1]); 186 DISP_u32(cp, cur_data_segno[2]); 187 188 DISP_u32(cp, cur_data_blkoff[0]); 189 DISP_u32(cp, cur_data_blkoff[1]); 190 DISP_u32(cp, cur_data_blkoff[2]); 191 192 DISP_u32(cp, ckpt_flags); 193 DISP_u32(cp, cp_pack_total_block_count); 194 DISP_u32(cp, cp_pack_start_sum); 195 DISP_u32(cp, valid_node_count); 196 DISP_u32(cp, valid_inode_count); 197 DISP_u32(cp, next_free_nid); 198 DISP_u32(cp, sit_ver_bitmap_bytesize); 199 DISP_u32(cp, nat_ver_bitmap_bytesize); 200 DISP_u32(cp, checksum_offset); 201 DISP_u64(cp, elapsed_time); 202 203 DISP_u32(cp, sit_nat_version_bitmap[0]); 204 printf("\n\n"); 205 } 206 207 int sanity_check_raw_super(struct f2fs_super_block *raw_super) 208 { 209 unsigned int blocksize; 210 211 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 212 return -1; 213 } 214 215 if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { 216 return -1; 217 } 218 219 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 220 if (F2FS_BLKSIZE != blocksize) { 221 return -1; 222 } 223 224 if (le32_to_cpu(raw_super->log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE || 225 le32_to_cpu(raw_super->log_sectorsize) < 226 F2FS_MIN_LOG_SECTOR_SIZE) { 227 return -1; 228 } 229 230 if (le32_to_cpu(raw_super->log_sectors_per_block) + 231 le32_to_cpu(raw_super->log_sectorsize) != 232 F2FS_MAX_LOG_SECTOR_SIZE) { 233 return -1; 234 } 235 236 return 0; 237 } 238 239 int validate_super_block(struct f2fs_sb_info *sbi, int block) 240 { 241 u64 offset; 242 243 sbi->raw_super = malloc(sizeof(struct f2fs_super_block)); 244 245 if (block == 0) 246 offset = F2FS_SUPER_OFFSET; 247 else 248 offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET; 249 250 if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block))) 251 return -1; 252 253 if (!sanity_check_raw_super(sbi->raw_super)) { 254 /* get kernel version */ 255 if (config.kd >= 0) { 256 dev_read_version(config.version, 0, VERSION_LEN); 257 get_kernel_version(config.version); 258 } else { 259 memset(config.version, 0, VERSION_LEN); 260 } 261 262 /* build sb version */ 263 memcpy(config.sb_version, sbi->raw_super->version, VERSION_LEN); 264 get_kernel_version(config.sb_version); 265 memcpy(config.init_version, sbi->raw_super->init_version, VERSION_LEN); 266 get_kernel_version(config.init_version); 267 268 MSG(0, "Info: MKFS version\n \"%s\"\n", config.init_version); 269 MSG(0, "Info: FSCK version\n from \"%s\"\n to \"%s\"\n", 270 config.sb_version, config.version); 271 if (memcmp(config.sb_version, config.version, VERSION_LEN)) { 272 int ret; 273 274 memcpy(sbi->raw_super->version, 275 config.version, VERSION_LEN); 276 ret = dev_write(sbi->raw_super, offset, 277 sizeof(struct f2fs_super_block)); 278 ASSERT(ret >= 0); 279 280 config.auto_fix = 0; 281 config.fix_on = 1; 282 } 283 return 0; 284 } 285 286 free(sbi->raw_super); 287 MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block); 288 289 return -EINVAL; 290 } 291 292 int init_sb_info(struct f2fs_sb_info *sbi) 293 { 294 struct f2fs_super_block *raw_super = sbi->raw_super; 295 296 sbi->log_sectors_per_block = 297 le32_to_cpu(raw_super->log_sectors_per_block); 298 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); 299 sbi->blocksize = 1 << sbi->log_blocksize; 300 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 301 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg; 302 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); 303 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); 304 sbi->total_sections = le32_to_cpu(raw_super->section_count); 305 sbi->total_node_count = 306 (le32_to_cpu(raw_super->segment_count_nat) / 2) 307 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; 308 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino); 309 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino); 310 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino); 311 sbi->cur_victim_sec = NULL_SEGNO; 312 return 0; 313 } 314 315 void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr, 316 unsigned long long *version) 317 { 318 void *cp_page_1, *cp_page_2; 319 struct f2fs_checkpoint *cp_block; 320 unsigned long blk_size = sbi->blocksize; 321 unsigned long long cur_version = 0, pre_version = 0; 322 unsigned int crc = 0; 323 size_t crc_offset; 324 325 /* Read the 1st cp block in this CP pack */ 326 cp_page_1 = malloc(PAGE_SIZE); 327 if (dev_read_block(cp_page_1, cp_addr) < 0) 328 return NULL; 329 330 cp_block = (struct f2fs_checkpoint *)cp_page_1; 331 crc_offset = le32_to_cpu(cp_block->checksum_offset); 332 if (crc_offset >= blk_size) 333 goto invalid_cp1; 334 335 crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); 336 if (f2fs_crc_valid(crc, cp_block, crc_offset)) 337 goto invalid_cp1; 338 339 pre_version = le64_to_cpu(cp_block->checkpoint_ver); 340 341 /* Read the 2nd cp block in this CP pack */ 342 cp_page_2 = malloc(PAGE_SIZE); 343 cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1; 344 345 if (dev_read_block(cp_page_2, cp_addr) < 0) 346 goto invalid_cp2; 347 348 cp_block = (struct f2fs_checkpoint *)cp_page_2; 349 crc_offset = le32_to_cpu(cp_block->checksum_offset); 350 if (crc_offset >= blk_size) 351 goto invalid_cp2; 352 353 crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset); 354 if (f2fs_crc_valid(crc, cp_block, crc_offset)) 355 goto invalid_cp2; 356 357 cur_version = le64_to_cpu(cp_block->checkpoint_ver); 358 359 if (cur_version == pre_version) { 360 *version = cur_version; 361 free(cp_page_2); 362 return cp_page_1; 363 } 364 365 invalid_cp2: 366 free(cp_page_2); 367 invalid_cp1: 368 free(cp_page_1); 369 return NULL; 370 } 371 372 int get_valid_checkpoint(struct f2fs_sb_info *sbi) 373 { 374 struct f2fs_super_block *raw_sb = sbi->raw_super; 375 void *cp1, *cp2, *cur_page; 376 unsigned long blk_size = sbi->blocksize; 377 unsigned long long cp1_version = 0, cp2_version = 0; 378 unsigned long long cp_start_blk_no; 379 unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload); 380 int ret; 381 382 sbi->ckpt = malloc(cp_blks * blk_size); 383 if (!sbi->ckpt) 384 return -ENOMEM; 385 /* 386 * Finding out valid cp block involves read both 387 * sets( cp pack1 and cp pack 2) 388 */ 389 cp_start_blk_no = le32_to_cpu(raw_sb->cp_blkaddr); 390 cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version); 391 392 /* The second checkpoint pack should start at the next segment */ 393 cp_start_blk_no += 1 << le32_to_cpu(raw_sb->log_blocks_per_seg); 394 cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version); 395 396 if (cp1 && cp2) { 397 if (ver_after(cp2_version, cp1_version)) { 398 cur_page = cp2; 399 sbi->cur_cp = 2; 400 } else { 401 cur_page = cp1; 402 sbi->cur_cp = 1; 403 } 404 } else if (cp1) { 405 cur_page = cp1; 406 sbi->cur_cp = 1; 407 } else if (cp2) { 408 cur_page = cp2; 409 sbi->cur_cp = 2; 410 } else { 411 free(cp1); 412 free(cp2); 413 goto fail_no_cp; 414 } 415 416 memcpy(sbi->ckpt, cur_page, blk_size); 417 418 if (cp_blks > 1) { 419 unsigned int i; 420 unsigned long long cp_blk_no; 421 422 cp_blk_no = le32_to_cpu(raw_sb->cp_blkaddr); 423 if (cur_page == cp2) 424 cp_blk_no += 1 << 425 le32_to_cpu(raw_sb->log_blocks_per_seg); 426 /* copy sit bitmap */ 427 for (i = 1; i < cp_blks; i++) { 428 unsigned char *ckpt = (unsigned char *)sbi->ckpt; 429 ret = dev_read_block(cur_page, cp_blk_no + i); 430 ASSERT(ret >= 0); 431 memcpy(ckpt + i * blk_size, cur_page, blk_size); 432 } 433 } 434 free(cp1); 435 free(cp2); 436 return 0; 437 438 fail_no_cp: 439 free(sbi->ckpt); 440 return -EINVAL; 441 } 442 443 int sanity_check_ckpt(struct f2fs_sb_info *sbi) 444 { 445 unsigned int total, fsmeta; 446 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 447 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 448 449 total = le32_to_cpu(raw_super->segment_count); 450 fsmeta = le32_to_cpu(raw_super->segment_count_ckpt); 451 fsmeta += le32_to_cpu(raw_super->segment_count_sit); 452 fsmeta += le32_to_cpu(raw_super->segment_count_nat); 453 fsmeta += le32_to_cpu(ckpt->rsvd_segment_count); 454 fsmeta += le32_to_cpu(raw_super->segment_count_ssa); 455 456 if (fsmeta >= total) 457 return 1; 458 459 return 0; 460 } 461 462 int init_node_manager(struct f2fs_sb_info *sbi) 463 { 464 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); 465 struct f2fs_nm_info *nm_i = NM_I(sbi); 466 unsigned char *version_bitmap; 467 unsigned int nat_segs, nat_blocks; 468 469 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); 470 471 /* segment_count_nat includes pair segment so divide to 2. */ 472 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; 473 nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); 474 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; 475 nm_i->fcnt = 0; 476 nm_i->nat_cnt = 0; 477 nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 478 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); 479 480 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); 481 482 nm_i->nat_bitmap = malloc(nm_i->bitmap_size); 483 if (!nm_i->nat_bitmap) 484 return -ENOMEM; 485 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP); 486 if (!version_bitmap) 487 return -EFAULT; 488 489 /* copy version bitmap */ 490 memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size); 491 return 0; 492 } 493 494 int build_node_manager(struct f2fs_sb_info *sbi) 495 { 496 int err; 497 sbi->nm_info = malloc(sizeof(struct f2fs_nm_info)); 498 if (!sbi->nm_info) 499 return -ENOMEM; 500 501 err = init_node_manager(sbi); 502 if (err) 503 return err; 504 505 return 0; 506 } 507 508 int build_sit_info(struct f2fs_sb_info *sbi) 509 { 510 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); 511 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 512 struct sit_info *sit_i; 513 unsigned int sit_segs, start; 514 char *src_bitmap, *dst_bitmap; 515 unsigned int bitmap_size; 516 517 sit_i = malloc(sizeof(struct sit_info)); 518 if (!sit_i) 519 return -ENOMEM; 520 521 SM_I(sbi)->sit_info = sit_i; 522 523 sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1); 524 525 for (start = 0; start < TOTAL_SEGS(sbi); start++) { 526 sit_i->sentries[start].cur_valid_map 527 = calloc(SIT_VBLOCK_MAP_SIZE, 1); 528 sit_i->sentries[start].ckpt_valid_map 529 = calloc(SIT_VBLOCK_MAP_SIZE, 1); 530 if (!sit_i->sentries[start].cur_valid_map 531 || !sit_i->sentries[start].ckpt_valid_map) 532 return -ENOMEM; 533 } 534 535 sit_segs = le32_to_cpu(raw_sb->segment_count_sit) >> 1; 536 bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 537 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); 538 539 dst_bitmap = malloc(bitmap_size); 540 memcpy(dst_bitmap, src_bitmap, bitmap_size); 541 542 sit_i->sit_base_addr = le32_to_cpu(raw_sb->sit_blkaddr); 543 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 544 sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count); 545 sit_i->sit_bitmap = dst_bitmap; 546 sit_i->bitmap_size = bitmap_size; 547 sit_i->dirty_sentries = 0; 548 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 549 sit_i->elapsed_time = le64_to_cpu(ckpt->elapsed_time); 550 return 0; 551 } 552 553 void reset_curseg(struct f2fs_sb_info *sbi, int type) 554 { 555 struct curseg_info *curseg = CURSEG_I(sbi, type); 556 struct summary_footer *sum_footer; 557 struct seg_entry *se; 558 559 sum_footer = &(curseg->sum_blk->footer); 560 memset(sum_footer, 0, sizeof(struct summary_footer)); 561 if (IS_DATASEG(type)) 562 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA); 563 if (IS_NODESEG(type)) 564 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE); 565 se = get_seg_entry(sbi, curseg->segno); 566 se->type = type; 567 } 568 569 static void read_compacted_summaries(struct f2fs_sb_info *sbi) 570 { 571 struct curseg_info *curseg; 572 unsigned int i, j, offset; 573 block_t start; 574 char *kaddr; 575 int ret; 576 577 start = start_sum_block(sbi); 578 579 kaddr = (char *)malloc(PAGE_SIZE); 580 ret = dev_read_block(kaddr, start++); 581 ASSERT(ret >= 0); 582 583 curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 584 memcpy(&curseg->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE); 585 586 curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 587 memcpy(&curseg->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, 588 SUM_JOURNAL_SIZE); 589 590 offset = 2 * SUM_JOURNAL_SIZE; 591 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { 592 unsigned short blk_off; 593 struct curseg_info *curseg = CURSEG_I(sbi, i); 594 595 reset_curseg(sbi, i); 596 597 if (curseg->alloc_type == SSR) 598 blk_off = sbi->blocks_per_seg; 599 else 600 blk_off = curseg->next_blkoff; 601 602 for (j = 0; j < blk_off; j++) { 603 struct f2fs_summary *s; 604 s = (struct f2fs_summary *)(kaddr + offset); 605 curseg->sum_blk->entries[j] = *s; 606 offset += SUMMARY_SIZE; 607 if (offset + SUMMARY_SIZE <= 608 PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) 609 continue; 610 memset(kaddr, 0, PAGE_SIZE); 611 ret = dev_read_block(kaddr, start++); 612 ASSERT(ret >= 0); 613 offset = 0; 614 } 615 } 616 free(kaddr); 617 } 618 619 static void restore_node_summary(struct f2fs_sb_info *sbi, 620 unsigned int segno, struct f2fs_summary_block *sum_blk) 621 { 622 struct f2fs_node *node_blk; 623 struct f2fs_summary *sum_entry; 624 block_t addr; 625 unsigned int i; 626 int ret; 627 628 node_blk = malloc(F2FS_BLKSIZE); 629 ASSERT(node_blk); 630 631 /* scan the node segment */ 632 addr = START_BLOCK(sbi, segno); 633 sum_entry = &sum_blk->entries[0]; 634 635 for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) { 636 ret = dev_read_block(node_blk, addr); 637 ASSERT(ret >= 0); 638 sum_entry->nid = node_blk->footer.nid; 639 addr++; 640 } 641 free(node_blk); 642 } 643 644 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type) 645 { 646 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 647 struct f2fs_summary_block *sum_blk; 648 struct curseg_info *curseg; 649 unsigned int segno = 0; 650 block_t blk_addr = 0; 651 int ret; 652 653 if (IS_DATASEG(type)) { 654 segno = le32_to_cpu(ckpt->cur_data_segno[type]); 655 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) 656 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); 657 else 658 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); 659 } else { 660 segno = le32_to_cpu(ckpt->cur_node_segno[type - 661 CURSEG_HOT_NODE]); 662 if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) 663 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, 664 type - CURSEG_HOT_NODE); 665 else 666 blk_addr = GET_SUM_BLKADDR(sbi, segno); 667 } 668 669 sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE); 670 ret = dev_read_block(sum_blk, blk_addr); 671 ASSERT(ret >= 0); 672 673 if (IS_NODESEG(type) && !is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) 674 restore_node_summary(sbi, segno, sum_blk); 675 676 curseg = CURSEG_I(sbi, type); 677 memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE); 678 reset_curseg(sbi, type); 679 free(sum_blk); 680 } 681 682 static void restore_curseg_summaries(struct f2fs_sb_info *sbi) 683 { 684 int type = CURSEG_HOT_DATA; 685 686 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) { 687 read_compacted_summaries(sbi); 688 type = CURSEG_HOT_NODE; 689 } 690 691 for (; type <= CURSEG_COLD_NODE; type++) 692 read_normal_summaries(sbi, type); 693 } 694 695 static void build_curseg(struct f2fs_sb_info *sbi) 696 { 697 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 698 struct curseg_info *array; 699 unsigned short blk_off; 700 unsigned int segno; 701 int i; 702 703 array = malloc(sizeof(*array) * NR_CURSEG_TYPE); 704 ASSERT(array); 705 706 SM_I(sbi)->curseg_array = array; 707 708 for (i = 0; i < NR_CURSEG_TYPE; i++) { 709 array[i].sum_blk = malloc(PAGE_CACHE_SIZE); 710 ASSERT(array[i].sum_blk); 711 if (i <= CURSEG_COLD_DATA) { 712 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 713 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 714 } 715 if (i > CURSEG_COLD_DATA) { 716 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[i - 717 CURSEG_HOT_NODE]); 718 segno = le32_to_cpu(ckpt->cur_node_segno[i - 719 CURSEG_HOT_NODE]); 720 } 721 array[i].segno = segno; 722 array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno); 723 array[i].next_segno = NULL_SEGNO; 724 array[i].next_blkoff = blk_off; 725 array[i].alloc_type = ckpt->alloc_type[i]; 726 } 727 restore_curseg_summaries(sbi); 728 } 729 730 inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno) 731 { 732 unsigned int end_segno = SM_I(sbi)->segment_count - 1; 733 ASSERT(segno <= end_segno); 734 } 735 736 static struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi, 737 unsigned int segno) 738 { 739 struct sit_info *sit_i = SIT_I(sbi); 740 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); 741 block_t blk_addr = sit_i->sit_base_addr + offset; 742 struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1); 743 int ret; 744 745 check_seg_range(sbi, segno); 746 747 /* calculate sit block address */ 748 if (f2fs_test_bit(offset, sit_i->sit_bitmap)) 749 blk_addr += sit_i->sit_blocks; 750 751 ret = dev_read_block(sit_blk, blk_addr); 752 ASSERT(ret >= 0); 753 754 return sit_blk; 755 } 756 757 void rewrite_current_sit_page(struct f2fs_sb_info *sbi, 758 unsigned int segno, struct f2fs_sit_block *sit_blk) 759 { 760 struct sit_info *sit_i = SIT_I(sbi); 761 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno); 762 block_t blk_addr = sit_i->sit_base_addr + offset; 763 int ret; 764 765 /* calculate sit block address */ 766 if (f2fs_test_bit(offset, sit_i->sit_bitmap)) 767 blk_addr += sit_i->sit_blocks; 768 769 ret = dev_write_block(sit_blk, blk_addr); 770 ASSERT(ret >= 0); 771 } 772 773 void check_block_count(struct f2fs_sb_info *sbi, 774 unsigned int segno, struct f2fs_sit_entry *raw_sit) 775 { 776 struct f2fs_sm_info *sm_info = SM_I(sbi); 777 unsigned int end_segno = sm_info->segment_count - 1; 778 int valid_blocks = 0; 779 unsigned int i; 780 781 /* check segment usage */ 782 if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg) 783 ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u", 784 segno, GET_SIT_VBLOCKS(raw_sit)); 785 786 /* check boundary of a given segment number */ 787 if (segno > end_segno) 788 ASSERT_MSG("Invalid SEGNO: 0x%x", segno); 789 790 /* check bitmap with valid block count */ 791 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++) 792 valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]); 793 794 if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks) 795 ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u", 796 segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks); 797 798 if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE) 799 ASSERT_MSG("Wrong SIT type: segno=0x%x, %u", 800 segno, GET_SIT_TYPE(raw_sit)); 801 } 802 803 void seg_info_from_raw_sit(struct seg_entry *se, 804 struct f2fs_sit_entry *raw_sit) 805 { 806 se->valid_blocks = GET_SIT_VBLOCKS(raw_sit); 807 se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit); 808 memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE); 809 memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE); 810 se->type = GET_SIT_TYPE(raw_sit); 811 se->orig_type = GET_SIT_TYPE(raw_sit); 812 se->mtime = le64_to_cpu(raw_sit->mtime); 813 } 814 815 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi, 816 unsigned int segno) 817 { 818 struct sit_info *sit_i = SIT_I(sbi); 819 return &sit_i->sentries[segno]; 820 } 821 822 int get_sum_block(struct f2fs_sb_info *sbi, unsigned int segno, 823 struct f2fs_summary_block *sum_blk) 824 { 825 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 826 struct curseg_info *curseg; 827 int type, ret; 828 u64 ssa_blk; 829 830 ssa_blk = GET_SUM_BLKADDR(sbi, segno); 831 for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) { 832 if (segno == ckpt->cur_node_segno[type]) { 833 curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type); 834 if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) { 835 ASSERT_MSG("segno [0x%x] indicates a data " 836 "segment, but should be node", 837 segno); 838 return -EINVAL; 839 } 840 memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ); 841 return SEG_TYPE_CUR_NODE; 842 } 843 } 844 845 for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) { 846 if (segno == ckpt->cur_data_segno[type]) { 847 curseg = CURSEG_I(sbi, type); 848 if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) { 849 ASSERT_MSG("segno [0x%x] indicates a node " 850 "segment, but should be data", 851 segno); 852 return -EINVAL; 853 } 854 DBG(2, "segno [0x%x] is current data seg[0x%x]\n", 855 segno, type); 856 memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ); 857 return SEG_TYPE_CUR_DATA; 858 } 859 } 860 861 ret = dev_read_block(sum_blk, ssa_blk); 862 ASSERT(ret >= 0); 863 864 if (IS_SUM_NODE_SEG(sum_blk->footer)) 865 return SEG_TYPE_NODE; 866 else 867 return SEG_TYPE_DATA; 868 869 } 870 871 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr, 872 struct f2fs_summary *sum_entry) 873 { 874 struct f2fs_summary_block *sum_blk; 875 u32 segno, offset; 876 int ret; 877 878 segno = GET_SEGNO(sbi, blk_addr); 879 offset = OFFSET_IN_SEG(sbi, blk_addr); 880 881 sum_blk = calloc(BLOCK_SZ, 1); 882 883 ret = get_sum_block(sbi, segno, sum_blk); 884 memcpy(sum_entry, &(sum_blk->entries[offset]), 885 sizeof(struct f2fs_summary)); 886 free(sum_blk); 887 return ret; 888 } 889 890 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, 891 struct f2fs_nat_entry *raw_nat) 892 { 893 struct f2fs_nm_info *nm_i = NM_I(sbi); 894 struct f2fs_nat_block *nat_block; 895 pgoff_t block_off; 896 pgoff_t block_addr; 897 int seg_off, entry_off; 898 int ret; 899 900 if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0) 901 return; 902 903 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1); 904 905 block_off = nid / NAT_ENTRY_PER_BLOCK; 906 entry_off = nid % NAT_ENTRY_PER_BLOCK; 907 908 seg_off = block_off >> sbi->log_blocks_per_seg; 909 block_addr = (pgoff_t)(nm_i->nat_blkaddr + 910 (seg_off << sbi->log_blocks_per_seg << 1) + 911 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 912 913 if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) 914 block_addr += sbi->blocks_per_seg; 915 916 ret = dev_read_block(nat_block, block_addr); 917 ASSERT(ret >= 0); 918 919 memcpy(raw_nat, &nat_block->entries[entry_off], 920 sizeof(struct f2fs_nat_entry)); 921 free(nat_block); 922 } 923 924 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) 925 { 926 struct f2fs_nat_entry raw_nat; 927 get_nat_entry(sbi, nid, &raw_nat); 928 ni->nid = nid; 929 node_info_from_raw_nat(ni, &raw_nat); 930 } 931 932 void build_sit_entries(struct f2fs_sb_info *sbi) 933 { 934 struct sit_info *sit_i = SIT_I(sbi); 935 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 936 struct f2fs_summary_block *sum = curseg->sum_blk; 937 unsigned int segno; 938 939 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) { 940 struct seg_entry *se = &sit_i->sentries[segno]; 941 struct f2fs_sit_block *sit_blk; 942 struct f2fs_sit_entry sit; 943 int i; 944 945 for (i = 0; i < sits_in_cursum(sum); i++) { 946 if (le32_to_cpu(segno_in_journal(sum, i)) == segno) { 947 sit = sit_in_journal(sum, i); 948 goto got_it; 949 } 950 } 951 sit_blk = get_current_sit_page(sbi, segno); 952 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)]; 953 free(sit_blk); 954 got_it: 955 check_block_count(sbi, segno, &sit); 956 seg_info_from_raw_sit(se, &sit); 957 } 958 959 } 960 961 int build_segment_manager(struct f2fs_sb_info *sbi) 962 { 963 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 964 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); 965 struct f2fs_sm_info *sm_info; 966 967 sm_info = malloc(sizeof(struct f2fs_sm_info)); 968 if (!sm_info) 969 return -ENOMEM; 970 971 /* init sm info */ 972 sbi->sm_info = sm_info; 973 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 974 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 975 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 976 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); 977 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); 978 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); 979 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); 980 981 build_sit_info(sbi); 982 983 build_curseg(sbi); 984 985 build_sit_entries(sbi); 986 987 return 0; 988 } 989 990 void build_sit_area_bitmap(struct f2fs_sb_info *sbi) 991 { 992 struct f2fs_fsck *fsck = F2FS_FSCK(sbi); 993 struct f2fs_sm_info *sm_i = SM_I(sbi); 994 unsigned int segno = 0; 995 char *ptr = NULL; 996 u32 sum_vblocks = 0; 997 u32 free_segs = 0; 998 struct seg_entry *se; 999 1000 fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE; 1001 fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz); 1002 ptr = fsck->sit_area_bitmap; 1003 1004 ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz); 1005 1006 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) { 1007 se = get_seg_entry(sbi, segno); 1008 1009 memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); 1010 ptr += SIT_VBLOCK_MAP_SIZE; 1011 1012 if (se->valid_blocks == 0x0) { 1013 if (sbi->ckpt->cur_node_segno[0] == segno || 1014 sbi->ckpt->cur_data_segno[0] == segno || 1015 sbi->ckpt->cur_node_segno[1] == segno || 1016 sbi->ckpt->cur_data_segno[1] == segno || 1017 sbi->ckpt->cur_node_segno[2] == segno || 1018 sbi->ckpt->cur_data_segno[2] == segno) { 1019 continue; 1020 } else { 1021 free_segs++; 1022 } 1023 } else { 1024 sum_vblocks += se->valid_blocks; 1025 } 1026 } 1027 fsck->chk.sit_valid_blocks = sum_vblocks; 1028 fsck->chk.sit_free_segs = free_segs; 1029 1030 DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n", 1031 sum_vblocks, sum_vblocks, 1032 free_segs, free_segs); 1033 } 1034 1035 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi) 1036 { 1037 struct f2fs_fsck *fsck = F2FS_FSCK(sbi); 1038 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); 1039 struct sit_info *sit_i = SIT_I(sbi); 1040 unsigned int segno = 0; 1041 struct f2fs_summary_block *sum = curseg->sum_blk; 1042 char *ptr = NULL; 1043 1044 /* remove sit journal */ 1045 sum->n_sits = 0; 1046 1047 fsck->chk.free_segs = 0; 1048 1049 ptr = fsck->main_area_bitmap; 1050 1051 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) { 1052 struct f2fs_sit_block *sit_blk; 1053 struct f2fs_sit_entry *sit; 1054 struct seg_entry *se; 1055 u16 valid_blocks = 0; 1056 u16 type; 1057 int i; 1058 1059 sit_blk = get_current_sit_page(sbi, segno); 1060 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)]; 1061 memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE); 1062 1063 /* update valid block count */ 1064 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++) 1065 valid_blocks += get_bits_in_byte(sit->valid_map[i]); 1066 1067 se = get_seg_entry(sbi, segno); 1068 type = se->type; 1069 if (type >= NO_CHECK_TYPE) { 1070 ASSERT_MSG("Invalide type and valid blocks=%x,%x", 1071 segno, valid_blocks); 1072 type = 0; 1073 } 1074 sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) | 1075 valid_blocks); 1076 rewrite_current_sit_page(sbi, segno, sit_blk); 1077 free(sit_blk); 1078 1079 if (valid_blocks == 0 && 1080 sbi->ckpt->cur_node_segno[0] != segno && 1081 sbi->ckpt->cur_data_segno[0] != segno && 1082 sbi->ckpt->cur_node_segno[1] != segno && 1083 sbi->ckpt->cur_data_segno[1] != segno && 1084 sbi->ckpt->cur_node_segno[2] != segno && 1085 sbi->ckpt->cur_data_segno[2] != segno) 1086 fsck->chk.free_segs++; 1087 1088 ptr += SIT_VBLOCK_MAP_SIZE; 1089 } 1090 } 1091 1092 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid, 1093 struct f2fs_nat_entry *raw_nat) 1094 { 1095 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1096 struct f2fs_summary_block *sum = curseg->sum_blk; 1097 int i = 0; 1098 1099 for (i = 0; i < nats_in_cursum(sum); i++) { 1100 if (le32_to_cpu(nid_in_journal(sum, i)) == nid) { 1101 memcpy(raw_nat, &nat_in_journal(sum, i), 1102 sizeof(struct f2fs_nat_entry)); 1103 DBG(3, "==> Found nid [0x%x] in nat cache\n", nid); 1104 return i; 1105 } 1106 } 1107 return -1; 1108 } 1109 1110 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid) 1111 { 1112 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1113 struct f2fs_summary_block *sum = curseg->sum_blk; 1114 struct f2fs_nm_info *nm_i = NM_I(sbi); 1115 struct f2fs_nat_block *nat_block; 1116 pgoff_t block_off; 1117 pgoff_t block_addr; 1118 int seg_off, entry_off; 1119 int ret; 1120 int i = 0; 1121 1122 /* check in journal */ 1123 for (i = 0; i < nats_in_cursum(sum); i++) { 1124 if (le32_to_cpu(nid_in_journal(sum, i)) == nid) { 1125 memset(&nat_in_journal(sum, i), 0, 1126 sizeof(struct f2fs_nat_entry)); 1127 FIX_MSG("Remove nid [0x%x] in nat journal\n", nid); 1128 return; 1129 } 1130 } 1131 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1); 1132 1133 block_off = nid / NAT_ENTRY_PER_BLOCK; 1134 entry_off = nid % NAT_ENTRY_PER_BLOCK; 1135 1136 seg_off = block_off >> sbi->log_blocks_per_seg; 1137 block_addr = (pgoff_t)(nm_i->nat_blkaddr + 1138 (seg_off << sbi->log_blocks_per_seg << 1) + 1139 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 1140 1141 if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) 1142 block_addr += sbi->blocks_per_seg; 1143 1144 ret = dev_read_block(nat_block, block_addr); 1145 ASSERT(ret >= 0); 1146 1147 memset(&nat_block->entries[entry_off], 0, 1148 sizeof(struct f2fs_nat_entry)); 1149 1150 ret = dev_write_block(nat_block, block_addr); 1151 ASSERT(ret >= 0); 1152 free(nat_block); 1153 } 1154 1155 void build_nat_area_bitmap(struct f2fs_sb_info *sbi) 1156 { 1157 struct f2fs_fsck *fsck = F2FS_FSCK(sbi); 1158 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); 1159 struct f2fs_nm_info *nm_i = NM_I(sbi); 1160 struct f2fs_nat_block *nat_block; 1161 u32 nid, nr_nat_blks; 1162 pgoff_t block_off; 1163 pgoff_t block_addr; 1164 int seg_off; 1165 int ret; 1166 unsigned int i; 1167 1168 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1); 1169 ASSERT(nat_block); 1170 1171 /* Alloc & build nat entry bitmap */ 1172 nr_nat_blks = (le32_to_cpu(raw_sb->segment_count_nat) / 2) << 1173 sbi->log_blocks_per_seg; 1174 1175 fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK; 1176 fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8; 1177 fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1); 1178 ASSERT(fsck->nat_area_bitmap != NULL); 1179 1180 for (block_off = 0; block_off < nr_nat_blks; block_off++) { 1181 1182 seg_off = block_off >> sbi->log_blocks_per_seg; 1183 block_addr = (pgoff_t)(nm_i->nat_blkaddr + 1184 (seg_off << sbi->log_blocks_per_seg << 1) + 1185 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 1186 1187 if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) 1188 block_addr += sbi->blocks_per_seg; 1189 1190 ret = dev_read_block(nat_block, block_addr); 1191 ASSERT(ret >= 0); 1192 1193 nid = block_off * NAT_ENTRY_PER_BLOCK; 1194 for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) { 1195 struct f2fs_nat_entry raw_nat; 1196 struct node_info ni; 1197 ni.nid = nid + i; 1198 1199 if ((nid + i) == F2FS_NODE_INO(sbi) || 1200 (nid + i) == F2FS_META_INO(sbi)) { 1201 ASSERT(nat_block->entries[i].block_addr != 0x0); 1202 continue; 1203 } 1204 1205 if (lookup_nat_in_journal(sbi, nid + i, 1206 &raw_nat) >= 0) { 1207 node_info_from_raw_nat(&ni, &raw_nat); 1208 if (ni.blk_addr != 0x0) { 1209 f2fs_set_bit(nid + i, 1210 fsck->nat_area_bitmap); 1211 fsck->chk.valid_nat_entry_cnt++; 1212 DBG(3, "nid[0x%x] in nat cache\n", 1213 nid + i); 1214 } 1215 } else { 1216 node_info_from_raw_nat(&ni, 1217 &nat_block->entries[i]); 1218 if (ni.blk_addr == 0) 1219 continue; 1220 ASSERT(nid + i != 0x0); 1221 1222 DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n", 1223 nid + i, ni.blk_addr, ni.ino); 1224 f2fs_set_bit(nid + i, fsck->nat_area_bitmap); 1225 fsck->chk.valid_nat_entry_cnt++; 1226 } 1227 } 1228 } 1229 free(nat_block); 1230 1231 DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n", 1232 fsck->chk.valid_nat_entry_cnt, 1233 fsck->chk.valid_nat_entry_cnt); 1234 } 1235 1236 int f2fs_do_mount(struct f2fs_sb_info *sbi) 1237 { 1238 int ret; 1239 1240 sbi->active_logs = NR_CURSEG_TYPE; 1241 ret = validate_super_block(sbi, 0); 1242 if (ret) { 1243 ret = validate_super_block(sbi, 1); 1244 if (ret) 1245 return -1; 1246 } 1247 1248 print_raw_sb_info(sbi); 1249 1250 init_sb_info(sbi); 1251 1252 ret = get_valid_checkpoint(sbi); 1253 if (ret) { 1254 ERR_MSG("Can't find valid checkpoint\n"); 1255 return -1; 1256 } 1257 1258 if (sanity_check_ckpt(sbi)) { 1259 ERR_MSG("Checkpoint is polluted\n"); 1260 return -1; 1261 } 1262 1263 print_ckpt_info(sbi); 1264 1265 if (config.auto_fix) { 1266 u32 flag = le32_to_cpu(sbi->ckpt->ckpt_flags); 1267 1268 if (flag & CP_FSCK_FLAG) 1269 config.fix_on = 1; 1270 else 1271 return 1; 1272 } 1273 1274 config.bug_on = 0; 1275 1276 sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count); 1277 sbi->total_valid_inode_count = 1278 le32_to_cpu(sbi->ckpt->valid_inode_count); 1279 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); 1280 sbi->total_valid_block_count = 1281 le64_to_cpu(sbi->ckpt->valid_block_count); 1282 sbi->last_valid_block_count = sbi->total_valid_block_count; 1283 sbi->alloc_valid_block_count = 0; 1284 1285 if (build_segment_manager(sbi)) { 1286 ERR_MSG("build_segment_manager failed\n"); 1287 return -1; 1288 } 1289 1290 if (build_node_manager(sbi)) { 1291 ERR_MSG("build_segment_manager failed\n"); 1292 return -1; 1293 } 1294 1295 return 0; 1296 } 1297 1298 void f2fs_do_umount(struct f2fs_sb_info *sbi) 1299 { 1300 struct sit_info *sit_i = SIT_I(sbi); 1301 struct f2fs_sm_info *sm_i = SM_I(sbi); 1302 struct f2fs_nm_info *nm_i = NM_I(sbi); 1303 unsigned int i; 1304 1305 /* free nm_info */ 1306 free(nm_i->nat_bitmap); 1307 free(sbi->nm_info); 1308 1309 /* free sit_info */ 1310 for (i = 0; i < TOTAL_SEGS(sbi); i++) { 1311 free(sit_i->sentries[i].cur_valid_map); 1312 free(sit_i->sentries[i].ckpt_valid_map); 1313 } 1314 free(sit_i->sit_bitmap); 1315 free(sm_i->sit_info); 1316 1317 /* free sm_info */ 1318 for (i = 0; i < NR_CURSEG_TYPE; i++) 1319 free(sm_i->curseg_array[i].sum_blk); 1320 1321 free(sm_i->curseg_array); 1322 free(sbi->sm_info); 1323 1324 free(sbi->ckpt); 1325 free(sbi->raw_super); 1326 } 1327