/**
 * mount.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"

void print_inode_info(struct f2fs_inode *inode)
{
    unsigned int i = 0;
    int namelen = le32_to_cpu(inode->i_namelen);

    DISP_u32(inode, i_mode);
    DISP_u32(inode, i_uid);
    DISP_u32(inode, i_gid);
    DISP_u32(inode, i_links);
    DISP_u64(inode, i_size);
    DISP_u64(inode, i_blocks);

    DISP_u64(inode, i_atime);
    DISP_u32(inode, i_atime_nsec);
    DISP_u64(inode, i_ctime);
    DISP_u32(inode, i_ctime_nsec);
    DISP_u64(inode, i_mtime);
    DISP_u32(inode, i_mtime_nsec);

    DISP_u32(inode, i_generation);
    DISP_u32(inode, i_current_depth);
    DISP_u32(inode, i_xattr_nid);
    DISP_u32(inode, i_flags);
    DISP_u32(inode, i_pino);

    if (namelen) {
        DISP_u32(inode, i_namelen);
        inode->i_name[namelen] = '\0';
        DISP_utf(inode, i_name);
    }

    printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
            inode->i_ext.fofs,
            inode->i_ext.blk_addr,
            inode->i_ext.len);

    DISP_u32(inode, i_addr[0]);    /* Pointers to data blocks */
    DISP_u32(inode, i_addr[1]);    /* Pointers to data blocks */
    DISP_u32(inode, i_addr[2]);    /* Pointers to data blocks */
    DISP_u32(inode, i_addr[3]);    /* Pointers to data blocks */

    for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
        if (inode->i_addr[i] != 0x0) {
            printf("i_addr[0x%x] points data block\r\t\t\t\t[0x%4x]\n",
                    i, inode->i_addr[i]);
            break;
        }
    }

    DISP_u32(inode, i_nid[0]);    /* direct */
    DISP_u32(inode, i_nid[1]);    /* direct */
    DISP_u32(inode, i_nid[2]);    /* indirect */
    DISP_u32(inode, i_nid[3]);    /* indirect */
    DISP_u32(inode, i_nid[4]);    /* double indirect */

    printf("\n");
}
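
/*
 * print_node_info() dumps a node block: a full inode dump when the block is
 * an inode (footer.ino == footer.nid), otherwise the first few raw words of
 * the direct/indirect node block.
 */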
void print_node_info(struct f2fs_node *node_block)
{
    nid_t ino = le32_to_cpu(node_block->footer.ino);
    nid_t nid = le32_to_cpu(node_block->footer.nid);

    /* Is this inode? */
    if (ino == nid) {
        DBG(0, "Node ID [0x%x:%u] is inode\n", nid, nid);
        print_inode_info(&node_block->i);
    } else {
        int i;
        u32 *dump_blk = (u32 *)node_block;
        DBG(0, "Node ID [0x%x:%u] is direct node or indirect node.\n", nid, nid);
        for (i = 0; i <= 10; i++)
            MSG(0, "[%d]\t\t\t[0x%8x : %d]\n", i, dump_blk[i], dump_blk[i]);
    }
}

void print_raw_sb_info(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);

    if (!config.dbg_lv)
        return;

    printf("\n");
    printf("+--------------------------------------------------------+\n");
    printf("| Super block                                            |\n");
    printf("+--------------------------------------------------------+\n");

    DISP_u32(sb, magic);
    DISP_u32(sb, major_ver);
    DISP_u32(sb, minor_ver);
    DISP_u32(sb, log_sectorsize);
    DISP_u32(sb, log_sectors_per_block);

    DISP_u32(sb, log_blocksize);
    DISP_u32(sb, log_blocks_per_seg);
    DISP_u32(sb, segs_per_sec);
    DISP_u32(sb, secs_per_zone);
    DISP_u32(sb, checksum_offset);
    DISP_u64(sb, block_count);

    DISP_u32(sb, section_count);
    DISP_u32(sb, segment_count);
    DISP_u32(sb, segment_count_ckpt);
    DISP_u32(sb, segment_count_sit);
    DISP_u32(sb, segment_count_nat);

    DISP_u32(sb, segment_count_ssa);
    DISP_u32(sb, segment_count_main);
    DISP_u32(sb, segment0_blkaddr);

    DISP_u32(sb, cp_blkaddr);
    DISP_u32(sb, sit_blkaddr);
    DISP_u32(sb, nat_blkaddr);
    DISP_u32(sb, ssa_blkaddr);
    DISP_u32(sb, main_blkaddr);

    DISP_u32(sb, root_ino);
    DISP_u32(sb, node_ino);
    DISP_u32(sb, meta_ino);
    DISP_u32(sb, cp_payload);
    printf("\n");
}

void print_ckpt_info(struct f2fs_sb_info *sbi)
{
    struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

    if (!config.dbg_lv)
        return;

    printf("\n");
    printf("+--------------------------------------------------------+\n");
    printf("| Checkpoint                                             |\n");
    printf("+--------------------------------------------------------+\n");

    DISP_u64(cp, checkpoint_ver);
    DISP_u64(cp, user_block_count);
    DISP_u64(cp, valid_block_count);
    DISP_u32(cp, rsvd_segment_count);
    DISP_u32(cp, overprov_segment_count);
    DISP_u32(cp, free_segment_count);

    DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
    DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
    DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
    DISP_u32(cp, cur_node_segno[0]);
    DISP_u32(cp, cur_node_segno[1]);
    DISP_u32(cp, cur_node_segno[2]);

    DISP_u32(cp, cur_node_blkoff[0]);
    DISP_u32(cp, cur_node_blkoff[1]);
    DISP_u32(cp, cur_node_blkoff[2]);

    DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
    DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
    DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
    DISP_u32(cp, cur_data_segno[0]);
    DISP_u32(cp, cur_data_segno[1]);
    DISP_u32(cp, cur_data_segno[2]);

    DISP_u32(cp, cur_data_blkoff[0]);
    DISP_u32(cp, cur_data_blkoff[1]);
    DISP_u32(cp, cur_data_blkoff[2]);

    DISP_u32(cp, ckpt_flags);
    DISP_u32(cp, cp_pack_total_block_count);
    DISP_u32(cp, cp_pack_start_sum);
    DISP_u32(cp, valid_node_count);
    DISP_u32(cp, valid_inode_count);
    DISP_u32(cp, next_free_nid);
    DISP_u32(cp, sit_ver_bitmap_bytesize);
    DISP_u32(cp, nat_ver_bitmap_bytesize);
    DISP_u32(cp, checksum_offset);
    DISP_u64(cp, elapsed_time);

    DISP_u32(cp, sit_nat_version_bitmap[0]);
    printf("\n\n");
}
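
/*
 * sanity_check_raw_super() rejects a superblock whose magic number, block
 * size, or sector geometry does not match what this tool was built with.
 */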
int sanity_check_raw_super(struct f2fs_super_block *raw_super)
{
    unsigned int blocksize;

    if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
        return -1;
    }

    if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
        return -1;
    }

    blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
    if (F2FS_BLKSIZE != blocksize) {
        return -1;
    }

    if (F2FS_LOG_SECTOR_SIZE != le32_to_cpu(raw_super->log_sectorsize)) {
        return -1;
    }

    if (F2FS_LOG_SECTORS_PER_BLOCK != le32_to_cpu(raw_super->log_sectors_per_block)) {
        return -1;
    }

    return 0;
}

int validate_super_block(struct f2fs_sb_info *sbi, int block)
{
    u64 offset = (block + 1) * F2FS_SUPER_OFFSET;

    sbi->raw_super = malloc(sizeof(struct f2fs_super_block));

    if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
        return -1;

    if (!sanity_check_raw_super(sbi->raw_super))
        return 0;

    free(sbi->raw_super);
    MSG(0, "\tCan't find a valid F2FS filesystem in %d superblock\n", block);

    return -EINVAL;
}

int init_sb_info(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_super = sbi->raw_super;

    sbi->log_sectors_per_block =
        le32_to_cpu(raw_super->log_sectors_per_block);
    sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
    sbi->blocksize = 1 << sbi->log_blocksize;
    sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
    sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
    sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
    sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
    sbi->total_sections = le32_to_cpu(raw_super->section_count);
    sbi->total_node_count =
        (le32_to_cpu(raw_super->segment_count_nat) / 2)
        * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
    sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
    sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
    sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
    sbi->cur_victim_sec = NULL_SEGNO;
    return 0;
}

void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr, unsigned long long *version)
{
    void *cp_page_1, *cp_page_2;
    struct f2fs_checkpoint *cp_block;
    unsigned long blk_size = sbi->blocksize;
    unsigned long long cur_version = 0, pre_version = 0;
    unsigned int crc = 0;
    size_t crc_offset;

    /* Read the 1st cp block in this CP pack */
    cp_page_1 = malloc(PAGE_SIZE);
    if (dev_read_block(cp_page_1, cp_addr) < 0)
        return NULL;

    cp_block = (struct f2fs_checkpoint *)cp_page_1;
    crc_offset = le32_to_cpu(cp_block->checksum_offset);
    if (crc_offset >= blk_size)
        goto invalid_cp1;

    crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
    if (f2fs_crc_valid(crc, cp_block, crc_offset))
        goto invalid_cp1;

    pre_version = le64_to_cpu(cp_block->checkpoint_ver);

    /* Read the 2nd cp block in this CP pack */
    cp_page_2 = malloc(PAGE_SIZE);
    cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;

    if (dev_read_block(cp_page_2, cp_addr) < 0)
        goto invalid_cp2;

    cp_block = (struct f2fs_checkpoint *)cp_page_2;
    crc_offset = le32_to_cpu(cp_block->checksum_offset);
    if (crc_offset >= blk_size)
        goto invalid_cp2;

    crc = *(unsigned int *)((unsigned char *)cp_block + crc_offset);
    if (f2fs_crc_valid(crc, cp_block, crc_offset))
        goto invalid_cp2;

    cur_version = le64_to_cpu(cp_block->checkpoint_ver);

    if (cur_version == pre_version) {
        *version = cur_version;
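        /* both copies carry the same version, so this CP pack is consistent */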
        free(cp_page_2);
        return cp_page_1;
    }

invalid_cp2:
    free(cp_page_2);
invalid_cp1:
    free(cp_page_1);
    return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_sb = sbi->raw_super;
    void *cp1, *cp2, *cur_page;
    unsigned long blk_size = sbi->blocksize;
    unsigned long long cp1_version = 0, cp2_version = 0;
    unsigned long long cp_start_blk_no;
    unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);

    sbi->ckpt = malloc(cp_blks * blk_size);
    if (!sbi->ckpt)
        return -ENOMEM;
    /*
     * Finding a valid cp block involves reading both
     * sets (cp pack 1 and cp pack 2)
     */
    cp_start_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
    cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

    /* The second checkpoint pack should start at the next segment */
    cp_start_blk_no += 1 << le32_to_cpu(raw_sb->log_blocks_per_seg);
    cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

    if (cp1 && cp2) {
        if (ver_after(cp2_version, cp1_version))
            cur_page = cp2;
        else
            cur_page = cp1;
    } else if (cp1) {
        cur_page = cp1;
    } else if (cp2) {
        cur_page = cp2;
    } else {
        free(cp1);
        free(cp2);
        goto fail_no_cp;
    }

    memcpy(sbi->ckpt, cur_page, blk_size);

    if (cp_blks > 1) {
        int i;
        unsigned long long cp_blk_no;

        cp_blk_no = le32_to_cpu(raw_sb->cp_blkaddr);
        if (cur_page == cp2)
            cp_blk_no += 1 << le32_to_cpu(raw_sb->log_blocks_per_seg);
        /* copy sit bitmap */
        for (i = 1; i < cp_blks; i++) {
            unsigned char *ckpt = (unsigned char *)sbi->ckpt;
            dev_read_block(cur_page, cp_blk_no + i);
            memcpy(ckpt + i * blk_size, cur_page, blk_size);
        }
    }
    free(cp1);
    free(cp2);
    return 0;

fail_no_cp:
    free(sbi->ckpt);
    return -EINVAL;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
    unsigned int total, fsmeta;
    struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

    total = le32_to_cpu(raw_super->segment_count);
    fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
    fsmeta += le32_to_cpu(raw_super->segment_count_sit);
    fsmeta += le32_to_cpu(raw_super->segment_count_nat);
    fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
    fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

    if (fsmeta >= total)
        return 1;

    return 0;
}
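
/*
 * init_node_manager() caches the NAT layout from the superblock and copies
 * the NAT version bitmap out of the checkpoint area.
 */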
int init_node_manager(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
    struct f2fs_nm_info *nm_i = NM_I(sbi);
    unsigned char *version_bitmap;
    unsigned int nat_segs, nat_blocks;

    nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

    /* segment_count_nat includes pair segments, so divide by 2 */
    nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
    nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
    nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
    nm_i->fcnt = 0;
    nm_i->nat_cnt = 0;
    nm_i->init_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
    nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);

    nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

    nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
    if (!nm_i->nat_bitmap)
        return -ENOMEM;
    version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
    if (!version_bitmap)
        return -EFAULT;

    /* copy version bitmap */
    memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
    return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
    int err;

    sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
    if (!sbi->nm_info)
        return -ENOMEM;

    err = init_node_manager(sbi);
    if (err)
        return err;

    return 0;
}

int build_sit_info(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct sit_info *sit_i;
    unsigned int sit_segs, start;
    char *src_bitmap, *dst_bitmap;
    unsigned int bitmap_size;

    sit_i = malloc(sizeof(struct sit_info));
    if (!sit_i)
        return -ENOMEM;

    SM_I(sbi)->sit_info = sit_i;

    sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);

    for (start = 0; start < TOTAL_SEGS(sbi); start++) {
        sit_i->sentries[start].cur_valid_map
            = calloc(SIT_VBLOCK_MAP_SIZE, 1);
        sit_i->sentries[start].ckpt_valid_map
            = calloc(SIT_VBLOCK_MAP_SIZE, 1);
        if (!sit_i->sentries[start].cur_valid_map
                || !sit_i->sentries[start].ckpt_valid_map)
            return -ENOMEM;
    }

    sit_segs = le32_to_cpu(raw_sb->segment_count_sit) >> 1;
    bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
    src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

    dst_bitmap = malloc(bitmap_size);
    memcpy(dst_bitmap, src_bitmap, bitmap_size);

    sit_i->sit_base_addr = le32_to_cpu(raw_sb->sit_blkaddr);
    sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
    sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
    sit_i->sit_bitmap = dst_bitmap;
    sit_i->bitmap_size = bitmap_size;
    sit_i->dirty_sentries = 0;
    sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
    sit_i->elapsed_time = le64_to_cpu(ckpt->elapsed_time);
    return 0;
}

void reset_curseg(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    curseg->segno = curseg->next_segno;
    curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
    curseg->next_blkoff = 0;
    curseg->next_segno = NULL_SEGNO;
}
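
/*
 * read_compacted_summaries() restores the NAT/SIT journals and the current
 * data segments from the compacted summary area starting at start_sum_block().
 */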
int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct curseg_info *curseg;
    block_t start;
    char *kaddr;
    unsigned int i, j, offset;

    start = start_sum_block(sbi);

    kaddr = (char *)malloc(PAGE_SIZE);
    dev_read_block(kaddr, start++);

    curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
    memcpy(&curseg->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

    curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    memcpy(&curseg->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);

    offset = 2 * SUM_JOURNAL_SIZE;
    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        unsigned short blk_off;
        unsigned int segno;

        curseg = CURSEG_I(sbi, i);
        segno = le32_to_cpu(ckpt->cur_data_segno[i]);
        blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
        curseg->next_segno = segno;
        reset_curseg(sbi, i);
        curseg->alloc_type = ckpt->alloc_type[i];
        curseg->next_blkoff = blk_off;

        if (curseg->alloc_type == SSR)
            blk_off = sbi->blocks_per_seg;

        for (j = 0; j < blk_off; j++) {
            struct f2fs_summary *s;
            s = (struct f2fs_summary *)(kaddr + offset);
            curseg->sum_blk->entries[j] = *s;
            offset += SUMMARY_SIZE;
            if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
                continue;
            memset(kaddr, 0, PAGE_SIZE);
            dev_read_block(kaddr, start++);
            offset = 0;
        }
    }

    free(kaddr);
    return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
        unsigned int segno, struct f2fs_summary_block *sum_blk)
{
    struct f2fs_node *node_blk;
    struct f2fs_summary *sum_entry;
    void *page;
    block_t addr;
    unsigned int i;

    page = malloc(PAGE_SIZE);
    if (!page)
        return -ENOMEM;

    /* scan the node segment */
    addr = START_BLOCK(sbi, segno);
    sum_entry = &sum_blk->entries[0];

    for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
        if (dev_read_block(page, addr))
            goto out;

        node_blk = (struct f2fs_node *)page;
        sum_entry->nid = node_blk->footer.nid;
        /* do not change original value */
#if 0
        sum_entry->version = 0;
        sum_entry->ofs_in_node = 0;
#endif
        addr++;
    }
out:
    free(page);
    return 0;
}

int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct f2fs_summary_block *sum_blk;
    struct curseg_info *curseg;
    unsigned short blk_off;
    unsigned int segno = 0;
    block_t blk_addr = 0;

    if (IS_DATASEG(type)) {
        segno = le32_to_cpu(ckpt->cur_data_segno[type]);
        blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]);

        if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
        else
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
    } else {
        segno = le32_to_cpu(ckpt->cur_node_segno[type - CURSEG_HOT_NODE]);
        blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]);

        if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG))
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE);
        else
            blk_addr = GET_SUM_BLKADDR(sbi, segno);
    }

    sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
    dev_read_block(sum_blk, blk_addr);

    if (IS_NODESEG(type)) {
        if (is_set_ckpt_flags(ckpt, CP_UMOUNT_FLAG)) {
            struct f2fs_summary *sum_entry = &sum_blk->entries[0];
            unsigned int i;
            for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
                /* do not change original value */
#if 0
                sum_entry->version = 0;
                sum_entry->ofs_in_node = 0;
#endif
            }
        } else {
            if (restore_node_summary(sbi, segno, sum_blk)) {
                free(sum_blk);
                return -EINVAL;
            }
        }
    }

    curseg = CURSEG_I(sbi, type);
    memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
    curseg->next_segno = segno;
    reset_curseg(sbi, type);
    curseg->alloc_type = ckpt->alloc_type[type];
    curseg->next_blkoff = blk_off;
    free(sum_blk);

    return 0;
}
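
/*
 * restore_curseg_summaries() loads the summary block of every current segment,
 * using the compacted layout for data logs when CP_COMPACT_SUM_FLAG is set.
 */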
int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
    int type = CURSEG_HOT_DATA;

    if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
        if (read_compacted_summaries(sbi))
            return -EINVAL;
        type = CURSEG_HOT_NODE;
    }

    for (; type <= CURSEG_COLD_NODE; type++) {
        if (read_normal_summaries(sbi, type))
            return -EINVAL;
    }
    return 0;
}

int build_curseg(struct f2fs_sb_info *sbi)
{
    struct curseg_info *array;
    int i;

    array = malloc(sizeof(*array) * NR_CURSEG_TYPE);

    SM_I(sbi)->curseg_array = array;

    for (i = 0; i < NR_CURSEG_TYPE; i++) {
        array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
        if (!array[i].sum_blk)
            return -ENOMEM;
        array[i].segno = NULL_SEGNO;
        array[i].next_blkoff = 0;
    }
    return restore_curseg_summaries(sbi);
}

inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
    unsigned int end_segno = SM_I(sbi)->segment_count - 1;
    ASSERT(segno <= end_segno);
}

struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
    struct sit_info *sit_i = SIT_I(sbi);
    unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
    block_t blk_addr = sit_i->sit_base_addr + offset;
    struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1);

    check_seg_range(sbi, segno);

    /* calculate sit block address */
    if (f2fs_test_bit(offset, sit_i->sit_bitmap))
        blk_addr += sit_i->sit_blocks;

    dev_read_block(sit_blk, blk_addr);

    return sit_blk;
}

void check_block_count(struct f2fs_sb_info *sbi,
        unsigned int segno, struct f2fs_sit_entry *raw_sit)
{
    struct f2fs_sm_info *sm_info = SM_I(sbi);
    unsigned int end_segno = sm_info->segment_count - 1;
    int valid_blocks = 0;
    unsigned int i;

    /* check segment usage */
    ASSERT(GET_SIT_VBLOCKS(raw_sit) <= sbi->blocks_per_seg);

    /* check boundary of a given segment number */
    ASSERT(segno <= end_segno);

    /* check bitmap with valid block count */
    for (i = 0; i < sbi->blocks_per_seg; i++)
        if (f2fs_test_bit(i, (char *)raw_sit->valid_map))
            valid_blocks++;
    ASSERT(GET_SIT_VBLOCKS(raw_sit) == valid_blocks);
}

void seg_info_from_raw_sit(struct seg_entry *se,
        struct f2fs_sit_entry *raw_sit)
{
    se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
    se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
    memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
    memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
    se->type = GET_SIT_TYPE(raw_sit);
    se->mtime = le64_to_cpu(raw_sit->mtime);
}

struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
        unsigned int segno)
{
    struct sit_info *sit_i = SIT_I(sbi);
    return &sit_i->sentries[segno];
}
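
/*
 * get_sum_block() fills @sum_blk for @segno, either by copying an in-memory
 * current segment summary or by reading the on-disk SSA block.
 */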
int get_sum_block(struct f2fs_sb_info *sbi, unsigned int segno, struct f2fs_summary_block *sum_blk)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct curseg_info *curseg;
    int type, ret;
    u64 ssa_blk;

    ssa_blk = GET_SUM_BLKADDR(sbi, segno);
    for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
        if (segno == ckpt->cur_node_segno[type]) {
            curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
            memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
            return SEG_TYPE_CUR_NODE; /* current node seg was not stored */
        }
    }

    for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
        if (segno == ckpt->cur_data_segno[type]) {
            curseg = CURSEG_I(sbi, type);
            memcpy(sum_blk, curseg->sum_blk, BLOCK_SZ);
            ASSERT(!IS_SUM_NODE_SEG(sum_blk->footer));
            DBG(2, "segno [0x%x] is current data seg[0x%x]\n", segno, type);
            return SEG_TYPE_CUR_DATA; /* current data seg was not stored */
        }
    }

    ret = dev_read_block(sum_blk, ssa_blk);
    ASSERT(ret >= 0);

    if (IS_SUM_NODE_SEG(sum_blk->footer))
        return SEG_TYPE_NODE;
    else
        return SEG_TYPE_DATA;
}

int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr, struct f2fs_summary *sum_entry)
{
    struct f2fs_summary_block *sum_blk;
    u32 segno, offset;
    int ret;

    segno = GET_SEGNO(sbi, blk_addr);
    offset = OFFSET_IN_SEG(sbi, blk_addr);

    sum_blk = calloc(BLOCK_SZ, 1);

    ret = get_sum_block(sbi, segno, sum_blk);

    memcpy(sum_entry, &(sum_blk->entries[offset]), sizeof(struct f2fs_summary));

    free(sum_blk);
    return ret;
}

int get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nat_entry *raw_nat)
{
    struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
    struct f2fs_nm_info *nm_i = NM_I(sbi);
    struct f2fs_nat_block *nat_block;
    pgoff_t block_off;
    pgoff_t block_addr;
    int seg_off, entry_off;
    int ret;

    if ((nid / NAT_ENTRY_PER_BLOCK) > fsck->nr_nat_entries) {
        DBG(0, "nid is over max nid\n");
        return -EINVAL;
    }

    if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
        return 0;

    nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

    block_off = nid / NAT_ENTRY_PER_BLOCK;
    entry_off = nid % NAT_ENTRY_PER_BLOCK;

    seg_off = block_off >> sbi->log_blocks_per_seg;
    block_addr = (pgoff_t)(nm_i->nat_blkaddr +
            (seg_off << sbi->log_blocks_per_seg << 1) +
            (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

    if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
        block_addr += sbi->blocks_per_seg;

    ret = dev_read_block(nat_block, block_addr);
    ASSERT(ret >= 0);

    memcpy(raw_nat, &nat_block->entries[entry_off], sizeof(struct f2fs_nat_entry));
    free(nat_block);

    return 0;
}

int get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
    struct f2fs_nat_entry raw_nat;
    int ret;

    ret = get_nat_entry(sbi, nid, &raw_nat);
    ni->nid = nid;
    node_info_from_raw_nat(ni, &raw_nat);
    return ret;
}

void build_sit_entries(struct f2fs_sb_info *sbi)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    struct f2fs_summary_block *sum = curseg->sum_blk;
    unsigned int segno;

    for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
        struct seg_entry *se = &sit_i->sentries[segno];
        struct f2fs_sit_block *sit_blk;
        struct f2fs_sit_entry sit;
        int i;

        for (i = 0; i < sits_in_cursum(sum); i++) {
            if (le32_to_cpu(segno_in_journal(sum, i)) == segno) {
                sit = sit_in_journal(sum, i);
                goto got_it;
            }
        }
        sit_blk = get_current_sit_page(sbi, segno);
        sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
        free(sit_blk);
got_it:
        check_block_count(sbi, segno, &sit);
        seg_info_from_raw_sit(se, &sit);
    }
}
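
/*
 * build_segment_manager() allocates sm_info, fills it from the superblock and
 * checkpoint, then builds the SIT info, current segments, and SIT entries.
 */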
int build_segment_manager(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct f2fs_sm_info *sm_info;

    sm_info = malloc(sizeof(struct f2fs_sm_info));
    if (!sm_info)
        return -ENOMEM;

    /* init sm info */
    sbi->sm_info = sm_info;
    sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
    sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
    sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
    sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
    sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
    sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
    sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);

    build_sit_info(sbi);

    build_curseg(sbi);

    build_sit_entries(sbi);

    return 0;
}

int build_sit_area_bitmap(struct f2fs_sb_info *sbi)
{
    struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
    struct f2fs_sm_info *sm_i = SM_I(sbi);
    unsigned int segno = 0;
    int j = 0;
    char *ptr = NULL;

    u32 sum_vblocks = 0;
    u32 free_segs = 0;
    u32 vblocks = 0;

    struct seg_entry *se;

    fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
    fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
    ptr = fsck->sit_area_bitmap;

    ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);

    for (segno = 0; segno < sm_i->main_segments; segno++) {
        se = get_seg_entry(sbi, segno);

        memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
        ptr += SIT_VBLOCK_MAP_SIZE;

        vblocks = 0;
        for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++) {
            vblocks += get_bits_in_byte(se->cur_valid_map[j]);
        }
        ASSERT(vblocks == se->valid_blocks);

        if (se->valid_blocks == 0x0) {
            if (sbi->ckpt->cur_node_segno[0] == segno ||
                    sbi->ckpt->cur_data_segno[0] == segno ||
                    sbi->ckpt->cur_node_segno[1] == segno ||
                    sbi->ckpt->cur_data_segno[1] == segno ||
                    sbi->ckpt->cur_node_segno[2] == segno ||
                    sbi->ckpt->cur_data_segno[2] == segno) {
                continue;
            } else {
                free_segs++;
            }
        } else {
            ASSERT(se->valid_blocks <= 512);
            sum_vblocks += se->valid_blocks;
        }
    }

    fsck->chk.sit_valid_blocks = sum_vblocks;
    fsck->chk.sit_free_segs = free_segs;

    DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
            sum_vblocks, sum_vblocks, free_segs, free_segs);
    return 0;
}

int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid, struct f2fs_nat_entry *raw_nat)
{
    struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
    struct f2fs_summary_block *sum = curseg->sum_blk;
    int i = 0;

    for (i = 0; i < nats_in_cursum(sum); i++) {
        if (le32_to_cpu(nid_in_journal(sum, i)) == nid) {
            memcpy(raw_nat, &nat_in_journal(sum, i), sizeof(struct f2fs_nat_entry));
            DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
            return i;
        }
    }
    return -1;
}
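
/*
 * build_nat_area_bitmap() scans every NAT block (and the NAT journal in the
 * hot data summary) and marks each nid that has a non-zero block address in
 * fsck->nat_area_bitmap.
 */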
void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
{
    struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
    struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
    struct f2fs_nm_info *nm_i = NM_I(sbi);
    struct f2fs_nat_block *nat_block;
    u32 nid, nr_nat_blks;

    pgoff_t block_off;
    pgoff_t block_addr;
    int seg_off;
    int ret;
    unsigned int i;

    nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);

    /* Alloc & build nat entry bitmap */
    nr_nat_blks = (le32_to_cpu(raw_sb->segment_count_nat) / 2) << sbi->log_blocks_per_seg;

    fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
    fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
    fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
    ASSERT(fsck->nat_area_bitmap != NULL);

    for (block_off = 0; block_off < nr_nat_blks; block_off++) {
        seg_off = block_off >> sbi->log_blocks_per_seg;
        block_addr = (pgoff_t)(nm_i->nat_blkaddr +
                (seg_off << sbi->log_blocks_per_seg << 1) +
                (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));

        if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
            block_addr += sbi->blocks_per_seg;

        ret = dev_read_block(nat_block, block_addr);
        ASSERT(ret >= 0);

        nid = block_off * NAT_ENTRY_PER_BLOCK;
        for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
            struct f2fs_nat_entry raw_nat;
            struct node_info ni;
            ni.nid = nid + i;

            if ((nid + i) == F2FS_NODE_INO(sbi) || (nid + i) == F2FS_META_INO(sbi)) {
                ASSERT(nat_block->entries[i].block_addr != 0x0);
                continue;
            }

            if (lookup_nat_in_journal(sbi, nid + i, &raw_nat) >= 0) {
                node_info_from_raw_nat(&ni, &raw_nat);
                if (ni.blk_addr != 0x0) {
                    f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
                    fsck->chk.valid_nat_entry_cnt++;
                    DBG(3, "nid[0x%x] in nat cache\n", nid + i);
                }
            } else {
                node_info_from_raw_nat(&ni, &nat_block->entries[i]);
                if (ni.blk_addr != 0) {
                    ASSERT(nid + i != 0x0);

                    DBG(3, "nid[0x%8x] in nat entry [0x%16x] [0x%8x]\n",
                            nid + i, ni.blk_addr, ni.ino);

                    f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
                    fsck->chk.valid_nat_entry_cnt++;
                }
            }
        }
    }
    free(nat_block);

    DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
            fsck->chk.valid_nat_entry_cnt, fsck->chk.valid_nat_entry_cnt);
}

int f2fs_do_mount(struct f2fs_sb_info *sbi)
{
    int ret;

    sbi->active_logs = NR_CURSEG_TYPE;
    ret = validate_super_block(sbi, 0);
    if (ret) {
        ret = validate_super_block(sbi, 1);
        if (ret)
            return -1;
    }

    print_raw_sb_info(sbi);

    init_sb_info(sbi);

    ret = get_valid_checkpoint(sbi);
    if (ret) {
        ERR_MSG("Can't find valid checkpoint\n");
        return -1;
    }

    if (sanity_check_ckpt(sbi)) {
        ERR_MSG("Checkpoint is polluted\n");
        return -1;
    }

    print_ckpt_info(sbi);

    sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count);
    sbi->total_valid_inode_count = le32_to_cpu(sbi->ckpt->valid_inode_count);
    sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
    sbi->total_valid_block_count = le64_to_cpu(sbi->ckpt->valid_block_count);
    sbi->last_valid_block_count = sbi->total_valid_block_count;
    sbi->alloc_valid_block_count = 0;

    if (build_segment_manager(sbi)) {
        ERR_MSG("build_segment_manager failed\n");
        return -1;
    }

    if (build_node_manager(sbi)) {
        ERR_MSG("build_node_manager failed\n");
        return -1;
    }

    return ret;
}

void f2fs_do_umount(struct f2fs_sb_info *sbi)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct f2fs_sm_info *sm_i = SM_I(sbi);
    struct f2fs_nm_info *nm_i = NM_I(sbi);
    unsigned int i;

    /* free nm_info */
    free(nm_i->nat_bitmap);
    free(sbi->nm_info);

    /* free sit_info */
    for (i = 0; i < TOTAL_SEGS(sbi); i++) {
        free(sit_i->sentries[i].cur_valid_map);
        free(sit_i->sentries[i].ckpt_valid_map);
    }
    free(sit_i->sit_bitmap);
    free(sm_i->sit_info);

    /* free sm_info */
    for (i = 0; i < NR_CURSEG_TYPE; i++)
        free(sm_i->curseg_array[i].sum_blk);

    free(sm_i->curseg_array);
    free(sbi->sm_info);
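
    /* free checkpoint and raw super block */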
    free(sbi->ckpt);
    free(sbi->raw_super);
}