      1 /**
      2  * mount.c
      3  *
      4  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
      5  *             http://www.samsung.com/
      6  *
      7  * This program is free software; you can redistribute it and/or modify
      8  * it under the terms of the GNU General Public License version 2 as
      9  * published by the Free Software Foundation.
     10  */
     11 #include "fsck.h"
     12 #include <locale.h>
     13 
     14 u32 get_free_segments(struct f2fs_sb_info *sbi)
     15 {
     16 	u32 i, free_segs = 0;
     17 
     18 	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
     19 		struct seg_entry *se = get_seg_entry(sbi, i);
     20 
     21 		if (se->valid_blocks == 0x0 &&
     22 				!IS_CUR_SEGNO(sbi, i, NO_CHECK_TYPE))
     23 			free_segs++;
     24 	}
     25 	return free_segs;
     26 }
     27 
     28 void update_free_segments(struct f2fs_sb_info *sbi)
     29 {
     30 	char *progress = "-*|*-";
     31 	static int i = 0;
     32 
     33 	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
     34 	fflush(stdout);
     35 	i++;
     36 }
     37 
     38 void print_inode_info(struct f2fs_inode *inode, int name)
     39 {
     40 	unsigned char en[F2FS_NAME_LEN + 1];
     41 	unsigned int i = 0;
     42 	int namelen = le32_to_cpu(inode->i_namelen);
     43 	int enc_name = file_enc_name(inode);
     44 
     45 	namelen = convert_encrypted_name(inode->i_name, namelen, en, enc_name);
     46 	en[namelen] = '\0';
     47 	if (name && namelen) {
     48 		inode->i_name[namelen] = '\0';
     49 		MSG(0, " - File name         : %s%s\n", en,
     50 				enc_name ? " <encrypted>" : "");
     51 		setlocale(LC_ALL, "");
     52 		MSG(0, " - File size         : %'llu (bytes)\n",
     53 				le64_to_cpu(inode->i_size));
     54 		return;
     55 	}
     56 
     57 	DISP_u32(inode, i_mode);
     58 	DISP_u32(inode, i_advise);
     59 	DISP_u32(inode, i_uid);
     60 	DISP_u32(inode, i_gid);
     61 	DISP_u32(inode, i_links);
     62 	DISP_u64(inode, i_size);
     63 	DISP_u64(inode, i_blocks);
     64 
     65 	DISP_u64(inode, i_atime);
     66 	DISP_u32(inode, i_atime_nsec);
     67 	DISP_u64(inode, i_ctime);
     68 	DISP_u32(inode, i_ctime_nsec);
     69 	DISP_u64(inode, i_mtime);
     70 	DISP_u32(inode, i_mtime_nsec);
     71 
     72 	DISP_u32(inode, i_generation);
     73 	DISP_u32(inode, i_current_depth);
     74 	DISP_u32(inode, i_xattr_nid);
     75 	DISP_u32(inode, i_flags);
     76 	DISP_u32(inode, i_inline);
     77 	DISP_u32(inode, i_pino);
     78 	DISP_u32(inode, i_dir_level);
     79 
     80 	if (namelen) {
     81 		DISP_u32(inode, i_namelen);
     82 		printf("%-30s\t\t[%s]\n", "i_name", en);
     83 	}
     84 
     85 	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
     86 			le32_to_cpu(inode->i_ext.fofs),
     87 			le32_to_cpu(inode->i_ext.blk_addr),
     88 			le32_to_cpu(inode->i_ext.len));
     89 
     90 	DISP_u32(inode, i_addr[0]);	/* Pointers to data blocks */
     91 	DISP_u32(inode, i_addr[1]);	/* Pointers to data blocks */
     92 	DISP_u32(inode, i_addr[2]);	/* Pointers to data blocks */
     93 	DISP_u32(inode, i_addr[3]);	/* Pointers to data blocks */
     94 
     95 	for (i = 4; i < ADDRS_PER_INODE(inode); i++) {
     96 		if (inode->i_addr[i] != 0x0) {
     97 			printf("i_addr[0x%x] points to data block\r\t\t[0x%4x]\n",
     98 					i, le32_to_cpu(inode->i_addr[i]));
     99 			break;
    100 		}
    101 	}
    102 
    103 	DISP_u32(inode, i_nid[0]);	/* direct */
    104 	DISP_u32(inode, i_nid[1]);	/* direct */
    105 	DISP_u32(inode, i_nid[2]);	/* indirect */
    106 	DISP_u32(inode, i_nid[3]);	/* indirect */
    107 	DISP_u32(inode, i_nid[4]);	/* double indirect */
    108 
    109 	printf("\n");
    110 }
    111 
    112 void print_node_info(struct f2fs_node *node_block, int verbose)
    113 {
    114 	nid_t ino = le32_to_cpu(node_block->footer.ino);
    115 	nid_t nid = le32_to_cpu(node_block->footer.nid);
    116 	/* Is this inode? */
    117 	if (ino == nid) {
    118 		DBG(verbose, "Node ID [0x%x:%u] is an inode\n", nid, nid);
    119 		print_inode_info(&node_block->i, verbose);
    120 	} else {
    121 		int i;
    122 		u32 *dump_blk = (u32 *)node_block;
    123 		DBG(verbose,
    124 			"Node ID [0x%x:%u] is a direct or indirect node.\n",
    125 								nid, nid);
    126 		for (i = 0; i <= 10; i++)
    127 			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
    128 						i, dump_blk[i], dump_blk[i]);
    129 	}
    130 }
    131 
    132 static void DISP_label(u_int16_t *name)
    133 {
    134 	char buffer[MAX_VOLUME_NAME];
    135 
    136 	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
    137 	printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
    138 }
    139 
    140 void print_raw_sb_info(struct f2fs_super_block *sb)
    141 {
    142 	if (!c.dbg_lv)
    143 		return;
    144 
    145 	printf("\n");
    146 	printf("+--------------------------------------------------------+\n");
    147 	printf("| Super block                                            |\n");
    148 	printf("+--------------------------------------------------------+\n");
    149 
    150 	DISP_u32(sb, magic);
    151 	DISP_u32(sb, major_ver);
    152 
    153 	DISP_label(sb->volume_name);
    154 
    155 	DISP_u32(sb, minor_ver);
    156 	DISP_u32(sb, log_sectorsize);
    157 	DISP_u32(sb, log_sectors_per_block);
    158 
    159 	DISP_u32(sb, log_blocksize);
    160 	DISP_u32(sb, log_blocks_per_seg);
    161 	DISP_u32(sb, segs_per_sec);
    162 	DISP_u32(sb, secs_per_zone);
    163 	DISP_u32(sb, checksum_offset);
    164 	DISP_u64(sb, block_count);
    165 
    166 	DISP_u32(sb, section_count);
    167 	DISP_u32(sb, segment_count);
    168 	DISP_u32(sb, segment_count_ckpt);
    169 	DISP_u32(sb, segment_count_sit);
    170 	DISP_u32(sb, segment_count_nat);
    171 
    172 	DISP_u32(sb, segment_count_ssa);
    173 	DISP_u32(sb, segment_count_main);
    174 	DISP_u32(sb, segment0_blkaddr);
    175 
    176 	DISP_u32(sb, cp_blkaddr);
    177 	DISP_u32(sb, sit_blkaddr);
    178 	DISP_u32(sb, nat_blkaddr);
    179 	DISP_u32(sb, ssa_blkaddr);
    180 	DISP_u32(sb, main_blkaddr);
    181 
    182 	DISP_u32(sb, root_ino);
    183 	DISP_u32(sb, node_ino);
    184 	DISP_u32(sb, meta_ino);
    185 	DISP_u32(sb, cp_payload);
    186 	DISP("%s", sb, version);
    187 	printf("\n");
    188 }
    189 
    190 void print_ckpt_info(struct f2fs_sb_info *sbi)
    191 {
    192 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    193 
    194 	if (!c.dbg_lv)
    195 		return;
    196 
    197 	printf("\n");
    198 	printf("+--------------------------------------------------------+\n");
    199 	printf("| Checkpoint                                             |\n");
    200 	printf("+--------------------------------------------------------+\n");
    201 
    202 	DISP_u64(cp, checkpoint_ver);
    203 	DISP_u64(cp, user_block_count);
    204 	DISP_u64(cp, valid_block_count);
    205 	DISP_u32(cp, rsvd_segment_count);
    206 	DISP_u32(cp, overprov_segment_count);
    207 	DISP_u32(cp, free_segment_count);
    208 
    209 	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
    210 	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
    211 	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
    212 	DISP_u32(cp, cur_node_segno[0]);
    213 	DISP_u32(cp, cur_node_segno[1]);
    214 	DISP_u32(cp, cur_node_segno[2]);
    215 
    216 	DISP_u32(cp, cur_node_blkoff[0]);
    217 	DISP_u32(cp, cur_node_blkoff[1]);
    218 	DISP_u32(cp, cur_node_blkoff[2]);
    219 
    220 
    221 	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
    222 	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
    223 	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
    224 	DISP_u32(cp, cur_data_segno[0]);
    225 	DISP_u32(cp, cur_data_segno[1]);
    226 	DISP_u32(cp, cur_data_segno[2]);
    227 
    228 	DISP_u32(cp, cur_data_blkoff[0]);
    229 	DISP_u32(cp, cur_data_blkoff[1]);
    230 	DISP_u32(cp, cur_data_blkoff[2]);
    231 
    232 	DISP_u32(cp, ckpt_flags);
    233 	DISP_u32(cp, cp_pack_total_block_count);
    234 	DISP_u32(cp, cp_pack_start_sum);
    235 	DISP_u32(cp, valid_node_count);
    236 	DISP_u32(cp, valid_inode_count);
    237 	DISP_u32(cp, next_free_nid);
    238 	DISP_u32(cp, sit_ver_bitmap_bytesize);
    239 	DISP_u32(cp, nat_ver_bitmap_bytesize);
    240 	DISP_u32(cp, checksum_offset);
    241 	DISP_u64(cp, elapsed_time);
    242 
    243 	DISP_u32(cp, sit_nat_version_bitmap[0]);
    244 	printf("\n\n");
    245 }
    246 
    247 void print_cp_state(u32 flag)
    248 {
    249 	MSG(0, "Info: checkpoint state = %x : ", flag);
    250 	if (flag & CP_FSCK_FLAG)
    251 		MSG(0, "%s", " fsck");
    252 	if (flag & CP_ERROR_FLAG)
    253 		MSG(0, "%s", " error");
    254 	if (flag & CP_COMPACT_SUM_FLAG)
    255 		MSG(0, "%s", " compacted_summary");
    256 	if (flag & CP_ORPHAN_PRESENT_FLAG)
    257 		MSG(0, "%s", " orphan_inodes");
    258 	if (flag & CP_FASTBOOT_FLAG)
    259 		MSG(0, "%s", " fastboot");
    260 	if (flag & CP_NAT_BITS_FLAG)
    261 		MSG(0, "%s", " nat_bits");
    262 	if (flag & CP_TRIMMED_FLAG)
    263 		MSG(0, "%s", " trimmed");
    264 	if (flag & CP_UMOUNT_FLAG)
    265 		MSG(0, "%s", " unmount");
    266 	else
    267 		MSG(0, "%s", " sudden-power-off");
    268 	MSG(0, "\n");
    269 }
    270 
    271 void print_sb_state(struct f2fs_super_block *sb)
    272 {
    273 	__le32 f = sb->feature;
    274 	int i;
    275 
    276 	MSG(0, "Info: superblock features = %x : ", f);
    277 	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
    278 		MSG(0, "%s", " encrypt");
    279 	}
    280 	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
    281 		MSG(0, "%s", " zoned block device");
    282 	}
    283 	MSG(0, "\n");
    284 	MSG(0, "Info: superblock encrypt level = %d, salt = ",
    285 					sb->encryption_level);
    286 	for (i = 0; i < 16; i++)
    287 		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
    288 	MSG(0, "\n");
    289 }
    290 
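/*
 * Layout check (summary of the tests below): the metadata areas recorded in
 * the superblock must be contiguous and in this fixed order,
 *
 *   segment0 = CP | SIT | NAT | SSA | MAIN ... last segment
 *
 * where each area spans (segment_count_X << log_blocks_per_seg) blocks.
 * If MAIN ends before the device's last segment, segment_count is trimmed to
 * match and the superblock is written back at @offset.
 */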
    291 static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
    292 							u64 offset)
    293 {
    294 	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
    295 	u32 cp_blkaddr = get_sb(cp_blkaddr);
    296 	u32 sit_blkaddr = get_sb(sit_blkaddr);
    297 	u32 nat_blkaddr = get_sb(nat_blkaddr);
    298 	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
    299 	u32 main_blkaddr = get_sb(main_blkaddr);
    300 	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
    301 	u32 segment_count_sit = get_sb(segment_count_sit);
    302 	u32 segment_count_nat = get_sb(segment_count_nat);
    303 	u32 segment_count_ssa = get_sb(segment_count_ssa);
    304 	u32 segment_count_main = get_sb(segment_count_main);
    305 	u32 segment_count = get_sb(segment_count);
    306 	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
    307 	u64 main_end_blkaddr = main_blkaddr +
    308 				(segment_count_main << log_blocks_per_seg);
    309 	u64 seg_end_blkaddr = segment0_blkaddr +
    310 				(segment_count << log_blocks_per_seg);
    311 
    312 	if (segment0_blkaddr != cp_blkaddr) {
    313 		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
    314 				segment0_blkaddr, cp_blkaddr);
    315 		return -1;
    316 	}
    317 
    318 	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
    319 							sit_blkaddr) {
    320 		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
    321 			cp_blkaddr, sit_blkaddr,
    322 			segment_count_ckpt << log_blocks_per_seg);
    323 		return -1;
    324 	}
    325 
    326 	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
    327 							nat_blkaddr) {
    328 		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
    329 			sit_blkaddr, nat_blkaddr,
    330 			segment_count_sit << log_blocks_per_seg);
    331 		return -1;
    332 	}
    333 
    334 	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
    335 							ssa_blkaddr) {
    336 		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
    337 			nat_blkaddr, ssa_blkaddr,
    338 			segment_count_nat << log_blocks_per_seg);
    339 		return -1;
    340 	}
    341 
    342 	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
    343 							main_blkaddr) {
    344 		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
    345 			ssa_blkaddr, main_blkaddr,
    346 			segment_count_ssa << log_blocks_per_seg);
    347 		return -1;
    348 	}
    349 
    350 	if (main_end_blkaddr > seg_end_blkaddr) {
    351 		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
    352 			main_blkaddr,
    353 			segment0_blkaddr +
    354 				(segment_count << log_blocks_per_seg),
    355 			segment_count_main << log_blocks_per_seg);
    356 		return -1;
    357 	} else if (main_end_blkaddr < seg_end_blkaddr) {
    358 		int err;
    359 
    360 		set_sb(segment_count, (main_end_blkaddr -
    361 				segment0_blkaddr) >> log_blocks_per_seg);
    362 
    363 		err = dev_write(sb, offset, sizeof(struct f2fs_super_block));
    364 		MSG(0, "Info: Fix alignment: %s, start(%u) end(%u) block(%u)\n",
    365 			err ? "failed": "done",
    366 			main_blkaddr,
    367 			segment0_blkaddr +
    368 				(segment_count << log_blocks_per_seg),
    369 			segment_count_main << log_blocks_per_seg);
    370 	}
    371 	return 0;
    372 }
    373 
    374 int sanity_check_raw_super(struct f2fs_super_block *sb, u64 offset)
    375 {
    376 	unsigned int blocksize;
    377 
    378 	if (F2FS_SUPER_MAGIC != get_sb(magic))
    379 		return -1;
    380 
    381 	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE)
    382 		return -1;
    383 
    384 	blocksize = 1 << get_sb(log_blocksize);
    385 	if (F2FS_BLKSIZE != blocksize)
    386 		return -1;
    387 
    388 	/* check log blocks per segment */
    389 	if (get_sb(log_blocks_per_seg) != 9)
    390 		return -1;
    391 
    392 	/* Currently, only 512/1024/2048/4096-byte sector sizes are supported */
    393 	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
    394 			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE)
    395 		return -1;
    396 
    397 	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
    398 						F2FS_MAX_LOG_SECTOR_SIZE)
    399 		return -1;
    400 
    401 	/* check reserved ino info */
    402 	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
    403 					get_sb(root_ino) != 3)
    404 		return -1;
    405 
    406 	/* Check zoned block device feature */
    407 	if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
    408 			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
    409 		MSG(0, "\tMissing zoned block device feature\n");
    410 		return -1;
    411 	}
    412 
    413 	if (get_sb(segment_count) > F2FS_MAX_SEGMENT)
    414 		return -1;
    415 
    416 	if (sanity_check_area_boundary(sb, offset))
    417 		return -1;
    418 	return 0;
    419 }
    420 
    421 int validate_super_block(struct f2fs_sb_info *sbi, int block)
    422 {
    423 	u64 offset;
    424 
    425 	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
    426 
    427 	if (block == 0)
    428 		offset = F2FS_SUPER_OFFSET;
    429 	else
    430 		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;
    431 
    432 	if (dev_read(sbi->raw_super, offset, sizeof(struct f2fs_super_block)))
    433 		return -1;
    434 
    435 	if (!sanity_check_raw_super(sbi->raw_super, offset)) {
    436 		/* get kernel version */
    437 		if (c.kd >= 0) {
    438 			dev_read_version(c.version, 0, VERSION_LEN);
    439 			get_kernel_version(c.version);
    440 		} else {
    441 			memset(c.version, 0, VERSION_LEN);
    442 		}
    443 
    444 		/* build sb version */
    445 		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
    446 		get_kernel_version(c.sb_version);
    447 		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
    448 		get_kernel_version(c.init_version);
    449 
    450 		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
    451 		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
    452 					c.sb_version, c.version);
    453 		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
    454 			int ret;
    455 
    456 			memcpy(sbi->raw_super->version,
    457 						c.version, VERSION_LEN);
    458 			ret = dev_write(sbi->raw_super, offset,
    459 					sizeof(struct f2fs_super_block));
    460 			ASSERT(ret >= 0);
    461 
    462 			c.auto_fix = 0;
    463 			c.fix_on = 1;
    464 		}
    465 		print_sb_state(sbi->raw_super);
    466 		return 0;
    467 	}
    468 
    469 	free(sbi->raw_super);
    470 	sbi->raw_super = NULL;
    471 	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);
    472 
    473 	return -EINVAL;
    474 }
    475 
    476 int init_sb_info(struct f2fs_sb_info *sbi)
    477 {
    478 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    479 	u64 total_sectors;
    480 	int i;
    481 
    482 	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
    483 	sbi->log_blocksize = get_sb(log_blocksize);
    484 	sbi->blocksize = 1 << sbi->log_blocksize;
    485 	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
    486 	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
    487 	sbi->segs_per_sec = get_sb(segs_per_sec);
    488 	sbi->secs_per_zone = get_sb(secs_per_zone);
    489 	sbi->total_sections = get_sb(section_count);
    490 	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
    491 				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
    492 	sbi->root_ino_num = get_sb(root_ino);
    493 	sbi->node_ino_num = get_sb(node_ino);
    494 	sbi->meta_ino_num = get_sb(meta_ino);
    495 	sbi->cur_victim_sec = NULL_SEGNO;
    496 
    497 	for (i = 0; i < MAX_DEVICES; i++) {
    498 		if (!sb->devs[i].path[0])
    499 			break;
    500 
    501 		if (i) {
    502 			c.devices[i].path = strdup((char *)sb->devs[i].path);
    503 			if (get_device_info(i))
    504 				ASSERT(0);
    505 		} else {
    506 			ASSERT(!strcmp((char *)sb->devs[i].path,
    507 						(char *)c.devices[i].path));
    508 		}
    509 
    510 		c.devices[i].total_segments =
    511 			le32_to_cpu(sb->devs[i].total_segments);
    512 		if (i)
    513 			c.devices[i].start_blkaddr =
    514 				c.devices[i - 1].end_blkaddr + 1;
    515 		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
    516 			c.devices[i].total_segments *
    517 			c.blks_per_seg - 1;
    518 		if (i == 0)
    519 			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);
    520 
    521 		c.ndevs = i + 1;
    522 		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
    523 				i, c.devices[i].path,
    524 				c.devices[i].start_blkaddr,
    525 				c.devices[i].end_blkaddr);
    526 	}
    527 
    528 	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
    529 	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
    530 				total_sectors, total_sectors >>
    531 						(20 - get_sb(log_sectorsize)));
    532 	return 0;
    533 }
    534 
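/*
 * Validate one checkpoint pack starting at @cp_addr: read the first block,
 * verify the CRC stored at checksum_offset, then read the last block of the
 * pack (cp_pack_total_block_count - 1 blocks later) and verify it the same
 * way.  The pack is valid only if both blocks carry the same checkpoint_ver;
 * on success the first block is returned and *version is set, otherwise NULL.
 */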
    535 void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
    536 				unsigned long long *version)
    537 {
    538 	void *cp_page_1, *cp_page_2;
    539 	struct f2fs_checkpoint *cp;
    540 	unsigned long blk_size = sbi->blocksize;
    541 	unsigned long long cur_version = 0, pre_version = 0;
    542 	unsigned int crc = 0;
    543 	size_t crc_offset;
    544 
    545 	/* Read the 1st cp block in this CP pack */
    546 	cp_page_1 = malloc(PAGE_SIZE);
    547 	if (dev_read_block(cp_page_1, cp_addr) < 0)
    548 		goto invalid_cp1;
    549 
    550 	cp = (struct f2fs_checkpoint *)cp_page_1;
    551 	crc_offset = get_cp(checksum_offset);
    552 	if (crc_offset > (blk_size - sizeof(__le32)))
    553 		goto invalid_cp1;
    554 
    555 	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
    556 	if (f2fs_crc_valid(crc, cp, crc_offset))
    557 		goto invalid_cp1;
    558 
    559 	pre_version = get_cp(checkpoint_ver);
    560 
    561 	/* Read the 2nd cp block in this CP pack */
    562 	cp_page_2 = malloc(PAGE_SIZE);
    563 	cp_addr += get_cp(cp_pack_total_block_count) - 1;
    564 
    565 	if (dev_read_block(cp_page_2, cp_addr) < 0)
    566 		goto invalid_cp2;
    567 
    568 	cp = (struct f2fs_checkpoint *)cp_page_2;
    569 	crc_offset = get_cp(checksum_offset);
    570 	if (crc_offset > (blk_size - sizeof(__le32)))
    571 		goto invalid_cp2;
    572 
    573 	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
    574 	if (f2fs_crc_valid(crc, cp, crc_offset))
    575 		goto invalid_cp2;
    576 
    577 	cur_version = get_cp(checkpoint_ver);
    578 
    579 	if (cur_version == pre_version) {
    580 		*version = cur_version;
    581 		free(cp_page_2);
    582 		return cp_page_1;
    583 	}
    584 
    585 invalid_cp2:
    586 	free(cp_page_2);
    587 invalid_cp1:
    588 	free(cp_page_1);
    589 	return NULL;
    590 }
    591 
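/*
 * Two checkpoint packs are kept one segment apart, starting at cp_blkaddr.
 * Validate both and keep the one with the newer version (ver_after), then
 * copy its header block plus cp_payload extra blocks (the oversized SIT
 * bitmap) into sbi->ckpt.
 */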
    592 int get_valid_checkpoint(struct f2fs_sb_info *sbi)
    593 {
    594 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    595 	void *cp1, *cp2, *cur_page;
    596 	unsigned long blk_size = sbi->blocksize;
    597 	unsigned long long cp1_version = 0, cp2_version = 0, version;
    598 	unsigned long long cp_start_blk_no;
    599 	unsigned int cp_payload, cp_blks;
    600 	int ret;
    601 
    602 	cp_payload = get_sb(cp_payload);
    603 	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
    604 		return -EINVAL;
    605 
    606 	cp_blks = 1 + cp_payload;
    607 	sbi->ckpt = malloc(cp_blks * blk_size);
    608 	if (!sbi->ckpt)
    609 		return -ENOMEM;
    610 	/*
    611 	 * Finding a valid cp block involves reading both
    612 	 * checkpoint packs (cp pack 1 and cp pack 2).
    613 	 */
    614 	cp_start_blk_no = get_sb(cp_blkaddr);
    615 	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
    616 
    617 	/* The second checkpoint pack should start at the next segment */
    618 	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
    619 	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
    620 
    621 	if (cp1 && cp2) {
    622 		if (ver_after(cp2_version, cp1_version)) {
    623 			cur_page = cp2;
    624 			sbi->cur_cp = 2;
    625 			version = cp2_version;
    626 		} else {
    627 			cur_page = cp1;
    628 			sbi->cur_cp = 1;
    629 			version = cp1_version;
    630 		}
    631 	} else if (cp1) {
    632 		cur_page = cp1;
    633 		sbi->cur_cp = 1;
    634 		version = cp1_version;
    635 	} else if (cp2) {
    636 		cur_page = cp2;
    637 		sbi->cur_cp = 2;
    638 		version = cp2_version;
    639 	} else
    640 		goto fail_no_cp;
    641 
    642 	MSG(0, "Info: CKPT version = %llx\n", version);
    643 
    644 	memcpy(sbi->ckpt, cur_page, blk_size);
    645 
    646 	if (cp_blks > 1) {
    647 		unsigned int i;
    648 		unsigned long long cp_blk_no;
    649 
    650 		cp_blk_no = get_sb(cp_blkaddr);
    651 		if (cur_page == cp2)
    652 			cp_blk_no += 1 << get_sb(log_blocks_per_seg);
    653 
    654 		/* copy sit bitmap */
    655 		for (i = 1; i < cp_blks; i++) {
    656 			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
    657 			ret = dev_read_block(cur_page, cp_blk_no + i);
    658 			ASSERT(ret >= 0);
    659 			memcpy(ckpt + i * blk_size, cur_page, blk_size);
    660 		}
    661 	}
    662 	if (cp1)
    663 		free(cp1);
    664 	if (cp2)
    665 		free(cp2);
    666 	return 0;
    667 
    668 fail_no_cp:
    669 	free(sbi->ckpt);
    670 	sbi->ckpt = NULL;
    671 	return -EINVAL;
    672 }
    673 
    674 int sanity_check_ckpt(struct f2fs_sb_info *sbi)
    675 {
    676 	unsigned int total, fsmeta;
    677 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    678 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    679 
    680 	total = get_sb(segment_count);
    681 	fsmeta = get_sb(segment_count_ckpt);
    682 	fsmeta += get_sb(segment_count_sit);
    683 	fsmeta += get_sb(segment_count_nat);
    684 	fsmeta += get_cp(rsvd_segment_count);
    685 	fsmeta += get_sb(segment_count_ssa);
    686 
    687 	if (fsmeta >= total)
    688 		return 1;
    689 
    690 	return 0;
    691 }
    692 
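/*
 * NAT blocks are stored in segment pairs; only one copy of each block is
 * current, selected by nm_i->nat_bitmap.  Rough address sketch of the
 * computation below:
 *
 *   seg_off    = block_off >> log_blocks_per_seg;
 *   block_addr = nat_blkaddr
 *              + (seg_off << log_blocks_per_seg << 1)   skip preceding pairs
 *              + (block_off % blocks_per_seg);          offset inside segment
 *   if (f2fs_test_bit(block_off, nat_bitmap))           bit picks the copy
 *           block_addr += blocks_per_seg;
 */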
    693 static pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
    694 {
    695 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    696 	pgoff_t block_off;
    697 	pgoff_t block_addr;
    698 	int seg_off;
    699 
    700 	block_off = NAT_BLOCK_OFFSET(start);
    701 	seg_off = block_off >> sbi->log_blocks_per_seg;
    702 
    703 	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
    704 			(seg_off << sbi->log_blocks_per_seg << 1) +
    705 			(block_off & ((1 << sbi->log_blocks_per_seg) -1)));
    706 
    707 	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
    708 		block_addr += sbi->blocks_per_seg;
    709 
    710 	return block_addr;
    711 }
    712 
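/*
 * For sload (c.func == SLOAD) only: build an in-memory bitmap of nids that
 * are already in use, by scanning every on-disk NAT block and then applying
 * the not-yet-flushed NAT journal entries kept in the hot-data curseg.
 * Bit 0 is reserved up front.
 */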
    713 static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
    714 {
    715 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    716 	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
    717 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
    718 	struct f2fs_summary_block *sum = curseg->sum_blk;
    719 	struct f2fs_journal *journal = &sum->journal;
    720 	struct f2fs_nat_block nat_block;
    721 	block_t start_blk;
    722 	nid_t nid;
    723 	int i;
    724 
    725 	if (!(c.func == SLOAD))
    726 		return 0;
    727 
    728 	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
    729 	if (!nm_i->nid_bitmap)
    730 		return -ENOMEM;
    731 
    732 	/* arbitrarily set 0 bit */
    733 	f2fs_set_bit(0, nm_i->nid_bitmap);
    734 
    735 	memset((void *)&nat_block, 0, sizeof(struct f2fs_nat_block));
    736 
    737 	for (nid = 0; nid < nm_i->max_nid; nid++) {
    738 		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
    739 			int ret;
    740 
    741 			start_blk = current_nat_addr(sbi, nid);
    742 			ret = dev_read_block((void *)&nat_block, start_blk);
    743 			ASSERT(ret >= 0);
    744 		}
    745 
    746 		if (nat_block.entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
    747 			f2fs_set_bit(nid, nm_i->nid_bitmap);
    748 	}
    749 
    750 	for (i = 0; i < nats_in_cursum(journal); i++) {
    751 		block_t addr;
    752 
    753 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
    754 		nid = le32_to_cpu(nid_in_journal(journal, i));
    755 		if (addr != NULL_ADDR)
    756 			f2fs_set_bit(nid, nm_i->nid_bitmap);
    757 	}
    758 	return 0;
    759 }
    760 
    761 u32 update_nat_bits_flags(struct f2fs_super_block *sb,
    762 				struct f2fs_checkpoint *cp, u32 flags)
    763 {
    764 	u_int32_t nat_bits_bytes, nat_bits_blocks;
    765 
    766 	nat_bits_bytes = get_sb(segment_count_nat) << 5;
    767 	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
    768 						F2FS_BLKSIZE - 1);
    769 	if (get_cp(cp_pack_total_block_count) <=
    770 			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
    771 		flags |= CP_NAT_BITS_FLAG;
    772 	else
    773 		flags &= (~CP_NAT_BITS_FLAG);
    774 
    775 	return flags;
    776 }
    777 
    778 /* flush_journal_entries() should be called before this */
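/*
 * The nat_bits area written here is laid out as follows (a sketch of the
 * code below):
 *   bytes 0..7 : checkpoint CRC (get_cp_crc(cp))
 *   then       : full_nat_bits  - one bit per NAT block whose entries are all in use
 *   then       : empty_nat_bits - one bit per NAT block with no valid entry
 * and it occupies the last nat_bits_blocks blocks of the CP pack's segment:
 *   segment0_blkaddr + (set << log_blocks_per_seg) - nat_bits_blocks
 */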
    779 void write_nat_bits(struct f2fs_sb_info *sbi,
    780 	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
    781 {
    782 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    783 	u_int32_t nat_blocks = get_sb(segment_count_nat) <<
    784 				(get_sb(log_blocks_per_seg) - 1);
    785 	u_int32_t nat_bits_bytes = nat_blocks >> 3;
    786 	u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
    787 					8 + F2FS_BLKSIZE - 1);
    788 	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
    789 	struct f2fs_nat_block *nat_block;
    790 	u_int32_t i, j;
    791 	block_t blkaddr;
    792 	int ret;
    793 
    794 	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
    795 	ASSERT(nat_bits);
    796 
    797 	nat_block = malloc(F2FS_BLKSIZE);
    798 	ASSERT(nat_block);
    799 
    800 	full_nat_bits = nat_bits + 8;
    801 	empty_nat_bits = full_nat_bits + nat_bits_bytes;
    802 
    803 	memset(full_nat_bits, 0, nat_bits_bytes);
    804 	memset(empty_nat_bits, 0, nat_bits_bytes);
    805 
    806 	for (i = 0; i < nat_blocks; i++) {
    807 		int seg_off = i >> get_sb(log_blocks_per_seg);
    808 		int valid = 0;
    809 
    810 		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
    811 				(seg_off << get_sb(log_blocks_per_seg) << 1) +
    812 				(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));
    813 
    814 		if (f2fs_test_bit(i, nm_i->nat_bitmap))
    815 			blkaddr += (1 << get_sb(log_blocks_per_seg));
    816 
    817 		ret = dev_read_block(nat_block, blkaddr);
    818 		ASSERT(ret >= 0);
    819 
    820 		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
    821 			if ((i == 0 && j == 0) ||
    822 				nat_block->entries[j].block_addr != NULL_ADDR)
    823 				valid++;
    824 		}
    825 		if (valid == 0)
    826 			test_and_set_bit_le(i, empty_nat_bits);
    827 		else if (valid == NAT_ENTRY_PER_BLOCK)
    828 			test_and_set_bit_le(i, full_nat_bits);
    829 	}
    830 	*(__le64 *)nat_bits = get_cp_crc(cp);
    831 	free(nat_block);
    832 
    833 	blkaddr = get_sb(segment0_blkaddr) + (set <<
    834 				get_sb(log_blocks_per_seg)) - nat_bits_blocks;
    835 
    836 	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);
    837 
    838 	for (i = 0; i < nat_bits_blocks; i++) {
    839 		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
    840 			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
    841 	}
    842 	MSG(0, "Info: Write valid nat_bits in checkpoint\n");
    843 
    844 	free(nat_bits);
    845 }
    846 
    847 int init_node_manager(struct f2fs_sb_info *sbi)
    848 {
    849 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    850 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    851 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    852 	unsigned char *version_bitmap;
    853 	unsigned int nat_segs, nat_blocks;
    854 
    855 	nm_i->nat_blkaddr = get_sb(nat_blkaddr);
    856 
    857 	/* segment_count_nat includes pair segments, so divide by 2. */
    858 	nat_segs = get_sb(segment_count_nat) >> 1;
    859 	nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
    860 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
    861 	nm_i->fcnt = 0;
    862 	nm_i->nat_cnt = 0;
    863 	nm_i->init_scan_nid = get_cp(next_free_nid);
    864 	nm_i->next_scan_nid = get_cp(next_free_nid);
    865 
    866 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
    867 
    868 	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
    869 	if (!nm_i->nat_bitmap)
    870 		return -ENOMEM;
    871 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
    872 	if (!version_bitmap)
    873 		return -EFAULT;
    874 
    875 	/* copy version bitmap */
    876 	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
    877 	return f2fs_init_nid_bitmap(sbi);
    878 }
    879 
    880 int build_node_manager(struct f2fs_sb_info *sbi)
    881 {
    882 	int err;
    883 	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
    884 	if (!sbi->nm_info)
    885 		return -ENOMEM;
    886 
    887 	err = init_node_manager(sbi);
    888 	if (err)
    889 		return err;
    890 
    891 	return 0;
    892 }
    893 
    894 int build_sit_info(struct f2fs_sb_info *sbi)
    895 {
    896 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    897 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    898 	struct sit_info *sit_i;
    899 	unsigned int sit_segs, start;
    900 	char *src_bitmap, *dst_bitmap;
    901 	unsigned int bitmap_size;
    902 
    903 	sit_i = malloc(sizeof(struct sit_info));
    904 	if (!sit_i)
    905 		return -ENOMEM;
    906 
    907 	SM_I(sbi)->sit_info = sit_i;
    908 
    909 	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
    910 	if (!sit_i->sentries)
    911 		return -ENOMEM;
    912 
    913 	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
    914 		sit_i->sentries[start].cur_valid_map
    915 			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
    916 		sit_i->sentries[start].ckpt_valid_map
    917 			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
    918 		if (!sit_i->sentries[start].cur_valid_map
    919 				|| !sit_i->sentries[start].ckpt_valid_map)
    920 			return -ENOMEM;
    921 	}
    922 
    923 	sit_segs = get_sb(segment_count_sit) >> 1;
    924 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
    925 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
    926 
    927 	dst_bitmap = malloc(bitmap_size);
    928 	memcpy(dst_bitmap, src_bitmap, bitmap_size);
    929 
    930 	sit_i->sit_base_addr = get_sb(sit_blkaddr);
    931 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
    932 	sit_i->written_valid_blocks = get_cp(valid_block_count);
    933 	sit_i->sit_bitmap = dst_bitmap;
    934 	sit_i->bitmap_size = bitmap_size;
    935 	sit_i->dirty_sentries = 0;
    936 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
    937 	sit_i->elapsed_time = get_cp(elapsed_time);
    938 	return 0;
    939 }
    940 
    941 void reset_curseg(struct f2fs_sb_info *sbi, int type)
    942 {
    943 	struct curseg_info *curseg = CURSEG_I(sbi, type);
    944 	struct summary_footer *sum_footer;
    945 	struct seg_entry *se;
    946 
    947 	sum_footer = &(curseg->sum_blk->footer);
    948 	memset(sum_footer, 0, sizeof(struct summary_footer));
    949 	if (IS_DATASEG(type))
    950 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
    951 	if (IS_NODESEG(type))
    952 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
    953 	se = get_seg_entry(sbi, curseg->segno);
    954 	se->type = type;
    955 }
    956 
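/*
 * Compacted summary layout, as consumed below: the block at
 * start_sum_block() begins with the NAT journal, then the SIT journal
 * (SUM_JOURNAL_SIZE each), followed by the packed summary entries of the
 * three data cursegs.  Entries run up to the block footer and continue in
 * the next block when they do not fit.
 */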
    957 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
    958 {
    959 	struct curseg_info *curseg;
    960 	unsigned int i, j, offset;
    961 	block_t start;
    962 	char *kaddr;
    963 	int ret;
    964 
    965 	start = start_sum_block(sbi);
    966 
    967 	kaddr = (char *)malloc(PAGE_SIZE);
    968 	ret = dev_read_block(kaddr, start++);
    969 	ASSERT(ret >= 0);
    970 
    971 	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
    972 	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
    973 
    974 	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    975 	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
    976 						SUM_JOURNAL_SIZE);
    977 
    978 	offset = 2 * SUM_JOURNAL_SIZE;
    979 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
    980 		unsigned short blk_off;
    981 		struct curseg_info *curseg = CURSEG_I(sbi, i);
    982 
    983 		reset_curseg(sbi, i);
    984 
    985 		if (curseg->alloc_type == SSR)
    986 			blk_off = sbi->blocks_per_seg;
    987 		else
    988 			blk_off = curseg->next_blkoff;
    989 
    990 		ASSERT(blk_off <= ENTRIES_IN_SUM);
    991 
    992 		for (j = 0; j < blk_off; j++) {
    993 			struct f2fs_summary *s;
    994 			s = (struct f2fs_summary *)(kaddr + offset);
    995 			curseg->sum_blk->entries[j] = *s;
    996 			offset += SUMMARY_SIZE;
    997 			if (offset + SUMMARY_SIZE <=
    998 					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
    999 				continue;
   1000 			memset(kaddr, 0, PAGE_SIZE);
   1001 			ret = dev_read_block(kaddr, start++);
   1002 			ASSERT(ret >= 0);
   1003 			offset = 0;
   1004 		}
   1005 	}
   1006 	free(kaddr);
   1007 }
   1008 
   1009 static void restore_node_summary(struct f2fs_sb_info *sbi,
   1010 		unsigned int segno, struct f2fs_summary_block *sum_blk)
   1011 {
   1012 	struct f2fs_node *node_blk;
   1013 	struct f2fs_summary *sum_entry;
   1014 	block_t addr;
   1015 	unsigned int i;
   1016 	int ret;
   1017 
   1018 	node_blk = malloc(F2FS_BLKSIZE);
   1019 	ASSERT(node_blk);
   1020 
   1021 	/* scan the node segment */
   1022 	addr = START_BLOCK(sbi, segno);
   1023 	sum_entry = &sum_blk->entries[0];
   1024 
   1025 	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
   1026 		ret = dev_read_block(node_blk, addr);
   1027 		ASSERT(ret >= 0);
   1028 		sum_entry->nid = node_blk->footer.nid;
   1029 		addr++;
   1030 	}
   1031 	free(node_blk);
   1032 }
   1033 
   1034 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
   1035 {
   1036 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1037 	struct f2fs_summary_block *sum_blk;
   1038 	struct curseg_info *curseg;
   1039 	unsigned int segno = 0;
   1040 	block_t blk_addr = 0;
   1041 	int ret;
   1042 
   1043 	if (IS_DATASEG(type)) {
   1044 		segno = get_cp(cur_data_segno[type]);
   1045 		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1046 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
   1047 		else
   1048 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
   1049 	} else {
   1050 		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
   1051 		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1052 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
   1053 							type - CURSEG_HOT_NODE);
   1054 		else
   1055 			blk_addr = GET_SUM_BLKADDR(sbi, segno);
   1056 	}
   1057 
   1058 	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
   1059 	ret = dev_read_block(sum_blk, blk_addr);
   1060 	ASSERT(ret >= 0);
   1061 
   1062 	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1063 		restore_node_summary(sbi, segno, sum_blk);
   1064 
   1065 	curseg = CURSEG_I(sbi, type);
   1066 	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
   1067 	reset_curseg(sbi, type);
   1068 	free(sum_blk);
   1069 }
   1070 
   1071 void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
   1072 					struct f2fs_summary *sum)
   1073 {
   1074 	struct f2fs_summary_block *sum_blk;
   1075 	u32 segno, offset;
   1076 	int type, ret;
   1077 	struct seg_entry *se;
   1078 
   1079 	segno = GET_SEGNO(sbi, blk_addr);
   1080 	offset = OFFSET_IN_SEG(sbi, blk_addr);
   1081 
   1082 	se = get_seg_entry(sbi, segno);
   1083 
   1084 	sum_blk = get_sum_block(sbi, segno, &type);
   1085 	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
   1086 	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
   1087 							SUM_TYPE_DATA;
   1088 
   1089 	/* write SSA all the time */
   1090 	ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
   1091 	ASSERT(ret >= 0);
   1092 
   1093 	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
   1094 					type == SEG_TYPE_MAX)
   1095 		free(sum_blk);
   1096 }
   1097 
   1098 static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
   1099 {
   1100 	int type = CURSEG_HOT_DATA;
   1101 
   1102 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
   1103 		read_compacted_summaries(sbi);
   1104 		type = CURSEG_HOT_NODE;
   1105 	}
   1106 
   1107 	for (; type <= CURSEG_COLD_NODE; type++)
   1108 		read_normal_summaries(sbi, type);
   1109 }
   1110 
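/*
 * Allocate the NR_CURSEG_TYPE current-segment descriptors and restore their
 * segno, next block offset and allocation type from the checkpoint
 * (cur_data_* for the three data logs, cur_node_* for the three node logs),
 * then reload their summary blocks via restore_curseg_summaries().
 */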
   1111 static void build_curseg(struct f2fs_sb_info *sbi)
   1112 {
   1113 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1114 	struct curseg_info *array;
   1115 	unsigned short blk_off;
   1116 	unsigned int segno;
   1117 	int i;
   1118 
   1119 	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
   1120 	ASSERT(array);
   1121 
   1122 	SM_I(sbi)->curseg_array = array;
   1123 
   1124 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
   1125 		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
   1126 		ASSERT(array[i].sum_blk);
   1127 		if (i <= CURSEG_COLD_DATA) {
   1128 			blk_off = get_cp(cur_data_blkoff[i]);
   1129 			segno = get_cp(cur_data_segno[i]);
   1130 		}
   1131 		if (i > CURSEG_COLD_DATA) {
   1132 			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
   1133 			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
   1134 		}
   1135 		ASSERT(segno < TOTAL_SEGS(sbi));
   1136 		ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
   1137 
   1138 		array[i].segno = segno;
   1139 		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
   1140 		array[i].next_segno = NULL_SEGNO;
   1141 		array[i].next_blkoff = blk_off;
   1142 		array[i].alloc_type = cp->alloc_type[i];
   1143 	}
   1144 	restore_curseg_summaries(sbi);
   1145 }
   1146 
   1147 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
   1148 {
   1149 	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
   1150 	ASSERT(segno <= end_segno);
   1151 }
   1152 
   1153 struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
   1154 						unsigned int segno)
   1155 {
   1156 	struct sit_info *sit_i = SIT_I(sbi);
   1157 	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
   1158 	block_t blk_addr = sit_i->sit_base_addr + offset;
   1159 	struct f2fs_sit_block *sit_blk;
   1160 	int ret;
   1161 
   1162 	sit_blk = calloc(BLOCK_SZ, 1);
   1163 	ASSERT(sit_blk);
   1164 	check_seg_range(sbi, segno);
   1165 
   1166 	/* calculate sit block address */
   1167 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
   1168 		blk_addr += sit_i->sit_blocks;
   1169 
   1170 	ret = dev_read_block(sit_blk, blk_addr);
   1171 	ASSERT(ret >= 0);
   1172 
   1173 	return sit_blk;
   1174 }
   1175 
   1176 void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
   1177 			unsigned int segno, struct f2fs_sit_block *sit_blk)
   1178 {
   1179 	struct sit_info *sit_i = SIT_I(sbi);
   1180 	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
   1181 	block_t blk_addr = sit_i->sit_base_addr + offset;
   1182 	int ret;
   1183 
   1184 	/* calculate sit block address */
   1185 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
   1186 		blk_addr += sit_i->sit_blocks;
   1187 
   1188 	ret = dev_write_block(sit_blk, blk_addr);
   1189 	ASSERT(ret >= 0);
   1190 }
   1191 
   1192 void check_block_count(struct f2fs_sb_info *sbi,
   1193 		unsigned int segno, struct f2fs_sit_entry *raw_sit)
   1194 {
   1195 	struct f2fs_sm_info *sm_info = SM_I(sbi);
   1196 	unsigned int end_segno = sm_info->segment_count - 1;
   1197 	int valid_blocks = 0;
   1198 	unsigned int i;
   1199 
   1200 	/* check segment usage */
   1201 	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
   1202 		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
   1203 				segno, GET_SIT_VBLOCKS(raw_sit));
   1204 
   1205 	/* check boundary of a given segment number */
   1206 	if (segno > end_segno)
   1207 		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
   1208 
   1209 	/* check bitmap with valid block count */
   1210 	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
   1211 		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
   1212 
   1213 	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
   1214 		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
   1215 				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
   1216 
   1217 	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
   1218 		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
   1219 				segno, GET_SIT_TYPE(raw_sit));
   1220 }
   1221 
   1222 void seg_info_from_raw_sit(struct seg_entry *se,
   1223 		struct f2fs_sit_entry *raw_sit)
   1224 {
   1225 	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
   1226 	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(raw_sit);
   1227 	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
   1228 	memcpy(se->ckpt_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
   1229 	se->type = GET_SIT_TYPE(raw_sit);
   1230 	se->orig_type = GET_SIT_TYPE(raw_sit);
   1231 	se->mtime = le64_to_cpu(raw_sit->mtime);
   1232 }
   1233 
   1234 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
   1235 		unsigned int segno)
   1236 {
   1237 	struct sit_info *sit_i = SIT_I(sbi);
   1238 	return &sit_i->sentries[segno];
   1239 }
   1240 
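/*
 * Return the summary block covering @segno.  If @segno is one of the six
 * current segments, the in-memory curseg summary is returned (and must not
 * be freed by the caller); *ret_type is SEG_TYPE_CUR_NODE/DATA, negated when
 * the footer type contradicts the checkpoint.  Otherwise the SSA block is
 * read into a freshly allocated buffer, which the caller frees, and
 * *ret_type is SEG_TYPE_NODE, SEG_TYPE_DATA or SEG_TYPE_MAX.
 */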
   1241 struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
   1242 				unsigned int segno, int *ret_type)
   1243 {
   1244 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1245 	struct f2fs_summary_block *sum_blk;
   1246 	struct curseg_info *curseg;
   1247 	int type, ret;
   1248 	u64 ssa_blk;
   1249 
   1250 	*ret_type = SEG_TYPE_MAX;
   1251 
   1252 	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
   1253 	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
   1254 		if (segno == get_cp(cur_node_segno[type])) {
   1255 			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
   1256 			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
   1257 				ASSERT_MSG("segno [0x%x] indicates a data "
   1258 						"segment, but should be node",
   1259 						segno);
   1260 				*ret_type = -SEG_TYPE_CUR_NODE;
   1261 			} else {
   1262 				*ret_type = SEG_TYPE_CUR_NODE;
   1263 			}
   1264 			return curseg->sum_blk;
   1265 		}
   1266 	}
   1267 
   1268 	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
   1269 		if (segno == get_cp(cur_data_segno[type])) {
   1270 			curseg = CURSEG_I(sbi, type);
   1271 			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
   1272 				ASSERT_MSG("segno [0x%x] indicates a node "
   1273 						"segment, but should be data",
   1274 						segno);
   1275 				*ret_type = -SEG_TYPE_CUR_DATA;
   1276 			} else {
   1277 				*ret_type = SEG_TYPE_CUR_DATA;
   1278 			}
   1279 			return curseg->sum_blk;
   1280 		}
   1281 	}
   1282 
   1283 	sum_blk = calloc(BLOCK_SZ, 1);
   1284 	ASSERT(sum_blk);
   1285 
   1286 	ret = dev_read_block(sum_blk, ssa_blk);
   1287 	ASSERT(ret >= 0);
   1288 
   1289 	if (IS_SUM_NODE_SEG(sum_blk->footer))
   1290 		*ret_type = SEG_TYPE_NODE;
   1291 	else if (IS_SUM_DATA_SEG(sum_blk->footer))
   1292 		*ret_type = SEG_TYPE_DATA;
   1293 
   1294 	return sum_blk;
   1295 }
   1296 
   1297 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
   1298 				struct f2fs_summary *sum_entry)
   1299 {
   1300 	struct f2fs_summary_block *sum_blk;
   1301 	u32 segno, offset;
   1302 	int type;
   1303 
   1304 	segno = GET_SEGNO(sbi, blk_addr);
   1305 	offset = OFFSET_IN_SEG(sbi, blk_addr);
   1306 
   1307 	sum_blk = get_sum_block(sbi, segno, &type);
   1308 	memcpy(sum_entry, &(sum_blk->entries[offset]),
   1309 				sizeof(struct f2fs_summary));
   1310 	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
   1311 					type == SEG_TYPE_MAX)
   1312 		free(sum_blk);
   1313 	return type;
   1314 }
   1315 
   1316 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
   1317 				struct f2fs_nat_entry *raw_nat)
   1318 {
   1319 	struct f2fs_nat_block *nat_block;
   1320 	pgoff_t block_addr;
   1321 	int entry_off;
   1322 	int ret;
   1323 
   1324 	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
   1325 		return;
   1326 
   1327 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1328 	ASSERT(nat_block);
   1329 
   1330 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1331 	block_addr = current_nat_addr(sbi, nid);
   1332 
   1333 	ret = dev_read_block(nat_block, block_addr);
   1334 	ASSERT(ret >= 0);
   1335 
   1336 	memcpy(raw_nat, &nat_block->entries[entry_off],
   1337 					sizeof(struct f2fs_nat_entry));
   1338 	free(nat_block);
   1339 }
   1340 
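/*
 * Repoint the data block at @ofs_in_node of node @nid to @newaddr: patch
 * i_addr[] when the node is an inode (footer.nid == footer.ino) or dn.addr[]
 * for a direct node, write the node block back, and then clear the owning
 * inode's extent cache (i_ext.len = 0) if the old address fell inside the
 * cached extent.
 */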
   1341 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
   1342 				u16 ofs_in_node, block_t newaddr)
   1343 {
   1344 	struct f2fs_node *node_blk = NULL;
   1345 	struct node_info ni;
   1346 	block_t oldaddr, startaddr, endaddr;
   1347 	int ret;
   1348 
   1349 	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
   1350 	ASSERT(node_blk);
   1351 
   1352 	get_node_info(sbi, nid, &ni);
   1353 
   1354 	/* read node_block */
   1355 	ret = dev_read_block(node_blk, ni.blk_addr);
   1356 	ASSERT(ret >= 0);
   1357 
   1358 	/* check its block address */
   1359 	if (node_blk->footer.nid == node_blk->footer.ino) {
   1360 		oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs_in_node]);
   1361 		node_blk->i.i_addr[ofs_in_node] = cpu_to_le32(newaddr);
   1362 	} else {
   1363 		oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
   1364 		node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
   1365 	}
   1366 
   1367 	ret = dev_write_block(node_blk, ni.blk_addr);
   1368 	ASSERT(ret >= 0);
   1369 
   1370 	/* check extent cache entry */
   1371 	if (node_blk->footer.nid != node_blk->footer.ino) {
   1372 		get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
   1373 
   1374 		/* read inode block */
   1375 		ret = dev_read_block(node_blk, ni.blk_addr);
   1376 		ASSERT(ret >= 0);
   1377 	}
   1378 
   1379 	startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
   1380 	endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
   1381 	if (oldaddr >= startaddr && oldaddr < endaddr) {
   1382 		node_blk->i.i_ext.len = 0;
   1383 
   1384 		/* update inode block */
   1385 		ret = dev_write_block(node_blk, ni.blk_addr);
   1386 		ASSERT(ret >= 0);
   1387 	}
   1388 	free(node_blk);
   1389 }
   1390 
   1391 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
   1392 					nid_t nid, block_t newaddr)
   1393 {
   1394 	struct f2fs_nat_block *nat_block;
   1395 	pgoff_t block_addr;
   1396 	int entry_off;
   1397 	int ret;
   1398 
   1399 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1400 	ASSERT(nat_block);
   1401 
   1402 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1403 	block_addr = current_nat_addr(sbi, nid);
   1404 
   1405 	ret = dev_read_block(nat_block, block_addr);
   1406 	ASSERT(ret >= 0);
   1407 
   1408 	if (ino)
   1409 		nat_block->entries[entry_off].ino = cpu_to_le32(ino);
   1410 	nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
   1411 
   1412 	ret = dev_write_block(nat_block, block_addr);
   1413 	ASSERT(ret >= 0);
   1414 	free(nat_block);
   1415 }
   1416 
   1417 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
   1418 {
   1419 	struct f2fs_nat_entry raw_nat;
   1420 	get_nat_entry(sbi, nid, &raw_nat);
   1421 	ni->nid = nid;
   1422 	node_info_from_raw_nat(ni, &raw_nat);
   1423 }
   1424 
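/*
 * Fill the in-memory seg_entry array: read each segment's raw SIT entry from
 * its current SIT block, sanity-check it with check_block_count(), then let
 * the SIT journal in the cold-data curseg override the segments it carries
 * newer entries for.
 */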
   1425 void build_sit_entries(struct f2fs_sb_info *sbi)
   1426 {
   1427 	struct sit_info *sit_i = SIT_I(sbi);
   1428 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1429 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1430 	struct seg_entry *se;
   1431 	struct f2fs_sit_entry sit;
   1432 	unsigned int i, segno;
   1433 
   1434 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1435 		struct f2fs_sit_block *sit_blk;
   1436 		se = &sit_i->sentries[segno];
   1437 
   1438 		sit_blk = get_current_sit_page(sbi, segno);
   1439 		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1440 		free(sit_blk);
   1441 
   1442 		check_block_count(sbi, segno, &sit);
   1443 		seg_info_from_raw_sit(se, &sit);
   1444 	}
   1445 
   1446 	for (i = 0; i < sits_in_cursum(journal); i++) {
   1447 		segno = le32_to_cpu(segno_in_journal(journal, i));
   1448 		se = &sit_i->sentries[segno];
   1449 		sit = sit_in_journal(journal, i);
   1450 
   1451 		check_block_count(sbi, segno, &sit);
   1452 		seg_info_from_raw_sit(se, &sit);
   1453 	}
   1454 
   1455 }
   1456 
   1457 int build_segment_manager(struct f2fs_sb_info *sbi)
   1458 {
   1459 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1460 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1461 	struct f2fs_sm_info *sm_info;
   1462 
   1463 	sm_info = malloc(sizeof(struct f2fs_sm_info));
   1464 	if (!sm_info)
   1465 		return -ENOMEM;
   1466 
   1467 	/* init sm info */
   1468 	sbi->sm_info = sm_info;
   1469 	sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
   1470 	sm_info->main_blkaddr = get_sb(main_blkaddr);
   1471 	sm_info->segment_count = get_sb(segment_count);
   1472 	sm_info->reserved_segments = get_cp(rsvd_segment_count);
   1473 	sm_info->ovp_segments = get_cp(overprov_segment_count);
   1474 	sm_info->main_segments = get_sb(segment_count_main);
   1475 	sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
   1476 
   1477 	build_sit_info(sbi);
   1478 
   1479 	build_curseg(sbi);
   1480 
   1481 	build_sit_entries(sbi);
   1482 
   1483 	return 0;
   1484 }
   1485 
   1486 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
   1487 {
   1488 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   1489 	struct f2fs_sm_info *sm_i = SM_I(sbi);
   1490 	unsigned int segno = 0;
   1491 	char *ptr = NULL;
   1492 	u32 sum_vblocks = 0;
   1493 	u32 free_segs = 0;
   1494 	struct seg_entry *se;
   1495 
   1496 	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
   1497 	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
   1498 	ASSERT(fsck->sit_area_bitmap);
   1499 	ptr = fsck->sit_area_bitmap;
   1500 
   1501 	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
   1502 
   1503 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1504 		se = get_seg_entry(sbi, segno);
   1505 
   1506 		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1507 		ptr += SIT_VBLOCK_MAP_SIZE;
   1508 
   1509 		if (se->valid_blocks == 0x0) {
   1510 			if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
   1511 				le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
   1512 				le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
   1513 				le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
   1514 				le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
   1515 				le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
   1516 				continue;
   1517 			} else {
   1518 				free_segs++;
   1519 			}
   1520 		} else {
   1521 			sum_vblocks += se->valid_blocks;
   1522 		}
   1523 	}
   1524 	fsck->chk.sit_valid_blocks = sum_vblocks;
   1525 	fsck->chk.sit_free_segs = free_segs;
   1526 
   1527 	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
   1528 			sum_vblocks, sum_vblocks,
   1529 			free_segs, free_segs);
   1530 }
   1531 
   1532 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
   1533 {
   1534 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   1535 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1536 	struct sit_info *sit_i = SIT_I(sbi);
   1537 	unsigned int segno = 0;
   1538 	struct f2fs_summary_block *sum = curseg->sum_blk;
   1539 	char *ptr = NULL;
   1540 
   1541 	/* remove sit journal */
   1542 	sum->journal.n_sits = 0;
   1543 
   1544 	ptr = fsck->main_area_bitmap;
   1545 
   1546 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1547 		struct f2fs_sit_block *sit_blk;
   1548 		struct f2fs_sit_entry *sit;
   1549 		struct seg_entry *se;
   1550 		u16 valid_blocks = 0;
   1551 		u16 type;
   1552 		int i;
   1553 
   1554 		sit_blk = get_current_sit_page(sbi, segno);
   1555 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1556 		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
   1557 
   1558 		/* update valid block count */
   1559 		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
   1560 			valid_blocks += get_bits_in_byte(sit->valid_map[i]);
   1561 
   1562 		se = get_seg_entry(sbi, segno);
   1563 		memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
   1564 		se->valid_blocks = valid_blocks;
   1565 		type = se->type;
   1566 		if (type >= NO_CHECK_TYPE) {
   1567 			ASSERT_MSG("Invalid type: segno=0x%x, valid_blocks=0x%x",
   1568 					segno, valid_blocks);
   1569 			type = 0;
   1570 		}
   1571 		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
   1572 								valid_blocks);
   1573 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1574 		free(sit_blk);
   1575 
   1576 		ptr += SIT_VBLOCK_MAP_SIZE;
   1577 	}
   1578 }
   1579 
   1580 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
   1581 {
   1582 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1583 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1584 	struct sit_info *sit_i = SIT_I(sbi);
   1585 	unsigned int segno;
   1586 	int i;
   1587 
   1588 	for (i = 0; i < sits_in_cursum(journal); i++) {
   1589 		struct f2fs_sit_block *sit_blk;
   1590 		struct f2fs_sit_entry *sit;
   1591 		struct seg_entry *se;
   1592 
   1593 		segno = segno_in_journal(journal, i);
   1594 		se = get_seg_entry(sbi, segno);
   1595 
   1596 		sit_blk = get_current_sit_page(sbi, segno);
   1597 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1598 
   1599 		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1600 		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
   1601 							se->valid_blocks);
   1602 		sit->mtime = cpu_to_le64(se->mtime);
   1603 
   1604 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1605 		free(sit_blk);
   1606 	}
   1607 
   1608 	journal->n_sits = 0;
   1609 	return i;
   1610 }
   1611 
   1612 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
   1613 {
   1614 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1615 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1616 	struct f2fs_nat_block *nat_block;
   1617 	pgoff_t block_addr;
   1618 	int entry_off;
   1619 	nid_t nid;
   1620 	int ret;
   1621 	int i = 0;
   1622 
   1623 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1624 	ASSERT(nat_block);
   1625 next:
   1626 	if (i >= nats_in_cursum(journal)) {
   1627 		free(nat_block);
   1628 		journal->n_nats = 0;
   1629 		return i;
   1630 	}
   1631 
   1632 	nid = le32_to_cpu(nid_in_journal(journal, i));
   1633 
   1634 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1635 	block_addr = current_nat_addr(sbi, nid);
   1636 
   1637 	ret = dev_read_block(nat_block, block_addr);
   1638 	ASSERT(ret >= 0);
   1639 
   1640 	memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
   1641 					sizeof(struct f2fs_nat_entry));
   1642 
   1643 	ret = dev_write_block(nat_block, block_addr);
   1644 	ASSERT(ret >= 0);
   1645 	i++;
   1646 	goto next;
   1647 }
   1648 
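         /*
          * Flush both NAT and SIT journals to their on-disk areas and write a
          * checkpoint if anything was actually flushed.
          */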
   1649 void flush_journal_entries(struct f2fs_sb_info *sbi)
   1650 {
   1651 	int n_nats = flush_nat_journal_entries(sbi);
   1652 	int n_sits = flush_sit_journal_entries(sbi);
   1653 
   1654 	if (n_nats || n_sits)
   1655 		write_checkpoint(sbi);
   1656 }
   1657 
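         /*
          * Write back every dirty in-memory segment entry to the SIT area and
          * refresh the checkpoint's free segment count along the way.
          */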
   1658 void flush_sit_entries(struct f2fs_sb_info *sbi)
   1659 {
   1660 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1661 	struct sit_info *sit_i = SIT_I(sbi);
   1662 	unsigned int segno = 0;
   1663 	u32 free_segs = 0;
   1664 
   1665 	/* update free segments */
   1666 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1667 		struct f2fs_sit_block *sit_blk;
   1668 		struct f2fs_sit_entry *sit;
   1669 		struct seg_entry *se;
   1670 
   1671 		se = get_seg_entry(sbi, segno);
   1672 
   1673 		if (!se->dirty)
   1674 			continue;
   1675 
   1676 		sit_blk = get_current_sit_page(sbi, segno);
   1677 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1678 		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1679 		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
   1680 							se->valid_blocks);
   1681 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1682 		free(sit_blk);
   1683 
   1684 		if (se->valid_blocks == 0x0 &&
   1685 				!IS_CUR_SEGNO(sbi, segno, NO_CHECK_TYPE))
   1686 			free_segs++;
   1687 	}
   1688 
   1689 	set_cp(free_segment_count, free_segs);
   1690 }
   1691 
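         /*
          * Scan from *to (leftwards if 'left' is set, rightwards otherwise) for
          * a block usable by segment type 'type'.  Fully valid segments and the
          * current segments are skipped, and empty segments are also skipped
          * when free space is nearly exhausted.  A block is accepted when it
          * sits in a completely empty section, or when its segment matches
          * 'type' and the block is not yet valid.  On success *to holds the
          * block address and 0 is returned; -1 means no candidate was found
          * inside the main area.
          */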
   1692 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left, int type)
   1693 {
   1694 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1695 	struct seg_entry *se;
   1696 	u32 segno;
   1697 	u64 offset;
   1698 	int not_enough = 0;
   1699 	u64 end_blkaddr = (get_sb(segment_count_main) <<
   1700 			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
   1701 
   1702 	if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
   1703 		not_enough = 1;
   1704 
   1705 	while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
   1706 		segno = GET_SEGNO(sbi, *to);
   1707 		offset = OFFSET_IN_SEG(sbi, *to);
   1708 
   1709 		se = get_seg_entry(sbi, segno);
   1710 
   1711 		if (se->valid_blocks == sbi->blocks_per_seg ||
   1712 				IS_CUR_SEGNO(sbi, segno, type)) {
    1713 			*to = left ? START_BLOCK(sbi, segno) - 1 :
    1714 						START_BLOCK(sbi, segno + 1);
   1715 			continue;
   1716 		}
   1717 
   1718 		if (se->valid_blocks == 0 && not_enough) {
    1719 			*to = left ? START_BLOCK(sbi, segno) - 1 :
    1720 						START_BLOCK(sbi, segno + 1);
   1721 			continue;
   1722 		}
   1723 
   1724 		if (se->valid_blocks == 0 && !(segno % sbi->segs_per_sec)) {
   1725 			struct seg_entry *se2;
   1726 			unsigned int i;
   1727 
   1728 			for (i = 1; i < sbi->segs_per_sec; i++) {
   1729 				se2 = get_seg_entry(sbi, segno + i);
   1730 				if (se2->valid_blocks)
   1731 					break;
   1732 			}
   1733 			if (i == sbi->segs_per_sec)
   1734 				return 0;
   1735 		}
   1736 
   1737 		if (se->type == type &&
   1738 			!f2fs_test_bit(offset, (const char *)se->cur_valid_map))
   1739 			return 0;
   1740 
    1741 		*to = left ? *to - 1 : *to + 1;
   1742 	}
   1743 	return -1;
   1744 }
   1745 
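         /*
          * Move every active log (current segment) to a new position located
          * via find_next_free_block() after 'from'.  The old summary block is
          * written to its SSA slot first, then the curseg is switched to the
          * new segment in SSR mode and its summaries are reloaded from the new
          * SSA block.
          */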
   1746 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from)
   1747 {
   1748 	int i, ret;
   1749 
    1750 	/* relocate each current segment (active log) to a free slot after 'from' */
   1751 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   1752 		struct curseg_info *curseg = CURSEG_I(sbi, i);
   1753 		struct f2fs_summary_block buf;
   1754 		u32 old_segno;
   1755 		u64 ssa_blk, to;
   1756 
   1757 		/* update original SSA too */
   1758 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   1759 		ret = dev_write_block(curseg->sum_blk, ssa_blk);
   1760 		ASSERT(ret >= 0);
   1761 
   1762 		to = from;
   1763 		ret = find_next_free_block(sbi, &to, 0, i);
   1764 		ASSERT(ret == 0);
   1765 
   1766 		old_segno = curseg->segno;
   1767 		curseg->segno = GET_SEGNO(sbi, to);
   1768 		curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
   1769 		curseg->alloc_type = SSR;
   1770 
   1771 		/* update new segno */
   1772 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   1773 		ret = dev_read_block(&buf, ssa_blk);
   1774 		ASSERT(ret >= 0);
   1775 
   1776 		memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
   1777 
   1778 		/* update se->types */
   1779 		reset_curseg(sbi, i);
   1780 
   1781 		DBG(1, "Move curseg[%d] %x -> %x after %"PRIx64"\n",
   1782 				i, old_segno, curseg->segno, from);
   1783 	}
   1784 }
   1785 
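         /*
          * Empty the in-memory journals of all current segments.  n_nats and
          * n_sits share the same field in struct f2fs_journal, so clearing
          * n_nats empties either kind of journal.
          */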
   1786 void zero_journal_entries(struct f2fs_sb_info *sbi)
   1787 {
   1788 	int i;
   1789 
   1790 	for (i = 0; i < NO_CHECK_TYPE; i++)
   1791 		CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
   1792 }
   1793 
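         /*
          * Copy the in-memory current segment state (allocation type, segment
          * number, next block offset) for every log into the checkpoint,
          * split between the cur_data_* and cur_node_* fields.
          */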
   1794 void write_curseg_info(struct f2fs_sb_info *sbi)
   1795 {
   1796 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1797 	int i;
   1798 
   1799 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   1800 		cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
   1801 		if (i < CURSEG_HOT_NODE) {
   1802 			set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
   1803 			set_cp(cur_data_blkoff[i],
   1804 					CURSEG_I(sbi, i)->next_blkoff);
   1805 		} else {
   1806 			int n = i - CURSEG_HOT_NODE;
   1807 
   1808 			set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
   1809 			set_cp(cur_node_blkoff[n],
   1810 					CURSEG_I(sbi, i)->next_blkoff);
   1811 		}
   1812 	}
   1813 }
   1814 
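         /*
          * Look up 'nid' in the cached NAT journal.  On a hit the raw entry is
          * copied to *raw_nat and the journal index is returned; -1 means the
          * nid is not journalled.
          */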
   1815 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
   1816 					struct f2fs_nat_entry *raw_nat)
   1817 {
   1818 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1819 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1820 	int i = 0;
   1821 
   1822 	for (i = 0; i < nats_in_cursum(journal); i++) {
   1823 		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
   1824 			memcpy(raw_nat, &nat_in_journal(journal, i),
   1825 						sizeof(struct f2fs_nat_entry));
   1826 			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
   1827 			return i;
   1828 		}
   1829 	}
   1830 	return -1;
   1831 }
   1832 
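         /*
          * Erase the NAT entry for 'nid': clear it in the NAT journal if it is
          * cached there, otherwise zero it directly in the on-disk NAT block.
          */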
   1833 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
   1834 {
   1835 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1836 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1837 	struct f2fs_nat_block *nat_block;
   1838 	pgoff_t block_addr;
   1839 	int entry_off;
   1840 	int ret;
   1841 	int i = 0;
   1842 
   1843 	/* check in journal */
   1844 	for (i = 0; i < nats_in_cursum(journal); i++) {
   1845 		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
   1846 			memset(&nat_in_journal(journal, i), 0,
   1847 					sizeof(struct f2fs_nat_entry));
    1848 			FIX_MSG("Remove nid [0x%x] from nat journal\n", nid);
   1849 			return;
   1850 		}
   1851 	}
   1852 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1853 	ASSERT(nat_block);
   1854 
   1855 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1856 	block_addr = current_nat_addr(sbi, nid);
   1857 
   1858 	ret = dev_read_block(nat_block, block_addr);
   1859 	ASSERT(ret >= 0);
   1860 
   1861 	memset(&nat_block->entries[entry_off], 0,
   1862 					sizeof(struct f2fs_nat_entry));
   1863 
   1864 	ret = dev_write_block(nat_block, block_addr);
   1865 	ASSERT(ret >= 0);
   1866 	free(nat_block);
   1867 }
   1868 
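         /*
          * Write the current checkpoint pack: refresh the free/valid counters
          * and flags, recompute the CRC, write the head checkpoint block, the
          * current summary blocks (and their SSA copies), the tail checkpoint
          * block, and the nat_bits blocks when CP_NAT_BITS_FLAG is set.
          */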
   1869 void write_checkpoint(struct f2fs_sb_info *sbi)
   1870 {
   1871 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1872 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1873 	block_t orphan_blks = 0;
   1874 	unsigned long long cp_blk_no;
   1875 	u32 flags = CP_UMOUNT_FLAG;
   1876 	int i, ret;
   1877 	u_int32_t crc = 0;
   1878 
   1879 	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
   1880 		orphan_blks = __start_sum_addr(sbi) - 1;
   1881 		flags |= CP_ORPHAN_PRESENT_FLAG;
   1882 	}
   1883 
   1884 	set_cp(free_segment_count, get_free_segments(sbi));
   1885 	set_cp(valid_block_count, sbi->total_valid_block_count);
   1886 	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
   1887 
   1888 	flags = update_nat_bits_flags(sb, cp, flags);
   1889 	set_cp(ckpt_flags, flags);
   1890 
   1891 	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
   1892 	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) = cpu_to_le32(crc);
   1893 
   1894 	cp_blk_no = get_sb(cp_blkaddr);
   1895 	if (sbi->cur_cp == 2)
   1896 		cp_blk_no += 1 << get_sb(log_blocks_per_seg);
   1897 
   1898 	/* write the first cp */
   1899 	ret = dev_write_block(cp, cp_blk_no++);
   1900 	ASSERT(ret >= 0);
   1901 
   1902 	/* skip payload */
   1903 	cp_blk_no += get_sb(cp_payload);
   1904 	/* skip orphan blocks */
   1905 	cp_blk_no += orphan_blks;
   1906 
   1907 	/* update summary blocks having nullified journal entries */
   1908 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   1909 		struct curseg_info *curseg = CURSEG_I(sbi, i);
   1910 		u64 ssa_blk;
   1911 
   1912 		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
   1913 		ASSERT(ret >= 0);
   1914 
   1915 		/* update original SSA too */
   1916 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   1917 		ret = dev_write_block(curseg->sum_blk, ssa_blk);
   1918 		ASSERT(ret >= 0);
   1919 	}
   1920 
   1921 	/* write the last cp */
   1922 	ret = dev_write_block(cp, cp_blk_no++);
   1923 	ASSERT(ret >= 0);
   1924 
   1925 	/* Write nat bits */
   1926 	if (flags & CP_NAT_BITS_FLAG)
   1927 		write_nat_bits(sbi, sb, cp, sbi->cur_cp);
   1928 }
   1929 
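         /*
          * Build fsck's view of the NAT: read every NAT block of the current
          * set, record which nids have a non-zero block address in
          * nat_area_bitmap, keep a copy of each raw entry in fsck->entries,
          * and finally overlay the newer entries found in the NAT journal.
          * The reserved node/meta inode entries are verified (and fixed) to
          * hold block address 0x1.
          */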
   1930 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
   1931 {
   1932 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1933 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1934 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   1935 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1936 	struct f2fs_nm_info *nm_i = NM_I(sbi);
   1937 	struct f2fs_nat_block *nat_block;
   1938 	struct node_info ni;
   1939 	u32 nid, nr_nat_blks;
   1940 	pgoff_t block_off;
   1941 	pgoff_t block_addr;
   1942 	int seg_off;
   1943 	int ret;
   1944 	unsigned int i;
   1945 
   1946 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1947 	ASSERT(nat_block);
   1948 
   1949 	/* Alloc & build nat entry bitmap */
   1950 	nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
   1951 					sbi->log_blocks_per_seg;
   1952 
   1953 	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
   1954 	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
   1955 	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
   1956 	ASSERT(fsck->nat_area_bitmap);
   1957 
    1958 	fsck->entries = calloc(fsck->nr_nat_entries,
    1959 					sizeof(struct f2fs_nat_entry));
   1960 	ASSERT(fsck->entries);
   1961 
   1962 	for (block_off = 0; block_off < nr_nat_blks; block_off++) {
   1963 
   1964 		seg_off = block_off >> sbi->log_blocks_per_seg;
   1965 		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
   1966 			(seg_off << sbi->log_blocks_per_seg << 1) +
   1967 			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
   1968 
   1969 		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
   1970 			block_addr += sbi->blocks_per_seg;
   1971 
   1972 		ret = dev_read_block(nat_block, block_addr);
   1973 		ASSERT(ret >= 0);
   1974 
   1975 		nid = block_off * NAT_ENTRY_PER_BLOCK;
   1976 		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
   1977 			ni.nid = nid + i;
   1978 
   1979 			if ((nid + i) == F2FS_NODE_INO(sbi) ||
   1980 					(nid + i) == F2FS_META_INO(sbi)) {
   1981 				/* block_addr of node/meta inode should be 0x1 */
   1982 				if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
   1983 					FIX_MSG("ino: 0x%x node/meta inode, block_addr= 0x%x -> 0x1",
   1984 							nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
   1985 					nat_block->entries[i].block_addr = cpu_to_le32(0x1);
   1986 					ret = dev_write_block(nat_block, block_addr);
   1987 					ASSERT(ret >= 0);
   1988 				}
   1989 				continue;
   1990 			}
   1991 
   1992 			node_info_from_raw_nat(&ni, &nat_block->entries[i]);
   1993 			if (ni.blk_addr == 0x0)
   1994 				continue;
   1995 			if (ni.ino == 0x0) {
   1996 				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
   1997 					" is invalid\n", ni.ino, ni.blk_addr);
   1998 			}
   1999 			if (ni.ino == (nid + i)) {
   2000 				fsck->nat_valid_inode_cnt++;
    2001 				DBG(3, "ino[0x%8x] may be an inode\n", ni.ino);
   2002 			}
   2003 			if (nid + i == 0) {
    2004 				/*
    2005 				 * nat entry [0] must be null.  If it
    2006 				 * is corrupted, set its bit in
    2007 				 * nat_area_bitmap so that fsck_verify
    2008 				 * will nullify it.
    2009 				 */
   2010 				ASSERT_MSG("Invalid nat entry[0]: "
   2011 					"blk_addr[0x%x]\n", ni.blk_addr);
   2012 				c.fix_on = 1;
   2013 				fsck->chk.valid_nat_entry_cnt--;
   2014 			}
   2015 
   2016 			DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
   2017 				nid + i, ni.blk_addr, ni.ino);
   2018 			f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
   2019 			fsck->chk.valid_nat_entry_cnt++;
   2020 
   2021 			fsck->entries[nid + i] = nat_block->entries[i];
   2022 		}
   2023 	}
   2024 
   2025 	/* Traverse nat journal, update the corresponding entries */
   2026 	for (i = 0; i < nats_in_cursum(journal); i++) {
   2027 		struct f2fs_nat_entry raw_nat;
   2028 		nid = le32_to_cpu(nid_in_journal(journal, i));
   2029 		ni.nid = nid;
   2030 
   2031 		DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);
   2032 
   2033 		/* Clear the original bit and count */
   2034 		if (fsck->entries[nid].block_addr != 0x0) {
   2035 			fsck->chk.valid_nat_entry_cnt--;
   2036 			f2fs_clear_bit(nid, fsck->nat_area_bitmap);
   2037 			if (fsck->entries[nid].ino == nid)
   2038 				fsck->nat_valid_inode_cnt--;
   2039 		}
   2040 
   2041 		/* Use nat entries in journal */
   2042 		memcpy(&raw_nat, &nat_in_journal(journal, i),
   2043 					sizeof(struct f2fs_nat_entry));
   2044 		node_info_from_raw_nat(&ni, &raw_nat);
   2045 		if (ni.blk_addr != 0x0) {
   2046 			if (ni.ino == 0x0)
   2047 				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
   2048 					" is invalid\n", ni.ino, ni.blk_addr);
   2049 			if (ni.ino == nid) {
   2050 				fsck->nat_valid_inode_cnt++;
    2051 				DBG(3, "ino[0x%8x] may be an inode\n", ni.ino);
   2052 			}
   2053 			f2fs_set_bit(nid, fsck->nat_area_bitmap);
   2054 			fsck->chk.valid_nat_entry_cnt++;
   2055 			DBG(3, "nid[0x%x] in nat cache\n", nid);
   2056 		}
   2057 		fsck->entries[nid] = raw_nat;
   2058 	}
   2059 	free(nat_block);
   2060 
   2061 	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
   2062 			fsck->chk.valid_nat_entry_cnt,
   2063 			fsck->chk.valid_nat_entry_cnt);
   2064 }
   2065 
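         /*
          * If the sector geometry recorded in the superblock does not match
          * the device, update log_sectorsize/log_sectors_per_block and rewrite
          * both superblock copies.  Returns 0 on success or when nothing
          * changed.
          */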
   2066 static int check_sector_size(struct f2fs_super_block *sb)
   2067 {
   2068 	int index;
   2069 	u_int32_t log_sectorsize, log_sectors_per_block;
   2070 	u_int8_t *zero_buff;
   2071 
   2072 	log_sectorsize = log_base_2(c.sector_size);
   2073 	log_sectors_per_block = log_base_2(c.sectors_per_blk);
   2074 
   2075 	if (log_sectorsize == get_sb(log_sectorsize) &&
   2076 			log_sectors_per_block == get_sb(log_sectors_per_block))
   2077 		return 0;
   2078 
   2079 	zero_buff = calloc(F2FS_BLKSIZE, 1);
   2080 	ASSERT(zero_buff);
   2081 
   2082 	set_sb(log_sectorsize, log_sectorsize);
   2083 	set_sb(log_sectors_per_block, log_sectors_per_block);
   2084 
   2085 	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
    2086 	DBG(1, "\tWriting super block at offset 0x%08x\n", 0);
   2087 	for (index = 0; index < 2; index++) {
   2088 		if (dev_write(zero_buff, index * F2FS_BLKSIZE, F2FS_BLKSIZE)) {
    2089 			MSG(1, "\tError: Failed while writing super block "
    2090 				"on disk!!! index : %d\n", index);
   2091 			free(zero_buff);
   2092 			return -1;
   2093 		}
   2094 	}
   2095 
   2096 	free(zero_buff);
   2097 	return 0;
   2098 }
   2099 
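         /*
          * Bring the image online for fsck: validate a superblock (falling
          * back to the backup copy), pick a valid checkpoint, sanity-check it,
          * load the counters, build the segment and node managers, and refresh
          * nat_bits if the cached copy is stale.  Returns 0 on success, 1 when
          * auto-fix finds a clean checkpoint (no CP_FSCK_FLAG) and the run can
          * be skipped, and -1 on error.
          */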
   2100 int f2fs_do_mount(struct f2fs_sb_info *sbi)
   2101 {
   2102 	struct f2fs_checkpoint *cp = NULL;
   2103 	struct f2fs_super_block *sb = NULL;
   2104 	int ret;
   2105 
   2106 	sbi->active_logs = NR_CURSEG_TYPE;
   2107 	ret = validate_super_block(sbi, 0);
   2108 	if (ret) {
   2109 		ret = validate_super_block(sbi, 1);
   2110 		if (ret)
   2111 			return -1;
   2112 	}
   2113 	sb = F2FS_RAW_SUPER(sbi);
   2114 
   2115 	ret = check_sector_size(sb);
   2116 	if (ret)
   2117 		return -1;
   2118 
   2119 	print_raw_sb_info(sb);
   2120 
   2121 	init_sb_info(sbi);
   2122 
   2123 	ret = get_valid_checkpoint(sbi);
   2124 	if (ret) {
   2125 		ERR_MSG("Can't find valid checkpoint\n");
   2126 		return -1;
   2127 	}
   2128 
   2129 	if (sanity_check_ckpt(sbi)) {
    2130 		ERR_MSG("Checkpoint is corrupted\n");
   2131 		return -1;
   2132 	}
   2133 	cp = F2FS_CKPT(sbi);
   2134 
   2135 	print_ckpt_info(sbi);
   2136 
   2137 	if (c.auto_fix || c.preen_mode) {
   2138 		u32 flag = get_cp(ckpt_flags);
   2139 
   2140 		if (flag & CP_FSCK_FLAG)
   2141 			c.fix_on = 1;
   2142 		else if (!c.preen_mode)
   2143 			return 1;
   2144 	}
   2145 
   2146 	c.bug_on = 0;
   2147 
   2148 	sbi->total_valid_node_count = get_cp(valid_node_count);
   2149 	sbi->total_valid_inode_count = get_cp(valid_inode_count);
   2150 	sbi->user_block_count = get_cp(user_block_count);
   2151 	sbi->total_valid_block_count = get_cp(valid_block_count);
   2152 	sbi->last_valid_block_count = sbi->total_valid_block_count;
   2153 	sbi->alloc_valid_block_count = 0;
   2154 
   2155 	if (build_segment_manager(sbi)) {
   2156 		ERR_MSG("build_segment_manager failed\n");
   2157 		return -1;
   2158 	}
   2159 
   2160 	if (build_node_manager(sbi)) {
   2161 		ERR_MSG("build_node_manager failed\n");
   2162 		return -1;
   2163 	}
   2164 
   2165 	/* Check nat_bits */
   2166 	if (is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
   2167 		u_int32_t nat_bits_bytes, nat_bits_blocks;
   2168 		__le64 *kaddr;
   2169 		u_int32_t blk;
   2170 
   2171 		blk = get_sb(cp_blkaddr) + (1 << get_sb(log_blocks_per_seg));
   2172 		if (sbi->cur_cp == 2)
   2173 			blk += 1 << get_sb(log_blocks_per_seg);
   2174 
   2175 		nat_bits_bytes = get_sb(segment_count_nat) << 5;
   2176 		nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
   2177 				F2FS_BLKSIZE - 1);
   2178 		blk -= nat_bits_blocks;
   2179 
    2180 		kaddr = malloc(PAGE_SIZE);
         		ASSERT(kaddr);
   2181 		ret = dev_read_block(kaddr, blk);
   2182 		ASSERT(ret >= 0);
   2183 		if (*kaddr != get_cp_crc(cp))
   2184 			write_nat_bits(sbi, sb, cp, sbi->cur_cp);
   2185 		else
   2186 			MSG(0, "Info: Found valid nat_bits in checkpoint\n");
   2187 		free(kaddr);
   2188 	}
   2189 	return 0;
   2190 }
   2191 
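         /*
          * Release everything f2fs_do_mount() allocated: node manager,
          * SIT/segment manager structures, current segment summaries, and the
          * checkpoint and raw superblock buffers.
          */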
   2192 void f2fs_do_umount(struct f2fs_sb_info *sbi)
   2193 {
   2194 	struct sit_info *sit_i = SIT_I(sbi);
   2195 	struct f2fs_sm_info *sm_i = SM_I(sbi);
   2196 	struct f2fs_nm_info *nm_i = NM_I(sbi);
   2197 	unsigned int i;
   2198 
   2199 	/* free nm_info */
   2200 	if (c.func == SLOAD)
   2201 		free(nm_i->nid_bitmap);
   2202 	free(nm_i->nat_bitmap);
   2203 	free(sbi->nm_info);
   2204 
   2205 	/* free sit_info */
   2206 	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
   2207 		free(sit_i->sentries[i].cur_valid_map);
   2208 		free(sit_i->sentries[i].ckpt_valid_map);
   2209 	}
   2210 	free(sit_i->sit_bitmap);
   2211 	free(sm_i->sit_info);
   2212 
   2213 	/* free sm_info */
   2214 	for (i = 0; i < NR_CURSEG_TYPE; i++)
   2215 		free(sm_i->curseg_array[i].sum_blk);
   2216 
   2217 	free(sm_i->curseg_array);
   2218 	free(sbi->sm_info);
   2219 
   2220 	free(sbi->ckpt);
   2221 	free(sbi->raw_super);
   2222 }
   2223