      1 /**
      2  * mount.c
      3  *
      4  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
      5  *             http://www.samsung.com/
      6  *
      7  * This program is free software; you can redistribute it and/or modify
      8  * it under the terms of the GNU General Public License version 2 as
      9  * published by the Free Software Foundation.
     10  */
     11 #include "fsck.h"
     12 #include "xattr.h"
     13 #include <locale.h>
     14 #ifdef HAVE_LINUX_POSIX_ACL_H
     15 #include <linux/posix_acl.h>
     16 #endif
     17 #ifdef HAVE_SYS_ACL_H
     18 #include <sys/acl.h>
     19 #endif
     20 
     21 #ifndef ACL_UNDEFINED_TAG
     22 #define ACL_UNDEFINED_TAG	(0x00)
     23 #define ACL_USER_OBJ		(0x01)
     24 #define ACL_USER		(0x02)
     25 #define ACL_GROUP_OBJ		(0x04)
     26 #define ACL_GROUP		(0x08)
     27 #define ACL_MASK		(0x10)
     28 #define ACL_OTHER		(0x20)
     29 #endif
     30 
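/*
 * Count the segments that have no valid blocks and are not one of the
 * currently active logs (used, e.g., for the free-segment progress
 * display below).
 */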
     31 u32 get_free_segments(struct f2fs_sb_info *sbi)
     32 {
     33 	u32 i, free_segs = 0;
     34 
     35 	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
     36 		struct seg_entry *se = get_seg_entry(sbi, i);
     37 
     38 		if (se->valid_blocks == 0x0 &&
     39 				!IS_CUR_SEGNO(sbi, i, NO_CHECK_TYPE))
     40 			free_segs++;
     41 	}
     42 	return free_segs;
     43 }
     44 
     45 void update_free_segments(struct f2fs_sb_info *sbi)
     46 {
     47 	char *progress = "-*|*-";
     48 	static int i = 0;
     49 
     50 	if (c.dbg_lv)
     51 		return;
     52 
     53 	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
     54 	fflush(stdout);
     55 	i++;
     56 }
     57 
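/*
 * With ACL headers available, dump each POSIX ACL entry in structured
 * form; otherwise fall back to a plain hex dump of the raw xattr value.
 */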
     58 #if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
     59 void print_acl(char *value, int size)
     60 {
     61 	struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
     62 	struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
     63 	const char *end = value + size;
     64 	int i, count;
     65 
     66 	if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) {
     67 		MSG(0, "Invalid ACL version [0x%x : 0x%x]\n",
     68 				le32_to_cpu(hdr->a_version), F2FS_ACL_VERSION);
     69 		return;
     70 	}
     71 
     72 	count = f2fs_acl_count(size);
     73 	if (count <= 0) {
     74 		MSG(0, "Invalid ACL value size %d\n", size);
     75 		return;
     76 	}
     77 
     78 	for (i = 0; i < count; i++) {
     79 		if ((char *)entry > end) {
     80 			MSG(0, "Invalid ACL entries count %d\n", count);
     81 			return;
     82 		}
     83 
     84 		switch (le16_to_cpu(entry->e_tag)) {
     85 		case ACL_USER_OBJ:
     86 		case ACL_GROUP_OBJ:
     87 		case ACL_MASK:
     88 		case ACL_OTHER:
     89 			MSG(0, "tag:0x%x perm:0x%x\n",
     90 					le16_to_cpu(entry->e_tag),
     91 					le16_to_cpu(entry->e_perm));
     92 			entry = (struct f2fs_acl_entry *)((char *)entry +
     93 					sizeof(struct f2fs_acl_entry_short));
     94 			break;
     95 		case ACL_USER:
     96 			MSG(0, "tag:0x%x perm:0x%x uid:%u\n",
     97 					le16_to_cpu(entry->e_tag),
     98 					le16_to_cpu(entry->e_perm),
     99 					le32_to_cpu(entry->e_id));
    100 			entry = (struct f2fs_acl_entry *)((char *)entry +
    101 					sizeof(struct f2fs_acl_entry));
    102 			break;
    103 		case ACL_GROUP:
    104 			MSG(0, "tag:0x%x perm:0x%x gid:%u\n",
    105 					le16_to_cpu(entry->e_tag),
    106 					le16_to_cpu(entry->e_perm),
    107 					le32_to_cpu(entry->e_id));
    108 			entry = (struct f2fs_acl_entry *)((char *)entry +
    109 					sizeof(struct f2fs_acl_entry));
    110 			break;
    111 		default:
    112 			MSG(0, "Unknown ACL tag 0x%x\n",
    113 					le16_to_cpu(entry->e_tag));
    114 			return;
    115 		}
    116 	}
    117 }
    118 #else
    119 #define print_acl(value, size) do {		\
    120 	int i;					\
    121 	for (i = 0; i < size; i++)		\
    122 		MSG(0, "%02X", value[i]);	\
    123 	MSG(0, "\n");				\
    124 } while (0)
    125 #endif
    126 
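/*
 * Dump one xattr entry: the name, then a value decoded according to the
 * name index (POSIX ACLs, raw hex for user/security/trusted/lustre, or
 * the fscrypt context fields for the encryption xattr).
 */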
    127 void print_xattr_entry(struct f2fs_xattr_entry *ent)
    128 {
    129 	char *value = (char *)(ent->e_name + le16_to_cpu(ent->e_name_len));
    130 	struct fscrypt_context *ctx;
    131 	int i;
    132 
    133 	MSG(0, "\nxattr: e_name_index:%d e_name:", ent->e_name_index);
    134 	for (i = 0; i < le16_to_cpu(ent->e_name_len); i++)
    135 		MSG(0, "%c", ent->e_name[i]);
    136 	MSG(0, " e_name_len:%d e_value_size:%d e_value:\n",
    137 			ent->e_name_len, le16_to_cpu(ent->e_value_size));
    138 
    139 	switch (ent->e_name_index) {
    140 	case F2FS_XATTR_INDEX_POSIX_ACL_ACCESS:
    141 	case F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT:
    142 		print_acl(value, le16_to_cpu(ent->e_value_size));
    143 		break;
    144 	case F2FS_XATTR_INDEX_USER:
    145 	case F2FS_XATTR_INDEX_SECURITY:
    146 	case F2FS_XATTR_INDEX_TRUSTED:
    147 	case F2FS_XATTR_INDEX_LUSTRE:
    148 		for (i = 0; i < le16_to_cpu(ent->e_value_size); i++)
    149 			MSG(0, "%02X", value[i]);
    150 		MSG(0, "\n");
    151 		break;
    152 	case F2FS_XATTR_INDEX_ENCRYPTION:
    153 		ctx = (struct fscrypt_context *)value;
    154 		MSG(0, "format: %d\n", ctx->format);
    155 		MSG(0, "contents_encryption_mode: 0x%x\n", ctx->contents_encryption_mode);
    156 		MSG(0, "filenames_encryption_mode: 0x%x\n", ctx->filenames_encryption_mode);
    157 		MSG(0, "flags: 0x%x\n", ctx->flags);
    158 		MSG(0, "master_key_descriptor: ");
    159 		for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
    160 			MSG(0, "%02X", ctx->master_key_descriptor[i]);
    161 		MSG(0, "\nnonce: ");
    162 		for (i = 0; i < FS_KEY_DERIVATION_NONCE_SIZE; i++)
    163 			MSG(0, "%02X", ctx->nonce[i]);
    164 		MSG(0, "\n");
    165 		break;
    166 	default:
    167 		break;
    168 	}
    169 }
    170 
    171 void print_inode_info(struct f2fs_sb_info *sbi,
    172 			struct f2fs_node *node, int name)
    173 {
    174 	struct f2fs_inode *inode = &node->i;
    175 	void *xattr_addr;
    176 	struct f2fs_xattr_entry *ent;
    177 	unsigned char en[F2FS_NAME_LEN + 1];
    178 	unsigned int i = 0;
    179 	int namelen = le32_to_cpu(inode->i_namelen);
    180 	int enc_name = file_enc_name(inode);
    181 	int ofs = __get_extra_isize(inode);
    182 
    183 	namelen = convert_encrypted_name(inode->i_name, namelen, en, enc_name);
    184 	en[namelen] = '\0';
    185 	if (name && namelen) {
    186 		inode->i_name[namelen] = '\0';
    187 		MSG(0, " - File name         : %s%s\n", en,
    188 				enc_name ? " <encrypted>" : "");
    189 		setlocale(LC_ALL, "");
    190 		MSG(0, " - File size         : %'llu (bytes)\n",
    191 				le64_to_cpu(inode->i_size));
    192 		return;
    193 	}
    194 
    195 	DISP_u32(inode, i_mode);
    196 	DISP_u32(inode, i_advise);
    197 	DISP_u32(inode, i_uid);
    198 	DISP_u32(inode, i_gid);
    199 	DISP_u32(inode, i_links);
    200 	DISP_u64(inode, i_size);
    201 	DISP_u64(inode, i_blocks);
    202 
    203 	DISP_u64(inode, i_atime);
    204 	DISP_u32(inode, i_atime_nsec);
    205 	DISP_u64(inode, i_ctime);
    206 	DISP_u32(inode, i_ctime_nsec);
    207 	DISP_u64(inode, i_mtime);
    208 	DISP_u32(inode, i_mtime_nsec);
    209 
    210 	DISP_u32(inode, i_generation);
    211 	DISP_u32(inode, i_current_depth);
    212 	DISP_u32(inode, i_xattr_nid);
    213 	DISP_u32(inode, i_flags);
    214 	DISP_u32(inode, i_inline);
    215 	DISP_u32(inode, i_pino);
    216 	DISP_u32(inode, i_dir_level);
    217 
    218 	if (namelen) {
    219 		DISP_u32(inode, i_namelen);
    220 		printf("%-30s\t\t[%s]\n", "i_name", en);
    221 	}
    222 
    223 	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
    224 			le32_to_cpu(inode->i_ext.fofs),
    225 			le32_to_cpu(inode->i_ext.blk_addr),
    226 			le32_to_cpu(inode->i_ext.len));
    227 
    228 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
    229 		DISP_u16(inode, i_extra_isize);
    230 		if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
    231 			DISP_u16(inode, i_inline_xattr_size);
    232 		if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
    233 			DISP_u32(inode, i_projid);
    234 		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
    235 			DISP_u32(inode, i_inode_checksum);
    236 		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
    237 			DISP_u64(inode, i_crtime);
    238 			DISP_u32(inode, i_crtime_nsec);
    239 		}
    240 	}
    241 
    242 	DISP_u32(inode, i_addr[ofs]);		/* Pointers to data blocks */
    243 	DISP_u32(inode, i_addr[ofs + 1]);	/* Pointers to data blocks */
    244 	DISP_u32(inode, i_addr[ofs + 2]);	/* Pointers to data blocks */
    245 	DISP_u32(inode, i_addr[ofs + 3]);	/* Pointers to data blocks */
    246 
    247 	for (i = ofs + 3; i < ADDRS_PER_INODE(inode); i++) {
    248 		if (inode->i_addr[i] == 0x0)
    249 			break;
    250 		printf("i_addr[0x%x] points data block\t\t[0x%4x]\n",
    251 				i, le32_to_cpu(inode->i_addr[i]));
    252 	}
    253 
    254 	DISP_u32(inode, i_nid[0]);	/* direct */
    255 	DISP_u32(inode, i_nid[1]);	/* direct */
    256 	DISP_u32(inode, i_nid[2]);	/* indirect */
    257 	DISP_u32(inode, i_nid[3]);	/* indirect */
    258 	DISP_u32(inode, i_nid[4]);	/* double indirect */
    259 
    260 	xattr_addr = read_all_xattrs(sbi, node);
    261 	list_for_each_xattr(ent, xattr_addr) {
    262 		print_xattr_entry(ent);
    263 	}
    264 	free(xattr_addr);
    265 
    266 	printf("\n");
    267 }
    268 
    269 void print_node_info(struct f2fs_sb_info *sbi,
    270 			struct f2fs_node *node_block, int verbose)
    271 {
    272 	nid_t ino = le32_to_cpu(node_block->footer.ino);
    273 	nid_t nid = le32_to_cpu(node_block->footer.nid);
     274 	/* Is this an inode? */
    275 	if (ino == nid) {
    276 		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
    277 		print_inode_info(sbi, node_block, verbose);
    278 	} else {
    279 		int i;
    280 		u32 *dump_blk = (u32 *)node_block;
    281 		DBG(verbose,
    282 			"Node ID [0x%x:%u] is direct node or indirect node.\n",
    283 								nid, nid);
    284 		for (i = 0; i <= 10; i++)
    285 			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
    286 						i, dump_blk[i], dump_blk[i]);
    287 	}
    288 }
    289 
    290 static void DISP_label(u_int16_t *name)
    291 {
    292 	char buffer[MAX_VOLUME_NAME];
    293 
    294 	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
     295 	printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
    296 }
    297 
    298 void print_raw_sb_info(struct f2fs_super_block *sb)
    299 {
    300 	if (!c.dbg_lv)
    301 		return;
    302 
    303 	printf("\n");
    304 	printf("+--------------------------------------------------------+\n");
    305 	printf("| Super block                                            |\n");
    306 	printf("+--------------------------------------------------------+\n");
    307 
    308 	DISP_u32(sb, magic);
    309 	DISP_u32(sb, major_ver);
    310 
    311 	DISP_label(sb->volume_name);
    312 
    313 	DISP_u32(sb, minor_ver);
    314 	DISP_u32(sb, log_sectorsize);
    315 	DISP_u32(sb, log_sectors_per_block);
    316 
    317 	DISP_u32(sb, log_blocksize);
    318 	DISP_u32(sb, log_blocks_per_seg);
    319 	DISP_u32(sb, segs_per_sec);
    320 	DISP_u32(sb, secs_per_zone);
    321 	DISP_u32(sb, checksum_offset);
    322 	DISP_u64(sb, block_count);
    323 
    324 	DISP_u32(sb, section_count);
    325 	DISP_u32(sb, segment_count);
    326 	DISP_u32(sb, segment_count_ckpt);
    327 	DISP_u32(sb, segment_count_sit);
    328 	DISP_u32(sb, segment_count_nat);
    329 
    330 	DISP_u32(sb, segment_count_ssa);
    331 	DISP_u32(sb, segment_count_main);
    332 	DISP_u32(sb, segment0_blkaddr);
    333 
    334 	DISP_u32(sb, cp_blkaddr);
    335 	DISP_u32(sb, sit_blkaddr);
    336 	DISP_u32(sb, nat_blkaddr);
    337 	DISP_u32(sb, ssa_blkaddr);
    338 	DISP_u32(sb, main_blkaddr);
    339 
    340 	DISP_u32(sb, root_ino);
    341 	DISP_u32(sb, node_ino);
    342 	DISP_u32(sb, meta_ino);
    343 	DISP_u32(sb, cp_payload);
    344 	DISP("%s", sb, version);
    345 	printf("\n");
    346 }
    347 
    348 void print_ckpt_info(struct f2fs_sb_info *sbi)
    349 {
    350 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    351 
    352 	if (!c.dbg_lv)
    353 		return;
    354 
    355 	printf("\n");
    356 	printf("+--------------------------------------------------------+\n");
    357 	printf("| Checkpoint                                             |\n");
    358 	printf("+--------------------------------------------------------+\n");
    359 
    360 	DISP_u64(cp, checkpoint_ver);
    361 	DISP_u64(cp, user_block_count);
    362 	DISP_u64(cp, valid_block_count);
    363 	DISP_u32(cp, rsvd_segment_count);
    364 	DISP_u32(cp, overprov_segment_count);
    365 	DISP_u32(cp, free_segment_count);
    366 
    367 	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
    368 	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
    369 	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
    370 	DISP_u32(cp, cur_node_segno[0]);
    371 	DISP_u32(cp, cur_node_segno[1]);
    372 	DISP_u32(cp, cur_node_segno[2]);
    373 
    374 	DISP_u32(cp, cur_node_blkoff[0]);
    375 	DISP_u32(cp, cur_node_blkoff[1]);
    376 	DISP_u32(cp, cur_node_blkoff[2]);
    377 
    378 
    379 	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
    380 	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
    381 	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
    382 	DISP_u32(cp, cur_data_segno[0]);
    383 	DISP_u32(cp, cur_data_segno[1]);
    384 	DISP_u32(cp, cur_data_segno[2]);
    385 
    386 	DISP_u32(cp, cur_data_blkoff[0]);
    387 	DISP_u32(cp, cur_data_blkoff[1]);
    388 	DISP_u32(cp, cur_data_blkoff[2]);
    389 
    390 	DISP_u32(cp, ckpt_flags);
    391 	DISP_u32(cp, cp_pack_total_block_count);
    392 	DISP_u32(cp, cp_pack_start_sum);
    393 	DISP_u32(cp, valid_node_count);
    394 	DISP_u32(cp, valid_inode_count);
    395 	DISP_u32(cp, next_free_nid);
    396 	DISP_u32(cp, sit_ver_bitmap_bytesize);
    397 	DISP_u32(cp, nat_ver_bitmap_bytesize);
    398 	DISP_u32(cp, checksum_offset);
    399 	DISP_u64(cp, elapsed_time);
    400 
    401 	DISP_u32(cp, sit_nat_version_bitmap[0]);
    402 	printf("\n\n");
    403 }
    404 
    405 void print_cp_state(u32 flag)
    406 {
    407 	MSG(0, "Info: checkpoint state = %x : ", flag);
    408 	if (flag & CP_NOCRC_RECOVERY_FLAG)
    409 		MSG(0, "%s", " allow_nocrc");
    410 	if (flag & CP_TRIMMED_FLAG)
    411 		MSG(0, "%s", " trimmed");
    412 	if (flag & CP_NAT_BITS_FLAG)
    413 		MSG(0, "%s", " nat_bits");
    414 	if (flag & CP_CRC_RECOVERY_FLAG)
    415 		MSG(0, "%s", " crc");
    416 	if (flag & CP_FASTBOOT_FLAG)
    417 		MSG(0, "%s", " fastboot");
    418 	if (flag & CP_FSCK_FLAG)
    419 		MSG(0, "%s", " fsck");
    420 	if (flag & CP_ERROR_FLAG)
    421 		MSG(0, "%s", " error");
    422 	if (flag & CP_COMPACT_SUM_FLAG)
    423 		MSG(0, "%s", " compacted_summary");
    424 	if (flag & CP_ORPHAN_PRESENT_FLAG)
    425 		MSG(0, "%s", " orphan_inodes");
    426 	if (flag & CP_UMOUNT_FLAG)
    427 		MSG(0, "%s", " unmount");
    428 	else
    429 		MSG(0, "%s", " sudden-power-off");
    430 	MSG(0, "\n");
    431 }
    432 
    433 void print_sb_state(struct f2fs_super_block *sb)
    434 {
    435 	__le32 f = sb->feature;
    436 	int i;
    437 
    438 	MSG(0, "Info: superblock features = %x : ", f);
    439 	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
    440 		MSG(0, "%s", " encrypt");
    441 	}
    442 	if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
    443 		MSG(0, "%s", " verity");
    444 	}
    445 	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
    446 		MSG(0, "%s", " blkzoned");
    447 	}
    448 	if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
    449 		MSG(0, "%s", " extra_attr");
    450 	}
    451 	if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
    452 		MSG(0, "%s", " project_quota");
    453 	}
    454 	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
    455 		MSG(0, "%s", " inode_checksum");
    456 	}
    457 	if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
    458 		MSG(0, "%s", " flexible_inline_xattr");
    459 	}
    460 	if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
    461 		MSG(0, "%s", " quota_ino");
    462 	}
    463 	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
    464 		MSG(0, "%s", " inode_crtime");
    465 	}
    466 	MSG(0, "\n");
    467 	MSG(0, "Info: superblock encrypt level = %d, salt = ",
    468 					sb->encryption_level);
    469 	for (i = 0; i < 16; i++)
    470 		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
    471 	MSG(0, "\n");
    472 }
    473 
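/*
 * Check that the metadata areas are laid out back to back in the order
 * CP -> SIT -> NAT -> SSA -> MAIN and that the main area does not run
 * past the last segment.  If the main area ends before the last segment,
 * segment_count is shrunk to match and the superblock is rewritten.
 */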
    474 static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
    475 							u64 offset)
    476 {
    477 	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
    478 	u32 cp_blkaddr = get_sb(cp_blkaddr);
    479 	u32 sit_blkaddr = get_sb(sit_blkaddr);
    480 	u32 nat_blkaddr = get_sb(nat_blkaddr);
    481 	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
    482 	u32 main_blkaddr = get_sb(main_blkaddr);
    483 	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
    484 	u32 segment_count_sit = get_sb(segment_count_sit);
    485 	u32 segment_count_nat = get_sb(segment_count_nat);
    486 	u32 segment_count_ssa = get_sb(segment_count_ssa);
    487 	u32 segment_count_main = get_sb(segment_count_main);
    488 	u32 segment_count = get_sb(segment_count);
    489 	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
    490 	u64 main_end_blkaddr = main_blkaddr +
    491 				(segment_count_main << log_blocks_per_seg);
    492 	u64 seg_end_blkaddr = segment0_blkaddr +
    493 				(segment_count << log_blocks_per_seg);
    494 
    495 	if (segment0_blkaddr != cp_blkaddr) {
    496 		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
    497 				segment0_blkaddr, cp_blkaddr);
    498 		return -1;
    499 	}
    500 
    501 	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
    502 							sit_blkaddr) {
    503 		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
    504 			cp_blkaddr, sit_blkaddr,
    505 			segment_count_ckpt << log_blocks_per_seg);
    506 		return -1;
    507 	}
    508 
    509 	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
    510 							nat_blkaddr) {
    511 		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
    512 			sit_blkaddr, nat_blkaddr,
    513 			segment_count_sit << log_blocks_per_seg);
    514 		return -1;
    515 	}
    516 
    517 	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
    518 							ssa_blkaddr) {
    519 		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
    520 			nat_blkaddr, ssa_blkaddr,
    521 			segment_count_nat << log_blocks_per_seg);
    522 		return -1;
    523 	}
    524 
    525 	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
    526 							main_blkaddr) {
    527 		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
    528 			ssa_blkaddr, main_blkaddr,
    529 			segment_count_ssa << log_blocks_per_seg);
    530 		return -1;
    531 	}
    532 
    533 	if (main_end_blkaddr > seg_end_blkaddr) {
    534 		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
    535 			main_blkaddr,
    536 			segment0_blkaddr +
    537 				(segment_count << log_blocks_per_seg),
    538 			segment_count_main << log_blocks_per_seg);
    539 		return -1;
    540 	} else if (main_end_blkaddr < seg_end_blkaddr) {
    541 		int err;
    542 
    543 		set_sb(segment_count, (main_end_blkaddr -
    544 				segment0_blkaddr) >> log_blocks_per_seg);
    545 
    546 		err = dev_write(sb, offset, sizeof(struct f2fs_super_block));
    547 		MSG(0, "Info: Fix alignment: %s, start(%u) end(%u) block(%u)\n",
    548 			err ? "failed": "done",
    549 			main_blkaddr,
    550 			segment0_blkaddr +
    551 				(segment_count << log_blocks_per_seg),
    552 			segment_count_main << log_blocks_per_seg);
    553 	}
    554 	return 0;
    555 }
    556 
    557 int sanity_check_raw_super(struct f2fs_super_block *sb, u64 offset)
    558 {
    559 	unsigned int blocksize;
    560 
    561 	if (F2FS_SUPER_MAGIC != get_sb(magic))
    562 		return -1;
    563 
    564 	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE)
    565 		return -1;
    566 
    567 	blocksize = 1 << get_sb(log_blocksize);
    568 	if (F2FS_BLKSIZE != blocksize)
    569 		return -1;
    570 
    571 	/* check log blocks per segment */
    572 	if (get_sb(log_blocks_per_seg) != 9)
    573 		return -1;
    574 
    575 	/* Currently, support 512/1024/2048/4096 bytes sector size */
    576 	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
    577 			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE)
    578 		return -1;
    579 
    580 	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
    581 						F2FS_MAX_LOG_SECTOR_SIZE)
    582 		return -1;
    583 
    584 	/* check reserved ino info */
    585 	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
    586 					get_sb(root_ino) != 3)
    587 		return -1;
    588 
    589 	/* Check zoned block device feature */
    590 	if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
    591 			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
    592 		MSG(0, "\tMissing zoned block device feature\n");
    593 		return -1;
    594 	}
    595 
    596 	if (get_sb(segment_count) > F2FS_MAX_SEGMENT)
    597 		return -1;
    598 
    599 	if (sanity_check_area_boundary(sb, offset))
    600 		return -1;
    601 	return 0;
    602 }
    603 
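/*
 * Read one of the two superblock copies (block 0 or 1) and sanity check
 * it.  On success, the kernel version string recorded in the superblock
 * is compared with the running/target kernel version and rewritten if
 * they differ.
 */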
    604 int validate_super_block(struct f2fs_sb_info *sbi, int block)
    605 {
    606 	u64 offset;
    607 	char buf[F2FS_BLKSIZE];
    608 
    609 	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
    610 
    611 	if (block == 0)
    612 		offset = F2FS_SUPER_OFFSET;
    613 	else
    614 		offset = F2FS_BLKSIZE + F2FS_SUPER_OFFSET;
    615 
    616 	if (dev_read_block(buf, block))
    617 		return -1;
    618 
    619 	memcpy(sbi->raw_super, buf + F2FS_SUPER_OFFSET,
    620 					sizeof(struct f2fs_super_block));
    621 
    622 	if (!sanity_check_raw_super(sbi->raw_super, offset)) {
    623 		/* get kernel version */
    624 		if (c.kd >= 0) {
    625 			dev_read_version(c.version, 0, VERSION_LEN);
    626 			get_kernel_version(c.version);
    627 		} else {
    628 			get_kernel_uname_version(c.version);
    629 		}
    630 
    631 		/* build sb version */
    632 		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
    633 		get_kernel_version(c.sb_version);
    634 		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
    635 		get_kernel_version(c.init_version);
    636 
    637 		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
    638 		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
    639 					c.sb_version, c.version);
    640 		if (memcmp(c.sb_version, c.version, VERSION_LEN)) {
    641 			int ret;
    642 
    643 			memcpy(sbi->raw_super->version,
    644 						c.version, VERSION_LEN);
    645 			ret = dev_write(sbi->raw_super, offset,
    646 					sizeof(struct f2fs_super_block));
    647 			ASSERT(ret >= 0);
    648 
    649 			c.auto_fix = 0;
    650 			c.fix_on = 1;
    651 		}
    652 		print_sb_state(sbi->raw_super);
    653 		return 0;
    654 	}
    655 
    656 	free(sbi->raw_super);
    657 	sbi->raw_super = NULL;
    658 	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", block);
    659 
    660 	return -EINVAL;
    661 }
    662 
    663 int init_sb_info(struct f2fs_sb_info *sbi)
    664 {
    665 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    666 	u64 total_sectors;
    667 	int i;
    668 
    669 	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
    670 	sbi->log_blocksize = get_sb(log_blocksize);
    671 	sbi->blocksize = 1 << sbi->log_blocksize;
    672 	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
    673 	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
    674 	sbi->segs_per_sec = get_sb(segs_per_sec);
    675 	sbi->secs_per_zone = get_sb(secs_per_zone);
    676 	sbi->total_sections = get_sb(section_count);
    677 	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
    678 				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
    679 	sbi->root_ino_num = get_sb(root_ino);
    680 	sbi->node_ino_num = get_sb(node_ino);
    681 	sbi->meta_ino_num = get_sb(meta_ino);
    682 	sbi->cur_victim_sec = NULL_SEGNO;
    683 
    684 	for (i = 0; i < MAX_DEVICES; i++) {
    685 		if (!sb->devs[i].path[0])
    686 			break;
    687 
    688 		if (i) {
    689 			c.devices[i].path = strdup((char *)sb->devs[i].path);
    690 			if (get_device_info(i))
    691 				ASSERT(0);
    692 		} else {
    693 			ASSERT(!strcmp((char *)sb->devs[i].path,
    694 						(char *)c.devices[i].path));
    695 		}
    696 
    697 		c.devices[i].total_segments =
    698 			le32_to_cpu(sb->devs[i].total_segments);
    699 		if (i)
    700 			c.devices[i].start_blkaddr =
    701 				c.devices[i - 1].end_blkaddr + 1;
    702 		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
    703 			c.devices[i].total_segments *
    704 			c.blks_per_seg - 1;
    705 		if (i == 0)
    706 			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);
    707 
    708 		c.ndevs = i + 1;
    709 		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
    710 				i, c.devices[i].path,
    711 				c.devices[i].start_blkaddr,
    712 				c.devices[i].end_blkaddr);
    713 	}
    714 
    715 	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
    716 	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
    717 				total_sectors, total_sectors >>
    718 						(20 - get_sb(log_sectorsize)));
    719 	return 0;
    720 }
    721 
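/*
 * Validate one checkpoint pack: both the first and the last block of
 * the pack must carry a valid CRC and the same checkpoint version.
 * Returns the first cp block on success, NULL otherwise.
 */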
    722 void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
    723 				unsigned long long *version)
    724 {
    725 	void *cp_page_1, *cp_page_2;
    726 	struct f2fs_checkpoint *cp;
    727 	unsigned long blk_size = sbi->blocksize;
    728 	unsigned long long cur_version = 0, pre_version = 0;
    729 	unsigned int crc = 0;
    730 	size_t crc_offset;
    731 
    732 	/* Read the 1st cp block in this CP pack */
    733 	cp_page_1 = malloc(PAGE_SIZE);
    734 	if (dev_read_block(cp_page_1, cp_addr) < 0)
    735 		goto invalid_cp1;
    736 
    737 	cp = (struct f2fs_checkpoint *)cp_page_1;
    738 	crc_offset = get_cp(checksum_offset);
    739 	if (crc_offset > (blk_size - sizeof(__le32)))
    740 		goto invalid_cp1;
    741 
    742 	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
    743 	if (f2fs_crc_valid(crc, cp, crc_offset))
    744 		goto invalid_cp1;
    745 
    746 	pre_version = get_cp(checkpoint_ver);
    747 
    748 	/* Read the 2nd cp block in this CP pack */
    749 	cp_page_2 = malloc(PAGE_SIZE);
    750 	cp_addr += get_cp(cp_pack_total_block_count) - 1;
    751 
    752 	if (dev_read_block(cp_page_2, cp_addr) < 0)
    753 		goto invalid_cp2;
    754 
    755 	cp = (struct f2fs_checkpoint *)cp_page_2;
    756 	crc_offset = get_cp(checksum_offset);
    757 	if (crc_offset > (blk_size - sizeof(__le32)))
    758 		goto invalid_cp2;
    759 
    760 	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + crc_offset));
    761 	if (f2fs_crc_valid(crc, cp, crc_offset))
    762 		goto invalid_cp2;
    763 
    764 	cur_version = get_cp(checkpoint_ver);
    765 
    766 	if (cur_version == pre_version) {
    767 		*version = cur_version;
    768 		free(cp_page_2);
    769 		return cp_page_1;
    770 	}
    771 
    772 invalid_cp2:
    773 	free(cp_page_2);
    774 invalid_cp1:
    775 	free(cp_page_1);
    776 	return NULL;
    777 }
    778 
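/*
 * The two checkpoint packs start one segment apart; whichever pack
 * carries the newer valid version wins.  Any extra cp_payload blocks
 * (the overflow of the SIT version bitmap) are appended to sbi->ckpt.
 */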
    779 int get_valid_checkpoint(struct f2fs_sb_info *sbi)
    780 {
    781 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    782 	void *cp1, *cp2, *cur_page;
    783 	unsigned long blk_size = sbi->blocksize;
    784 	unsigned long long cp1_version = 0, cp2_version = 0, version;
    785 	unsigned long long cp_start_blk_no;
    786 	unsigned int cp_payload, cp_blks;
    787 	int ret;
    788 
    789 	cp_payload = get_sb(cp_payload);
    790 	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
    791 		return -EINVAL;
    792 
    793 	cp_blks = 1 + cp_payload;
    794 	sbi->ckpt = malloc(cp_blks * blk_size);
    795 	if (!sbi->ckpt)
    796 		return -ENOMEM;
    797 	/*
     798 	 * Finding the valid cp block involves reading both
     799 	 * checkpoint packs (cp pack 1 and cp pack 2).
    800 	 */
    801 	cp_start_blk_no = get_sb(cp_blkaddr);
    802 	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
    803 
    804 	/* The second checkpoint pack should start at the next segment */
    805 	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
    806 	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);
    807 
    808 	if (cp1 && cp2) {
    809 		if (ver_after(cp2_version, cp1_version)) {
    810 			cur_page = cp2;
    811 			sbi->cur_cp = 2;
    812 			version = cp2_version;
    813 		} else {
    814 			cur_page = cp1;
    815 			sbi->cur_cp = 1;
    816 			version = cp1_version;
    817 		}
    818 	} else if (cp1) {
    819 		cur_page = cp1;
    820 		sbi->cur_cp = 1;
    821 		version = cp1_version;
    822 	} else if (cp2) {
    823 		cur_page = cp2;
    824 		sbi->cur_cp = 2;
    825 		version = cp2_version;
    826 	} else
    827 		goto fail_no_cp;
    828 
    829 	MSG(0, "Info: CKPT version = %llx\n", version);
    830 
    831 	memcpy(sbi->ckpt, cur_page, blk_size);
    832 
    833 	if (cp_blks > 1) {
    834 		unsigned int i;
    835 		unsigned long long cp_blk_no;
    836 
    837 		cp_blk_no = get_sb(cp_blkaddr);
    838 		if (cur_page == cp2)
    839 			cp_blk_no += 1 << get_sb(log_blocks_per_seg);
    840 
    841 		/* copy sit bitmap */
    842 		for (i = 1; i < cp_blks; i++) {
    843 			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
    844 			ret = dev_read_block(cur_page, cp_blk_no + i);
    845 			ASSERT(ret >= 0);
    846 			memcpy(ckpt + i * blk_size, cur_page, blk_size);
    847 		}
    848 	}
    849 	if (cp1)
    850 		free(cp1);
    851 	if (cp2)
    852 		free(cp2);
    853 	return 0;
    854 
    855 fail_no_cp:
    856 	free(sbi->ckpt);
    857 	sbi->ckpt = NULL;
    858 	return -EINVAL;
    859 }
    860 
    861 int sanity_check_ckpt(struct f2fs_sb_info *sbi)
    862 {
    863 	unsigned int total, fsmeta;
    864 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
    865 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
    866 
    867 	total = get_sb(segment_count);
    868 	fsmeta = get_sb(segment_count_ckpt);
    869 	fsmeta += get_sb(segment_count_sit);
    870 	fsmeta += get_sb(segment_count_nat);
    871 	fsmeta += get_cp(rsvd_segment_count);
    872 	fsmeta += get_sb(segment_count_ssa);
    873 
    874 	if (fsmeta >= total)
    875 		return 1;
    876 
    877 	return 0;
    878 }
    879 
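/*
 * NAT blocks are stored in segment pairs; the NAT version bitmap selects
 * which copy of a given block is currently valid, so the address is the
 * base of the pair plus one extra segment when the bit is set.
 */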
    880 static pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
    881 {
    882 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    883 	pgoff_t block_off;
    884 	pgoff_t block_addr;
    885 	int seg_off;
    886 
    887 	block_off = NAT_BLOCK_OFFSET(start);
    888 	seg_off = block_off >> sbi->log_blocks_per_seg;
    889 
    890 	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
    891 			(seg_off << sbi->log_blocks_per_seg << 1) +
     892 			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
    893 
    894 	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
    895 		block_addr += sbi->blocks_per_seg;
    896 
    897 	return block_addr;
    898 }
    899 
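/*
 * Build an in-memory bitmap of allocated nids (fsck/sload only) by
 * scanning every on-disk NAT block and then applying the NAT entries
 * still pending in the hot-data curseg journal.
 */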
    900 static int f2fs_init_nid_bitmap(struct f2fs_sb_info *sbi)
    901 {
    902 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    903 	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
    904 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
    905 	struct f2fs_summary_block *sum = curseg->sum_blk;
    906 	struct f2fs_journal *journal = &sum->journal;
    907 	struct f2fs_nat_block *nat_block;
    908 	block_t start_blk;
    909 	nid_t nid;
    910 	int i;
    911 
    912 	if (!(c.func == SLOAD || c.func == FSCK))
    913 		return 0;
    914 
    915 	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
    916 	if (!nm_i->nid_bitmap)
    917 		return -ENOMEM;
    918 
    919 	/* arbitrarily set 0 bit */
    920 	f2fs_set_bit(0, nm_i->nid_bitmap);
    921 
    922 	nat_block = malloc(F2FS_BLKSIZE);
    923 	if (!nat_block) {
    924 		free(nm_i->nid_bitmap);
    925 		return -ENOMEM;
    926 	}
    927 
    928 	for (nid = 0; nid < nm_i->max_nid; nid++) {
    929 		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
    930 			int ret;
    931 
    932 			start_blk = current_nat_addr(sbi, nid);
    933 			ret = dev_read_block(nat_block, start_blk);
    934 			ASSERT(ret >= 0);
    935 		}
    936 
    937 		if (nat_block->entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
    938 			f2fs_set_bit(nid, nm_i->nid_bitmap);
    939 	}
    940 
    941 	for (i = 0; i < nats_in_cursum(journal); i++) {
    942 		block_t addr;
    943 
    944 		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
    945 		nid = le32_to_cpu(nid_in_journal(journal, i));
    946 		if (addr != NULL_ADDR)
    947 			f2fs_set_bit(nid, nm_i->nid_bitmap);
    948 	}
    949 	free(nat_block);
    950 	return 0;
    951 }
    952 
    953 u32 update_nat_bits_flags(struct f2fs_super_block *sb,
    954 				struct f2fs_checkpoint *cp, u32 flags)
    955 {
    956 	u_int32_t nat_bits_bytes, nat_bits_blocks;
    957 
    958 	nat_bits_bytes = get_sb(segment_count_nat) << 5;
    959 	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
    960 						F2FS_BLKSIZE - 1);
    961 	if (get_cp(cp_pack_total_block_count) <=
    962 			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
    963 		flags |= CP_NAT_BITS_FLAG;
    964 	else
    965 		flags &= (~CP_NAT_BITS_FLAG);
    966 
    967 	return flags;
    968 }
    969 
     970 /* should call flush_journal_entries() before this */
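/*
 * The nat_bits area is an 8-byte checkpoint CRC followed by the
 * full_nat_bits and empty_nat_bits bitmaps (one bit per NAT block each),
 * written at the end of the checkpoint pack's segment.
 */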
    971 void write_nat_bits(struct f2fs_sb_info *sbi,
    972 	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
    973 {
    974 	struct f2fs_nm_info *nm_i = NM_I(sbi);
    975 	u_int32_t nat_blocks = get_sb(segment_count_nat) <<
    976 				(get_sb(log_blocks_per_seg) - 1);
    977 	u_int32_t nat_bits_bytes = nat_blocks >> 3;
    978 	u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
    979 					8 + F2FS_BLKSIZE - 1);
    980 	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
    981 	struct f2fs_nat_block *nat_block;
    982 	u_int32_t i, j;
    983 	block_t blkaddr;
    984 	int ret;
    985 
    986 	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
    987 	ASSERT(nat_bits);
    988 
    989 	nat_block = malloc(F2FS_BLKSIZE);
    990 	ASSERT(nat_block);
    991 
    992 	full_nat_bits = nat_bits + 8;
    993 	empty_nat_bits = full_nat_bits + nat_bits_bytes;
    994 
    995 	memset(full_nat_bits, 0, nat_bits_bytes);
    996 	memset(empty_nat_bits, 0, nat_bits_bytes);
    997 
    998 	for (i = 0; i < nat_blocks; i++) {
    999 		int seg_off = i >> get_sb(log_blocks_per_seg);
   1000 		int valid = 0;
   1001 
   1002 		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
   1003 				(seg_off << get_sb(log_blocks_per_seg) << 1) +
   1004 				(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));
   1005 
   1006 		/*
    1007 		 * Note that the new nat_blocks may be larger than the old
    1008 		 * nm_i->nat_blocks, since nm_i->nat_bitmap is based on the
    1009 		 * old layout.
   1010 		 */
   1011 		if (i < nm_i->nat_blocks && f2fs_test_bit(i, nm_i->nat_bitmap))
   1012 			blkaddr += (1 << get_sb(log_blocks_per_seg));
   1013 
   1014 		ret = dev_read_block(nat_block, blkaddr);
   1015 		ASSERT(ret >= 0);
   1016 
   1017 		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
   1018 			if ((i == 0 && j == 0) ||
   1019 				nat_block->entries[j].block_addr != NULL_ADDR)
   1020 				valid++;
   1021 		}
   1022 		if (valid == 0)
   1023 			test_and_set_bit_le(i, empty_nat_bits);
   1024 		else if (valid == NAT_ENTRY_PER_BLOCK)
   1025 			test_and_set_bit_le(i, full_nat_bits);
   1026 	}
   1027 	*(__le64 *)nat_bits = get_cp_crc(cp);
   1028 	free(nat_block);
   1029 
   1030 	blkaddr = get_sb(segment0_blkaddr) + (set <<
   1031 				get_sb(log_blocks_per_seg)) - nat_bits_blocks;
   1032 
   1033 	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);
   1034 
   1035 	for (i = 0; i < nat_bits_blocks; i++) {
   1036 		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
   1037 			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
   1038 	}
   1039 	MSG(0, "Info: Write valid nat_bits in checkpoint\n");
   1040 
   1041 	free(nat_bits);
   1042 }
   1043 
   1044 int init_node_manager(struct f2fs_sb_info *sbi)
   1045 {
   1046 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1047 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1048 	struct f2fs_nm_info *nm_i = NM_I(sbi);
   1049 	unsigned char *version_bitmap;
   1050 	unsigned int nat_segs;
   1051 
   1052 	nm_i->nat_blkaddr = get_sb(nat_blkaddr);
   1053 
    1054 	/* segment_count_nat counts pair segments, so divide by 2. */
   1055 	nat_segs = get_sb(segment_count_nat) >> 1;
   1056 	nm_i->nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
   1057 	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
   1058 	nm_i->fcnt = 0;
   1059 	nm_i->nat_cnt = 0;
   1060 	nm_i->init_scan_nid = get_cp(next_free_nid);
   1061 	nm_i->next_scan_nid = get_cp(next_free_nid);
   1062 
   1063 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
   1064 
   1065 	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
   1066 	if (!nm_i->nat_bitmap)
   1067 		return -ENOMEM;
   1068 	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
   1069 	if (!version_bitmap)
   1070 		return -EFAULT;
   1071 
   1072 	/* copy version bitmap */
   1073 	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
   1074 	return f2fs_init_nid_bitmap(sbi);
   1075 }
   1076 
   1077 int build_node_manager(struct f2fs_sb_info *sbi)
   1078 {
   1079 	int err;
   1080 	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
   1081 	if (!sbi->nm_info)
   1082 		return -ENOMEM;
   1083 
   1084 	err = init_node_manager(sbi);
   1085 	if (err)
   1086 		return err;
   1087 
   1088 	return 0;
   1089 }
   1090 
   1091 int build_sit_info(struct f2fs_sb_info *sbi)
   1092 {
   1093 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1094 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1095 	struct sit_info *sit_i;
   1096 	unsigned int sit_segs, start;
   1097 	char *src_bitmap, *dst_bitmap;
   1098 	unsigned int bitmap_size;
   1099 
   1100 	sit_i = malloc(sizeof(struct sit_info));
   1101 	if (!sit_i)
   1102 		return -ENOMEM;
   1103 
   1104 	SM_I(sbi)->sit_info = sit_i;
   1105 
   1106 	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
   1107 	if (!sit_i->sentries)
   1108 		return -ENOMEM;
   1109 
   1110 	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
   1111 		sit_i->sentries[start].cur_valid_map
   1112 			= calloc(SIT_VBLOCK_MAP_SIZE, 1);
   1113 		if (!sit_i->sentries[start].cur_valid_map)
   1114 			return -ENOMEM;
   1115 	}
   1116 
   1117 	sit_segs = get_sb(segment_count_sit) >> 1;
   1118 	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
   1119 	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
   1120 
   1121 	dst_bitmap = malloc(bitmap_size);
   1122 	memcpy(dst_bitmap, src_bitmap, bitmap_size);
   1123 
   1124 	sit_i->sit_base_addr = get_sb(sit_blkaddr);
   1125 	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
   1126 	sit_i->written_valid_blocks = get_cp(valid_block_count);
   1127 	sit_i->sit_bitmap = dst_bitmap;
   1128 	sit_i->bitmap_size = bitmap_size;
   1129 	sit_i->dirty_sentries = 0;
   1130 	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
   1131 	sit_i->elapsed_time = get_cp(elapsed_time);
   1132 	return 0;
   1133 }
   1134 
   1135 void reset_curseg(struct f2fs_sb_info *sbi, int type)
   1136 {
   1137 	struct curseg_info *curseg = CURSEG_I(sbi, type);
   1138 	struct summary_footer *sum_footer;
   1139 	struct seg_entry *se;
   1140 
   1141 	sum_footer = &(curseg->sum_blk->footer);
   1142 	memset(sum_footer, 0, sizeof(struct summary_footer));
   1143 	if (IS_DATASEG(type))
   1144 		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
   1145 	if (IS_NODESEG(type))
   1146 		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
   1147 	se = get_seg_entry(sbi, curseg->segno);
   1148 	se->type = type;
   1149 }
   1150 
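/*
 * Compacted summaries pack the NAT journal, the SIT journal and the
 * per-block summary entries of the three data cursegs into the first
 * block(s) of the checkpoint's summary area.
 */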
   1151 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
   1152 {
   1153 	struct curseg_info *curseg;
   1154 	unsigned int i, j, offset;
   1155 	block_t start;
   1156 	char *kaddr;
   1157 	int ret;
   1158 
   1159 	start = start_sum_block(sbi);
   1160 
   1161 	kaddr = (char *)malloc(PAGE_SIZE);
   1162 	ret = dev_read_block(kaddr, start++);
   1163 	ASSERT(ret >= 0);
   1164 
   1165 	curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1166 	memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
   1167 
   1168 	curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1169 	memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
   1170 						SUM_JOURNAL_SIZE);
   1171 
   1172 	offset = 2 * SUM_JOURNAL_SIZE;
   1173 	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
   1174 		unsigned short blk_off;
   1175 		struct curseg_info *curseg = CURSEG_I(sbi, i);
   1176 
   1177 		reset_curseg(sbi, i);
   1178 
   1179 		if (curseg->alloc_type == SSR)
   1180 			blk_off = sbi->blocks_per_seg;
   1181 		else
   1182 			blk_off = curseg->next_blkoff;
   1183 
   1184 		ASSERT(blk_off <= ENTRIES_IN_SUM);
   1185 
   1186 		for (j = 0; j < blk_off; j++) {
   1187 			struct f2fs_summary *s;
   1188 			s = (struct f2fs_summary *)(kaddr + offset);
   1189 			curseg->sum_blk->entries[j] = *s;
   1190 			offset += SUMMARY_SIZE;
   1191 			if (offset + SUMMARY_SIZE <=
   1192 					PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
   1193 				continue;
   1194 			memset(kaddr, 0, PAGE_SIZE);
   1195 			ret = dev_read_block(kaddr, start++);
   1196 			ASSERT(ret >= 0);
   1197 			offset = 0;
   1198 		}
   1199 	}
   1200 	free(kaddr);
   1201 }
   1202 
   1203 static void restore_node_summary(struct f2fs_sb_info *sbi,
   1204 		unsigned int segno, struct f2fs_summary_block *sum_blk)
   1205 {
   1206 	struct f2fs_node *node_blk;
   1207 	struct f2fs_summary *sum_entry;
   1208 	block_t addr;
   1209 	unsigned int i;
   1210 	int ret;
   1211 
   1212 	node_blk = malloc(F2FS_BLKSIZE);
   1213 	ASSERT(node_blk);
   1214 
   1215 	/* scan the node segment */
   1216 	addr = START_BLOCK(sbi, segno);
   1217 	sum_entry = &sum_blk->entries[0];
   1218 
   1219 	for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
   1220 		ret = dev_read_block(node_blk, addr);
   1221 		ASSERT(ret >= 0);
   1222 		sum_entry->nid = node_blk->footer.nid;
   1223 		addr++;
   1224 	}
   1225 	free(node_blk);
   1226 }
   1227 
   1228 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
   1229 {
   1230 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1231 	struct f2fs_summary_block *sum_blk;
   1232 	struct curseg_info *curseg;
   1233 	unsigned int segno = 0;
   1234 	block_t blk_addr = 0;
   1235 	int ret;
   1236 
   1237 	if (IS_DATASEG(type)) {
   1238 		segno = get_cp(cur_data_segno[type]);
   1239 		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1240 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
   1241 		else
   1242 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
   1243 	} else {
   1244 		segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
   1245 		if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1246 			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
   1247 							type - CURSEG_HOT_NODE);
   1248 		else
   1249 			blk_addr = GET_SUM_BLKADDR(sbi, segno);
   1250 	}
   1251 
   1252 	sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
   1253 	ret = dev_read_block(sum_blk, blk_addr);
   1254 	ASSERT(ret >= 0);
   1255 
   1256 	if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
   1257 		restore_node_summary(sbi, segno, sum_blk);
   1258 
   1259 	curseg = CURSEG_I(sbi, type);
   1260 	memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
   1261 	reset_curseg(sbi, type);
   1262 	free(sum_blk);
   1263 }
   1264 
   1265 void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
   1266 					struct f2fs_summary *sum)
   1267 {
   1268 	struct f2fs_summary_block *sum_blk;
   1269 	u32 segno, offset;
   1270 	int type, ret;
   1271 	struct seg_entry *se;
   1272 
   1273 	segno = GET_SEGNO(sbi, blk_addr);
   1274 	offset = OFFSET_IN_SEG(sbi, blk_addr);
   1275 
   1276 	se = get_seg_entry(sbi, segno);
   1277 
   1278 	sum_blk = get_sum_block(sbi, segno, &type);
   1279 	memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
   1280 	sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
   1281 							SUM_TYPE_DATA;
   1282 
   1283 	/* write SSA all the time */
   1284 	ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
   1285 	ASSERT(ret >= 0);
   1286 
   1287 	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
   1288 					type == SEG_TYPE_MAX)
   1289 		free(sum_blk);
   1290 }
   1291 
   1292 static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
   1293 {
   1294 	int type = CURSEG_HOT_DATA;
   1295 
   1296 	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
   1297 		read_compacted_summaries(sbi);
   1298 		type = CURSEG_HOT_NODE;
   1299 	}
   1300 
   1301 	for (; type <= CURSEG_COLD_NODE; type++)
   1302 		read_normal_summaries(sbi, type);
   1303 }
   1304 
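/*
 * Rebuild the six active logs (hot/warm/cold x data/node) from the
 * current segment numbers and block offsets recorded in the checkpoint,
 * then reload their summary blocks from disk.
 */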
   1305 static void build_curseg(struct f2fs_sb_info *sbi)
   1306 {
   1307 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1308 	struct curseg_info *array;
   1309 	unsigned short blk_off;
   1310 	unsigned int segno;
   1311 	int i;
   1312 
   1313 	array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
   1314 	ASSERT(array);
   1315 
   1316 	SM_I(sbi)->curseg_array = array;
   1317 
   1318 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
   1319 		array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
   1320 		ASSERT(array[i].sum_blk);
   1321 		if (i <= CURSEG_COLD_DATA) {
   1322 			blk_off = get_cp(cur_data_blkoff[i]);
   1323 			segno = get_cp(cur_data_segno[i]);
   1324 		}
   1325 		if (i > CURSEG_COLD_DATA) {
   1326 			blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
   1327 			segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
   1328 		}
   1329 		ASSERT(segno < TOTAL_SEGS(sbi));
   1330 		ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
   1331 
   1332 		array[i].segno = segno;
   1333 		array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
   1334 		array[i].next_segno = NULL_SEGNO;
   1335 		array[i].next_blkoff = blk_off;
   1336 		array[i].alloc_type = cp->alloc_type[i];
   1337 	}
   1338 	restore_curseg_summaries(sbi);
   1339 }
   1340 
   1341 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
   1342 {
   1343 	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
   1344 	ASSERT(segno <= end_segno);
   1345 }
   1346 
   1347 struct f2fs_sit_block *get_current_sit_page(struct f2fs_sb_info *sbi,
   1348 						unsigned int segno)
   1349 {
   1350 	struct sit_info *sit_i = SIT_I(sbi);
   1351 	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
   1352 	block_t blk_addr = sit_i->sit_base_addr + offset;
   1353 	struct f2fs_sit_block *sit_blk;
   1354 	int ret;
   1355 
   1356 	sit_blk = calloc(BLOCK_SZ, 1);
   1357 	ASSERT(sit_blk);
   1358 	check_seg_range(sbi, segno);
   1359 
   1360 	/* calculate sit block address */
   1361 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
   1362 		blk_addr += sit_i->sit_blocks;
   1363 
   1364 	ret = dev_read_block(sit_blk, blk_addr);
   1365 	ASSERT(ret >= 0);
   1366 
   1367 	return sit_blk;
   1368 }
   1369 
   1370 void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
   1371 			unsigned int segno, struct f2fs_sit_block *sit_blk)
   1372 {
   1373 	struct sit_info *sit_i = SIT_I(sbi);
   1374 	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
   1375 	block_t blk_addr = sit_i->sit_base_addr + offset;
   1376 	int ret;
   1377 
   1378 	/* calculate sit block address */
   1379 	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
   1380 		blk_addr += sit_i->sit_blocks;
   1381 
   1382 	ret = dev_write_block(sit_blk, blk_addr);
   1383 	ASSERT(ret >= 0);
   1384 }
   1385 
   1386 void check_block_count(struct f2fs_sb_info *sbi,
   1387 		unsigned int segno, struct f2fs_sit_entry *raw_sit)
   1388 {
   1389 	struct f2fs_sm_info *sm_info = SM_I(sbi);
   1390 	unsigned int end_segno = sm_info->segment_count - 1;
   1391 	int valid_blocks = 0;
   1392 	unsigned int i;
   1393 
   1394 	/* check segment usage */
   1395 	if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
   1396 		ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
   1397 				segno, GET_SIT_VBLOCKS(raw_sit));
   1398 
   1399 	/* check boundary of a given segment number */
   1400 	if (segno > end_segno)
   1401 		ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
   1402 
   1403 	/* check bitmap with valid block count */
   1404 	for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
   1405 		valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
   1406 
   1407 	if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
   1408 		ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
   1409 				segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
   1410 
   1411 	if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
   1412 		ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
   1413 				segno, GET_SIT_TYPE(raw_sit));
   1414 }
   1415 
   1416 void seg_info_from_raw_sit(struct seg_entry *se,
   1417 		struct f2fs_sit_entry *raw_sit)
   1418 {
   1419 	se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
   1420 	memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
   1421 	se->type = GET_SIT_TYPE(raw_sit);
   1422 	se->orig_type = GET_SIT_TYPE(raw_sit);
   1423 	se->mtime = le64_to_cpu(raw_sit->mtime);
   1424 }
   1425 
   1426 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
   1427 		unsigned int segno)
   1428 {
   1429 	struct sit_info *sit_i = SIT_I(sbi);
   1430 	return &sit_i->sentries[segno];
   1431 }
   1432 
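/*
 * Return the summary block covering @segno.  For a currently open
 * segment the in-memory curseg copy is returned (and must not be
 * freed); otherwise a freshly read SSA block is returned and the
 * caller frees it, as signalled through *ret_type.
 */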
   1433 struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
   1434 				unsigned int segno, int *ret_type)
   1435 {
   1436 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1437 	struct f2fs_summary_block *sum_blk;
   1438 	struct curseg_info *curseg;
   1439 	int type, ret;
   1440 	u64 ssa_blk;
   1441 
    1442 	*ret_type = SEG_TYPE_MAX;
   1443 
   1444 	ssa_blk = GET_SUM_BLKADDR(sbi, segno);
   1445 	for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
   1446 		if (segno == get_cp(cur_node_segno[type])) {
   1447 			curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
   1448 			if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
   1449 				ASSERT_MSG("segno [0x%x] indicates a data "
   1450 						"segment, but should be node",
   1451 						segno);
   1452 				*ret_type = -SEG_TYPE_CUR_NODE;
   1453 			} else {
   1454 				*ret_type = SEG_TYPE_CUR_NODE;
   1455 			}
   1456 			return curseg->sum_blk;
   1457 		}
   1458 	}
   1459 
   1460 	for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
   1461 		if (segno == get_cp(cur_data_segno[type])) {
   1462 			curseg = CURSEG_I(sbi, type);
   1463 			if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
   1464 				ASSERT_MSG("segno [0x%x] indicates a node "
   1465 						"segment, but should be data",
   1466 						segno);
   1467 				*ret_type = -SEG_TYPE_CUR_DATA;
   1468 			} else {
   1469 				*ret_type = SEG_TYPE_CUR_DATA;
   1470 			}
   1471 			return curseg->sum_blk;
   1472 		}
   1473 	}
   1474 
   1475 	sum_blk = calloc(BLOCK_SZ, 1);
   1476 	ASSERT(sum_blk);
   1477 
   1478 	ret = dev_read_block(sum_blk, ssa_blk);
   1479 	ASSERT(ret >= 0);
   1480 
   1481 	if (IS_SUM_NODE_SEG(sum_blk->footer))
   1482 		*ret_type = SEG_TYPE_NODE;
   1483 	else if (IS_SUM_DATA_SEG(sum_blk->footer))
   1484 		*ret_type = SEG_TYPE_DATA;
   1485 
   1486 	return sum_blk;
   1487 }
   1488 
   1489 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
   1490 				struct f2fs_summary *sum_entry)
   1491 {
   1492 	struct f2fs_summary_block *sum_blk;
   1493 	u32 segno, offset;
   1494 	int type;
   1495 
   1496 	segno = GET_SEGNO(sbi, blk_addr);
   1497 	offset = OFFSET_IN_SEG(sbi, blk_addr);
   1498 
   1499 	sum_blk = get_sum_block(sbi, segno, &type);
   1500 	memcpy(sum_entry, &(sum_blk->entries[offset]),
   1501 				sizeof(struct f2fs_summary));
   1502 	if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
   1503 					type == SEG_TYPE_MAX)
   1504 		free(sum_blk);
   1505 	return type;
   1506 }
   1507 
   1508 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
   1509 				struct f2fs_nat_entry *raw_nat)
   1510 {
   1511 	struct f2fs_nat_block *nat_block;
   1512 	pgoff_t block_addr;
   1513 	int entry_off;
   1514 	int ret;
   1515 
   1516 	if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
   1517 		return;
   1518 
   1519 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1520 	ASSERT(nat_block);
   1521 
   1522 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1523 	block_addr = current_nat_addr(sbi, nid);
   1524 
   1525 	ret = dev_read_block(nat_block, block_addr);
   1526 	ASSERT(ret >= 0);
   1527 
   1528 	memcpy(raw_nat, &nat_block->entries[entry_off],
   1529 					sizeof(struct f2fs_nat_entry));
   1530 	free(nat_block);
   1531 }
   1532 
   1533 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
   1534 				u16 ofs_in_node, block_t newaddr)
   1535 {
   1536 	struct f2fs_node *node_blk = NULL;
   1537 	struct node_info ni;
   1538 	block_t oldaddr, startaddr, endaddr;
   1539 	int ret;
   1540 
   1541 	node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
   1542 	ASSERT(node_blk);
   1543 
   1544 	get_node_info(sbi, nid, &ni);
   1545 
   1546 	/* read node_block */
   1547 	ret = dev_read_block(node_blk, ni.blk_addr);
   1548 	ASSERT(ret >= 0);
   1549 
   1550 	/* check its block address */
   1551 	if (node_blk->footer.nid == node_blk->footer.ino) {
   1552 		int ofs = get_extra_isize(node_blk);
   1553 
   1554 		oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
   1555 		node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
   1556 	} else {
   1557 		oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
   1558 		node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
   1559 	}
   1560 
   1561 	ret = dev_write_block(node_blk, ni.blk_addr);
   1562 	ASSERT(ret >= 0);
   1563 
   1564 	/* check extent cache entry */
   1565 	if (node_blk->footer.nid != node_blk->footer.ino) {
   1566 		get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
   1567 
   1568 		/* read inode block */
   1569 		ret = dev_read_block(node_blk, ni.blk_addr);
   1570 		ASSERT(ret >= 0);
   1571 	}
   1572 
   1573 	startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
   1574 	endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
   1575 	if (oldaddr >= startaddr && oldaddr < endaddr) {
   1576 		node_blk->i.i_ext.len = 0;
   1577 
   1578 		/* update inode block */
   1579 		ret = dev_write_block(node_blk, ni.blk_addr);
   1580 		ASSERT(ret >= 0);
   1581 	}
   1582 	free(node_blk);
   1583 }
   1584 
   1585 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
   1586 					nid_t nid, block_t newaddr)
   1587 {
   1588 	struct f2fs_nat_block *nat_block;
   1589 	pgoff_t block_addr;
   1590 	int entry_off;
   1591 	int ret;
   1592 
   1593 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1594 	ASSERT(nat_block);
   1595 
   1596 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1597 	block_addr = current_nat_addr(sbi, nid);
   1598 
   1599 	ret = dev_read_block(nat_block, block_addr);
   1600 	ASSERT(ret >= 0);
   1601 
   1602 	if (ino)
   1603 		nat_block->entries[entry_off].ino = cpu_to_le32(ino);
   1604 	nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
   1605 
   1606 	ret = dev_write_block(nat_block, block_addr);
   1607 	ASSERT(ret >= 0);
   1608 	free(nat_block);
   1609 }
   1610 
   1611 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
   1612 {
   1613 	struct f2fs_nat_entry raw_nat;
   1614 
   1615 	ni->nid = nid;
   1616 	if (c.func == FSCK) {
   1617 		node_info_from_raw_nat(ni, &(F2FS_FSCK(sbi)->entries[nid]));
   1618 		return;
   1619 	}
   1620 
   1621 	get_nat_entry(sbi, nid, &raw_nat);
   1622 	node_info_from_raw_nat(ni, &raw_nat);
   1623 }
   1624 
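/*
 * Load every SIT entry from the SIT area, then let entries still
 * sitting in the cold-data curseg's SIT journal override the on-disk
 * copies.
 */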
   1625 void build_sit_entries(struct f2fs_sb_info *sbi)
   1626 {
   1627 	struct sit_info *sit_i = SIT_I(sbi);
   1628 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1629 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1630 	struct seg_entry *se;
   1631 	struct f2fs_sit_entry sit;
   1632 	unsigned int i, segno;
   1633 
   1634 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1635 		se = &sit_i->sentries[segno];
   1636 		struct f2fs_sit_block *sit_blk;
   1637 
   1638 		sit_blk = get_current_sit_page(sbi, segno);
   1639 		sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1640 		free(sit_blk);
   1641 
   1642 		check_block_count(sbi, segno, &sit);
   1643 		seg_info_from_raw_sit(se, &sit);
   1644 	}
   1645 
   1646 	for (i = 0; i < sits_in_cursum(journal); i++) {
   1647 		segno = le32_to_cpu(segno_in_journal(journal, i));
   1648 		se = &sit_i->sentries[segno];
   1649 		sit = sit_in_journal(journal, i);
   1650 
   1651 		check_block_count(sbi, segno, &sit);
   1652 		seg_info_from_raw_sit(se, &sit);
   1653 	}
   1654 
   1655 }
   1656 
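         /*
          * Allocate sm_info, fill it from the superblock and checkpoint, then
          * build the SIT info, the current segments and the in-memory SIT
          * entries.  Returns 0 on success or -ENOMEM.
          */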
   1657 int build_segment_manager(struct f2fs_sb_info *sbi)
   1658 {
   1659 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1660 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1661 	struct f2fs_sm_info *sm_info;
   1662 
   1663 	sm_info = malloc(sizeof(struct f2fs_sm_info));
   1664 	if (!sm_info)
   1665 		return -ENOMEM;
   1666 
   1667 	/* init sm info */
   1668 	sbi->sm_info = sm_info;
   1669 	sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
   1670 	sm_info->main_blkaddr = get_sb(main_blkaddr);
   1671 	sm_info->segment_count = get_sb(segment_count);
   1672 	sm_info->reserved_segments = get_cp(rsvd_segment_count);
   1673 	sm_info->ovp_segments = get_cp(overprov_segment_count);
   1674 	sm_info->main_segments = get_sb(segment_count_main);
   1675 	sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
   1676 
   1677 	build_sit_info(sbi);
   1678 
   1679 	build_curseg(sbi);
   1680 
   1681 	build_sit_entries(sbi);
   1682 
   1683 	return 0;
   1684 }
   1685 
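         /*
          * Copy every segment's valid-block bitmap into one flat bitmap for
          * fsck and tally the SIT view of valid blocks and free segments;
          * current segments are never counted as free.
          */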
   1686 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
   1687 {
   1688 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   1689 	struct f2fs_sm_info *sm_i = SM_I(sbi);
   1690 	unsigned int segno = 0;
   1691 	char *ptr = NULL;
   1692 	u32 sum_vblocks = 0;
   1693 	u32 free_segs = 0;
   1694 	struct seg_entry *se;
   1695 
   1696 	fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
   1697 	fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
   1698 	ASSERT(fsck->sit_area_bitmap);
   1699 	ptr = fsck->sit_area_bitmap;
   1700 
   1701 	ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
   1702 
   1703 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1704 		se = get_seg_entry(sbi, segno);
   1705 
   1706 		memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1707 		ptr += SIT_VBLOCK_MAP_SIZE;
   1708 
   1709 		if (se->valid_blocks == 0x0) {
   1710 			if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
   1711 				le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
   1712 				le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
   1713 				le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
   1714 				le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
   1715 				le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
   1716 				continue;
   1717 			} else {
   1718 				free_segs++;
   1719 			}
   1720 		} else {
   1721 			sum_vblocks += se->valid_blocks;
   1722 		}
   1723 	}
   1724 	fsck->chk.sit_valid_blocks = sum_vblocks;
   1725 	fsck->chk.sit_free_segs = free_segs;
   1726 
   1727 	DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
   1728 			sum_vblocks, sum_vblocks,
   1729 			free_segs, free_segs);
   1730 }
   1731 
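         /*
          * Push fsck's corrected main-area bitmap back into the on-disk SIT:
          * the SIT journal is dropped and every entry's valid map and block
          * count are rebuilt from that bitmap and re-tagged with the
          * in-memory segment type.
          */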
   1732 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
   1733 {
   1734 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   1735 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1736 	struct sit_info *sit_i = SIT_I(sbi);
   1737 	unsigned int segno = 0;
   1738 	struct f2fs_summary_block *sum = curseg->sum_blk;
   1739 	char *ptr = NULL;
   1740 
   1741 	/* remove sit journal */
   1742 	sum->journal.n_sits = 0;
   1743 
   1744 	ptr = fsck->main_area_bitmap;
   1745 
   1746 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1747 		struct f2fs_sit_block *sit_blk;
   1748 		struct f2fs_sit_entry *sit;
   1749 		struct seg_entry *se;
   1750 		u16 valid_blocks = 0;
   1751 		u16 type;
   1752 		int i;
   1753 
   1754 		sit_blk = get_current_sit_page(sbi, segno);
   1755 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1756 		memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
   1757 
   1758 		/* update valid block count */
   1759 		for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
   1760 			valid_blocks += get_bits_in_byte(sit->valid_map[i]);
   1761 
   1762 		se = get_seg_entry(sbi, segno);
   1763 		memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
   1764 		se->valid_blocks = valid_blocks;
   1765 		type = se->type;
   1766 		if (type >= NO_CHECK_TYPE) {
    1767 			ASSERT_MSG("Invalid type and valid blocks=%x,%x",
   1768 					segno, valid_blocks);
   1769 			type = 0;
   1770 		}
   1771 		sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
   1772 								valid_blocks);
   1773 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1774 		free(sit_blk);
   1775 
   1776 		ptr += SIT_VBLOCK_MAP_SIZE;
   1777 	}
   1778 }
   1779 
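         /*
          * For every segment recorded in the SIT journal, write its in-memory
          * seg_entry state into the on-disk SIT block, then empty the journal.
          * Returns the number of journal entries processed.
          */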
   1780 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
   1781 {
   1782 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
   1783 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1784 	struct sit_info *sit_i = SIT_I(sbi);
   1785 	unsigned int segno;
   1786 	int i;
   1787 
   1788 	for (i = 0; i < sits_in_cursum(journal); i++) {
   1789 		struct f2fs_sit_block *sit_blk;
   1790 		struct f2fs_sit_entry *sit;
   1791 		struct seg_entry *se;
   1792 
    1793 		segno = le32_to_cpu(segno_in_journal(journal, i));
   1794 		se = get_seg_entry(sbi, segno);
   1795 
   1796 		sit_blk = get_current_sit_page(sbi, segno);
   1797 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1798 
   1799 		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1800 		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
   1801 							se->valid_blocks);
   1802 		sit->mtime = cpu_to_le64(se->mtime);
   1803 
   1804 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1805 		free(sit_blk);
   1806 	}
   1807 
   1808 	journal->n_sits = 0;
   1809 	return i;
   1810 }
   1811 
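         /*
          * Copy every cached NAT journal entry into its on-disk NAT block,
          * then empty the journal.  Returns the number of entries flushed.
          */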
   1812 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
   1813 {
   1814 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   1815 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   1816 	struct f2fs_nat_block *nat_block;
   1817 	pgoff_t block_addr;
   1818 	int entry_off;
   1819 	nid_t nid;
   1820 	int ret;
   1821 	int i = 0;
   1822 
   1823 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   1824 	ASSERT(nat_block);
   1825 next:
   1826 	if (i >= nats_in_cursum(journal)) {
   1827 		free(nat_block);
   1828 		journal->n_nats = 0;
   1829 		return i;
   1830 	}
   1831 
   1832 	nid = le32_to_cpu(nid_in_journal(journal, i));
   1833 
   1834 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   1835 	block_addr = current_nat_addr(sbi, nid);
   1836 
   1837 	ret = dev_read_block(nat_block, block_addr);
   1838 	ASSERT(ret >= 0);
   1839 
   1840 	memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
   1841 					sizeof(struct f2fs_nat_entry));
   1842 
   1843 	ret = dev_write_block(nat_block, block_addr);
   1844 	ASSERT(ret >= 0);
   1845 	i++;
   1846 	goto next;
   1847 }
   1848 
   1849 void flush_journal_entries(struct f2fs_sb_info *sbi)
   1850 {
   1851 	int n_nats = flush_nat_journal_entries(sbi);
   1852 	int n_sits = flush_sit_journal_entries(sbi);
   1853 
   1854 	if (n_nats || n_sits)
   1855 		write_checkpoint(sbi);
   1856 }
   1857 
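         /*
          * Write each dirty in-memory seg_entry back to its on-disk SIT block;
          * segments that end up with no valid blocks (and are not current
          * segments) are counted and stored as the checkpoint's free segment
          * count.
          */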
   1858 void flush_sit_entries(struct f2fs_sb_info *sbi)
   1859 {
   1860 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1861 	struct sit_info *sit_i = SIT_I(sbi);
   1862 	unsigned int segno = 0;
   1863 	u32 free_segs = 0;
   1864 
   1865 	/* update free segments */
   1866 	for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
   1867 		struct f2fs_sit_block *sit_blk;
   1868 		struct f2fs_sit_entry *sit;
   1869 		struct seg_entry *se;
   1870 
   1871 		se = get_seg_entry(sbi, segno);
   1872 
   1873 		if (!se->dirty)
   1874 			continue;
   1875 
   1876 		sit_blk = get_current_sit_page(sbi, segno);
   1877 		sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
   1878 		memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
   1879 		sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
   1880 							se->valid_blocks);
   1881 		rewrite_current_sit_page(sbi, segno, sit_blk);
   1882 		free(sit_blk);
   1883 
   1884 		if (se->valid_blocks == 0x0 &&
   1885 				!IS_CUR_SEGNO(sbi, segno, NO_CHECK_TYPE))
   1886 			free_segs++;
   1887 	}
   1888 
   1889 	set_cp(free_segment_count, free_segs);
   1890 }
   1891 
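         /*
          * Scan from *to (leftward when @left is set, rightward otherwise) for
          * a block usable by segment type @type.  Full segments and current
          * segments are skipped, empty segments are also skipped when free
          * segments are scarce, and a completely empty section is accepted
          * as-is.  Returns 0 with *to pointing at the chosen block, or -1 if
          * the scan leaves the main area.
          */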
   1892 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left, int type)
   1893 {
   1894 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   1895 	struct seg_entry *se;
   1896 	u32 segno;
   1897 	u32 offset;
   1898 	int not_enough = 0;
   1899 	u64 end_blkaddr = (get_sb(segment_count_main) <<
   1900 			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
   1901 
   1902 	if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
   1903 		not_enough = 1;
   1904 
   1905 	while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
   1906 		segno = GET_SEGNO(sbi, *to);
   1907 		offset = OFFSET_IN_SEG(sbi, *to);
   1908 
   1909 		se = get_seg_entry(sbi, segno);
   1910 
   1911 		if (se->valid_blocks == sbi->blocks_per_seg ||
   1912 				IS_CUR_SEGNO(sbi, segno, type)) {
   1913 			*to = left ? START_BLOCK(sbi, segno) - 1:
   1914 						START_BLOCK(sbi, segno + 1);
   1915 			continue;
   1916 		}
   1917 
   1918 		if (se->valid_blocks == 0 && not_enough) {
   1919 			*to = left ? START_BLOCK(sbi, segno) - 1:
   1920 						START_BLOCK(sbi, segno + 1);
   1921 			continue;
   1922 		}
   1923 
   1924 		if (se->valid_blocks == 0 && !(segno % sbi->segs_per_sec)) {
   1925 			struct seg_entry *se2;
   1926 			unsigned int i;
   1927 
   1928 			for (i = 1; i < sbi->segs_per_sec; i++) {
   1929 				se2 = get_seg_entry(sbi, segno + i);
   1930 				if (se2->valid_blocks)
   1931 					break;
   1932 			}
   1933 			if (i == sbi->segs_per_sec)
   1934 				return 0;
   1935 		}
   1936 
   1937 		if (se->type == type &&
   1938 			!f2fs_test_bit(offset, (const char *)se->cur_valid_map))
   1939 			return 0;
   1940 
   1941 		*to = left ? *to - 1: *to + 1;
   1942 	}
   1943 	return -1;
   1944 }
   1945 
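         /*
          * Relocate every current segment to a block at or after @from:
          * persist the old summary block to its SSA slot, pick a new target
          * with find_next_free_block(), switch the curseg to SSR allocation
          * at that block, and load the new segment's summaries from SSA.
          */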
   1946 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from)
   1947 {
   1948 	int i, ret;
   1949 
    1950 	/* move each current segment to a new target at or after 'from' */
   1951 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   1952 		struct curseg_info *curseg = CURSEG_I(sbi, i);
   1953 		struct f2fs_summary_block buf;
   1954 		u32 old_segno;
   1955 		u64 ssa_blk, to;
   1956 
   1957 		/* update original SSA too */
   1958 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   1959 		ret = dev_write_block(curseg->sum_blk, ssa_blk);
   1960 		ASSERT(ret >= 0);
   1961 
   1962 		to = from;
   1963 		ret = find_next_free_block(sbi, &to, 0, i);
   1964 		ASSERT(ret == 0);
   1965 
   1966 		old_segno = curseg->segno;
   1967 		curseg->segno = GET_SEGNO(sbi, to);
   1968 		curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
   1969 		curseg->alloc_type = SSR;
   1970 
   1971 		/* update new segno */
   1972 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   1973 		ret = dev_read_block(&buf, ssa_blk);
   1974 		ASSERT(ret >= 0);
   1975 
   1976 		memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
   1977 
   1978 		/* update se->types */
   1979 		reset_curseg(sbi, i);
   1980 
   1981 		DBG(1, "Move curseg[%d] %x -> %x after %"PRIx64"\n",
   1982 				i, old_segno, curseg->segno, from);
   1983 	}
   1984 }
   1985 
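         /*
          * Empty the journals kept in every current segment's summary block.
          * n_nats and n_sits overlay the same field of struct f2fs_journal,
          * so clearing n_nats drops both the NAT and SIT journal entries.
          */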
   1986 void zero_journal_entries(struct f2fs_sb_info *sbi)
   1987 {
   1988 	int i;
   1989 
   1990 	for (i = 0; i < NO_CHECK_TYPE; i++)
   1991 		CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
   1992 }
   1993 
   1994 void write_curseg_info(struct f2fs_sb_info *sbi)
   1995 {
   1996 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   1997 	int i;
   1998 
   1999 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   2000 		cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
   2001 		if (i < CURSEG_HOT_NODE) {
   2002 			set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
   2003 			set_cp(cur_data_blkoff[i],
   2004 					CURSEG_I(sbi, i)->next_blkoff);
   2005 		} else {
   2006 			int n = i - CURSEG_HOT_NODE;
   2007 
   2008 			set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
   2009 			set_cp(cur_node_blkoff[n],
   2010 					CURSEG_I(sbi, i)->next_blkoff);
   2011 		}
   2012 	}
   2013 }
   2014 
   2015 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
   2016 					struct f2fs_nat_entry *raw_nat)
   2017 {
   2018 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   2019 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   2020 	int i = 0;
   2021 
   2022 	for (i = 0; i < nats_in_cursum(journal); i++) {
   2023 		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
   2024 			memcpy(raw_nat, &nat_in_journal(journal, i),
   2025 						sizeof(struct f2fs_nat_entry));
   2026 			DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
   2027 			return i;
   2028 		}
   2029 	}
   2030 	return -1;
   2031 }
   2032 
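         /*
          * Invalidate the NAT entry of @nid.  If the entry is cached in the
          * NAT journal it is zeroed there; otherwise the on-disk NAT block is
          * rewritten.  The reserved node/meta inodes keep their entry but get
          * block_addr reset to the conventional 0x1.
          */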
   2033 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
   2034 {
   2035 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   2036 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   2037 	struct f2fs_nat_block *nat_block;
   2038 	pgoff_t block_addr;
   2039 	int entry_off;
   2040 	int ret;
   2041 	int i = 0;
   2042 
   2043 	/* check in journal */
   2044 	for (i = 0; i < nats_in_cursum(journal); i++) {
   2045 		if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
   2046 			memset(&nat_in_journal(journal, i), 0,
   2047 					sizeof(struct f2fs_nat_entry));
   2048 			FIX_MSG("Remove nid [0x%x] in nat journal", nid);
   2049 			return;
   2050 		}
   2051 	}
   2052 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   2053 	ASSERT(nat_block);
   2054 
   2055 	entry_off = nid % NAT_ENTRY_PER_BLOCK;
   2056 	block_addr = current_nat_addr(sbi, nid);
   2057 
   2058 	ret = dev_read_block(nat_block, block_addr);
   2059 	ASSERT(ret >= 0);
   2060 
   2061 	if (nid == F2FS_NODE_INO(sbi) || nid == F2FS_META_INO(sbi)) {
   2062 		FIX_MSG("nid [0x%x] block_addr= 0x%x -> 0x1", nid,
   2063 			le32_to_cpu(nat_block->entries[entry_off].block_addr));
   2064 		nat_block->entries[entry_off].block_addr = cpu_to_le32(0x1);
   2065 	} else {
   2066 		memset(&nat_block->entries[entry_off], 0,
   2067 					sizeof(struct f2fs_nat_entry));
   2068 		FIX_MSG("Remove nid [0x%x] in NAT", nid);
   2069 	}
   2070 
   2071 	ret = dev_write_block(nat_block, block_addr);
   2072 	ASSERT(ret >= 0);
   2073 	free(nat_block);
   2074 }
   2075 
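         /*
          * Write the active checkpoint pack in order: refresh the free/valid
          * counters and CRC, write the checkpoint header, leave room for the
          * payload and orphan blocks, write the current summary blocks (also
          * updating their SSA copies), emit nat_bits if enabled, sync the
          * device, and finally commit the trailing checkpoint block.
          */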
   2076 void write_checkpoint(struct f2fs_sb_info *sbi)
   2077 {
   2078 	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
   2079 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   2080 	block_t orphan_blks = 0;
   2081 	unsigned long long cp_blk_no;
   2082 	u32 flags = CP_UMOUNT_FLAG;
   2083 	int i, ret;
   2084 	u_int32_t crc = 0;
   2085 
   2086 	if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
   2087 		orphan_blks = __start_sum_addr(sbi) - 1;
   2088 		flags |= CP_ORPHAN_PRESENT_FLAG;
   2089 	}
   2090 
   2091 	set_cp(free_segment_count, get_free_segments(sbi));
   2092 	set_cp(valid_block_count, sbi->total_valid_block_count);
   2093 	set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
   2094 
   2095 	flags = update_nat_bits_flags(sb, cp, flags);
   2096 	set_cp(ckpt_flags, flags);
   2097 
   2098 	crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CHECKSUM_OFFSET);
   2099 	*((__le32 *)((unsigned char *)cp + CHECKSUM_OFFSET)) = cpu_to_le32(crc);
   2100 
   2101 	cp_blk_no = get_sb(cp_blkaddr);
   2102 	if (sbi->cur_cp == 2)
   2103 		cp_blk_no += 1 << get_sb(log_blocks_per_seg);
   2104 
   2105 	/* write the first cp */
   2106 	ret = dev_write_block(cp, cp_blk_no++);
   2107 	ASSERT(ret >= 0);
   2108 
   2109 	/* skip payload */
   2110 	cp_blk_no += get_sb(cp_payload);
   2111 	/* skip orphan blocks */
   2112 	cp_blk_no += orphan_blks;
   2113 
   2114 	/* update summary blocks having nullified journal entries */
   2115 	for (i = 0; i < NO_CHECK_TYPE; i++) {
   2116 		struct curseg_info *curseg = CURSEG_I(sbi, i);
   2117 		u64 ssa_blk;
   2118 
   2119 		ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
   2120 		ASSERT(ret >= 0);
   2121 
   2122 		/* update original SSA too */
   2123 		ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
   2124 		ret = dev_write_block(curseg->sum_blk, ssa_blk);
   2125 		ASSERT(ret >= 0);
   2126 	}
   2127 
   2128 	/* Write nat bits */
   2129 	if (flags & CP_NAT_BITS_FLAG)
   2130 		write_nat_bits(sbi, sb, cp, sbi->cur_cp);
   2131 
   2132 	/* in case of sudden power off */
   2133 	ret = f2fs_fsync_device();
   2134 	ASSERT(ret >= 0);
   2135 
   2136 	/* write the last cp */
   2137 	ret = dev_write_block(cp, cp_blk_no++);
   2138 	ASSERT(ret >= 0);
   2139 }
   2140 
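         /*
          * Build fsck's snapshot of the NAT: read every NAT block of the
          * current set, record each in-use entry in nat_area_bitmap and
          * fsck->entries, flag suspicious entries (reserved inodes with a bad
          * address, nid 0 in use, entries without an owner inode), then apply
          * the entries cached in the NAT journal on top.
          */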
   2141 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
   2142 {
   2143 	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
   2144 	struct f2fs_journal *journal = &curseg->sum_blk->journal;
   2145 	struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
   2146 	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
   2147 	struct f2fs_nm_info *nm_i = NM_I(sbi);
   2148 	struct f2fs_nat_block *nat_block;
   2149 	struct node_info ni;
   2150 	u32 nid, nr_nat_blks;
   2151 	pgoff_t block_off;
   2152 	pgoff_t block_addr;
   2153 	int seg_off;
   2154 	int ret;
   2155 	unsigned int i;
   2156 
   2157 	nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
   2158 	ASSERT(nat_block);
   2159 
   2160 	/* Alloc & build nat entry bitmap */
   2161 	nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
   2162 					sbi->log_blocks_per_seg;
   2163 
   2164 	fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
   2165 	fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
   2166 	fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
   2167 	ASSERT(fsck->nat_area_bitmap);
   2168 
   2169 	fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
   2170 					fsck->nr_nat_entries);
   2171 	ASSERT(fsck->entries);
   2172 
   2173 	for (block_off = 0; block_off < nr_nat_blks; block_off++) {
   2174 
   2175 		seg_off = block_off >> sbi->log_blocks_per_seg;
   2176 		block_addr = (pgoff_t)(nm_i->nat_blkaddr +
   2177 			(seg_off << sbi->log_blocks_per_seg << 1) +
   2178 			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
   2179 
   2180 		if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
   2181 			block_addr += sbi->blocks_per_seg;
   2182 
   2183 		ret = dev_read_block(nat_block, block_addr);
   2184 		ASSERT(ret >= 0);
   2185 
   2186 		nid = block_off * NAT_ENTRY_PER_BLOCK;
   2187 		for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
   2188 			ni.nid = nid + i;
   2189 
   2190 			if ((nid + i) == F2FS_NODE_INO(sbi) ||
   2191 					(nid + i) == F2FS_META_INO(sbi)) {
   2192 				/*
   2193 				 * block_addr of node/meta inode should be 0x1.
   2194 				 * Set this bit, and fsck_verify will fix it.
   2195 				 */
   2196 				if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
   2197 					ASSERT_MSG("\tError: ino[0x%x] block_addr[0x%x] is invalid\n",
   2198 							nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
   2199 					f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
   2200 				}
   2201 				continue;
   2202 			}
   2203 
   2204 			node_info_from_raw_nat(&ni, &nat_block->entries[i]);
   2205 			if (ni.blk_addr == 0x0)
   2206 				continue;
   2207 			if (ni.ino == 0x0) {
   2208 				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
   2209 					" is invalid\n", ni.ino, ni.blk_addr);
   2210 			}
   2211 			if (ni.ino == (nid + i)) {
   2212 				fsck->nat_valid_inode_cnt++;
   2213 				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
   2214 			}
   2215 			if (nid + i == 0) {
   2216 				/*
   2217 				 * nat entry [0] must be null.  If
   2218 				 * it is corrupted, set its bit in
   2219 				 * nat_area_bitmap, fsck_verify will
   2220 				 * nullify it
   2221 				 */
   2222 				ASSERT_MSG("Invalid nat entry[0]: "
   2223 					"blk_addr[0x%x]\n", ni.blk_addr);
   2224 				fsck->chk.valid_nat_entry_cnt--;
   2225 			}
   2226 
   2227 			DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
   2228 				nid + i, ni.blk_addr, ni.ino);
   2229 			f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
   2230 			fsck->chk.valid_nat_entry_cnt++;
   2231 
   2232 			fsck->entries[nid + i] = nat_block->entries[i];
   2233 		}
   2234 	}
   2235 
   2236 	/* Traverse nat journal, update the corresponding entries */
   2237 	for (i = 0; i < nats_in_cursum(journal); i++) {
   2238 		struct f2fs_nat_entry raw_nat;
   2239 		nid = le32_to_cpu(nid_in_journal(journal, i));
   2240 		ni.nid = nid;
   2241 
   2242 		DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);
   2243 
   2244 		/* Clear the original bit and count */
   2245 		if (fsck->entries[nid].block_addr != 0x0) {
   2246 			fsck->chk.valid_nat_entry_cnt--;
   2247 			f2fs_clear_bit(nid, fsck->nat_area_bitmap);
   2248 			if (fsck->entries[nid].ino == nid)
   2249 				fsck->nat_valid_inode_cnt--;
   2250 		}
   2251 
   2252 		/* Use nat entries in journal */
   2253 		memcpy(&raw_nat, &nat_in_journal(journal, i),
   2254 					sizeof(struct f2fs_nat_entry));
   2255 		node_info_from_raw_nat(&ni, &raw_nat);
   2256 		if (ni.blk_addr != 0x0) {
   2257 			if (ni.ino == 0x0)
   2258 				ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
   2259 					" is invalid\n", ni.ino, ni.blk_addr);
   2260 			if (ni.ino == nid) {
   2261 				fsck->nat_valid_inode_cnt++;
   2262 				DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
   2263 			}
   2264 			f2fs_set_bit(nid, fsck->nat_area_bitmap);
   2265 			fsck->chk.valid_nat_entry_cnt++;
   2266 			DBG(3, "nid[0x%x] in nat cache\n", nid);
   2267 		}
   2268 		fsck->entries[nid] = raw_nat;
   2269 	}
   2270 	free(nat_block);
   2271 
   2272 	DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
   2273 			fsck->chk.valid_nat_entry_cnt,
   2274 			fsck->chk.valid_nat_entry_cnt);
   2275 }
   2276 
   2277 static int check_sector_size(struct f2fs_super_block *sb)
   2278 {
   2279 	int index;
   2280 	u_int32_t log_sectorsize, log_sectors_per_block;
   2281 	u_int8_t *zero_buff;
   2282 
   2283 	log_sectorsize = log_base_2(c.sector_size);
   2284 	log_sectors_per_block = log_base_2(c.sectors_per_blk);
   2285 
   2286 	if (log_sectorsize == get_sb(log_sectorsize) &&
   2287 			log_sectors_per_block == get_sb(log_sectors_per_block))
   2288 		return 0;
   2289 
   2290 	zero_buff = calloc(F2FS_BLKSIZE, 1);
   2291 	ASSERT(zero_buff);
   2292 
   2293 	set_sb(log_sectorsize, log_sectorsize);
   2294 	set_sb(log_sectors_per_block, log_sectors_per_block);
   2295 
   2296 	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
   2297 	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
   2298 	for (index = 0; index < 2; index++) {
   2299 		if (dev_write(zero_buff, index * F2FS_BLKSIZE, F2FS_BLKSIZE)) {
    2300 			MSG(1, "\tError: Failed while writing super_blk "
   2301 				"on disk!!! index : %d\n", index);
   2302 			free(zero_buff);
   2303 			return -1;
   2304 		}
   2305 	}
   2306 
   2307 	free(zero_buff);
   2308 	return 0;
   2309 }
   2310 
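         /*
          * Mount the image for the offline tools: pick a valid superblock and
          * checkpoint, sanity-check them, decide whether fixing is required in
          * auto-fix/preen mode, initialize per-sb counters, build the segment
          * and node managers, and verify the checkpoint's nat_bits,
          * regenerating them when they do not match the checkpoint CRC.
          */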
   2311 int f2fs_do_mount(struct f2fs_sb_info *sbi)
   2312 {
   2313 	struct f2fs_checkpoint *cp = NULL;
   2314 	struct f2fs_super_block *sb = NULL;
   2315 	int ret;
   2316 
   2317 	sbi->active_logs = NR_CURSEG_TYPE;
   2318 	ret = validate_super_block(sbi, 0);
   2319 	if (ret) {
   2320 		ret = validate_super_block(sbi, 1);
   2321 		if (ret)
   2322 			return -1;
   2323 	}
   2324 	sb = F2FS_RAW_SUPER(sbi);
   2325 
   2326 	ret = check_sector_size(sb);
   2327 	if (ret)
   2328 		return -1;
   2329 
   2330 	print_raw_sb_info(sb);
   2331 
   2332 	init_sb_info(sbi);
   2333 
   2334 	ret = get_valid_checkpoint(sbi);
   2335 	if (ret) {
   2336 		ERR_MSG("Can't find valid checkpoint\n");
   2337 		return -1;
   2338 	}
   2339 
   2340 	if (sanity_check_ckpt(sbi)) {
   2341 		ERR_MSG("Checkpoint is polluted\n");
   2342 		return -1;
   2343 	}
   2344 	cp = F2FS_CKPT(sbi);
   2345 
   2346 	print_ckpt_info(sbi);
   2347 
   2348 	if (c.auto_fix || c.preen_mode) {
   2349 		u32 flag = get_cp(ckpt_flags);
   2350 
   2351 		if (flag & CP_FSCK_FLAG ||
   2352 			(exist_qf_ino(sb) && (!(flag & CP_UMOUNT_FLAG) ||
   2353 						flag & CP_ERROR_FLAG))) {
   2354 			c.fix_on = 1;
   2355 		} else if (!c.preen_mode) {
   2356 			print_cp_state(flag);
   2357 			return 1;
   2358 		}
   2359 	}
   2360 
   2361 	c.bug_on = 0;
   2362 	c.feature = sb->feature;
   2363 
   2364 	/* precompute checksum seed for metadata */
   2365 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
   2366 		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
   2367 
   2368 	sbi->total_valid_node_count = get_cp(valid_node_count);
   2369 	sbi->total_valid_inode_count = get_cp(valid_inode_count);
   2370 	sbi->user_block_count = get_cp(user_block_count);
   2371 	sbi->total_valid_block_count = get_cp(valid_block_count);
   2372 	sbi->last_valid_block_count = sbi->total_valid_block_count;
   2373 	sbi->alloc_valid_block_count = 0;
   2374 
   2375 	if (build_segment_manager(sbi)) {
   2376 		ERR_MSG("build_segment_manager failed\n");
   2377 		return -1;
   2378 	}
   2379 
   2380 	if (build_node_manager(sbi)) {
   2381 		ERR_MSG("build_node_manager failed\n");
   2382 		return -1;
   2383 	}
   2384 
   2385 	/* Check nat_bits */
   2386 	if (c.func != DUMP && is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
   2387 		u_int32_t nat_bits_bytes, nat_bits_blocks;
   2388 		__le64 *kaddr;
   2389 		u_int32_t blk;
   2390 
   2391 		blk = get_sb(cp_blkaddr) + (1 << get_sb(log_blocks_per_seg));
   2392 		if (sbi->cur_cp == 2)
   2393 			blk += 1 << get_sb(log_blocks_per_seg);
   2394 
   2395 		nat_bits_bytes = get_sb(segment_count_nat) << 5;
   2396 		nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
   2397 				F2FS_BLKSIZE - 1);
   2398 		blk -= nat_bits_blocks;
   2399 
    2400 		kaddr = malloc(PAGE_SIZE);
         		ASSERT(kaddr);
   2401 		ret = dev_read_block(kaddr, blk);
   2402 		ASSERT(ret >= 0);
   2403 		if (*kaddr != get_cp_crc(cp))
   2404 			write_nat_bits(sbi, sb, cp, sbi->cur_cp);
   2405 		else
   2406 			MSG(0, "Info: Found valid nat_bits in checkpoint\n");
   2407 		free(kaddr);
   2408 	}
   2409 	return 0;
   2410 }
   2411 
   2412 void f2fs_do_umount(struct f2fs_sb_info *sbi)
   2413 {
   2414 	struct sit_info *sit_i = SIT_I(sbi);
   2415 	struct f2fs_sm_info *sm_i = SM_I(sbi);
   2416 	struct f2fs_nm_info *nm_i = NM_I(sbi);
   2417 	unsigned int i;
   2418 
   2419 	/* free nm_info */
   2420 	if (c.func == SLOAD || c.func == FSCK)
   2421 		free(nm_i->nid_bitmap);
   2422 	free(nm_i->nat_bitmap);
   2423 	free(sbi->nm_info);
   2424 
   2425 	/* free sit_info */
   2426 	for (i = 0; i < TOTAL_SEGS(sbi); i++)
   2427 		free(sit_i->sentries[i].cur_valid_map);
   2428 
   2429 	free(sit_i->sit_bitmap);
   2430 	free(sm_i->sit_info);
   2431 
   2432 	/* free sm_info */
   2433 	for (i = 0; i < NR_CURSEG_TYPE; i++)
   2434 		free(sm_i->curseg_array[i].sum_blk);
   2435 
   2436 	free(sm_i->curseg_array);
   2437 	free(sbi->sm_info);
   2438 
   2439 	free(sbi->ckpt);
   2440 	free(sbi->raw_super);
   2441 }
   2442