1 /**
2  * f2fs_format.c
3  *
4  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5  *             http://www.samsung.com/
6  *
7  * Dual licensed under the GPL or LGPL version 2 licenses.
8  */
9 #define _LARGEFILE64_SOURCE
10 
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <fcntl.h>
14 #include <string.h>
15 #include <unistd.h>
16 #ifndef ANDROID_WINDOWS_HOST
17 #include <sys/stat.h>
18 #include <sys/mount.h>
19 #endif
20 #include <time.h>
21 #include <uuid/uuid.h>
22 
23 #include "f2fs_fs.h"
24 #include "quota.h"
25 #include "f2fs_format_utils.h"
26 
27 extern struct f2fs_configuration c;
28 struct f2fs_super_block raw_sb;
29 struct f2fs_super_block *sb = &raw_sb;
30 struct f2fs_checkpoint *cp;
31 
32 /* Return first segment number of each area */
33 #define prev_zone(cur)		(c.cur_seg[cur] - c.segs_per_zone)
34 #define next_zone(cur)		(c.cur_seg[cur] + c.segs_per_zone)
35 #define last_zone(cur)		((cur - 1) * c.segs_per_zone)
36 #define last_section(cur)	(cur + (c.secs_per_zone - 1) * c.segs_per_sec)
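/*
 * Illustrative note (not from the original source): with the default
 * geometry of one segment per section and one section per zone,
 * c.segs_per_zone == 1, so next_zone(cur) is simply c.cur_seg[cur] + 1
 * and prev_zone(cur) is c.cur_seg[cur] - 1.
 */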
37 
38 static unsigned int quotatype_bits = 0;
39 
40 const char *media_ext_lists[] = {
41 	/* common prefix */
42 	"mp", // Covers mp3, mp4, mpeg, mpg
43 	"wm", // Covers wma, wmb, wmv
44 	"og", // Covers oga, ogg, ogm, ogv
45 	"jp", // Covers jpg, jpeg, jp2
46 
47 	/* video */
48 	"avi",
49 	"m4v",
50 	"m4p",
51 	"mkv",
52 	"mov",
53 	"webm",
54 
55 	/* audio */
56 	"wav",
57 	"m4a",
58 	"3gp",
59 	"opus",
60 	"flac",
61 
62 	/* image */
63 	"gif",
64 	"png",
65 	"svg",
66 	"webp",
67 
68 	/* archives */
69 	"jar",
70 	"deb",
71 	"iso",
72 	"gz",
73 	"xz",
74 	"zst",
75 
76 	/* others */
77 	"pdf",
78 	"pyc", // Python bytecode
79 	"ttc",
80 	"ttf",
81 	"exe",
82 
83 	/* android */
84 	"apk",
85 	"cnt", // Image alias
86 	"exo", // YouTube
87 	"odex", // Android RunTime
88 	"vdex", // Android RunTime
89 	"so",
90 
91 	NULL
92 };
93 
94 const char *hot_ext_lists[] = {
95 	"db",
96 	NULL
97 };
98 
99 const char **default_ext_list[] = {
100 	media_ext_lists,
101 	hot_ext_lists
102 };
103 
104 static bool is_extension_exist(const char *name)
105 {
106 	int i;
107 
108 	for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
109 		char *ext = (char *)sb->extension_list[i];
110 		if (!strcmp(ext, name))
111 			return 1;
112 	}
113 
114 	return 0;
115 }
116 
117 static void cure_extension_list(void)
118 {
119 	const char **extlist;
120 	char *ext_str;
121 	char *ue;
122 	int name_len;
123 	int i, pos = 0;
124 
125 	set_sb(extension_count, 0);
126 	memset(sb->extension_list, 0, sizeof(sb->extension_list));
127 
128 	for (i = 0; i < 2; i++) {
129 		ext_str = c.extension_list[i];
130 		extlist = default_ext_list[i];
131 
132 		while (*extlist) {
133 			name_len = strlen(*extlist);
134 			memcpy(sb->extension_list[pos++], *extlist, name_len);
135 			extlist++;
136 		}
137 		if (i == 0)
138 			set_sb(extension_count, pos);
139 		else
140 			sb->hot_ext_count = pos - get_sb(extension_count);
141 
142 		if (!ext_str)
143 			continue;
144 
145 		/* add user ext list */
146 		ue = strtok(ext_str, ", ");
147 		while (ue != NULL) {
148 			name_len = strlen(ue);
149 			if (name_len >= F2FS_EXTENSION_LEN) {
150 				MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
151 				goto next;
152 			}
153 			if (!is_extension_exist(ue))
154 				memcpy(sb->extension_list[pos++], ue, name_len);
155 next:
156 			ue = strtok(NULL, ", ");
157 			if (pos >= F2FS_MAX_EXTENSION)
158 				break;
159 		}
160 
161 		if (i == 0)
162 			set_sb(extension_count, pos);
163 		else
164 			sb->hot_ext_count = pos - get_sb(extension_count);
165 
166 		free(c.extension_list[i]);
167 	}
168 }
169 
170 static void verify_cur_segs(void)
171 {
172 	int i, j;
173 	int reorder = 0;
174 
175 	for (i = 0; i < NR_CURSEG_TYPE; i++) {
176 		for (j = i + 1; j < NR_CURSEG_TYPE; j++) {
177 			if (c.cur_seg[i] == c.cur_seg[j]) {
178 				reorder = 1;
179 				break;
180 			}
181 		}
182 	}
183 
184 	if (!reorder)
185 		return;
186 
187 	c.cur_seg[0] = 0;
188 	for (i = 1; i < NR_CURSEG_TYPE; i++)
189 		c.cur_seg[i] = next_zone(i - 1);
190 }
191 
192 static int f2fs_prepare_super_block(void)
193 {
194 	u_int32_t blk_size_bytes;
195 	u_int32_t log_sectorsize, log_sectors_per_block;
196 	u_int32_t log_blocksize, log_blks_per_seg;
197 	u_int32_t segment_size_bytes, zone_size_bytes;
198 	u_int32_t sit_segments, nat_segments;
199 	u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
200 	u_int32_t total_valid_blks_available;
201 	u_int64_t zone_align_start_offset, diff;
202 	u_int64_t total_meta_zones, total_meta_segments;
203 	u_int32_t sit_bitmap_size, max_sit_bitmap_size;
204 	u_int32_t max_nat_bitmap_size, max_nat_segments;
205 	u_int32_t total_zones;
206 	enum quota_type qtype;
207 	int i;
208 
209 	set_sb(magic, F2FS_SUPER_MAGIC);
210 	set_sb(major_ver, F2FS_MAJOR_VERSION);
211 	set_sb(minor_ver, F2FS_MINOR_VERSION);
212 
213 	log_sectorsize = log_base_2(c.sector_size);
214 	log_sectors_per_block = log_base_2(c.sectors_per_blk);
215 	log_blocksize = log_sectorsize + log_sectors_per_block;
216 	log_blks_per_seg = log_base_2(c.blks_per_seg);
217 
218 	set_sb(log_sectorsize, log_sectorsize);
219 	set_sb(log_sectors_per_block, log_sectors_per_block);
220 
221 	set_sb(log_blocksize, log_blocksize);
222 	set_sb(log_blocks_per_seg, log_blks_per_seg);
223 
224 	set_sb(segs_per_sec, c.segs_per_sec);
225 	set_sb(secs_per_zone, c.secs_per_zone);
226 
227 	blk_size_bytes = 1 << log_blocksize;
228 	segment_size_bytes = blk_size_bytes * c.blks_per_seg;
229 	zone_size_bytes =
230 		blk_size_bytes * c.secs_per_zone *
231 		c.segs_per_sec * c.blks_per_seg;
232 
233 	set_sb(checksum_offset, 0);
234 
235 	set_sb(block_count, c.total_sectors >> log_sectors_per_block);
236 
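	/*
	 * The next statement rounds (partition start offset + 2 blocks kept
	 * for the two superblock copies) up to a multiple of the zone size,
	 * then converts the result back into an offset relative to the
	 * partition start, so that segment 0 begins on a zone boundary.
	 */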
237 	zone_align_start_offset =
238 		((u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
239 		2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
240 		zone_size_bytes * zone_size_bytes -
241 		(u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
242 
243 	if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
244 		MSG(1, "\t%s: Align start sector number to the page unit\n",
245 				c.zoned_mode ? "FAIL" : "WARN");
246 		MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
247 				c.start_sector,
248 				c.start_sector % DEFAULT_SECTORS_PER_BLOCK,
249 				DEFAULT_SECTORS_PER_BLOCK);
250 		if (c.zoned_mode)
251 			return -1;
252 	}
253 
254 	set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
255 	sb->cp_blkaddr = sb->segment0_blkaddr;
256 
257 	MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
258 					get_sb(segment0_blkaddr));
259 
260 	if (c.zoned_mode && (get_sb(segment0_blkaddr) + c.start_sector /
261 					DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) {
262 		MSG(1, "\tError: Unaligned segment0 block address %u\n",
263 				get_sb(segment0_blkaddr));
264 		return -1;
265 	}
266 
267 	for (i = 0; i < c.ndevs; i++) {
268 		if (i == 0) {
269 			c.devices[i].total_segments =
270 				(c.devices[i].total_sectors *
271 				c.sector_size - zone_align_start_offset) /
272 				segment_size_bytes;
273 			c.devices[i].start_blkaddr = 0;
274 			c.devices[i].end_blkaddr = c.devices[i].total_segments *
275 						c.blks_per_seg - 1 +
276 						sb->segment0_blkaddr;
277 		} else {
278 			c.devices[i].total_segments =
279 				c.devices[i].total_sectors /
280 				(c.sectors_per_blk * c.blks_per_seg);
281 			c.devices[i].start_blkaddr =
282 					c.devices[i - 1].end_blkaddr + 1;
283 			c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
284 					c.devices[i].total_segments *
285 					c.blks_per_seg - 1;
286 		}
287 		if (c.ndevs > 1) {
288 			memcpy(sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
289 			sb->devs[i].total_segments =
290 					cpu_to_le32(c.devices[i].total_segments);
291 		}
292 
293 		c.total_segments += c.devices[i].total_segments;
294 	}
295 	set_sb(segment_count, (c.total_segments / c.segs_per_zone *
296 						c.segs_per_zone));
297 	set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);
298 
299 	set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
300 			get_sb(segment_count_ckpt) * c.blks_per_seg);
301 
302 	blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
303 
304 	sit_segments = SEG_ALIGN(blocks_for_sit);
305 
306 	set_sb(segment_count_sit, sit_segments * 2);
307 
308 	set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
309 			c.blks_per_seg);
310 
311 	total_valid_blks_available = (get_sb(segment_count) -
312 			(get_sb(segment_count_ckpt) +
313 			get_sb(segment_count_sit))) * c.blks_per_seg;
314 
315 	blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
316 			NAT_ENTRY_PER_BLOCK);
317 
318 	if (c.large_nat_bitmap) {
319 		nat_segments = SEG_ALIGN(blocks_for_nat) *
320 						DEFAULT_NAT_ENTRY_RATIO / 100;
321 		set_sb(segment_count_nat, nat_segments ? nat_segments : 1);
322 		max_nat_bitmap_size = (get_sb(segment_count_nat) <<
323 						log_blks_per_seg) / 8;
324 		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
325 	} else {
326 		set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
327 		max_nat_bitmap_size = 0;
328 	}
329 
330 	/*
331 	 * The number of node segments should not exceed a threshold,
332 	 * since that number sizes the NAT bitmap area in a CP page.
333 	 * The threshold is chosen so that a single CP page is not overflowed.
334 	 */
335 	sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
336 				log_blks_per_seg) / 8;
337 
338 	if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
339 		max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
340 	else
341 		max_sit_bitmap_size = sit_bitmap_size;
342 
343 	if (c.large_nat_bitmap) {
344 		/* use cp_payload if free space of f2fs_checkpoint is not enough */
345 		if (max_sit_bitmap_size + max_nat_bitmap_size >
346 						MAX_BITMAP_SIZE_IN_CKPT) {
347 			u_int32_t diff =  max_sit_bitmap_size +
348 						max_nat_bitmap_size -
349 						MAX_BITMAP_SIZE_IN_CKPT;
350 			set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
351 		} else {
352 			set_sb(cp_payload, 0);
353 		}
354 	} else {
355 		/*
356 		 * At least one segment must be reserved for the NAT.
357 		 * When the SIT bitmap is too large, the CP area has to be
358 		 * expanded, which requires more CP pages.
359 		 */
360 		if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
361 			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT;
362 			set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
363 		} else {
364 			max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT -
365 							max_sit_bitmap_size;
366 			set_sb(cp_payload, 0);
367 		}
368 		max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;
369 
370 		if (get_sb(segment_count_nat) > max_nat_segments)
371 			set_sb(segment_count_nat, max_nat_segments);
372 
373 		set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
374 	}
375 
376 	set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
377 			c.blks_per_seg);
378 
379 	total_valid_blks_available = (get_sb(segment_count) -
380 			(get_sb(segment_count_ckpt) +
381 			get_sb(segment_count_sit) +
382 			get_sb(segment_count_nat))) *
383 			c.blks_per_seg;
384 
385 	blocks_for_ssa = total_valid_blks_available /
386 				c.blks_per_seg + 1;
387 
388 	set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));
389 
390 	total_meta_segments = get_sb(segment_count_ckpt) +
391 		get_sb(segment_count_sit) +
392 		get_sb(segment_count_nat) +
393 		get_sb(segment_count_ssa);
394 	diff = total_meta_segments % (c.segs_per_zone);
395 	if (diff)
396 		set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
397 			(c.segs_per_zone - diff));
398 
399 	total_meta_zones = ZONE_ALIGN(total_meta_segments *
400 						c.blks_per_seg);
401 
402 	set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
403 				c.segs_per_zone * c.blks_per_seg);
404 
405 	if (c.zoned_mode) {
406 		/*
407 		 * Make sure there is enough randomly writeable
408 		 * space at the beginning of the disk.
409 		 */
410 		unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;
411 
412 		if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
413 				c.devices[0].nr_rnd_zones < main_blkzone) {
414 			MSG(0, "\tError: Device does not have enough random "
415 					"write zones for F2FS volume (%lu needed)\n",
416 					main_blkzone);
417 			return -1;
418 		}
419 	}
420 
421 	total_zones = get_sb(segment_count) / (c.segs_per_zone) -
422 							total_meta_zones;
423 
424 	set_sb(section_count, total_zones * c.secs_per_zone);
425 
426 	set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);
427 
428 	/* Let's determine the best reserved and overprovisioned space */
429 	if (c.overprovision == 0)
430 		c.overprovision = get_best_overprovision(sb);
431 
432 	c.reserved_segments =
433 			(2 * (100 / c.overprovision + 1) + NR_CURSEG_TYPE)
434 			* c.segs_per_sec;
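	/*
	 * Worked example (illustrative only, not from the source): with
	 * overprovision = 5.0 and one segment per section this evaluates to
	 * (2 * (100 / 5 + 1) + NR_CURSEG_TYPE) * 1 = (42 + 6) = 48 segments.
	 */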
435 
436 	if (c.overprovision == 0 || c.total_segments < F2FS_MIN_SEGMENTS ||
437 		(c.devices[0].total_sectors *
438 			c.sector_size < zone_align_start_offset) ||
439 		(get_sb(segment_count_main) - NR_CURSEG_TYPE) <
440 						c.reserved_segments) {
441 		MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
442 		return -1;
443 	}
444 
445 	uuid_generate(sb->uuid);
446 
447 	/* precompute checksum seed for metadata */
448 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
449 		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
450 
451 	utf8_to_utf16(sb->volume_name, (const char *)c.vol_label,
452 				MAX_VOLUME_NAME, strlen(c.vol_label));
453 	set_sb(node_ino, 1);
454 	set_sb(meta_ino, 2);
455 	set_sb(root_ino, 3);
456 	c.next_free_nid = 4;
457 
458 	if (c.feature & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
459 		quotatype_bits = QUOTA_USR_BIT | QUOTA_GRP_BIT;
460 		if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
461 			quotatype_bits |= QUOTA_PRJ_BIT;
462 	}
463 
464 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
465 		if (!((1 << qtype) & quotatype_bits))
466 			continue;
467 		sb->qf_ino[qtype] = cpu_to_le32(c.next_free_nid++);
468 		MSG(0, "Info: add quota type = %u => %u\n",
469 					qtype, c.next_free_nid - 1);
470 	}
471 
472 	if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND))
473 		c.lpf_ino = c.next_free_nid++;
474 
475 	if (total_zones <= 6) {
476 		MSG(1, "\tError: %u zones: need more zones "
477 			"by shrinking the zone size\n", total_zones);
478 		return -1;
479 	}
480 
481 	if (c.heap) {
482 		c.cur_seg[CURSEG_HOT_NODE] =
483 				last_section(last_zone(total_zones));
484 		c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE);
485 		c.cur_seg[CURSEG_COLD_NODE] = prev_zone(CURSEG_WARM_NODE);
486 		c.cur_seg[CURSEG_HOT_DATA] = prev_zone(CURSEG_COLD_NODE);
487 		c.cur_seg[CURSEG_COLD_DATA] = 0;
488 		c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_COLD_DATA);
489 	} else {
490 		c.cur_seg[CURSEG_HOT_NODE] = 0;
491 		c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
492 		c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
493 		c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
494 		c.cur_seg[CURSEG_COLD_DATA] =
495 				max(last_zone((total_zones >> 2)),
496 					next_zone(CURSEG_HOT_DATA));
497 		c.cur_seg[CURSEG_WARM_DATA] =
498 				max(last_zone((total_zones >> 1)),
499 					next_zone(CURSEG_COLD_DATA));
500 	}
501 
502 	/* if there is redundancy, reassign it */
503 	verify_cur_segs();
504 
505 	cure_extension_list();
506 
507 	/* get kernel version */
508 	if (c.kd >= 0) {
509 		dev_read_version(c.version, 0, VERSION_LEN);
510 		get_kernel_version(c.version);
511 		MSG(0, "Info: format version with\n  \"%s\"\n", c.version);
512 	} else {
513 		get_kernel_uname_version(c.version);
514 	}
515 
516 	memcpy(sb->version, c.version, VERSION_LEN);
517 	memcpy(sb->init_version, c.version, VERSION_LEN);
518 
519 	if (c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
520 		set_sb(s_encoding, c.s_encoding);
521 		set_sb(s_encoding_flags, c.s_encoding_flags);
522 	}
523 
524 	sb->feature = c.feature;
525 
526 	if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
527 		set_sb(checksum_offset, SB_CHKSUM_OFFSET);
528 		set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
529 						SB_CHKSUM_OFFSET));
530 		MSG(1, "Info: SB CRC is set: offset (%d), crc (0x%x)\n",
531 					get_sb(checksum_offset), get_sb(crc));
532 	}
533 
534 	return 0;
535 }
536 
537 static int f2fs_init_sit_area(void)
538 {
539 	u_int32_t blk_size, seg_size;
540 	u_int32_t index = 0;
541 	u_int64_t sit_seg_addr = 0;
542 	u_int8_t *zero_buf = NULL;
543 
544 	blk_size = 1 << get_sb(log_blocksize);
545 	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
546 
547 	zero_buf = calloc(sizeof(u_int8_t), seg_size);
548 	if (zero_buf == NULL) {
549 		MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
550 		return -1;
551 	}
552 
553 	sit_seg_addr = get_sb(sit_blkaddr);
554 	sit_seg_addr *= blk_size;
555 
556 	DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
557 	for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
558 		if (dev_fill(zero_buf, sit_seg_addr, seg_size)) {
559 			MSG(1, "\tError: While zeroing out the sit area "
560 					"on disk!!!\n");
561 			free(zero_buf);
562 			return -1;
563 		}
564 		sit_seg_addr += seg_size;
565 	}
566 
567 	free(zero_buf);
568 	return 0;
569 }
570 
571 static int f2fs_init_nat_area(void)
572 {
573 	u_int32_t blk_size, seg_size;
574 	u_int32_t index = 0;
575 	u_int64_t nat_seg_addr = 0;
576 	u_int8_t *nat_buf = NULL;
577 
578 	blk_size = 1 << get_sb(log_blocksize);
579 	seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
580 
581 	nat_buf = calloc(sizeof(u_int8_t), seg_size);
582 	if (nat_buf == NULL) {
583 		MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
584 		return -1;
585 	}
586 
587 	nat_seg_addr = get_sb(nat_blkaddr);
588 	nat_seg_addr *= blk_size;
589 
590 	DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
591 	for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
592 		if (dev_fill(nat_buf, nat_seg_addr, seg_size)) {
593 			MSG(1, "\tError: While zeroing out the nat area "
594 					"on disk!!!\n");
595 			free(nat_buf);
596 			return -1;
597 		}
598 		nat_seg_addr = nat_seg_addr + (2 * seg_size);
599 	}
600 
601 	free(nat_buf);
602 	return 0;
603 }
604 
605 static int f2fs_write_check_point_pack(void)
606 {
607 	struct f2fs_summary_block *sum = NULL;
608 	struct f2fs_journal *journal;
609 	u_int32_t blk_size_bytes;
610 	u_int32_t nat_bits_bytes, nat_bits_blocks;
611 	unsigned char *nat_bits = NULL, *empty_nat_bits;
612 	u_int64_t cp_seg_blk = 0;
613 	u_int32_t crc = 0, flags;
614 	unsigned int i;
615 	char *cp_payload = NULL;
616 	char *sum_compact, *sum_compact_p;
617 	struct f2fs_summary *sum_entry;
618 	enum quota_type qtype;
619 	int off;
620 	int ret = -1;
621 
622 	cp = calloc(F2FS_BLKSIZE, 1);
623 	if (cp == NULL) {
624 		MSG(1, "\tError: Calloc failed for f2fs_checkpoint!!!\n");
625 		return ret;
626 	}
627 
628 	sum = calloc(F2FS_BLKSIZE, 1);
629 	if (sum == NULL) {
630 		MSG(1, "\tError: Calloc failed for summary_node!!!\n");
631 		goto free_cp;
632 	}
633 
634 	sum_compact = calloc(F2FS_BLKSIZE, 1);
635 	if (sum_compact == NULL) {
636 		MSG(1, "\tError: Calloc failed for summary buffer!!!\n");
637 		goto free_sum;
638 	}
639 	sum_compact_p = sum_compact;
640 
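	/*
	 * nat_bits sizing, a sketch assuming the default 512 blocks per
	 * segment: one bit is kept per NAT block, and only half of
	 * segment_count_nat holds live NAT blocks (the other half is the
	 * shadow copy), so bytes = (segment_count_nat / 2) * 512 / 8,
	 * i.e. segment_count_nat << 5.  The buffer below holds an 8-byte
	 * checkpoint CRC, the full_nat_bits and the empty_nat_bits,
	 * rounded up to whole blocks.
	 */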
641 	nat_bits_bytes = get_sb(segment_count_nat) << 5;
642 	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
643 						F2FS_BLKSIZE - 1);
644 	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
645 	if (nat_bits == NULL) {
646 		MSG(1, "\tError: Calloc failed for nat bits buffer!!!\n");
647 		goto free_sum_compact;
648 	}
649 
650 	cp_payload = calloc(F2FS_BLKSIZE, 1);
651 	if (cp_payload == NULL) {
652 		MSG(1, "\tError: Calloc failed for cp_payload!!!\n");
653 		goto free_nat_bits;
654 	}
655 
656 	/* 1. cp page 1 of checkpoint pack 1 */
657 	srand(time(NULL));
658 	cp->checkpoint_ver = cpu_to_le64(rand() | 0x1);
659 	set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
660 	set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
661 	set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
662 	set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
663 	set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
664 	set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
665 	for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
666 		set_cp(cur_node_segno[i], 0xffffffff);
667 		set_cp(cur_data_segno[i], 0xffffffff);
668 	}
669 
670 	set_cp(cur_node_blkoff[0], 1 + c.quota_inum + c.lpf_inum);
671 	set_cp(cur_data_blkoff[0], 1 + c.quota_dnum + c.lpf_dnum);
672 	set_cp(valid_block_count, 2 + c.quota_inum + c.quota_dnum +
673 			c.lpf_inum + c.lpf_dnum);
674 	set_cp(rsvd_segment_count, c.reserved_segments);
675 	set_cp(overprov_segment_count, (get_sb(segment_count_main) -
676 			get_cp(rsvd_segment_count)) *
677 			c.overprovision / 100);
678 	set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
679 			get_cp(rsvd_segment_count));
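	/*
	 * Worked example (illustrative numbers only): with 4096 main
	 * segments, 48 reserved segments and overprovision = 5.0, this is
	 * (4096 - 48) * 5 / 100 = 202 segments, plus the 48 reserved
	 * segments, giving overprov_segment_count = 250.
	 */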
680 
681 	MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
682 	MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
683 					get_cp(overprov_segment_count),
684 					c.reserved_segments);
685 
686 	/* main segments - 6 current segments already open for node/data logs */
687 	set_cp(free_segment_count, get_sb(segment_count_main) - 6);
688 	set_cp(user_block_count, ((get_cp(free_segment_count) + 6 -
689 			get_cp(overprov_segment_count)) * c.blks_per_seg));
690 	/* cp page (2), data summaries (1), node summaries (3) */
691 	set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
692 	flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
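	/* set CP_NAT_BITS_FLAG only if the nat_bits blocks fit in the unused
	 * tail of the checkpoint segment, where they are written further below */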
693 	if (get_cp(cp_pack_total_block_count) <=
694 			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
695 		flags |= CP_NAT_BITS_FLAG;
696 
697 	if (c.trimmed)
698 		flags |= CP_TRIMMED_FLAG;
699 
700 	if (c.large_nat_bitmap)
701 		flags |= CP_LARGE_NAT_BITMAP_FLAG;
702 
703 	set_cp(ckpt_flags, flags);
704 	set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
705 	set_cp(valid_node_count, 1 + c.quota_inum + c.lpf_inum);
706 	set_cp(valid_inode_count, 1 + c.quota_inum + c.lpf_inum);
707 	set_cp(next_free_nid, c.next_free_nid);
708 	set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
709 			get_sb(log_blocks_per_seg)) / 8);
710 
711 	set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
712 			 get_sb(log_blocks_per_seg)) / 8);
713 
714 	if (c.large_nat_bitmap)
715 		set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
716 	else
717 		set_cp(checksum_offset, CP_CHKSUM_OFFSET);
718 
719 	crc = f2fs_checkpoint_chksum(cp);
720 	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
721 							cpu_to_le32(crc);
722 
723 	blk_size_bytes = 1 << get_sb(log_blocksize);
724 
725 	if (blk_size_bytes != F2FS_BLKSIZE) {
726 		MSG(1, "\tError: Wrong block size %d / %d!!!\n",
727 					blk_size_bytes, F2FS_BLKSIZE);
728 		goto free_cp_payload;
729 	}
730 
731 	cp_seg_blk = get_sb(segment0_blkaddr);
732 
733 	DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
734 						cp_seg_blk);
735 	if (dev_write_block(cp, cp_seg_blk)) {
736 		MSG(1, "\tError: While writing the cp to disk!!!\n");
737 		goto free_cp_payload;
738 	}
739 
740 	for (i = 0; i < get_sb(cp_payload); i++) {
741 		cp_seg_blk++;
742 		if (dev_fill_block(cp_payload, cp_seg_blk)) {
743 			MSG(1, "\tError: While zeroing out the sit bitmap area "
744 					"on disk!!!\n");
745 			goto free_cp_payload;
746 		}
747 	}
748 
749 	/* Prepare and write Segment summary for HOT/WARM/COLD DATA
750 	 *
751 	 * The structure of compact summary
752 	 * +-------------------+
753 	 * | nat_journal       |
754 	 * +-------------------+
755 	 * | sit_journal       |
756 	 * +-------------------+
757 	 * | hot data summary  |
758 	 * +-------------------+
759 	 * | warm data summary |
760 	 * +-------------------+
761 	 * | cold data summary |
762 	 * +-------------------+
763 	 */
764 	memset(sum, 0, sizeof(struct f2fs_summary_block));
765 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_DATA);
766 
767 	journal = &sum->journal;
768 	journal->n_nats = cpu_to_le16(1 + c.quota_inum + c.lpf_inum);
769 	journal->nat_j.entries[0].nid = sb->root_ino;
770 	journal->nat_j.entries[0].ne.version = 0;
771 	journal->nat_j.entries[0].ne.ino = sb->root_ino;
772 	journal->nat_j.entries[0].ne.block_addr = cpu_to_le32(
773 			get_sb(main_blkaddr) +
774 			get_cp(cur_node_segno[0]) * c.blks_per_seg);
775 
776 	for (qtype = 0, i = 1; qtype < F2FS_MAX_QUOTAS; qtype++) {
777 		if (sb->qf_ino[qtype] == 0)
778 			continue;
779 		journal->nat_j.entries[i].nid = sb->qf_ino[qtype];
780 		journal->nat_j.entries[i].ne.version = 0;
781 		journal->nat_j.entries[i].ne.ino = sb->qf_ino[qtype];
782 		journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
783 				get_sb(main_blkaddr) +
784 				get_cp(cur_node_segno[0]) *
785 				c.blks_per_seg + i);
786 		i++;
787 	}
788 
789 	if (c.lpf_inum) {
790 		journal->nat_j.entries[i].nid = cpu_to_le32(c.lpf_ino);
791 		journal->nat_j.entries[i].ne.version = 0;
792 		journal->nat_j.entries[i].ne.ino = cpu_to_le32(c.lpf_ino);
793 		journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
794 				get_sb(main_blkaddr) +
795 				get_cp(cur_node_segno[0]) *
796 				c.blks_per_seg + i);
797 	}
798 
799 	memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
800 	sum_compact_p += SUM_JOURNAL_SIZE;
801 
802 	memset(sum, 0, sizeof(struct f2fs_summary_block));
803 	/* inode sit for root */
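	/*
	 * In a SIT journal entry, se.vblocks packs the segment type into
	 * the bits above the 10-bit valid-block count, which is what the
	 * "<< 10" below encodes; the low bits hold the number of valid
	 * blocks in that segment.
	 */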
804 	journal->n_sits = cpu_to_le16(6);
805 	journal->sit_j.entries[0].segno = cp->cur_node_segno[0];
806 	journal->sit_j.entries[0].se.vblocks =
807 				cpu_to_le16((CURSEG_HOT_NODE << 10) |
808 						(1 + c.quota_inum + c.lpf_inum));
809 	f2fs_set_bit(0, (char *)journal->sit_j.entries[0].se.valid_map);
810 	for (i = 1; i <= c.quota_inum; i++)
811 		f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
812 	if (c.lpf_inum)
813 		f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
814 
815 	journal->sit_j.entries[1].segno = cp->cur_node_segno[1];
816 	journal->sit_j.entries[1].se.vblocks =
817 				cpu_to_le16((CURSEG_WARM_NODE << 10));
818 	journal->sit_j.entries[2].segno = cp->cur_node_segno[2];
819 	journal->sit_j.entries[2].se.vblocks =
820 				cpu_to_le16((CURSEG_COLD_NODE << 10));
821 
822 	/* data sit for root */
823 	journal->sit_j.entries[3].segno = cp->cur_data_segno[0];
824 	journal->sit_j.entries[3].se.vblocks =
825 				cpu_to_le16((CURSEG_HOT_DATA << 10) |
826 						(1 + c.quota_dnum + c.lpf_dnum));
827 	f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map);
828 	for (i = 1; i <= c.quota_dnum; i++)
829 		f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
830 	if (c.lpf_dnum)
831 		f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
832 
833 	journal->sit_j.entries[4].segno = cp->cur_data_segno[1];
834 	journal->sit_j.entries[4].se.vblocks =
835 				cpu_to_le16((CURSEG_WARM_DATA << 10));
836 	journal->sit_j.entries[5].segno = cp->cur_data_segno[2];
837 	journal->sit_j.entries[5].se.vblocks =
838 				cpu_to_le16((CURSEG_COLD_DATA << 10));
839 
840 	memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
841 	sum_compact_p += SUM_JOURNAL_SIZE;
842 
843 	/* hot data summary */
844 	sum_entry = (struct f2fs_summary *)sum_compact_p;
845 	sum_entry->nid = sb->root_ino;
846 	sum_entry->ofs_in_node = 0;
847 
848 	off = 1;
849 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
850 		if (sb->qf_ino[qtype] == 0)
851 			continue;
852 		int j;
853 
854 		for (j = 0; j < QUOTA_DATA(qtype); j++) {
855 			(sum_entry + off + j)->nid = sb->qf_ino[qtype];
856 			(sum_entry + off + j)->ofs_in_node = cpu_to_le16(j);
857 		}
858 		off += QUOTA_DATA(qtype);
859 	}
860 
861 	if (c.lpf_dnum) {
862 		(sum_entry + off)->nid = cpu_to_le32(c.lpf_ino);
863 		(sum_entry + off)->ofs_in_node = 0;
864 	}
865 
866 	/* warm data summary, nothing to do */
867 	/* cold data summary, nothing to do */
868 
869 	cp_seg_blk++;
870 	DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
871 			cp_seg_blk);
872 	if (dev_write_block(sum_compact, cp_seg_blk)) {
873 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
874 		goto free_cp_payload;
875 	}
876 
877 	/* Prepare and write Segment summary for HOT_NODE */
878 	memset(sum, 0, sizeof(struct f2fs_summary_block));
879 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
880 
881 	sum->entries[0].nid = sb->root_ino;
882 	sum->entries[0].ofs_in_node = 0;
883 	for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
884 		if (sb->qf_ino[qtype] == 0)
885 			continue;
886 		sum->entries[1 + i].nid = sb->qf_ino[qtype];
887 		sum->entries[1 + i].ofs_in_node = 0;
888 		i++;
889 	}
890 	if (c.lpf_inum) {
891 		i++;
892 		sum->entries[i].nid = cpu_to_le32(c.lpf_ino);
893 		sum->entries[i].ofs_in_node = 0;
894 	}
895 
896 	cp_seg_blk++;
897 	DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
898 			cp_seg_blk);
899 	if (dev_write_block(sum, cp_seg_blk)) {
900 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
901 		goto free_cp_payload;
902 	}
903 
904 	/* Zero out the segment summary for WARM_NODE. */
905 	memset(sum, 0, sizeof(struct f2fs_summary_block));
906 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
907 
908 	cp_seg_blk++;
909 	DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
910 			cp_seg_blk);
911 	if (dev_write_block(sum, cp_seg_blk)) {
912 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
913 		goto free_cp_payload;
914 	}
915 
916 	/* Zero out the segment summary for COLD_NODE. */
917 	memset(sum, 0, sizeof(struct f2fs_summary_block));
918 	SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
919 	cp_seg_blk++;
920 	DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
921 			cp_seg_blk);
922 	if (dev_write_block(sum, cp_seg_blk)) {
923 		MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
924 		goto free_cp_payload;
925 	}
926 
927 	/* cp page2 */
928 	cp_seg_blk++;
929 	DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
930 	if (dev_write_block(cp, cp_seg_blk)) {
931 		MSG(1, "\tError: While writing the cp to disk!!!\n");
932 		goto free_cp_payload;
933 	}
934 
935 	/* write NAT bits, if possible */
936 	if (flags & CP_NAT_BITS_FLAG) {
937 		uint32_t i;
938 
939 		*(__le64 *)nat_bits = get_cp_crc(cp);
940 		empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
941 		memset(empty_nat_bits, 0xff, nat_bits_bytes);
942 		test_and_clear_bit_le(0, empty_nat_bits);
943 
944 		/* write the last blocks in cp pack */
945 		cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
946 				get_sb(log_blocks_per_seg)) - nat_bits_blocks;
947 
948 		DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
949 					cp_seg_blk);
950 
951 		for (i = 0; i < nat_bits_blocks; i++) {
952 			if (dev_write_block(nat_bits + i *
953 						F2FS_BLKSIZE, cp_seg_blk + i)) {
954 				MSG(1, "\tError: write NAT bits to disk!!!\n");
955 				goto free_cp_payload;
956 			}
957 		}
958 	}
959 
960 	/* cp page 1 of checkpoint pack 2:
961 	 * initialize the other checkpoint pack with version zero
962 	 */
963 	cp->checkpoint_ver = 0;
964 
965 	crc = f2fs_checkpoint_chksum(cp);
966 	*((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
967 							cpu_to_le32(crc);
968 	cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
969 	DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
970 				cp_seg_blk);
971 	if (dev_write_block(cp, cp_seg_blk)) {
972 		MSG(1, "\tError: While writing the cp to disk!!!\n");
973 		goto free_cp_payload;
974 	}
975 
976 	for (i = 0; i < get_sb(cp_payload); i++) {
977 		cp_seg_blk++;
978 		if (dev_fill_block(cp_payload, cp_seg_blk)) {
979 			MSG(1, "\tError: While zeroing out the sit bitmap area "
980 					"on disk!!!\n");
981 			goto free_cp_payload;
982 		}
983 	}
984 
985 	/* cp page 2 of checkpoint pack 2 */
986 	cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
987 					get_sb(cp_payload) - 1);
988 	DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
989 				cp_seg_blk);
990 	if (dev_write_block(cp, cp_seg_blk)) {
991 		MSG(1, "\tError: While writing the cp to disk!!!\n");
992 		goto free_cp_payload;
993 	}
994 
995 	ret = 0;
996 
997 free_cp_payload:
998 	free(cp_payload);
999 free_nat_bits:
1000 	free(nat_bits);
1001 free_sum_compact:
1002 	free(sum_compact);
1003 free_sum:
1004 	free(sum);
1005 free_cp:
1006 	free(cp);
1007 	return ret;
1008 }
1009 
1010 static int f2fs_write_super_block(void)
1011 {
1012 	int index;
1013 	u_int8_t *zero_buff;
1014 
1015 	zero_buff = calloc(F2FS_BLKSIZE, 1);
1016 	if (zero_buff == NULL) {
1017 		MSG(1, "\tError: Calloc Failed for super_blk_zero_buf!!!\n");
1018 		return -1;
1019 	}
1020 
1021 	memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
1022 	DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
1023 	for (index = 0; index < 2; index++) {
1024 		if (dev_write_block(zero_buff, index)) {
1025 			MSG(1, "\tError: While writing super_blk "
1026 					"on disk!!! index : %d\n", index);
1027 			free(zero_buff);
1028 			return -1;
1029 		}
1030 	}
1031 
1032 	free(zero_buff);
1033 	return 0;
1034 }
1035 
1036 #ifndef WITH_ANDROID
1037 static int f2fs_discard_obsolete_dnode(void)
1038 {
1039 	struct f2fs_node *raw_node;
1040 	u_int64_t next_blkaddr = 0, offset;
1041 	u64 end_blkaddr = (get_sb(segment_count_main) <<
1042 			get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
1043 	u_int64_t start_inode_pos = get_sb(main_blkaddr);
1044 	u_int64_t last_inode_pos;
1045 
1046 	if (c.zoned_mode)
1047 		return 0;
1048 
1049 	raw_node = calloc(sizeof(struct f2fs_node), 1);
1050 	if (raw_node == NULL) {
1051 		MSG(1, "\tError: Calloc Failed for discard_raw_node!!!\n");
1052 		return -1;
1053 	}
1054 
1055 	/* avoid power-off-recovery based on roll-forward policy */
1056 	offset = get_sb(main_blkaddr);
1057 	offset += c.cur_seg[CURSEG_WARM_NODE] * c.blks_per_seg;
1058 
1059 	last_inode_pos = start_inode_pos +
1060 		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg + c.quota_inum + c.lpf_inum;
1061 
1062 	do {
1063 		if (offset < get_sb(main_blkaddr) || offset >= end_blkaddr)
1064 			break;
1065 
1066 		if (dev_read_block(raw_node, offset)) {
1067 			MSG(1, "\tError: While traversing direct node!!!\n");
1068 			free(raw_node);
1069 			return -1;
1070 		}
1071 
1072 		next_blkaddr = le32_to_cpu(raw_node->footer.next_blkaddr);
1073 		memset(raw_node, 0, F2FS_BLKSIZE);
1074 
1075 		DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
1076 		if (dev_write_block(raw_node, offset)) {
1077 			MSG(1, "\tError: While discarding direct node!!!\n");
1078 			free(raw_node);
1079 			return -1;
1080 		}
1081 		offset = next_blkaddr;
1082 		/* should avoid recursive chain due to stale data */
1083 		if (offset >= start_inode_pos && offset <= last_inode_pos)
1084 			break;
1085 	} while (1);
1086 
1087 	free(raw_node);
1088 	return 0;
1089 }
1090 #endif
1091 
1092 static int f2fs_write_root_inode(void)
1093 {
1094 	struct f2fs_node *raw_node = NULL;
1095 	u_int64_t blk_size_bytes, data_blk_nor;
1096 	u_int64_t main_area_node_seg_blk_offset = 0;
1097 
1098 	raw_node = calloc(F2FS_BLKSIZE, 1);
1099 	if (raw_node == NULL) {
1100 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1101 		return -1;
1102 	}
1103 
1104 	raw_node->footer.nid = sb->root_ino;
1105 	raw_node->footer.ino = sb->root_ino;
1106 	raw_node->footer.cp_ver = cpu_to_le64(1);
1107 	raw_node->footer.next_blkaddr = cpu_to_le32(
1108 			get_sb(main_blkaddr) +
1109 			c.cur_seg[CURSEG_HOT_NODE] *
1110 			c.blks_per_seg + 1);
1111 
1112 	raw_node->i.i_mode = cpu_to_le16(0x41ed);	/* directory, octal 040755 */
1113 	if (c.lpf_ino)
1114 		raw_node->i.i_links = cpu_to_le32(3);
1115 	else
1116 		raw_node->i.i_links = cpu_to_le32(2);
1117 	raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1118 	raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1119 
1120 	blk_size_bytes = 1 << get_sb(log_blocksize);
1121 	raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes); /* dentry */
1122 	raw_node->i.i_blocks = cpu_to_le64(2);
1123 
1124 	raw_node->i.i_atime = cpu_to_le32(time(NULL));
1125 	raw_node->i.i_atime_nsec = 0;
1126 	raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1127 	raw_node->i.i_ctime_nsec = 0;
1128 	raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1129 	raw_node->i.i_mtime_nsec = 0;
1130 	raw_node->i.i_generation = 0;
1131 	raw_node->i.i_xattr_nid = 0;
1132 	raw_node->i.i_flags = 0;
1133 	raw_node->i.i_current_depth = cpu_to_le32(1);
1134 	raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1135 
1136 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1137 		raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1138 		raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
1139 	}
1140 
1141 	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1142 		raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1143 
1144 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1145 		raw_node->i.i_crtime = cpu_to_le32(time(NULL));
1146 		raw_node->i.i_crtime_nsec = 0;
1147 	}
1148 
1149 	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
1150 		raw_node->i.i_compress_algrithm = 0;
1151 		raw_node->i.i_log_cluster_size = 0;
1152 		raw_node->i.i_padding = 0;
1153 	}
1154 
1155 	data_blk_nor = get_sb(main_blkaddr) +
1156 		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
1157 	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1158 
1159 	raw_node->i.i_ext.fofs = 0;
1160 	raw_node->i.i_ext.blk_addr = 0;
1161 	raw_node->i.i_ext.len = 0;
1162 
1163 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1164 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1165 					c.blks_per_seg;
1166 
1167 	DBG(1, "\tWriting root inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1168 			get_sb(main_blkaddr),
1169 			c.cur_seg[CURSEG_HOT_NODE],
1170 			c.blks_per_seg, main_area_node_seg_blk_offset);
1171 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1172 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1173 		free(raw_node);
1174 		return -1;
1175 	}
1176 
1177 	free(raw_node);
1178 	return 0;
1179 }
1180 
1181 static int f2fs_write_default_quota(int qtype, unsigned int blkaddr,
1182 						__le32 raw_id)
1183 {
1184 	char *filebuf = calloc(F2FS_BLKSIZE, 2);
1185 	int file_magics[] = INITQMAGICS;
1186 	struct v2_disk_dqheader ddqheader;
1187 	struct v2_disk_dqinfo ddqinfo;
1188 	struct v2r1_disk_dqblk dqblk;
1189 
1190 	if (filebuf == NULL) {
1191 		MSG(1, "\tError: Calloc Failed for filebuf!!!\n");
1192 		return -1;
1193 	}
1194 
1195 	/* Write basic quota header */
1196 	ddqheader.dqh_magic = cpu_to_le32(file_magics[qtype]);
1197 	/* only support QF_VFSV1 */
1198 	ddqheader.dqh_version = cpu_to_le32(1);
1199 
1200 	memcpy(filebuf, &ddqheader, sizeof(ddqheader));
1201 
1202 	/* Fill Initial quota file content */
1203 	ddqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
1204 	ddqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
1205 	ddqinfo.dqi_flags = cpu_to_le32(0);
1206 	ddqinfo.dqi_blocks = cpu_to_le32(QT_TREEOFF + 5);
1207 	ddqinfo.dqi_free_blk = cpu_to_le32(0);
1208 	ddqinfo.dqi_free_entry = cpu_to_le32(5);
1209 
1210 	memcpy(filebuf + V2_DQINFOOFF, &ddqinfo, sizeof(ddqinfo));
1211 
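	/*
	 * Sketch of the minimal v2r1 quota tree laid out by the magic
	 * offsets below (assuming the standard 1KB quota block size):
	 * block 0 holds the header and info written above, blocks 1-4 form
	 * the radix-tree path where the first entry of each block points to
	 * the next block, and block 5 is the data block: offset 8 of its
	 * header is the in-use entry count, and the single dqblk entry is
	 * copied in right after that 16-byte header (offset 5136).
	 */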
1212 	filebuf[1024] = 2;
1213 	filebuf[2048] = 3;
1214 	filebuf[3072] = 4;
1215 	filebuf[4096] = 5;
1216 
1217 	filebuf[5120 + 8] = 1;
1218 
1219 	dqblk.dqb_id = raw_id;
1220 	dqblk.dqb_pad = cpu_to_le32(0);
1221 	dqblk.dqb_ihardlimit = cpu_to_le64(0);
1222 	dqblk.dqb_isoftlimit = cpu_to_le64(0);
1223 	if (c.lpf_ino)
1224 		dqblk.dqb_curinodes = cpu_to_le64(2);
1225 	else
1226 		dqblk.dqb_curinodes = cpu_to_le64(1);
1227 	dqblk.dqb_bhardlimit = cpu_to_le64(0);
1228 	dqblk.dqb_bsoftlimit = cpu_to_le64(0);
1229 	if (c.lpf_ino)
1230 		dqblk.dqb_curspace = cpu_to_le64(8192);
1231 	else
1232 		dqblk.dqb_curspace = cpu_to_le64(4096);
1233 	dqblk.dqb_btime = cpu_to_le64(0);
1234 	dqblk.dqb_itime = cpu_to_le64(0);
1235 
1236 	memcpy(filebuf + 5136, &dqblk, sizeof(struct v2r1_disk_dqblk));
1237 
1238 	/* Write two blocks */
1239 	if (dev_write_block(filebuf, blkaddr) ||
1240 	    dev_write_block(filebuf + F2FS_BLKSIZE, blkaddr + 1)) {
1241 		MSG(1, "\tError: While writing the quota_blk to disk!!!\n");
1242 		free(filebuf);
1243 		return -1;
1244 	}
1245 	DBG(1, "\tWriting quota data, at offset %08x, %08x\n",
1246 					blkaddr, blkaddr + 1);
1247 	free(filebuf);
1248 	c.quota_dnum += QUOTA_DATA(qtype);
1249 	return 0;
1250 }
1251 
1252 static int f2fs_write_qf_inode(int qtype)
1253 {
1254 	struct f2fs_node *raw_node = NULL;
1255 	u_int64_t data_blk_nor;
1256 	u_int64_t main_area_node_seg_blk_offset = 0;
1257 	__le32 raw_id;
1258 	int i;
1259 
1260 	raw_node = calloc(F2FS_BLKSIZE, 1);
1261 	if (raw_node == NULL) {
1262 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1263 		return -1;
1264 	}
1265 
1266 	raw_node->footer.nid = sb->qf_ino[qtype];
1267 	raw_node->footer.ino = sb->qf_ino[qtype];
1268 	raw_node->footer.cp_ver = cpu_to_le64(1);
1269 	raw_node->footer.next_blkaddr = cpu_to_le32(
1270 			get_sb(main_blkaddr) +
1271 			c.cur_seg[CURSEG_HOT_NODE] *
1272 			c.blks_per_seg + 1 + qtype + 1);
1273 
1274 	raw_node->i.i_mode = cpu_to_le16(0x8180);	/* regular file, octal 0100600 */
1275 	raw_node->i.i_links = cpu_to_le32(1);
1276 	raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1277 	raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1278 
1279 	raw_node->i.i_size = cpu_to_le64(1024 * 6); /* six 1KB quota blocks; see f2fs_write_default_quota() */
1280 	raw_node->i.i_blocks = cpu_to_le64(1 + QUOTA_DATA(qtype));
1281 
1282 	raw_node->i.i_atime = cpu_to_le32(time(NULL));
1283 	raw_node->i.i_atime_nsec = 0;
1284 	raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1285 	raw_node->i.i_ctime_nsec = 0;
1286 	raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1287 	raw_node->i.i_mtime_nsec = 0;
1288 	raw_node->i.i_generation = 0;
1289 	raw_node->i.i_xattr_nid = 0;
1290 	raw_node->i.i_flags = FS_IMMUTABLE_FL;
1291 	raw_node->i.i_current_depth = cpu_to_le32(0);
1292 	raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1293 
1294 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1295 		raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1296 		raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
1297 	}
1298 
1299 	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1300 		raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1301 
1302 	data_blk_nor = get_sb(main_blkaddr) +
1303 		c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg + 1;
1304 
1305 	for (i = 0; i < qtype; i++)
1306 		if (sb->qf_ino[i])
1307 			data_blk_nor += QUOTA_DATA(i);
1308 	if (qtype == 0)
1309 		raw_id = raw_node->i.i_uid;
1310 	else if (qtype == 1)
1311 		raw_id = raw_node->i.i_gid;
1312 	else if (qtype == 2)
1313 		raw_id = raw_node->i.i_projid;
1314 	else
1315 		ASSERT(0);
1316 
1317 	/* write two blocks */
1318 	if (f2fs_write_default_quota(qtype, data_blk_nor, raw_id)) {
1319 		free(raw_node);
1320 		return -1;
1321 	}
1322 
1323 	for (i = 0; i < QUOTA_DATA(qtype); i++)
1324 		raw_node->i.i_addr[get_extra_isize(raw_node) + i] =
1325 					cpu_to_le32(data_blk_nor + i);
1326 	raw_node->i.i_ext.fofs = 0;
1327 	raw_node->i.i_ext.blk_addr = 0;
1328 	raw_node->i.i_ext.len = 0;
1329 
1330 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1331 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1332 					c.blks_per_seg + qtype + 1;
1333 
1334 	DBG(1, "\tWriting quota inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1335 			get_sb(main_blkaddr),
1336 			c.cur_seg[CURSEG_HOT_NODE],
1337 			c.blks_per_seg, main_area_node_seg_blk_offset);
1338 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1339 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1340 		free(raw_node);
1341 		return -1;
1342 	}
1343 
1344 	free(raw_node);
1345 	c.quota_inum++;
1346 	return 0;
1347 }
1348 
1349 static int f2fs_update_nat_root(void)
1350 {
1351 	struct f2fs_nat_block *nat_blk = NULL;
1352 	u_int64_t nat_seg_blk_offset = 0;
1353 	enum quota_type qtype;
1354 	int i;
1355 
1356 	nat_blk = calloc(F2FS_BLKSIZE, 1);
1357 	if (nat_blk == NULL) {
1358 		MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
1359 		return -1;
1360 	}
1361 
1362 	/* update quota */
1363 	for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
1364 		if (sb->qf_ino[qtype] == 0)
1365 			continue;
1366 		nat_blk->entries[sb->qf_ino[qtype]].block_addr =
1367 				cpu_to_le32(get_sb(main_blkaddr) +
1368 				c.cur_seg[CURSEG_HOT_NODE] *
1369 				c.blks_per_seg + i + 1);
1370 		nat_blk->entries[sb->qf_ino[qtype]].ino = sb->qf_ino[qtype];
1371 		i++;
1372 	}
1373 
1374 	/* update root */
1375 	nat_blk->entries[get_sb(root_ino)].block_addr = cpu_to_le32(
1376 		get_sb(main_blkaddr) +
1377 		c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg);
1378 	nat_blk->entries[get_sb(root_ino)].ino = sb->root_ino;
1379 
1380 	/* update node nat */
1381 	nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
1382 	nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;
1383 
1384 	/* update meta nat */
1385 	nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
1386 	nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;
1387 
1388 	nat_seg_blk_offset = get_sb(nat_blkaddr);
1389 
1390 	DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
1391 					nat_seg_blk_offset);
1392 	if (dev_write_block(nat_blk, nat_seg_blk_offset)) {
1393 		MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
1394 		free(nat_blk);
1395 		return -1;
1396 	}
1397 
1398 	free(nat_blk);
1399 	return 0;
1400 }
1401 
1402 static block_t f2fs_add_default_dentry_lpf(void)
1403 {
1404 	struct f2fs_dentry_block *dent_blk;
1405 	uint64_t data_blk_offset;
1406 
1407 	dent_blk = calloc(F2FS_BLKSIZE, 1);
1408 	if (dent_blk == NULL) {
1409 		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1410 		return 0;
1411 	}
1412 
1413 	dent_blk->dentry[0].hash_code = 0;
1414 	dent_blk->dentry[0].ino = cpu_to_le32(c.lpf_ino);
1415 	dent_blk->dentry[0].name_len = cpu_to_le16(1);
1416 	dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1417 	memcpy(dent_blk->filename[0], ".", 1);
1418 
1419 	dent_blk->dentry[1].hash_code = 0;
1420 	dent_blk->dentry[1].ino = sb->root_ino;
1421 	dent_blk->dentry[1].name_len = cpu_to_le16(2);
1422 	dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1423 	memcpy(dent_blk->filename[1], "..", 2);
1424 
1425 	test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1426 	test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1427 
1428 	data_blk_offset = get_sb(main_blkaddr);
1429 	data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg +
1430 		1 + c.quota_dnum;
1431 
1432 	DBG(1, "\tWriting default dentry lost+found, at offset 0x%08"PRIx64"\n",
1433 			data_blk_offset);
1434 	if (dev_write_block(dent_blk, data_blk_offset)) {
1435 		MSG(1, "\tError While writing the dentry_blk to disk!!!\n");
1436 		free(dent_blk);
1437 		return 0;
1438 	}
1439 
1440 	free(dent_blk);
1441 	c.lpf_dnum++;
1442 	return data_blk_offset;
1443 }
1444 
1445 static int f2fs_write_lpf_inode(void)
1446 {
1447 	struct f2fs_node *raw_node;
1448 	u_int64_t blk_size_bytes, main_area_node_seg_blk_offset;
1449 	block_t data_blk_nor;
1450 	int err = 0;
1451 
1452 	ASSERT(c.lpf_ino);
1453 
1454 	raw_node = calloc(F2FS_BLKSIZE, 1);
1455 	if (raw_node == NULL) {
1456 		MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1457 		return -1;
1458 	}
1459 
1460 	raw_node->footer.nid = cpu_to_le32(c.lpf_ino);
1461 	raw_node->footer.ino = raw_node->footer.nid;
1462 	raw_node->footer.cp_ver = cpu_to_le64(1);
1463 	raw_node->footer.next_blkaddr = cpu_to_le32(
1464 			get_sb(main_blkaddr) +
1465 			c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg +
1466 			1 + c.quota_inum + 1);
1467 
1468 	raw_node->i.i_mode = cpu_to_le16(0x41c0); /* 0700 */
1469 	raw_node->i.i_links = cpu_to_le32(2);
1470 	raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1471 	raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1472 
1473 	blk_size_bytes = 1 << get_sb(log_blocksize);
1474 	raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes);
1475 	raw_node->i.i_blocks = cpu_to_le64(2);
1476 
1477 	raw_node->i.i_atime = cpu_to_le32(time(NULL));
1478 	raw_node->i.i_atime_nsec = 0;
1479 	raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1480 	raw_node->i.i_ctime_nsec = 0;
1481 	raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1482 	raw_node->i.i_mtime_nsec = 0;
1483 	raw_node->i.i_generation = 0;
1484 	raw_node->i.i_xattr_nid = 0;
1485 	raw_node->i.i_flags = 0;
1486 	raw_node->i.i_pino = sb->root_ino;	/* both are on-disk __le32 values */
1487 	raw_node->i.i_namelen = cpu_to_le32(strlen(LPF));
1488 	memcpy(raw_node->i.i_name, LPF, strlen(LPF));
1489 	raw_node->i.i_current_depth = cpu_to_le32(1);
1490 	raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1491 
1492 	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1493 		raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1494 		raw_node->i.i_extra_isize = cpu_to_le16(calc_extra_isize());
1495 	}
1496 
1497 	if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1498 		raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1499 
1500 	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1501 		raw_node->i.i_crtime = cpu_to_le32(time(NULL));
1502 		raw_node->i.i_crtime_nsec = 0;
1503 	}
1504 
1505 	if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
1506 		raw_node->i.i_compress_algrithm = 0;
1507 		raw_node->i.i_log_cluster_size = 0;
1508 		raw_node->i.i_padding = 0;
1509 	}
1510 
1511 	data_blk_nor = f2fs_add_default_dentry_lpf();
1512 	if (data_blk_nor == 0) {
1513 		MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
1514 		err = -1;
1515 		goto exit;
1516 	}
1517 	raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1518 
1519 	main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1520 	main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1521 		c.blks_per_seg + c.quota_inum + 1;
1522 
1523 	DBG(1, "\tWriting lost+found inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1524 			get_sb(main_blkaddr),
1525 			c.cur_seg[CURSEG_HOT_NODE],
1526 			c.blks_per_seg, main_area_node_seg_blk_offset);
1527 	if (write_inode(raw_node, main_area_node_seg_blk_offset) < 0) {
1528 		MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1529 		err = -1;
1530 		goto exit;
1531 	}
1532 
1533 	c.lpf_inum++;
1534 exit:
1535 	free(raw_node);
1536 	return err;
1537 }
1538 
1539 static int f2fs_add_default_dentry_root(void)
1540 {
1541 	struct f2fs_dentry_block *dent_blk = NULL;
1542 	u_int64_t data_blk_offset = 0;
1543 
1544 	dent_blk = calloc(F2FS_BLKSIZE, 1);
1545 	if (dent_blk == NULL) {
1546 		MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1547 		return -1;
1548 	}
1549 
1550 	dent_blk->dentry[0].hash_code = 0;
1551 	dent_blk->dentry[0].ino = sb->root_ino;
1552 	dent_blk->dentry[0].name_len = cpu_to_le16(1);
1553 	dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1554 	memcpy(dent_blk->filename[0], ".", 1);
1555 
1556 	dent_blk->dentry[1].hash_code = 0;
1557 	dent_blk->dentry[1].ino = sb->root_ino;
1558 	dent_blk->dentry[1].name_len = cpu_to_le16(2);
1559 	dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1560 	memcpy(dent_blk->filename[1], "..", 2);
1561 
1562 	/* bitmap for . and .. */
1563 	test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1564 	test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1565 
1566 	if (c.lpf_ino) {
1567 		int len = strlen(LPF);
1568 		f2fs_hash_t hash = f2fs_dentry_hash(0, 0, (unsigned char *)LPF, len);
1569 
1570 		dent_blk->dentry[2].hash_code = cpu_to_le32(hash);
1571 		dent_blk->dentry[2].ino = cpu_to_le32(c.lpf_ino);
1572 		dent_blk->dentry[2].name_len = cpu_to_le16(len);
1573 		dent_blk->dentry[2].file_type = F2FS_FT_DIR;
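		/*
		 * "lost+found" is longer than F2FS_SLOT_LEN (8 bytes), so the
		 * name spills into a second dentry slot; both slots' bitmap
		 * bits are set below.
		 */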
1574 		memcpy(dent_blk->filename[2], LPF, F2FS_SLOT_LEN);
1575 
1576 		memcpy(dent_blk->filename[3], LPF + F2FS_SLOT_LEN,
1577 				len - F2FS_SLOT_LEN);
1578 
1579 		test_and_set_bit_le(2, dent_blk->dentry_bitmap);
1580 		test_and_set_bit_le(3, dent_blk->dentry_bitmap);
1581 	}
1582 
1583 	data_blk_offset = get_sb(main_blkaddr);
1584 	data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] *
1585 				c.blks_per_seg;
1586 
1587 	DBG(1, "\tWriting default dentry root, at offset 0x%08"PRIx64"\n",
1588 				data_blk_offset);
1589 	if (dev_write_block(dent_blk, data_blk_offset)) {
1590 		MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
1591 		free(dent_blk);
1592 		return -1;
1593 	}
1594 
1595 	free(dent_blk);
1596 	return 0;
1597 }
1598 
1599 static int f2fs_create_root_dir(void)
1600 {
1601 	enum quota_type qtype;
1602 	int err = 0;
1603 
1604 	err = f2fs_write_root_inode();
1605 	if (err < 0) {
1606 		MSG(1, "\tError: Failed to write root inode!!!\n");
1607 		goto exit;
1608 	}
1609 
1610 	for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++)  {
1611 		if (sb->qf_ino[qtype] == 0)
1612 			continue;
1613 		err = f2fs_write_qf_inode(qtype);
1614 		if (err < 0) {
1615 			MSG(1, "\tError: Failed to write quota inode!!!\n");
1616 			goto exit;
1617 		}
1618 	}
1619 
1620 	if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
1621 		err = f2fs_write_lpf_inode();
1622 		if (err < 0) {
1623 			MSG(1, "\tError: Failed to write lost+found inode!!!\n");
1624 			goto exit;
1625 		}
1626 	}
1627 
1628 #ifndef WITH_ANDROID
1629 	err = f2fs_discard_obsolete_dnode();
1630 	if (err < 0) {
1631 		MSG(1, "\tError: Failed to discard obsolete dnode!!!\n");
1632 		goto exit;
1633 	}
1634 #endif
1635 
1636 	err = f2fs_update_nat_root();
1637 	if (err < 0) {
1638 		MSG(1, "\tError: Failed to update NAT for root!!!\n");
1639 		goto exit;
1640 	}
1641 
1642 	err = f2fs_add_default_dentry_root();
1643 	if (err < 0) {
1644 		MSG(1, "\tError: Failed to add default dentries for root!!!\n");
1645 		goto exit;
1646 	}
1647 exit:
1648 	if (err)
1649 		MSG(1, "\tError: Could not create the root directory!!!\n");
1650 
1651 	return err;
1652 }
1653 
1654 int f2fs_format_device(void)
1655 {
1656 	int err = 0;
1657 
1658 	err = f2fs_prepare_super_block();
1659 	if (err < 0) {
1660 		MSG(0, "\tError: Failed to prepare a super block!!!\n");
1661 		goto exit;
1662 	}
1663 
1664 	if (c.trim) {
1665 		err = f2fs_trim_devices();
1666 		if (err < 0) {
1667 			MSG(0, "\tError: Failed to trim whole device!!!\n");
1668 			goto exit;
1669 		}
1670 	}
1671 
1672 	err = f2fs_init_sit_area();
1673 	if (err < 0) {
1674 		MSG(0, "\tError: Failed to initialise the SIT AREA!!!\n");
1675 		goto exit;
1676 	}
1677 
1678 	err = f2fs_init_nat_area();
1679 	if (err < 0) {
1680 		MSG(0, "\tError: Failed to initialise the NAT AREA!!!\n");
1681 		goto exit;
1682 	}
1683 
1684 	err = f2fs_create_root_dir();
1685 	if (err < 0) {
1686 		MSG(0, "\tError: Failed to create the root directory!!!\n");
1687 		goto exit;
1688 	}
1689 
1690 	err = f2fs_write_check_point_pack();
1691 	if (err < 0) {
1692 		MSG(0, "\tError: Failed to write the check point pack!!!\n");
1693 		goto exit;
1694 	}
1695 
1696 	err = f2fs_write_super_block();
1697 	if (err < 0) {
1698 		MSG(0, "\tError: Failed to write the super block!!!\n");
1699 		goto exit;
1700 	}
1701 exit:
1702 	if (err)
1703 		MSG(0, "\tError: Could not format the device!!!\n");
1704 
1705 	return err;
1706 }
1707